1 /* 2 * Copyright © 2008-2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * 26 */ 27 28 #include <drm/drmP.h> 29 #include <drm/drm_vma_manager.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include "i915_gem_clflush.h" 33 #include "i915_vgpu.h" 34 #include "i915_trace.h" 35 #include "intel_drv.h" 36 #include "intel_frontbuffer.h" 37 #include "intel_mocs.h" 38 #include <linux/dma-fence-array.h> 39 #include <linux/kthread.h> 40 #include <linux/reservation.h> 41 #include <linux/shmem_fs.h> 42 #include <linux/slab.h> 43 #include <linux/stop_machine.h> 44 #include <linux/swap.h> 45 #include <linux/pci.h> 46 #include <linux/dma-buf.h> 47 #include <linux/swiotlb.h> 48 49 #include <sys/mman.h> 50 #include <vm/vm_map.h> 51 #include <vm/vm_param.h> 52 53 static void i915_gem_flush_free_objects(struct drm_i915_private *i915); 54 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 55 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 56 57 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) 58 { 59 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 60 return false; 61 62 if (!i915_gem_object_is_coherent(obj)) 63 return true; 64 65 return obj->pin_display; 66 } 67 68 static int 69 insert_mappable_node(struct i915_ggtt *ggtt, 70 struct drm_mm_node *node, u32 size) 71 { 72 memset(node, 0, sizeof(*node)); 73 return drm_mm_insert_node_in_range(&ggtt->base.mm, node, 74 size, 0, I915_COLOR_UNEVICTABLE, 75 0, ggtt->mappable_end, 76 DRM_MM_INSERT_LOW); 77 } 78 79 static void 80 remove_mappable_node(struct drm_mm_node *node) 81 { 82 drm_mm_remove_node(node); 83 } 84 85 /* some bookkeeping */ 86 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 87 u64 size) 88 { 89 lockmgr(&dev_priv->mm.object_stat_lock, LK_EXCLUSIVE); 90 dev_priv->mm.object_count++; 91 dev_priv->mm.object_memory += size; 92 lockmgr(&dev_priv->mm.object_stat_lock, LK_RELEASE); 93 } 94 95 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, 96 u64 size) 97 { 98 lockmgr(&dev_priv->mm.object_stat_lock, LK_EXCLUSIVE); 99 dev_priv->mm.object_count--; 100 dev_priv->mm.object_memory -= size; 101 lockmgr(&dev_priv->mm.object_stat_lock, LK_RELEASE); 102 } 103 104 static int 105 i915_gem_wait_for_error(struct i915_gpu_error *error) 106 { 107 int ret; 108 109 might_sleep(); 
110 111 /* 112 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 113 * userspace. If it takes that long something really bad is going on and 114 * we should simply try to bail out and fail as gracefully as possible. 115 */ 116 ret = wait_event_interruptible_timeout(error->reset_queue, 117 !i915_reset_backoff(error), 118 I915_RESET_TIMEOUT); 119 if (ret == 0) { 120 DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); 121 return -EIO; 122 } else if (ret < 0) { 123 return ret; 124 } else { 125 return 0; 126 } 127 } 128 129 int i915_mutex_lock_interruptible(struct drm_device *dev) 130 { 131 struct drm_i915_private *dev_priv = to_i915(dev); 132 int ret; 133 134 ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 135 if (ret) 136 return ret; 137 138 ret = mutex_lock_interruptible(&dev->struct_mutex); 139 if (ret) 140 return ret; 141 142 return 0; 143 } 144 145 int 146 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 147 struct drm_file *file) 148 { 149 struct drm_i915_private *dev_priv = to_i915(dev); 150 struct i915_ggtt *ggtt = &dev_priv->ggtt; 151 struct drm_i915_gem_get_aperture *args = data; 152 struct i915_vma *vma; 153 size_t pinned; 154 155 pinned = 0; 156 mutex_lock(&dev->struct_mutex); 157 list_for_each_entry(vma, &ggtt->base.active_list, vm_link) 158 if (i915_vma_is_pinned(vma)) 159 pinned += vma->node.size; 160 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) 161 if (i915_vma_is_pinned(vma)) 162 pinned += vma->node.size; 163 mutex_unlock(&dev->struct_mutex); 164 165 args->aper_size = ggtt->base.total; 166 args->aper_available_size = args->aper_size - pinned; 167 168 return 0; 169 } 170 171 static struct sg_table * 172 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) 173 { 174 #if 0 175 struct address_space *mapping = obj->base.filp->f_mapping; 176 #else 177 vm_object_t vm_obj = obj->base.filp; 178 #endif 179 drm_dma_handle_t *phys; 180 struct sg_table *st; 181 struct scatterlist *sg; 182 char *vaddr; 183 int i; 184 185 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) 186 return ERR_PTR(-EINVAL); 187 188 /* Always aligning to the object size, allows a single allocation 189 * to handle all possible callers, and given typical object sizes, 190 * the alignment of the buddy allocation will naturally match. 
191 */ 192 phys = drm_pci_alloc(obj->base.dev, 193 obj->base.size, 194 roundup_pow_of_two(obj->base.size)); 195 if (!phys) 196 return ERR_PTR(-ENOMEM); 197 198 vaddr = phys->vaddr; 199 VM_OBJECT_LOCK(vm_obj); 200 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 201 struct page *page; 202 char *src; 203 204 #if 0 205 page = shmem_read_mapping_page(mapping, i); 206 #else 207 page = shmem_read_mapping_page(vm_obj, i); 208 #endif 209 if (IS_ERR(page)) { 210 st = ERR_CAST(page); 211 goto err_phys; 212 } 213 214 src = kmap_atomic(page); 215 memcpy(vaddr, src, PAGE_SIZE); 216 drm_clflush_virt_range(vaddr, PAGE_SIZE); 217 kunmap_atomic(src); 218 219 put_page(page); 220 vaddr += PAGE_SIZE; 221 } 222 VM_OBJECT_UNLOCK(vm_obj); 223 224 i915_gem_chipset_flush(to_i915(obj->base.dev)); 225 226 st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL); 227 if (!st) { 228 st = ERR_PTR(-ENOMEM); 229 goto err_phys; 230 } 231 232 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 233 kfree(st); 234 st = ERR_PTR(-ENOMEM); 235 goto err_phys; 236 } 237 238 sg = st->sgl; 239 sg->offset = 0; 240 sg->length = obj->base.size; 241 242 sg_dma_address(sg) = phys->busaddr; 243 sg_dma_len(sg) = obj->base.size; 244 245 obj->phys_handle = phys; 246 return st; 247 248 err_phys: 249 drm_pci_free(obj->base.dev, phys); 250 return st; 251 } 252 253 static void 254 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, 255 struct sg_table *pages, 256 bool needs_clflush) 257 { 258 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); 259 260 if (obj->mm.madv == I915_MADV_DONTNEED) 261 obj->mm.dirty = false; 262 263 if (needs_clflush && 264 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && 265 !i915_gem_object_is_coherent(obj)) 266 drm_clflush_sg(pages); 267 268 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 269 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 270 } 271 272 static void 273 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, 274 struct sg_table *pages) 275 { 276 __i915_gem_object_release_shmem(obj, pages, false); 277 278 if (obj->mm.dirty) { 279 #if 0 280 struct address_space *mapping = obj->base.filp->f_mapping; 281 #else 282 vm_object_t vm_obj = obj->base.filp; 283 #endif 284 char *vaddr = obj->phys_handle->vaddr; 285 int i; 286 287 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 288 struct page *page; 289 char *dst; 290 291 page = shmem_read_mapping_page(vm_obj, i); 292 if (IS_ERR(page)) 293 continue; 294 295 dst = kmap_atomic(page); 296 drm_clflush_virt_range(vaddr, PAGE_SIZE); 297 memcpy(dst, vaddr, PAGE_SIZE); 298 kunmap_atomic(dst); 299 300 set_page_dirty(page); 301 if (obj->mm.madv == I915_MADV_WILLNEED) 302 mark_page_accessed(page); 303 put_page(page); 304 vaddr += PAGE_SIZE; 305 } 306 obj->mm.dirty = false; 307 } 308 309 sg_free_table(pages); 310 kfree(pages); 311 312 drm_pci_free(obj->base.dev, obj->phys_handle); 313 } 314 315 static void 316 i915_gem_object_release_phys(struct drm_i915_gem_object *obj) 317 { 318 i915_gem_object_unpin_pages(obj); 319 } 320 321 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { 322 .get_pages = i915_gem_object_get_pages_phys, 323 .put_pages = i915_gem_object_put_pages_phys, 324 .release = i915_gem_object_release_phys, 325 }; 326 327 static const struct drm_i915_gem_object_ops i915_gem_object_ops; 328 329 int i915_gem_object_unbind(struct drm_i915_gem_object *obj) 330 { 331 struct i915_vma *vma; 332 LINUX_LIST_HEAD(still_in_list); 333 int ret; 334 335 lockdep_assert_held(&obj->base.dev->struct_mutex); 336 337 /* Closed vma are removed from the obj->vma_list - 
but they may 338 * still have an active binding on the object. To remove those we 339 * must wait for all rendering to complete to the object (as unbinding 340 * must anyway), and retire the requests. 341 */ 342 ret = i915_gem_object_wait(obj, 343 I915_WAIT_INTERRUPTIBLE | 344 I915_WAIT_LOCKED | 345 I915_WAIT_ALL, 346 MAX_SCHEDULE_TIMEOUT, 347 NULL); 348 if (ret) 349 return ret; 350 351 i915_gem_retire_requests(to_i915(obj->base.dev)); 352 353 while ((vma = list_first_entry_or_null(&obj->vma_list, 354 struct i915_vma, 355 obj_link))) { 356 list_move_tail(&vma->obj_link, &still_in_list); 357 ret = i915_vma_unbind(vma); 358 if (ret) 359 break; 360 } 361 list_splice(&still_in_list, &obj->vma_list); 362 363 return ret; 364 } 365 366 static long 367 i915_gem_object_wait_fence(struct dma_fence *fence, 368 unsigned int flags, 369 long timeout, 370 struct intel_rps_client *rps) 371 { 372 struct drm_i915_gem_request *rq; 373 374 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); 375 376 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 377 return timeout; 378 379 if (!dma_fence_is_i915(fence)) 380 return dma_fence_wait_timeout(fence, 381 flags & I915_WAIT_INTERRUPTIBLE, 382 timeout); 383 384 rq = to_request(fence); 385 if (i915_gem_request_completed(rq)) 386 goto out; 387 388 /* This client is about to stall waiting for the GPU. In many cases 389 * this is undesirable and limits the throughput of the system, as 390 * many clients cannot continue processing user input/output whilst 391 * blocked. RPS autotuning may take tens of milliseconds to respond 392 * to the GPU load and thus incurs additional latency for the client. 393 * We can circumvent that by promoting the GPU frequency to maximum 394 * before we wait. This makes the GPU throttle up much more quickly 395 * (good for benchmarks and user experience, e.g. window animations), 396 * but at a cost of spending more power processing the workload 397 * (bad for battery). Not all clients even want their results 398 * immediately and for them we should just let the GPU select its own 399 * frequency to maximise efficiency. To prevent a single client from 400 * forcing the clocks too high for the whole system, we only allow 401 * each client to waitboost once in a busy period. 402 */ 403 if (rps) { 404 if (INTEL_GEN(rq->i915) >= 6) 405 gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies); 406 else 407 rps = NULL; 408 } 409 410 timeout = i915_wait_request(rq, flags, timeout); 411 412 out: 413 if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq)) 414 i915_gem_request_retire_upto(rq); 415 416 if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) { 417 /* The GPU is now idle and this client has stalled. 418 * Since no other client has submitted a request in the 419 * meantime, assume that this client is the only one 420 * supplying work to the GPU but is unable to keep that 421 * work supplied because it is waiting. Since the GPU is 422 * then never kept fully busy, RPS autoclocking will 423 * keep the clocks relatively low, causing further delays. 424 * Compensate by giving the synchronous client credit for 425 * a waitboost next time. 
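	 * Emptying rps->link below is, as far as this code shows, what re-arms
	 * the once-per-busy-period waitboost limit for this client.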
426 */ 427 lockmgr(&rq->i915->rps.client_lock, LK_EXCLUSIVE); 428 list_del_init(&rps->link); 429 lockmgr(&rq->i915->rps.client_lock, LK_RELEASE); 430 } 431 432 return timeout; 433 } 434 435 static long 436 i915_gem_object_wait_reservation(struct reservation_object *resv, 437 unsigned int flags, 438 long timeout, 439 struct intel_rps_client *rps) 440 { 441 unsigned int seq = __read_seqcount_begin(&resv->seq); 442 struct dma_fence *excl; 443 bool prune_fences = false; 444 445 if (flags & I915_WAIT_ALL) { 446 struct dma_fence **shared; 447 unsigned int count, i; 448 int ret; 449 450 ret = reservation_object_get_fences_rcu(resv, 451 &excl, &count, &shared); 452 if (ret) 453 return ret; 454 455 for (i = 0; i < count; i++) { 456 timeout = i915_gem_object_wait_fence(shared[i], 457 flags, timeout, 458 rps); 459 if (timeout < 0) 460 break; 461 462 dma_fence_put(shared[i]); 463 } 464 465 for (; i < count; i++) 466 dma_fence_put(shared[i]); 467 kfree(shared); 468 469 prune_fences = count && timeout >= 0; 470 } else { 471 excl = reservation_object_get_excl_rcu(resv); 472 } 473 474 if (excl && timeout >= 0) { 475 timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); 476 prune_fences = timeout >= 0; 477 } 478 479 dma_fence_put(excl); 480 481 /* Oportunistically prune the fences iff we know they have *all* been 482 * signaled and that the reservation object has not been changed (i.e. 483 * no new fences have been added). 484 */ 485 if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { 486 if (reservation_object_trylock(resv)) { 487 if (!__read_seqcount_retry(&resv->seq, seq)) 488 reservation_object_add_excl_fence(resv, NULL); 489 reservation_object_unlock(resv); 490 } 491 } 492 493 return timeout; 494 } 495 496 static void __fence_set_priority(struct dma_fence *fence, int prio) 497 { 498 struct drm_i915_gem_request *rq; 499 struct intel_engine_cs *engine; 500 501 if (!dma_fence_is_i915(fence)) 502 return; 503 504 rq = to_request(fence); 505 engine = rq->engine; 506 if (!engine->schedule) 507 return; 508 509 engine->schedule(rq, prio); 510 } 511 512 static void fence_set_priority(struct dma_fence *fence, int prio) 513 { 514 /* Recurse once into a fence-array */ 515 if (dma_fence_is_array(fence)) { 516 struct dma_fence_array *array = to_dma_fence_array(fence); 517 int i; 518 519 for (i = 0; i < array->num_fences; i++) 520 __fence_set_priority(array->fences[i], prio); 521 } else { 522 __fence_set_priority(fence, prio); 523 } 524 } 525 526 int 527 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 528 unsigned int flags, 529 int prio) 530 { 531 struct dma_fence *excl; 532 533 if (flags & I915_WAIT_ALL) { 534 struct dma_fence **shared; 535 unsigned int count, i; 536 int ret; 537 538 ret = reservation_object_get_fences_rcu(obj->resv, 539 &excl, &count, &shared); 540 if (ret) 541 return ret; 542 543 for (i = 0; i < count; i++) { 544 fence_set_priority(shared[i], prio); 545 dma_fence_put(shared[i]); 546 } 547 548 kfree(shared); 549 } else { 550 excl = reservation_object_get_excl_rcu(obj->resv); 551 } 552 553 if (excl) { 554 fence_set_priority(excl, prio); 555 dma_fence_put(excl); 556 } 557 return 0; 558 } 559 560 /** 561 * Waits for rendering to the object to be completed 562 * @obj: i915 gem object 563 * @flags: how to wait (under a lock, for all rendering or just for writes etc) 564 * @timeout: how long to wait 565 * @rps: client (user process) to charge for any waitboosting 566 */ 567 int 568 i915_gem_object_wait(struct drm_i915_gem_object *obj, 569 unsigned int flags, 570 long 
timeout, 571 struct intel_rps_client *rps) 572 { 573 might_sleep(); 574 #if IS_ENABLED(CONFIG_LOCKDEP) 575 GEM_BUG_ON(debug_locks && 576 !!lockdep_is_held(&obj->base.dev->struct_mutex) != 577 !!(flags & I915_WAIT_LOCKED)); 578 #endif 579 GEM_BUG_ON(timeout < 0); 580 581 timeout = i915_gem_object_wait_reservation(obj->resv, 582 flags, timeout, 583 rps); 584 return timeout < 0 ? timeout : 0; 585 } 586 587 static struct intel_rps_client *to_rps_client(struct drm_file *file) 588 { 589 struct drm_i915_file_private *fpriv = file->driver_priv; 590 591 return &fpriv->rps; 592 } 593 594 int 595 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 596 int align) 597 { 598 int ret; 599 600 if (align > obj->base.size) 601 return -EINVAL; 602 603 if (obj->ops == &i915_gem_phys_ops) 604 return 0; 605 606 if (obj->mm.madv != I915_MADV_WILLNEED) 607 return -EFAULT; 608 609 if (obj->base.filp == NULL) 610 return -EINVAL; 611 612 ret = i915_gem_object_unbind(obj); 613 if (ret) 614 return ret; 615 616 __i915_gem_object_put_pages(obj, I915_MM_NORMAL); 617 if (obj->mm.pages) 618 return -EBUSY; 619 620 GEM_BUG_ON(obj->ops != &i915_gem_object_ops); 621 obj->ops = &i915_gem_phys_ops; 622 623 ret = i915_gem_object_pin_pages(obj); 624 if (ret) 625 goto err_xfer; 626 627 return 0; 628 629 err_xfer: 630 obj->ops = &i915_gem_object_ops; 631 return ret; 632 } 633 634 static int 635 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, 636 struct drm_i915_gem_pwrite *args, 637 struct drm_file *file) 638 { 639 void *vaddr = obj->phys_handle->vaddr + args->offset; 640 char __user *user_data = u64_to_user_ptr(args->data_ptr); 641 642 /* We manually control the domain here and pretend that it 643 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 644 */ 645 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 646 if (copy_from_user(vaddr, user_data, args->size)) 647 return -EFAULT; 648 649 drm_clflush_virt_range(vaddr, args->size); 650 i915_gem_chipset_flush(to_i915(obj->base.dev)); 651 652 intel_fb_obj_flush(obj, ORIGIN_CPU); 653 return 0; 654 } 655 656 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv) 657 { 658 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); 659 } 660 661 void i915_gem_object_free(struct drm_i915_gem_object *obj) 662 { 663 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 664 kmem_cache_free(dev_priv->objects, obj); 665 } 666 667 static int 668 i915_gem_create(struct drm_file *file, 669 struct drm_i915_private *dev_priv, 670 uint64_t size, 671 uint32_t *handle_p) 672 { 673 struct drm_i915_gem_object *obj; 674 int ret; 675 u32 handle; 676 677 size = roundup(size, PAGE_SIZE); 678 if (size == 0) 679 return -EINVAL; 680 681 /* Allocate the new object */ 682 obj = i915_gem_object_create(dev_priv, size); 683 if (IS_ERR(obj)) 684 return PTR_ERR(obj); 685 686 ret = drm_gem_handle_create(file, &obj->base, &handle); 687 /* drop reference from allocate - handle holds it now */ 688 i915_gem_object_put(obj); 689 if (ret) 690 return ret; 691 692 *handle_p = handle; 693 return 0; 694 } 695 696 int 697 i915_gem_dumb_create(struct drm_file *file, 698 struct drm_device *dev, 699 struct drm_mode_create_dumb *args) 700 { 701 /* have to work out size/pitch and return them */ 702 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); 703 args->size = args->pitch * args->height; 704 return i915_gem_create(file, to_i915(dev), 705 args->size, &args->handle); 706 } 707 708 /** 709 * Creates a new mm object and returns a handle to it. 
710 * @dev: drm device pointer 711 * @data: ioctl data blob 712 * @file: drm file pointer 713 */ 714 int 715 i915_gem_create_ioctl(struct drm_device *dev, void *data, 716 struct drm_file *file) 717 { 718 struct drm_i915_private *dev_priv = to_i915(dev); 719 struct drm_i915_gem_create *args = data; 720 721 i915_gem_flush_free_objects(dev_priv); 722 723 return i915_gem_create(file, dev_priv, 724 args->size, &args->handle); 725 } 726 727 static inline int 728 __copy_to_user_swizzled(char __user *cpu_vaddr, 729 const char *gpu_vaddr, int gpu_offset, 730 int length) 731 { 732 int ret, cpu_offset = 0; 733 734 while (length > 0) { 735 int cacheline_end = ALIGN(gpu_offset + 1, 64); 736 int this_length = min(cacheline_end - gpu_offset, length); 737 int swizzled_gpu_offset = gpu_offset ^ 64; 738 739 ret = __copy_to_user(cpu_vaddr + cpu_offset, 740 gpu_vaddr + swizzled_gpu_offset, 741 this_length); 742 if (ret) 743 return ret + length; 744 745 cpu_offset += this_length; 746 gpu_offset += this_length; 747 length -= this_length; 748 } 749 750 return 0; 751 } 752 753 static inline int 754 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, 755 const char __user *cpu_vaddr, 756 int length) 757 { 758 int ret, cpu_offset = 0; 759 760 while (length > 0) { 761 int cacheline_end = ALIGN(gpu_offset + 1, 64); 762 int this_length = min(cacheline_end - gpu_offset, length); 763 int swizzled_gpu_offset = gpu_offset ^ 64; 764 765 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, 766 cpu_vaddr + cpu_offset, 767 this_length); 768 if (ret) 769 return ret + length; 770 771 cpu_offset += this_length; 772 gpu_offset += this_length; 773 length -= this_length; 774 } 775 776 return 0; 777 } 778 779 /* 780 * Pins the specified object's pages and synchronizes the object with 781 * GPU accesses. Sets needs_clflush to non-zero if the caller should 782 * flush the object from the CPU cache. 783 */ 784 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 785 unsigned int *needs_clflush) 786 { 787 int ret; 788 789 lockdep_assert_held(&obj->base.dev->struct_mutex); 790 791 *needs_clflush = 0; 792 if (!i915_gem_object_has_struct_page(obj)) 793 return -ENODEV; 794 795 ret = i915_gem_object_wait(obj, 796 I915_WAIT_INTERRUPTIBLE | 797 I915_WAIT_LOCKED, 798 MAX_SCHEDULE_TIMEOUT, 799 NULL); 800 if (ret) 801 return ret; 802 803 ret = i915_gem_object_pin_pages(obj); 804 if (ret) 805 return ret; 806 807 if (i915_gem_object_is_coherent(obj) || 808 !static_cpu_has(X86_FEATURE_CLFLUSH)) { 809 ret = i915_gem_object_set_to_cpu_domain(obj, false); 810 if (ret) 811 goto err_unpin; 812 else 813 goto out; 814 } 815 816 i915_gem_object_flush_gtt_write_domain(obj); 817 818 /* If we're not in the cpu read domain, set ourself into the gtt 819 * read domain and manually flush cachelines (if required). This 820 * optimizes for the case when the gpu will dirty the data 821 * anyway again before the next pread happens. 
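	 * CLFLUSH_BEFORE tells the caller to clflush the range it is about to
	 * read, since the CPU cache may hold stale data for an object last
	 * written through the GTT or by the GPU.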
822 */ 823 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 824 *needs_clflush = CLFLUSH_BEFORE; 825 826 out: 827 /* return with the pages pinned */ 828 return 0; 829 830 err_unpin: 831 i915_gem_object_unpin_pages(obj); 832 return ret; 833 } 834 835 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 836 unsigned int *needs_clflush) 837 { 838 int ret; 839 840 lockdep_assert_held(&obj->base.dev->struct_mutex); 841 842 *needs_clflush = 0; 843 if (!i915_gem_object_has_struct_page(obj)) 844 return -ENODEV; 845 846 ret = i915_gem_object_wait(obj, 847 I915_WAIT_INTERRUPTIBLE | 848 I915_WAIT_LOCKED | 849 I915_WAIT_ALL, 850 MAX_SCHEDULE_TIMEOUT, 851 NULL); 852 if (ret) 853 return ret; 854 855 ret = i915_gem_object_pin_pages(obj); 856 if (ret) 857 return ret; 858 859 if (i915_gem_object_is_coherent(obj) || 860 !static_cpu_has(X86_FEATURE_CLFLUSH)) { 861 ret = i915_gem_object_set_to_cpu_domain(obj, true); 862 if (ret) 863 goto err_unpin; 864 else 865 goto out; 866 } 867 868 i915_gem_object_flush_gtt_write_domain(obj); 869 870 /* If we're not in the cpu write domain, set ourself into the 871 * gtt write domain and manually flush cachelines (as required). 872 * This optimizes for the case when the gpu will use the data 873 * right away and we therefore have to clflush anyway. 874 */ 875 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 876 *needs_clflush |= CLFLUSH_AFTER; 877 878 /* Same trick applies to invalidate partially written cachelines read 879 * before writing. 880 */ 881 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 882 *needs_clflush |= CLFLUSH_BEFORE; 883 884 out: 885 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 886 obj->mm.dirty = true; 887 /* return with the pages pinned */ 888 return 0; 889 890 err_unpin: 891 i915_gem_object_unpin_pages(obj); 892 return ret; 893 } 894 895 static void 896 shmem_clflush_swizzled_range(char *addr, unsigned long length, 897 bool swizzled) 898 { 899 if (unlikely(swizzled)) { 900 unsigned long start = (unsigned long) addr; 901 unsigned long end = (unsigned long) addr + length; 902 903 /* For swizzling simply ensure that we always flush both 904 * channels. Lame, but simple and it works. Swizzled 905 * pwrite/pread is far from a hotpath - current userspace 906 * doesn't use it at all. */ 907 start = round_down(start, 128); 908 end = round_up(end, 128); 909 910 drm_clflush_virt_range((void *)start, end - start); 911 } else { 912 drm_clflush_virt_range(addr, length); 913 } 914 915 } 916 917 /* Only difference to the fast-path function is that this can handle bit17 918 * and uses non-atomic copy and kmap functions. */ 919 static int 920 shmem_pread_slow(struct page *page, int offset, int length, 921 char __user *user_data, 922 bool page_do_bit17_swizzling, bool needs_clflush) 923 { 924 char *vaddr; 925 int ret; 926 927 vaddr = kmap(page); 928 if (needs_clflush) 929 shmem_clflush_swizzled_range(vaddr + offset, length, 930 page_do_bit17_swizzling); 931 932 if (page_do_bit17_swizzling) 933 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length); 934 else 935 ret = __copy_to_user(user_data, vaddr + offset, length); 936 kunmap(page); 937 938 return ret ? 
- EFAULT : 0; 939 } 940 941 static int 942 shmem_pread(struct page *page, int offset, int length, char __user *user_data, 943 bool page_do_bit17_swizzling, bool needs_clflush) 944 { 945 int ret; 946 947 ret = -ENODEV; 948 if (!page_do_bit17_swizzling) { 949 char *vaddr = kmap_atomic(page); 950 951 if (needs_clflush) 952 drm_clflush_virt_range(vaddr + offset, length); 953 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length); 954 kunmap_atomic(vaddr); 955 } 956 if (ret == 0) 957 return 0; 958 959 return shmem_pread_slow(page, offset, length, user_data, 960 page_do_bit17_swizzling, needs_clflush); 961 } 962 963 static int 964 i915_gem_shmem_pread(struct drm_i915_gem_object *obj, 965 struct drm_i915_gem_pread *args) 966 { 967 char __user *user_data; 968 u64 remain; 969 unsigned int obj_do_bit17_swizzling; 970 unsigned int needs_clflush; 971 unsigned int idx, offset; 972 int ret; 973 974 obj_do_bit17_swizzling = 0; 975 if (i915_gem_object_needs_bit17_swizzle(obj)) 976 obj_do_bit17_swizzling = BIT(17); 977 978 ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex); 979 if (ret) 980 return ret; 981 982 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); 983 mutex_unlock(&obj->base.dev->struct_mutex); 984 if (ret) 985 return ret; 986 987 remain = args->size; 988 user_data = u64_to_user_ptr(args->data_ptr); 989 offset = offset_in_page(args->offset); 990 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 991 struct page *page = i915_gem_object_get_page(obj, idx); 992 int length; 993 994 length = remain; 995 if (offset + length > PAGE_SIZE) 996 length = PAGE_SIZE - offset; 997 998 ret = shmem_pread(page, offset, length, user_data, 999 page_to_phys(page) & obj_do_bit17_swizzling, 1000 needs_clflush); 1001 if (ret) 1002 break; 1003 1004 remain -= length; 1005 user_data += length; 1006 offset = 0; 1007 } 1008 1009 i915_gem_obj_finish_shmem_access(obj); 1010 return ret; 1011 } 1012 1013 static inline bool 1014 gtt_user_read(struct io_mapping *mapping, 1015 loff_t base, int offset, 1016 char __user *user_data, int length) 1017 { 1018 void *vaddr; 1019 unsigned long unwritten; 1020 1021 /* We can use the cpu mem copy function because this is X86. 
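	 * Try the atomic WC mapping with __copy_to_user_inatomic() first (it
	 * must not fault); on a short copy, fall back to a regular, sleepable
	 * WC mapping of the page and copy_to_user().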
*/ 1022 vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); 1023 unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length); 1024 io_mapping_unmap_atomic(vaddr); 1025 if (unwritten) { 1026 vaddr = (void __force *) 1027 io_mapping_map_wc(mapping, base, PAGE_SIZE); 1028 unwritten = copy_to_user(user_data, vaddr + offset, length); 1029 io_mapping_unmap(vaddr); 1030 } 1031 return unwritten; 1032 } 1033 1034 static int 1035 i915_gem_gtt_pread(struct drm_i915_gem_object *obj, 1036 const struct drm_i915_gem_pread *args) 1037 { 1038 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1039 struct i915_ggtt *ggtt = &i915->ggtt; 1040 struct drm_mm_node node; 1041 struct i915_vma *vma; 1042 void __user *user_data; 1043 u64 remain, offset; 1044 int ret; 1045 1046 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1047 if (ret) 1048 return ret; 1049 1050 intel_runtime_pm_get(i915); 1051 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1052 PIN_MAPPABLE | PIN_NONBLOCK); 1053 if (!IS_ERR(vma)) { 1054 node.start = i915_ggtt_offset(vma); 1055 node.allocated = false; 1056 ret = i915_vma_put_fence(vma); 1057 if (ret) { 1058 i915_vma_unpin(vma); 1059 vma = ERR_PTR(ret); 1060 } 1061 } 1062 if (IS_ERR(vma)) { 1063 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1064 if (ret) 1065 goto out_unlock; 1066 GEM_BUG_ON(!node.allocated); 1067 } 1068 1069 ret = i915_gem_object_set_to_gtt_domain(obj, false); 1070 if (ret) 1071 goto out_unpin; 1072 1073 mutex_unlock(&i915->drm.struct_mutex); 1074 1075 user_data = u64_to_user_ptr(args->data_ptr); 1076 remain = args->size; 1077 offset = args->offset; 1078 1079 while (remain > 0) { 1080 /* Operation in this page 1081 * 1082 * page_base = page offset within aperture 1083 * page_offset = offset within page 1084 * page_length = bytes to copy for this page 1085 */ 1086 u32 page_base = node.start; 1087 unsigned page_offset = offset_in_page(offset); 1088 unsigned page_length = PAGE_SIZE - page_offset; 1089 page_length = remain < page_length ? remain : page_length; 1090 if (node.allocated) { 1091 wmb(); 1092 ggtt->base.insert_page(&ggtt->base, 1093 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1094 node.start, I915_CACHE_NONE, 0); 1095 wmb(); 1096 } else { 1097 page_base += offset & LINUX_PAGE_MASK; 1098 } 1099 1100 if (gtt_user_read(&ggtt->mappable, page_base, page_offset, 1101 user_data, page_length)) { 1102 ret = -EFAULT; 1103 break; 1104 } 1105 1106 remain -= page_length; 1107 user_data += page_length; 1108 offset += page_length; 1109 } 1110 1111 mutex_lock(&i915->drm.struct_mutex); 1112 out_unpin: 1113 if (node.allocated) { 1114 wmb(); 1115 ggtt->base.clear_range(&ggtt->base, 1116 node.start, node.size); 1117 remove_mappable_node(&node); 1118 } else { 1119 i915_vma_unpin(vma); 1120 } 1121 out_unlock: 1122 intel_runtime_pm_put(i915); 1123 mutex_unlock(&i915->drm.struct_mutex); 1124 1125 return ret; 1126 } 1127 1128 /** 1129 * Reads data from the object referenced by handle. 1130 * @dev: drm device pointer 1131 * @data: ioctl data blob 1132 * @file: drm file pointer 1133 * 1134 * On error, the contents of *data are undefined. 
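 * The shmem read path is tried first; -EFAULT or -ENODEV from it makes us
 * retry the read through a GTT mapping (i915_gem_gtt_pread()).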
1135 */ 1136 int 1137 i915_gem_pread_ioctl(struct drm_device *dev, void *data, 1138 struct drm_file *file) 1139 { 1140 struct drm_i915_gem_pread *args = data; 1141 struct drm_i915_gem_object *obj; 1142 int ret; 1143 1144 if (args->size == 0) 1145 return 0; 1146 1147 #if 0 1148 if (!access_ok(VERIFY_WRITE, 1149 u64_to_user_ptr(args->data_ptr), 1150 args->size)) 1151 return -EFAULT; 1152 #endif 1153 1154 obj = i915_gem_object_lookup(file, args->handle); 1155 if (!obj) 1156 return -ENOENT; 1157 1158 /* Bounds check source. */ 1159 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { 1160 ret = -EINVAL; 1161 goto out; 1162 } 1163 1164 trace_i915_gem_object_pread(obj, args->offset, args->size); 1165 1166 ret = i915_gem_object_wait(obj, 1167 I915_WAIT_INTERRUPTIBLE, 1168 MAX_SCHEDULE_TIMEOUT, 1169 to_rps_client(file)); 1170 if (ret) 1171 goto out; 1172 1173 ret = i915_gem_object_pin_pages(obj); 1174 if (ret) 1175 goto out; 1176 1177 ret = i915_gem_shmem_pread(obj, args); 1178 if (ret == -EFAULT || ret == -ENODEV) 1179 ret = i915_gem_gtt_pread(obj, args); 1180 1181 i915_gem_object_unpin_pages(obj); 1182 out: 1183 i915_gem_object_put(obj); 1184 return ret; 1185 } 1186 1187 /* This is the fast write path which cannot handle 1188 * page faults in the source data 1189 */ 1190 1191 static inline bool 1192 ggtt_write(struct io_mapping *mapping, 1193 loff_t base, int offset, 1194 char __user *user_data, int length) 1195 { 1196 void *vaddr; 1197 unsigned long unwritten; 1198 1199 /* We can use the cpu mem copy function because this is X86. */ 1200 vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); 1201 unwritten = __copy_from_user_inatomic_nocache(vaddr + offset, 1202 user_data, length); 1203 io_mapping_unmap_atomic(vaddr); 1204 if (unwritten) { 1205 vaddr = (void __force *) 1206 io_mapping_map_wc(mapping, base, PAGE_SIZE); 1207 unwritten = copy_from_user(vaddr + offset, user_data, length); 1208 io_mapping_unmap(vaddr); 1209 } 1210 1211 return unwritten; 1212 } 1213 1214 /** 1215 * This is the fast pwrite path, where we copy the data directly from the 1216 * user into the GTT, uncached. 
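 * The object is pinned into the mappable aperture when possible; otherwise a
 * single page-sized GGTT node is allocated and each page of the write is
 * mapped through it in turn.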
1217 * @obj: i915 GEM object 1218 * @args: pwrite arguments structure 1219 */ 1220 static int 1221 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, 1222 const struct drm_i915_gem_pwrite *args) 1223 { 1224 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1225 struct i915_ggtt *ggtt = &i915->ggtt; 1226 struct drm_mm_node node; 1227 struct i915_vma *vma; 1228 u64 remain, offset; 1229 void __user *user_data; 1230 int ret; 1231 1232 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1233 if (ret) 1234 return ret; 1235 1236 intel_runtime_pm_get(i915); 1237 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1238 PIN_MAPPABLE | PIN_NONBLOCK); 1239 if (!IS_ERR(vma)) { 1240 node.start = i915_ggtt_offset(vma); 1241 node.allocated = false; 1242 ret = i915_vma_put_fence(vma); 1243 if (ret) { 1244 i915_vma_unpin(vma); 1245 vma = ERR_PTR(ret); 1246 } 1247 } 1248 if (IS_ERR(vma)) { 1249 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1250 if (ret) 1251 goto out_unlock; 1252 GEM_BUG_ON(!node.allocated); 1253 } 1254 1255 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1256 if (ret) 1257 goto out_unpin; 1258 1259 mutex_unlock(&i915->drm.struct_mutex); 1260 1261 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 1262 1263 user_data = u64_to_user_ptr(args->data_ptr); 1264 offset = args->offset; 1265 remain = args->size; 1266 while (remain) { 1267 /* Operation in this page 1268 * 1269 * page_base = page offset within aperture 1270 * page_offset = offset within page 1271 * page_length = bytes to copy for this page 1272 */ 1273 u32 page_base = node.start; 1274 unsigned int page_offset = offset_in_page(offset); 1275 unsigned int page_length = PAGE_SIZE - page_offset; 1276 page_length = remain < page_length ? remain : page_length; 1277 if (node.allocated) { 1278 wmb(); /* flush the write before we modify the GGTT */ 1279 ggtt->base.insert_page(&ggtt->base, 1280 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1281 node.start, I915_CACHE_NONE, 0); 1282 wmb(); /* flush modifications to the GGTT (insert_page) */ 1283 } else { 1284 page_base += offset & LINUX_PAGE_MASK; 1285 } 1286 /* If we get a fault while copying data, then (presumably) our 1287 * source page isn't available. Return the error and we'll 1288 * retry in the slow path. 1289 * If the object is non-shmem backed, we retry again with the 1290 * path that handles page fault. 
1291 */ 1292 if (ggtt_write(&ggtt->mappable, page_base, page_offset, 1293 user_data, page_length)) { 1294 ret = -EFAULT; 1295 break; 1296 } 1297 1298 remain -= page_length; 1299 user_data += page_length; 1300 offset += page_length; 1301 } 1302 intel_fb_obj_flush(obj, ORIGIN_CPU); 1303 1304 mutex_lock(&i915->drm.struct_mutex); 1305 out_unpin: 1306 if (node.allocated) { 1307 wmb(); 1308 ggtt->base.clear_range(&ggtt->base, 1309 node.start, node.size); 1310 remove_mappable_node(&node); 1311 } else { 1312 i915_vma_unpin(vma); 1313 } 1314 out_unlock: 1315 intel_runtime_pm_put(i915); 1316 mutex_unlock(&i915->drm.struct_mutex); 1317 return ret; 1318 } 1319 1320 static int 1321 shmem_pwrite_slow(struct page *page, int offset, int length, 1322 char __user *user_data, 1323 bool page_do_bit17_swizzling, 1324 bool needs_clflush_before, 1325 bool needs_clflush_after) 1326 { 1327 char *vaddr; 1328 int ret; 1329 1330 vaddr = kmap(page); 1331 if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) 1332 shmem_clflush_swizzled_range(vaddr + offset, length, 1333 page_do_bit17_swizzling); 1334 if (page_do_bit17_swizzling) 1335 ret = __copy_from_user_swizzled(vaddr, offset, user_data, 1336 length); 1337 else 1338 ret = __copy_from_user(vaddr + offset, user_data, length); 1339 if (needs_clflush_after) 1340 shmem_clflush_swizzled_range(vaddr + offset, length, 1341 page_do_bit17_swizzling); 1342 kunmap(page); 1343 1344 return ret ? -EFAULT : 0; 1345 } 1346 1347 /* Per-page copy function for the shmem pwrite fastpath. 1348 * Flushes invalid cachelines before writing to the target if 1349 * needs_clflush_before is set and flushes out any written cachelines after 1350 * writing if needs_clflush is set. 1351 */ 1352 static int 1353 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data, 1354 bool page_do_bit17_swizzling, 1355 bool needs_clflush_before, 1356 bool needs_clflush_after) 1357 { 1358 int ret; 1359 1360 ret = -ENODEV; 1361 if (!page_do_bit17_swizzling) { 1362 char *vaddr = kmap_atomic(page); 1363 1364 if (needs_clflush_before) 1365 drm_clflush_virt_range(vaddr + offset, len); 1366 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len); 1367 if (needs_clflush_after) 1368 drm_clflush_virt_range(vaddr + offset, len); 1369 1370 kunmap_atomic(vaddr); 1371 } 1372 if (ret == 0) 1373 return ret; 1374 1375 return shmem_pwrite_slow(page, offset, len, user_data, 1376 page_do_bit17_swizzling, 1377 needs_clflush_before, 1378 needs_clflush_after); 1379 } 1380 1381 static int 1382 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, 1383 const struct drm_i915_gem_pwrite *args) 1384 { 1385 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1386 void __user *user_data; 1387 u64 remain; 1388 unsigned int obj_do_bit17_swizzling; 1389 unsigned int partial_cacheline_write; 1390 unsigned int needs_clflush; 1391 unsigned int offset, idx; 1392 int ret; 1393 #ifdef __DragonFly__ 1394 vm_object_t vm_obj; 1395 #endif 1396 1397 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1398 if (ret) 1399 return ret; 1400 1401 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); 1402 mutex_unlock(&i915->drm.struct_mutex); 1403 if (ret) 1404 return ret; 1405 1406 obj_do_bit17_swizzling = 0; 1407 if (i915_gem_object_needs_bit17_swizzle(obj)) 1408 obj_do_bit17_swizzling = BIT(17); 1409 1410 /* If we don't overwrite a cacheline completely we need to be 1411 * careful to have up-to-date data by first clflushing. Don't 1412 * overcomplicate things and flush the entire patch. 
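	 * partial_cacheline_write below is used as a mask: a non-zero
	 * (offset | length) & mask means the copy does not cover whole
	 * cachelines, so the destination is clflushed before writing.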
1413 */ 1414 partial_cacheline_write = 0; 1415 if (needs_clflush & CLFLUSH_BEFORE) 1416 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1; 1417 1418 user_data = u64_to_user_ptr(args->data_ptr); 1419 remain = args->size; 1420 offset = offset_in_page(args->offset); 1421 #ifdef __DragonFly__ 1422 vm_obj = obj->base.filp; 1423 VM_OBJECT_LOCK(vm_obj); 1424 vm_object_pip_add(vm_obj, 1); 1425 #endif 1426 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 1427 struct page *page = i915_gem_object_get_page(obj, idx); 1428 int length; 1429 1430 length = remain; 1431 if (offset + length > PAGE_SIZE) 1432 length = PAGE_SIZE - offset; 1433 1434 ret = shmem_pwrite(page, offset, length, user_data, 1435 page_to_phys(page) & obj_do_bit17_swizzling, 1436 (offset | length) & partial_cacheline_write, 1437 needs_clflush & CLFLUSH_AFTER); 1438 if (ret) 1439 break; 1440 1441 remain -= length; 1442 user_data += length; 1443 offset = 0; 1444 } 1445 #ifdef __DragonFly__ 1446 if (vm_obj != obj->base.filp) { 1447 kprintf("i915_gem_shmem_pwrite: VM_OBJECT CHANGED! %p %p\n", 1448 vm_obj, obj->base.filp); 1449 } 1450 vm_object_pip_wakeup(vm_obj); 1451 VM_OBJECT_UNLOCK(vm_obj); 1452 #endif 1453 1454 intel_fb_obj_flush(obj, ORIGIN_CPU); 1455 i915_gem_obj_finish_shmem_access(obj); 1456 return ret; 1457 } 1458 1459 /** 1460 * Writes data to the object referenced by handle. 1461 * @dev: drm device 1462 * @data: ioctl data blob 1463 * @file: drm file 1464 * 1465 * On error, the contents of the buffer that were to be modified are undefined. 1466 */ 1467 int 1468 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1469 struct drm_file *file) 1470 { 1471 struct drm_i915_gem_pwrite *args = data; 1472 struct drm_i915_gem_object *obj; 1473 int ret; 1474 1475 if (args->size == 0) 1476 return 0; 1477 1478 #if 0 1479 if (!access_ok(VERIFY_READ, 1480 u64_to_user_ptr(args->data_ptr), 1481 args->size)) 1482 return -EFAULT; 1483 #endif 1484 1485 obj = i915_gem_object_lookup(file, args->handle); 1486 if (!obj) 1487 return -ENOENT; 1488 1489 /* Bounds check destination. */ 1490 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { 1491 ret = -EINVAL; 1492 goto err; 1493 } 1494 1495 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1496 1497 ret = -ENODEV; 1498 if (obj->ops->pwrite) 1499 ret = obj->ops->pwrite(obj, args); 1500 if (ret != -ENODEV) 1501 goto err; 1502 1503 ret = i915_gem_object_wait(obj, 1504 I915_WAIT_INTERRUPTIBLE | 1505 I915_WAIT_ALL, 1506 MAX_SCHEDULE_TIMEOUT, 1507 to_rps_client(file)); 1508 if (ret) 1509 goto err; 1510 1511 ret = i915_gem_object_pin_pages(obj); 1512 if (ret) 1513 goto err; 1514 1515 ret = -EFAULT; 1516 /* We can only do the GTT pwrite on untiled buffers, as otherwise 1517 * it would end up going through the fenced access, and we'll get 1518 * different detiling behavior between reading and writing. 1519 * pread/pwrite currently are reading and writing from the CPU 1520 * perspective, requiring manual detiling by the client. 1521 */ 1522 if (!i915_gem_object_has_struct_page(obj) || 1523 cpu_write_needs_clflush(obj)) 1524 /* Note that the gtt paths might fail with non-page-backed user 1525 * pointers (e.g. gtt mappings when moving data between 1526 * textures). Fallback to the shmem path in that case. 
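		 * On -EFAULT or -ENOSPC from the fast path, objects with a
		 * phys handle use i915_gem_phys_pwrite(); all others fall
		 * back to i915_gem_shmem_pwrite().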
1527 */ 1528 ret = i915_gem_gtt_pwrite_fast(obj, args); 1529 1530 if (ret == -EFAULT || ret == -ENOSPC) { 1531 if (obj->phys_handle) 1532 ret = i915_gem_phys_pwrite(obj, args, file); 1533 else 1534 ret = i915_gem_shmem_pwrite(obj, args); 1535 } 1536 1537 i915_gem_object_unpin_pages(obj); 1538 err: 1539 i915_gem_object_put(obj); 1540 return ret; 1541 } 1542 1543 static inline enum fb_op_origin 1544 write_origin(struct drm_i915_gem_object *obj, unsigned domain) 1545 { 1546 return (domain == I915_GEM_DOMAIN_GTT ? 1547 obj->frontbuffer_ggtt_origin : ORIGIN_CPU); 1548 } 1549 1550 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) 1551 { 1552 struct drm_i915_private *i915; 1553 struct list_head *list; 1554 struct i915_vma *vma; 1555 1556 list_for_each_entry(vma, &obj->vma_list, obj_link) { 1557 if (!i915_vma_is_ggtt(vma)) 1558 break; 1559 1560 if (i915_vma_is_active(vma)) 1561 continue; 1562 1563 if (!drm_mm_node_allocated(&vma->node)) 1564 continue; 1565 1566 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 1567 } 1568 1569 i915 = to_i915(obj->base.dev); 1570 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; 1571 list_move_tail(&obj->global_link, list); 1572 } 1573 1574 /** 1575 * Called when user space prepares to use an object with the CPU, either 1576 * through the mmap ioctl's mapping or a GTT mapping. 1577 * @dev: drm device 1578 * @data: ioctl data blob 1579 * @file: drm file 1580 */ 1581 int 1582 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1583 struct drm_file *file) 1584 { 1585 struct drm_i915_gem_set_domain *args = data; 1586 struct drm_i915_gem_object *obj; 1587 uint32_t read_domains = args->read_domains; 1588 uint32_t write_domain = args->write_domain; 1589 int err; 1590 1591 /* Only handle setting domains to types used by the CPU. */ 1592 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) 1593 return -EINVAL; 1594 1595 /* Having something in the write domain implies it's in the read 1596 * domain, and only that read domain. Enforce that in the request. 1597 */ 1598 if (write_domain != 0 && read_domains != write_domain) 1599 return -EINVAL; 1600 1601 obj = i915_gem_object_lookup(file, args->handle); 1602 if (!obj) 1603 return -ENOENT; 1604 1605 /* Try to flush the object off the GPU without holding the lock. 1606 * We will repeat the flush holding the lock in the normal manner 1607 * to catch cases where we are gazumped. 1608 */ 1609 err = i915_gem_object_wait(obj, 1610 I915_WAIT_INTERRUPTIBLE | 1611 (write_domain ? I915_WAIT_ALL : 0), 1612 MAX_SCHEDULE_TIMEOUT, 1613 to_rps_client(file)); 1614 if (err) 1615 goto out; 1616 1617 /* Flush and acquire obj->pages so that we are coherent through 1618 * direct access in memory with previous cached writes through 1619 * shmemfs and that our cache domain tracking remains valid. 1620 * For example, if the obj->filp was moved to swap without us 1621 * being notified and releasing the pages, we would mistakenly 1622 * continue to assume that the obj remained out of the CPU cached 1623 * domain. 
1624 */ 1625 err = i915_gem_object_pin_pages(obj); 1626 if (err) 1627 goto out; 1628 1629 err = i915_mutex_lock_interruptible(dev); 1630 if (err) 1631 goto out_unpin; 1632 1633 if (read_domains & I915_GEM_DOMAIN_GTT) 1634 err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1635 else 1636 err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1637 1638 /* And bump the LRU for this access */ 1639 i915_gem_object_bump_inactive_ggtt(obj); 1640 1641 mutex_unlock(&dev->struct_mutex); 1642 1643 if (write_domain != 0) 1644 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain)); 1645 1646 out_unpin: 1647 i915_gem_object_unpin_pages(obj); 1648 out: 1649 i915_gem_object_put(obj); 1650 return err; 1651 } 1652 1653 /** 1654 * Called when user space has done writes to this buffer 1655 * @dev: drm device 1656 * @data: ioctl data blob 1657 * @file: drm file 1658 */ 1659 int 1660 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1661 struct drm_file *file) 1662 { 1663 struct drm_i915_gem_sw_finish *args = data; 1664 struct drm_i915_gem_object *obj; 1665 1666 obj = i915_gem_object_lookup(file, args->handle); 1667 if (!obj) 1668 return -ENOENT; 1669 1670 /* Pinned buffers may be scanout, so flush the cache */ 1671 i915_gem_object_flush_if_display(obj); 1672 i915_gem_object_put(obj); 1673 1674 return 0; 1675 } 1676 1677 /** 1678 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address 1679 * it is mapped to. 1680 * @dev: drm device 1681 * @data: ioctl data blob 1682 * @file: drm file 1683 * 1684 * While the mapping holds a reference on the contents of the object, it doesn't 1685 * imply a ref on the object itself. 1686 * 1687 * IMPORTANT: 1688 * 1689 * DRM driver writers who look a this function as an example for how to do GEM 1690 * mmap support, please don't implement mmap support like here. The modern way 1691 * to implement DRM mmap support is with an mmap offset ioctl (like 1692 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. 1693 * That way debug tooling like valgrind will understand what's going on, hiding 1694 * the mmap call in a driver private ioctl will break that. The i915 driver only 1695 * does cpu mmaps this way because we didn't know better. 1696 */ 1697 int 1698 i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1699 struct drm_file *file) 1700 { 1701 struct drm_i915_gem_mmap *args = data; 1702 struct drm_i915_gem_object *obj; 1703 unsigned long addr; 1704 #ifdef __DragonFly__ 1705 struct proc *p = curproc; 1706 vm_map_t map = &p->p_vmspace->vm_map; 1707 vm_size_t size; 1708 int error = 0, rv; 1709 #endif 1710 1711 if (args->flags & ~(I915_MMAP_WC)) 1712 return -EINVAL; 1713 1714 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT)) 1715 return -ENODEV; 1716 1717 obj = i915_gem_object_lookup(file, args->handle); 1718 if (!obj) 1719 return -ENOENT; 1720 1721 /* prime objects have no backing filp to GEM mmap 1722 * pages from. 1723 */ 1724 if (!obj->base.filp) { 1725 i915_gem_object_put(obj); 1726 return -EINVAL; 1727 } 1728 1729 if (args->size == 0) 1730 goto out; 1731 1732 size = round_page(args->size); 1733 if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) { 1734 error = -ENOMEM; 1735 goto out; 1736 } 1737 1738 /* 1739 * Call hint to ensure that NULL is not returned as a valid address 1740 * and to reduce vm_map traversals. XXX causes instability, use a 1741 * fixed low address as the start point instead to avoid the NULL 1742 * return issue. 
	 */
	addr = PAGE_SIZE;

	/*
	 * Use 256KB alignment.  It is unclear why this matters for a
	 * virtual address but it appears to fix a number of application/X
	 * crashes and kms console switching is much faster.
	 */
	vm_object_hold(obj->base.filp);
	vm_object_reference_locked(obj->base.filp);
	vm_object_drop(obj->base.filp);

	/* Something goes wrong here: fails to mmap 4096 */
	rv = vm_map_find(map, obj->base.filp, NULL,
			 args->offset, &addr, args->size,
			 256 * 1024, /* align */
			 TRUE, /* fitit */
			 VM_MAPTYPE_NORMAL, VM_SUBSYS_DRM_GEM,
			 VM_PROT_READ | VM_PROT_WRITE, /* prot */
			 VM_PROT_READ | VM_PROT_WRITE, /* max */
			 MAP_SHARED /* cow */);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(obj->base.filp);
		error = -vm_mmap_to_errno(rv);
	} else {
		args->addr_ptr = (uint64_t)addr;
	}

	if (args->flags & I915_MMAP_WC) {	/* I915_PARAM_MMAP_VERSION */
#if 0
		addr = vm_mmap(obj->base.filp, 0, args->size,
			       PROT_READ | PROT_WRITE, MAP_SHARED,
			       args->offset);
		if (args->flags & I915_MMAP_WC) {
			struct mm_struct *mm = current->mm;
			struct vm_area_struct *vma;

			if (down_write_killable(&mm->mmap_sem)) {
				i915_gem_object_put(obj);
				return -EINTR;
			}
			vma = find_vma(mm, addr);
			if (vma)
				vma->vm_page_prot =
					pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
			else
				addr = -ENOMEM;
			up_write(&mm->mmap_sem);
#endif

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}

out:
	i915_gem_object_put(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 1;
}

static inline struct i915_ggtt_view
compute_partial_view(struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}

/**
 * i915_gem_fault - fault a page into the GTT
 *
 * vm_obj is locked on entry and expected to be locked on return.
 *
 * This is an OBJT_MGTDEVICE object: *mres will be NULL and should be set
 * to the desired vm_page.  The page is not indexed into the vm_obj.
 *
 * XXX Most GEM calls appear to be interruptible, but we can't hard loop
 * in that case.  Release all resources and wait 1 tick before retrying.
 * This is a huge problem which needs to be fixed by getting rid of most
 * of the interruptibility.  The linux code does not retry but does appear
 * to have some sort of mechanism (VM_FAULT_NOPAGE ?) for the higher level
 * to be able to retry.
 *
 * --
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 * vm_obj is locked on entry and expected to be locked on return.  The VM
 * pager has placed an anonymous memory page at (obj,offset) which we have
 * to replace.
1920 */ 1921 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) 1922 { 1923 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ 1924 struct drm_i915_gem_object *obj = to_intel_bo(vm_obj->handle); 1925 struct drm_device *dev = obj->base.dev; 1926 struct drm_i915_private *dev_priv = to_i915(dev); 1927 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1928 bool write = !!(prot & VM_PROT_WRITE); 1929 struct i915_vma *vma; 1930 pgoff_t page_offset; 1931 vm_page_t m; 1932 unsigned int flags; 1933 int ret; 1934 int didref = 0; 1935 struct vm_area_struct tmp_vm_area; 1936 struct vm_area_struct *area = &tmp_vm_area; 1937 1938 /* Fill-in vm_area_struct */ 1939 area->vm_private_data = vm_obj->handle; 1940 area->vm_start = 0; 1941 area->vm_end = obj->base.size; 1942 1943 /* We don't use vmf->pgoff since that has the fake offset */ 1944 page_offset = (unsigned long)offset >> PAGE_SHIFT; 1945 1946 /* 1947 * vm_fault() has supplied us with a busied page placeholding 1948 * the operation. This presents a lock order reversal issue 1949 * again i915_gem_release_mmap() for our device mutex. 1950 * 1951 * Deal with the problem by getting rid of the placeholder now, 1952 * and then dealing with the potential for a new placeholder when 1953 * we try to insert later. 1954 */ 1955 KKASSERT(*mres == NULL); 1956 m = NULL; 1957 1958 retry: 1959 trace_i915_gem_object_fault(obj, page_offset, true, write); 1960 1961 /* Try to flush the object off the GPU first without holding the lock. 1962 * Upon acquiring the lock, we will perform our sanity checks and then 1963 * repeat the flush holding the lock in the normal manner to catch cases 1964 * where we are gazumped. 1965 */ 1966 ret = i915_gem_object_wait(obj, 1967 I915_WAIT_INTERRUPTIBLE, 1968 MAX_SCHEDULE_TIMEOUT, 1969 NULL); 1970 if (ret) 1971 goto err; 1972 1973 ret = i915_gem_object_pin_pages(obj); 1974 if (ret) 1975 goto err; 1976 1977 intel_runtime_pm_get(dev_priv); 1978 1979 ret = i915_mutex_lock_interruptible(dev); 1980 if (ret) { 1981 if (ret != -EINTR) 1982 kprintf("i915: caught bug(%d) (mutex_lock_inter)\n", ret); 1983 goto err_rpm; 1984 } 1985 1986 /* Access to snoopable pages through the GTT is incoherent. */ 1987 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { 1988 kprintf("i915: caught bug() (cache_level %d %d)\n", 1989 (obj->cache_level), !HAS_LLC(dev_priv)); 1990 ret = -EFAULT; 1991 goto err_unlock; 1992 } 1993 1994 /* If the object is smaller than a couple of partial vma, it is 1995 * not worth only creating a single partial vma - we may as well 1996 * clear enough space for the full object. 1997 */ 1998 flags = PIN_MAPPABLE; 1999 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) 2000 flags |= PIN_NONBLOCK | PIN_NONFAULT; 2001 2002 /* Now pin it into the GTT as needed */ 2003 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); 2004 if (IS_ERR(vma)) { 2005 /* Use a partial view if it is bigger than available space */ 2006 struct i915_ggtt_view view = 2007 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); 2008 2009 kprintf("i915_gem_fault: CHUNKING PASS\n"); 2010 2011 /* Userspace is now writing through an untracked VMA, abandon 2012 * all hope that the hardware is able to track future writes. 
2013 */ 2014 obj->frontbuffer_ggtt_origin = ORIGIN_CPU; 2015 2016 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); 2017 } 2018 if (IS_ERR(vma)) { 2019 kprintf("i915: caught bug() (VMA error %ld objsize %ld)\n", 2020 PTR_ERR(vma), obj->base.size); 2021 ret = PTR_ERR(vma); 2022 goto err_unlock; 2023 } 2024 2025 ret = i915_gem_object_set_to_gtt_domain(obj, write); 2026 if (ret) { 2027 kprintf("i915: caught bug(%d) (set_to_gtt_dom)\n", ret); 2028 goto err_unpin; 2029 } 2030 2031 ret = i915_vma_get_fence(vma); 2032 if (ret) { 2033 kprintf("i915: caught bug(%d) (vma_get_fence)\n", ret); 2034 goto err_unpin; 2035 } 2036 2037 /* 2038 * START FREEBSD MAGIC 2039 * 2040 * Add a pip count to avoid destruction and certain other 2041 * complex operations (such as collapses?) while unlocked. 2042 */ 2043 vm_object_pip_add(vm_obj, 1); 2044 didref = 1; 2045 2046 ret = 0; 2047 2048 /* Mark as being mmapped into userspace for later revocation */ 2049 assert_rpm_wakelock_held(dev_priv); 2050 if (list_empty(&obj->userfault_link)) 2051 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list); 2052 2053 /* Finally, remap it using the new GTT offset */ 2054 m = vm_phys_fictitious_to_vm_page(ggtt->mappable_base + 2055 i915_ggtt_offset(vma) + offset); 2056 if (m == NULL) { 2057 kprintf("i915: caught bug() (phys_fict_to_vm)\n"); 2058 ret = -EFAULT; 2059 goto err_unpin; 2060 } 2061 KASSERT((m->flags & PG_FICTITIOUS) != 0, ("not fictitious %p", m)); 2062 KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m)); 2063 2064 /* 2065 * Try to busy the page. Fails on non-zero return. 2066 */ 2067 if (vm_page_busy_try(m, false)) { 2068 kprintf("i915_gem_fault: BUSY(2)\n"); 2069 ret = -EINTR; 2070 goto err_unpin; 2071 } 2072 m->valid = VM_PAGE_BITS_ALL; 2073 *mres = m; 2074 2075 __i915_vma_unpin(vma); 2076 mutex_unlock(&dev->struct_mutex); 2077 ret = VM_PAGER_OK; 2078 goto done; 2079 2080 /* 2081 * ALTERNATIVE ERROR RETURN. 2082 * 2083 * OBJECT EXPECTED TO BE LOCKED. 2084 */ 2085 err_unpin: 2086 __i915_vma_unpin(vma); 2087 err_unlock: 2088 mutex_unlock(&dev->struct_mutex); 2089 err_rpm: 2090 intel_runtime_pm_put(dev_priv); 2091 i915_gem_object_unpin_pages(obj); 2092 err: 2093 switch (ret) { 2094 case -EIO: 2095 /* 2096 * We eat errors when the gpu is terminally wedged to avoid 2097 * userspace unduly crashing (gl has no provisions for mmaps to 2098 * fail). But any other -EIO isn't ours (e.g. swap in failure) 2099 * and so needs to be reported. 2100 */ 2101 if (!i915_terminally_wedged(&dev_priv->gpu_error)) { 2102 // ret = VM_FAULT_SIGBUS; 2103 break; 2104 } 2105 case -EAGAIN: 2106 /* 2107 * EAGAIN means the gpu is hung and we'll wait for the error 2108 * handler to reset everything when re-faulting in 2109 * i915_mutex_lock_interruptible. 
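	 *
	 * On DragonFly the retry cannot be pushed up to the VM layer the
	 * way Linux re-faults via its fault-handler return codes, so the
	 * cases below drop the pip reference and the VM object lock,
	 * sleep for one tick and jump back to the retry: label directly.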
2110 */ 2111 case -ERESTARTSYS: 2112 case -EINTR: 2113 #ifdef __DragonFly__ 2114 if (didref) { 2115 kprintf("i915: caught bug(%d) (retry)\n", ret); 2116 vm_object_pip_wakeup(vm_obj); 2117 didref = 0; 2118 } 2119 VM_OBJECT_UNLOCK(vm_obj); 2120 int dummy; 2121 tsleep(&dummy, 0, "delay", 1); /* XXX */ 2122 VM_OBJECT_LOCK(vm_obj); 2123 goto retry; 2124 #endif 2125 default: 2126 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); 2127 ret = VM_PAGER_ERROR; 2128 break; 2129 } 2130 2131 #ifdef __DragonFly__ 2132 done: 2133 if (didref) 2134 vm_object_pip_wakeup(vm_obj); 2135 else 2136 kprintf("i915: caught bug(%d)\n", ret); 2137 #endif 2138 2139 return ret; 2140 } 2141 2142 #ifdef __DragonFly__ 2143 static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, 2144 struct address_space *file_mapping) 2145 { 2146 struct drm_i915_gem_object *obj = container_of( 2147 node,struct drm_i915_gem_object, base.vma_node); 2148 vm_object_t devobj; 2149 2150 devobj = cdev_pager_lookup(obj); 2151 if (devobj != NULL) { 2152 VM_OBJECT_LOCK(devobj); 2153 vm_object_page_remove(devobj, 0, 0, false); 2154 VM_OBJECT_UNLOCK(devobj); 2155 vm_object_deallocate(devobj); 2156 } 2157 } 2158 #endif 2159 2160 /** 2161 * i915_gem_release_mmap - remove physical page mappings 2162 * @obj: obj in question 2163 * 2164 * Preserve the reservation of the mmapping with the DRM core code, but 2165 * relinquish ownership of the pages back to the system. 2166 * 2167 * It is vital that we remove the page mapping if we have mapped a tiled 2168 * object through the GTT and then lose the fence register due to 2169 * resource pressure. Similarly if the object has been moved out of the 2170 * aperture, than pages mapped into userspace must be revoked. Removing the 2171 * mapping will then trigger a page fault on the next user access, allowing 2172 * fixup by i915_gem_fault(). 2173 */ 2174 void 2175 i915_gem_release_mmap(struct drm_i915_gem_object *obj) 2176 { 2177 struct drm_i915_private *i915 = to_i915(obj->base.dev); 2178 2179 /* Serialisation between user GTT access and our code depends upon 2180 * revoking the CPU's PTE whilst the mutex is held. The next user 2181 * pagefault then has to wait until we release the mutex. 2182 * 2183 * Note that RPM complicates somewhat by adding an additional 2184 * requirement that operations to the GGTT be made holding the RPM 2185 * wakeref. 2186 */ 2187 lockdep_assert_held(&i915->drm.struct_mutex); 2188 intel_runtime_pm_get(i915); 2189 2190 if (list_empty(&obj->userfault_link)) 2191 goto out; 2192 2193 list_del_init(&obj->userfault_link); 2194 #ifndef __DragonFly__ 2195 drm_vma_node_unmap(&obj->base.vma_node, 2196 obj->base.dev->anon_inode->i_mapping); 2197 #else 2198 drm_vma_node_unmap(&obj->base.vma_node, NULL); 2199 #endif 2200 2201 /* Ensure that the CPU's PTE are revoked and there are not outstanding 2202 * memory transactions from userspace before we return. The TLB 2203 * flushing implied above by changing the PTE above *should* be 2204 * sufficient, an extra barrier here just provides us with a bit 2205 * of paranoid documentation about our requirement to serialise 2206 * memory writes before touching registers / GSM. 2207 */ 2208 wmb(); 2209 2210 out: 2211 intel_runtime_pm_put(i915); 2212 } 2213 2214 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) 2215 { 2216 struct drm_i915_gem_object *obj, *on; 2217 int i; 2218 2219 /* 2220 * Only called during RPM suspend. 
All users of the userfault_list 2221 * must be holding an RPM wakeref to ensure that this can not 2222 * run concurrently with themselves (and use the struct_mutex for 2223 * protection between themselves). 2224 */ 2225 2226 list_for_each_entry_safe(obj, on, 2227 &dev_priv->mm.userfault_list, userfault_link) { 2228 list_del_init(&obj->userfault_link); 2229 #ifndef __DragonFly__ 2230 drm_vma_node_unmap(&obj->base.vma_node, 2231 obj->base.dev->anon_inode->i_mapping); 2232 #else 2233 drm_vma_node_unmap(&obj->base.vma_node, NULL); 2234 #endif 2235 } 2236 2237 /* The fence will be lost when the device powers down. If any were 2238 * in use by hardware (i.e. they are pinned), we should not be powering 2239 * down! All other fences will be reacquired by the user upon waking. 2240 */ 2241 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2242 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2243 2244 /* Ideally we want to assert that the fence register is not 2245 * live at this point (i.e. that no piece of code will be 2246 * trying to write through fence + GTT, as that both violates 2247 * our tracking of activity and associated locking/barriers, 2248 * but also is illegal given that the hw is powered down). 2249 * 2250 * Previously we used reg->pin_count as a "liveness" indicator. 2251 * That is not sufficient, and we need a more fine-grained 2252 * tool if we want to have a sanity check here. 2253 */ 2254 2255 if (!reg->vma) 2256 continue; 2257 2258 GEM_BUG_ON(!list_empty(®->vma->obj->userfault_link)); 2259 reg->dirty = true; 2260 } 2261 } 2262 2263 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) 2264 { 2265 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2266 int err; 2267 2268 err = drm_gem_create_mmap_offset(&obj->base); 2269 if (likely(!err)) 2270 return 0; 2271 2272 /* Attempt to reap some mmap space from dead objects */ 2273 do { 2274 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); 2275 if (err) 2276 break; 2277 2278 i915_gem_drain_freed_objects(dev_priv); 2279 err = drm_gem_create_mmap_offset(&obj->base); 2280 if (!err) 2281 break; 2282 2283 } while (flush_delayed_work(&dev_priv->gt.retire_work)); 2284 2285 return err; 2286 } 2287 2288 #if 0 2289 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 2290 { 2291 drm_gem_free_mmap_offset(&obj->base); 2292 } 2293 #endif 2294 2295 int 2296 i915_gem_mmap_gtt(struct drm_file *file, 2297 struct drm_device *dev, 2298 uint32_t handle, 2299 uint64_t *offset) 2300 { 2301 struct drm_i915_gem_object *obj; 2302 int ret; 2303 2304 obj = i915_gem_object_lookup(file, handle); 2305 if (!obj) 2306 return -ENOENT; 2307 2308 ret = i915_gem_object_create_mmap_offset(obj); 2309 if (ret == 0) 2310 *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) | 2311 DRM_GEM_MAPPING_KEY; 2312 2313 i915_gem_object_put(obj); 2314 return ret; 2315 } 2316 2317 /** 2318 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 2319 * @dev: DRM device 2320 * @data: GTT mapping ioctl data 2321 * @file: GEM object info 2322 * 2323 * Simply returns the fake offset to userspace so it can mmap it. 2324 * The mmap call will end up in drm_gem_mmap(), which will set things 2325 * up so we can get faults in the handler above. 2326 * 2327 * The fault handler will take care of binding the object into the GTT 2328 * (since it may have been evicted to make room for something), allocating 2329 * a fence register, and mapping the appropriate aperture address into 2330 * userspace. 
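 *
 * A minimal userspace sketch (illustrative only; the bo handle, drm fd and
 * object size are assumed to come from the usual GEM setup):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * Every access through ptr after that is serviced by i915_gem_fault()
 * above.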
2331 */ 2332 int 2333 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2334 struct drm_file *file) 2335 { 2336 struct drm_i915_gem_mmap_gtt *args = data; 2337 2338 return i915_gem_mmap_gtt(file, dev, args->handle, (uint64_t *)&args->offset); 2339 } 2340 2341 /* Immediately discard the backing storage */ 2342 static void 2343 i915_gem_object_truncate(struct drm_i915_gem_object *obj) 2344 { 2345 vm_object_t vm_obj = obj->base.filp; 2346 2347 if (obj->base.filp == NULL) 2348 return; 2349 2350 VM_OBJECT_LOCK(vm_obj); 2351 vm_object_page_remove(vm_obj, 0, 0, false); 2352 VM_OBJECT_UNLOCK(vm_obj); 2353 2354 /* Our goal here is to return as much of the memory as 2355 * is possible back to the system as we are called from OOM. 2356 * To do this we must instruct the shmfs to drop all of its 2357 * backing pages, *now*. 2358 */ 2359 #if 0 2360 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 2361 #endif 2362 obj->mm.madv = __I915_MADV_PURGED; 2363 obj->mm.pages = ERR_PTR(-EFAULT); 2364 } 2365 2366 /* Try to discard unwanted pages */ 2367 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) 2368 { 2369 #if 0 2370 struct address_space *mapping; 2371 #endif 2372 2373 lockdep_assert_held(&obj->mm.lock); 2374 GEM_BUG_ON(obj->mm.pages); 2375 2376 switch (obj->mm.madv) { 2377 case I915_MADV_DONTNEED: 2378 i915_gem_object_truncate(obj); 2379 case __I915_MADV_PURGED: 2380 return; 2381 } 2382 2383 if (obj->base.filp == NULL) 2384 return; 2385 2386 #if 0 2387 mapping = obj->base.filp->f_mapping, 2388 invalidate_mapping_pages(mapping, 0, (loff_t)-1); 2389 #endif 2390 invalidate_mapping_pages(obj->base.filp, 0, (loff_t)-1); 2391 } 2392 2393 static void 2394 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, 2395 struct sg_table *pages) 2396 { 2397 struct sgt_iter sgt_iter; 2398 struct page *page; 2399 2400 __i915_gem_object_release_shmem(obj, pages, true); 2401 2402 i915_gem_gtt_finish_pages(obj, pages); 2403 2404 if (i915_gem_object_needs_bit17_swizzle(obj)) 2405 i915_gem_object_save_bit_17_swizzle(obj, pages); 2406 2407 for_each_sgt_page(page, sgt_iter, pages) { 2408 if (obj->mm.dirty) 2409 set_page_dirty(page); 2410 2411 if (obj->mm.madv == I915_MADV_WILLNEED) 2412 mark_page_accessed(page); 2413 2414 put_page(page); 2415 } 2416 obj->mm.dirty = false; 2417 2418 sg_free_table(pages); 2419 kfree(pages); 2420 } 2421 2422 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) 2423 { 2424 struct radix_tree_iter iter; 2425 void **slot; 2426 2427 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) 2428 radix_tree_delete(&obj->mm.get_page.radix, iter.index); 2429 } 2430 2431 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 2432 enum i915_mm_subclass subclass) 2433 { 2434 struct sg_table *pages; 2435 2436 if (i915_gem_object_has_pinned_pages(obj)) 2437 return; 2438 2439 GEM_BUG_ON(obj->bind_count); 2440 if (!READ_ONCE(obj->mm.pages)) 2441 return; 2442 2443 /* May be called by shrinker from within get_pages() (on another bo) */ 2444 mutex_lock_nested(&obj->mm.lock, subclass); 2445 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) 2446 goto unlock; 2447 2448 /* ->put_pages might need to allocate memory for the bit17 swizzle 2449 * array, hence protect them from being reaped by removing them from gtt 2450 * lists early. 
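	 *
	 * fetch_and_zero() below swaps obj->mm.pages for NULL first, so by
	 * the time the kernel mapping and the page iterator are torn down
	 * the object no longer advertises any backing pages.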
*/ 2451 pages = fetch_and_zero(&obj->mm.pages); 2452 GEM_BUG_ON(!pages); 2453 2454 if (obj->mm.mapping) { 2455 void *ptr; 2456 2457 ptr = ptr_mask_bits(obj->mm.mapping); 2458 if (is_vmalloc_addr(ptr)) 2459 vunmap(ptr); 2460 else 2461 kunmap(kmap_to_page(ptr)); 2462 2463 obj->mm.mapping = NULL; 2464 } 2465 2466 __i915_gem_object_reset_page_iter(obj); 2467 2468 if (!IS_ERR(pages)) 2469 obj->ops->put_pages(obj, pages); 2470 2471 unlock: 2472 mutex_unlock(&obj->mm.lock); 2473 } 2474 2475 static bool i915_sg_trim(struct sg_table *orig_st) 2476 { 2477 struct sg_table new_st; 2478 struct scatterlist *sg, *new_sg; 2479 unsigned int i; 2480 2481 if (orig_st->nents == orig_st->orig_nents) 2482 return false; 2483 2484 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) 2485 return false; 2486 2487 new_sg = new_st.sgl; 2488 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { 2489 sg_set_page(new_sg, sg_page(sg), sg->length, 0); 2490 /* called before being DMA mapped, no need to copy sg->dma_* */ 2491 new_sg = sg_next(new_sg); 2492 } 2493 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ 2494 2495 sg_free_table(orig_st); 2496 2497 *orig_st = new_st; 2498 return true; 2499 } 2500 2501 static struct sg_table * 2502 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) 2503 { 2504 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2505 const unsigned long page_count = obj->base.size / PAGE_SIZE; 2506 unsigned long i; 2507 struct vm_object *mapping; 2508 struct sg_table *st; 2509 struct scatterlist *sg; 2510 struct sgt_iter sgt_iter; 2511 struct page *page; 2512 unsigned long last_pfn = 0; /* suppress gcc warning */ 2513 unsigned int max_segment; 2514 gfp_t noreclaim; 2515 int ret; 2516 2517 /* Assert that the object is not currently in any GPU domain. As it 2518 * wasn't in the GTT, there shouldn't be any way it could have been in 2519 * a GPU cache 2520 */ 2521 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2522 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2523 2524 max_segment = swiotlb_max_segment(); 2525 if (!max_segment) 2526 max_segment = rounddown(UINT_MAX, PAGE_SIZE); 2527 2528 st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL); 2529 if (st == NULL) 2530 return ERR_PTR(-ENOMEM); 2531 2532 rebuild_st: 2533 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 2534 kfree(st); 2535 return ERR_PTR(-ENOMEM); 2536 } 2537 2538 /* Get the list of pages out of our struct file. They'll be pinned 2539 * at this point until we release them. 2540 * 2541 * Fail silently without starting the shrinker 2542 */ 2543 #ifdef __DragonFly__ 2544 mapping = obj->base.filp; 2545 VM_OBJECT_LOCK(mapping); 2546 #endif 2547 noreclaim = mapping_gfp_constraint(mapping, 2548 ~(__GFP_IO | __GFP_RECLAIM)); 2549 noreclaim |= __GFP_NORETRY | __GFP_NOWARN; 2550 2551 sg = st->sgl; 2552 st->nents = 0; 2553 for (i = 0; i < page_count; i++) { 2554 const unsigned int shrink[] = { 2555 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, 2556 0, 2557 }, *s = shrink; 2558 gfp_t gfp = noreclaim; 2559 2560 do { 2561 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2562 if (likely(!IS_ERR(page))) 2563 break; 2564 2565 if (!*s) { 2566 ret = PTR_ERR(page); 2567 goto err_sg; 2568 } 2569 2570 i915_gem_shrink(dev_priv, 2 * page_count, *s++); 2571 cond_resched(); 2572 2573 /* We've tried hard to allocate the memory by reaping 2574 * our own buffer, now let the real VM do its job and 2575 * go down in flames if truly OOM. 
2576 * 2577 * However, since graphics tend to be disposable, 2578 * defer the oom here by reporting the ENOMEM back 2579 * to userspace. 2580 */ 2581 if (!*s) { 2582 /* reclaim and warn, but no oom */ 2583 gfp = mapping_gfp_mask(mapping); 2584 2585 /* Our bo are always dirty and so we require 2586 * kswapd to reclaim our pages (direct reclaim 2587 * does not effectively begin pageout of our 2588 * buffers on its own). However, direct reclaim 2589 * only waits for kswapd when under allocation 2590 * congestion. So as a result __GFP_RECLAIM is 2591 * unreliable and fails to actually reclaim our 2592 * dirty pages -- unless you try over and over 2593 * again with !__GFP_NORETRY. However, we still 2594 * want to fail this allocation rather than 2595 * trigger the out-of-memory killer and for 2596 * this we want the future __GFP_MAYFAIL. 2597 */ 2598 } 2599 } while (1); 2600 2601 if (!i || 2602 sg->length >= max_segment || 2603 page_to_pfn(page) != last_pfn + 1) { 2604 if (i) 2605 sg = sg_next(sg); 2606 st->nents++; 2607 sg_set_page(sg, page, PAGE_SIZE, 0); 2608 } else { 2609 sg->length += PAGE_SIZE; 2610 } 2611 last_pfn = page_to_pfn(page); 2612 2613 /* Check that the i965g/gm workaround works. */ 2614 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); 2615 } 2616 if (sg) /* loop terminated early; short sg table */ 2617 sg_mark_end(sg); 2618 #ifdef __DragonFly__ 2619 VM_OBJECT_UNLOCK(mapping); 2620 #endif 2621 2622 /* Trim unused sg entries to avoid wasting memory. */ 2623 i915_sg_trim(st); 2624 2625 ret = i915_gem_gtt_prepare_pages(obj, st); 2626 if (ret) { 2627 /* DMA remapping failed? One possible cause is that 2628 * it could not reserve enough large entries, asking 2629 * for PAGE_SIZE chunks instead may be helpful. 2630 */ 2631 if (max_segment > PAGE_SIZE) { 2632 for_each_sgt_page(page, sgt_iter, st) 2633 put_page(page); 2634 sg_free_table(st); 2635 2636 max_segment = PAGE_SIZE; 2637 goto rebuild_st; 2638 } else { 2639 dev_warn(&dev_priv->drm.pdev->dev, 2640 "Failed to DMA remap %lu pages\n", 2641 page_count); 2642 goto err_pages; 2643 } 2644 } 2645 2646 if (i915_gem_object_needs_bit17_swizzle(obj)) 2647 i915_gem_object_do_bit_17_swizzle(obj, st); 2648 2649 return st; 2650 2651 err_sg: 2652 sg_mark_end(sg); 2653 err_pages: 2654 for_each_sgt_page(page, sgt_iter, st) 2655 put_page(page); 2656 #ifdef __DragonFly__ 2657 VM_OBJECT_UNLOCK(mapping); 2658 #endif 2659 sg_free_table(st); 2660 kfree(st); 2661 2662 /* shmemfs first checks if there is enough memory to allocate the page 2663 * and reports ENOSPC should there be insufficient, along with the usual 2664 * ENOMEM for a genuine allocation failure. 2665 * 2666 * We use ENOSPC in our driver to mean that we have run out of aperture 2667 * space and so want to translate the error from shmemfs back to our 2668 * usual understanding of ENOMEM. 
2669 */ 2670 if (ret == -ENOSPC) 2671 ret = -ENOMEM; 2672 2673 return ERR_PTR(ret); 2674 } 2675 2676 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 2677 struct sg_table *pages) 2678 { 2679 lockdep_assert_held(&obj->mm.lock); 2680 2681 obj->mm.get_page.sg_pos = pages->sgl; 2682 obj->mm.get_page.sg_idx = 0; 2683 2684 obj->mm.pages = pages; 2685 2686 if (i915_gem_object_is_tiled(obj) && 2687 to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 2688 GEM_BUG_ON(obj->mm.quirked); 2689 __i915_gem_object_pin_pages(obj); 2690 obj->mm.quirked = true; 2691 } 2692 } 2693 2694 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2695 { 2696 struct sg_table *pages; 2697 2698 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); 2699 2700 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { 2701 DRM_DEBUG("Attempting to obtain a purgeable object\n"); 2702 return -EFAULT; 2703 } 2704 2705 pages = obj->ops->get_pages(obj); 2706 if (unlikely(IS_ERR(pages))) 2707 return PTR_ERR(pages); 2708 2709 __i915_gem_object_set_pages(obj, pages); 2710 return 0; 2711 } 2712 2713 /* Ensure that the associated pages are gathered from the backing storage 2714 * and pinned into our object. i915_gem_object_pin_pages() may be called 2715 * multiple times before they are released by a single call to 2716 * i915_gem_object_unpin_pages() - once the pages are no longer referenced 2717 * either as a result of memory pressure (reaping pages under the shrinker) 2718 * or as the object is itself released. 2719 */ 2720 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2721 { 2722 int err; 2723 2724 err = mutex_lock_interruptible(&obj->mm.lock); 2725 if (err) 2726 return err; 2727 2728 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { 2729 err = ____i915_gem_object_get_pages(obj); 2730 if (err) 2731 goto unlock; 2732 2733 smp_mb__before_atomic(); 2734 } 2735 atomic_inc(&obj->mm.pages_pin_count); 2736 2737 unlock: 2738 mutex_unlock(&obj->mm.lock); 2739 return err; 2740 } 2741 2742 /* The 'mapping' part of i915_gem_object_pin_map() below */ 2743 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, 2744 enum i915_map_type type) 2745 { 2746 unsigned long n_pages = obj->base.size >> PAGE_SHIFT; 2747 struct sg_table *sgt = obj->mm.pages; 2748 struct sgt_iter sgt_iter; 2749 struct page *page; 2750 struct page *stack_pages[32]; 2751 struct page **pages = stack_pages; 2752 unsigned long i = 0; 2753 pgprot_t pgprot; 2754 void *addr; 2755 2756 /* A single page can always be kmapped */ 2757 if (n_pages == 1 && type == I915_MAP_WB) 2758 return kmap(sg_page(sgt->sgl)); 2759 2760 if (n_pages > ARRAY_SIZE(stack_pages)) { 2761 /* Too big for stack -- allocate temporary array instead */ 2762 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY); 2763 if (!pages) 2764 return NULL; 2765 } 2766 2767 for_each_sgt_page(page, sgt_iter, sgt) 2768 pages[i++] = page; 2769 2770 /* Check that we have the expected number of pages */ 2771 GEM_BUG_ON(i != n_pages); 2772 2773 switch (type) { 2774 case I915_MAP_WB: 2775 pgprot = PAGE_KERNEL; 2776 break; 2777 case I915_MAP_WC: 2778 pgprot = pgprot_writecombine(PAGE_KERNEL_IO); 2779 break; 2780 } 2781 addr = vmap(pages, n_pages, 0, pgprot); 2782 2783 if (pages != stack_pages) 2784 drm_free_large(pages); 2785 2786 return addr; 2787 } 2788 2789 /* get, pin, and map the pages of the object into kernel space */ 2790 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 2791 enum i915_map_type type) 2792 { 2793 enum i915_map_type 
has_type; 2794 bool pinned; 2795 void *ptr; 2796 int ret; 2797 2798 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 2799 2800 ret = mutex_lock_interruptible(&obj->mm.lock); 2801 if (ret) 2802 return ERR_PTR(ret); 2803 2804 pinned = true; 2805 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { 2806 if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { 2807 ret = ____i915_gem_object_get_pages(obj); 2808 if (ret) 2809 goto err_unlock; 2810 2811 smp_mb__before_atomic(); 2812 } 2813 atomic_inc(&obj->mm.pages_pin_count); 2814 pinned = false; 2815 } 2816 GEM_BUG_ON(!obj->mm.pages); 2817 2818 ptr = ptr_unpack_bits(obj->mm.mapping, has_type); 2819 if (ptr && has_type != type) { 2820 if (pinned) { 2821 ret = -EBUSY; 2822 goto err_unpin; 2823 } 2824 2825 if (is_vmalloc_addr(ptr)) 2826 vunmap(ptr); 2827 else 2828 kunmap(kmap_to_page(ptr)); 2829 2830 ptr = obj->mm.mapping = NULL; 2831 } 2832 2833 if (!ptr) { 2834 ptr = i915_gem_object_map(obj, type); 2835 if (!ptr) { 2836 ret = -ENOMEM; 2837 goto err_unpin; 2838 } 2839 2840 obj->mm.mapping = ptr_pack_bits(ptr, type); 2841 } 2842 2843 out_unlock: 2844 mutex_unlock(&obj->mm.lock); 2845 return ptr; 2846 2847 err_unpin: 2848 atomic_dec(&obj->mm.pages_pin_count); 2849 err_unlock: 2850 ptr = ERR_PTR(ret); 2851 goto out_unlock; 2852 } 2853 2854 static int 2855 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, 2856 const struct drm_i915_gem_pwrite *arg) 2857 { 2858 #ifndef __DragonFly__ 2859 struct address_space *mapping = obj->base.filp->f_mapping; 2860 #else 2861 struct vm_object *mapping = obj->base.filp; 2862 #endif 2863 char __user *user_data = u64_to_user_ptr(arg->data_ptr); 2864 u64 remain, offset; 2865 unsigned int pg; 2866 2867 /* Before we instantiate/pin the backing store for our use, we 2868 * can prepopulate the shmemfs filp efficiently using a write into 2869 * the pagecache. We avoid the penalty of instantiating all the 2870 * pages, important if the user is just writing to a few and never 2871 * uses the object on the GPU, and using a direct write into shmemfs 2872 * allows it to avoid the cost of retrieving a page (either swapin 2873 * or clearing-before-use) before it is overwritten. 2874 */ 2875 if (READ_ONCE(obj->mm.pages)) 2876 return -ENODEV; 2877 2878 /* Before the pages are instantiated the object is treated as being 2879 * in the CPU domain. The pages will be clflushed as required before 2880 * use, and we can freely write into the pages directly. If userspace 2881 * races pwrite with any other operation; corruption will ensue - 2882 * that is userspace's prerogative! 
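	 *
	 * The loop that follows simply walks the destination a page at a
	 * time: look up (or instantiate) the shmem page, kmap() it, copy
	 * the user data in and release the page. On DragonFly,
	 * shmem_read_mapping_page() stands in for the Linux
	 * pagecache_write_begin()/pagecache_write_end() pair.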
2883 */ 2884 2885 remain = arg->size; 2886 offset = arg->offset; 2887 pg = offset_in_page(offset); 2888 2889 do { 2890 unsigned int len, unwritten; 2891 struct page *page; 2892 void *vaddr; 2893 #if 0 2894 void *data, *vaddr; 2895 int err; 2896 #endif 2897 2898 len = PAGE_SIZE - pg; 2899 if (len > remain) 2900 len = remain; 2901 2902 #ifndef __DragonFly__ 2903 err = pagecache_write_begin(obj->base.filp, mapping, 2904 offset, len, 0, 2905 &page, &data); 2906 if (err < 0) 2907 return err; 2908 #else 2909 page = shmem_read_mapping_page(mapping, OFF_TO_IDX(offset)); 2910 #endif 2911 2912 vaddr = kmap(page); 2913 unwritten = copy_from_user(vaddr + pg, user_data, len); 2914 kunmap(page); 2915 2916 #ifndef __DragonFly__ 2917 err = pagecache_write_end(obj->base.filp, mapping, 2918 offset, len, len - unwritten, 2919 page, data); 2920 if (err < 0) 2921 return err; 2922 #else 2923 put_page(page); 2924 #endif 2925 2926 if (unwritten) 2927 return -EFAULT; 2928 2929 remain -= len; 2930 user_data += len; 2931 offset += len; 2932 pg = 0; 2933 } while (remain); 2934 2935 return 0; 2936 } 2937 2938 static bool ban_context(const struct i915_gem_context *ctx) 2939 { 2940 return (i915_gem_context_is_bannable(ctx) && 2941 ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD); 2942 } 2943 2944 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) 2945 { 2946 ctx->guilty_count++; 2947 ctx->ban_score += CONTEXT_SCORE_GUILTY; 2948 if (ban_context(ctx)) 2949 i915_gem_context_set_banned(ctx); 2950 2951 DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n", 2952 ctx->name, ctx->ban_score, 2953 yesno(i915_gem_context_is_banned(ctx))); 2954 2955 if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv)) 2956 return; 2957 2958 ctx->file_priv->context_bans++; 2959 DRM_DEBUG_DRIVER("client %s has had %d context banned\n", 2960 ctx->name, ctx->file_priv->context_bans); 2961 } 2962 2963 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) 2964 { 2965 ctx->active_count++; 2966 } 2967 2968 struct drm_i915_gem_request * 2969 i915_gem_find_active_request(struct intel_engine_cs *engine) 2970 { 2971 struct drm_i915_gem_request *request, *active = NULL; 2972 unsigned long flags; 2973 2974 /* We are called by the error capture and reset at a random 2975 * point in time. In particular, note that neither is crucially 2976 * ordered with an interrupt. After a hang, the GPU is dead and we 2977 * assume that no more writes can happen (we waited long enough for 2978 * all writes that were in transaction to be flushed) - adding an 2979 * extra delay for a recent interrupt is pointless. Hence, we do 2980 * not need an engine->irq_seqno_barrier() before the seqno reads. 
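	 *
	 * The walk below therefore only takes the engine timeline lock and
	 * returns the first request on the timeline that has not yet
	 * completed - our best guess at what the engine was executing when
	 * it hung.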
2981 */ 2982 spin_lock_irqsave(&engine->timeline->lock, flags); 2983 list_for_each_entry(request, &engine->timeline->requests, link) { 2984 if (__i915_gem_request_completed(request, 2985 request->global_seqno)) 2986 continue; 2987 2988 GEM_BUG_ON(request->engine != engine); 2989 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 2990 &request->fence.flags)); 2991 2992 active = request; 2993 break; 2994 } 2995 spin_unlock_irqrestore(&engine->timeline->lock, flags); 2996 2997 return active; 2998 } 2999 3000 static bool engine_stalled(struct intel_engine_cs *engine) 3001 { 3002 if (!engine->hangcheck.stalled) 3003 return false; 3004 3005 /* Check for possible seqno movement after hang declaration */ 3006 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { 3007 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); 3008 return false; 3009 } 3010 3011 return true; 3012 } 3013 3014 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) 3015 { 3016 struct intel_engine_cs *engine; 3017 enum intel_engine_id id; 3018 int err = 0; 3019 3020 /* Ensure irq handler finishes, and not run again. */ 3021 for_each_engine(engine, dev_priv, id) { 3022 struct drm_i915_gem_request *request; 3023 3024 /* Prevent the signaler thread from updating the request 3025 * state (by calling dma_fence_signal) as we are processing 3026 * the reset. The write from the GPU of the seqno is 3027 * asynchronous and the signaler thread may see a different 3028 * value to us and declare the request complete, even though 3029 * the reset routine have picked that request as the active 3030 * (incomplete) request. This conflict is not handled 3031 * gracefully! 3032 */ 3033 kthread_park(engine->breadcrumbs.signaler); 3034 3035 /* Prevent request submission to the hardware until we have 3036 * completed the reset in i915_gem_reset_finish(). If a request 3037 * is completed by one engine, it may then queue a request 3038 * to a second via its engine->irq_tasklet *just* as we are 3039 * calling engine->init_hw() and also writing the ELSP. 3040 * Turning off the engine->irq_tasklet until the reset is over 3041 * prevents the race. 3042 */ 3043 tasklet_kill(&engine->irq_tasklet); 3044 tasklet_disable(&engine->irq_tasklet); 3045 3046 if (engine->irq_seqno_barrier) 3047 engine->irq_seqno_barrier(engine); 3048 3049 if (engine_stalled(engine)) { 3050 request = i915_gem_find_active_request(engine); 3051 if (request && request->fence.error == -EIO) 3052 err = -EIO; /* Previous reset failed! */ 3053 } 3054 } 3055 3056 i915_gem_revoke_fences(dev_priv); 3057 3058 return err; 3059 } 3060 3061 static void skip_request(struct drm_i915_gem_request *request) 3062 { 3063 void *vaddr = request->ring->vaddr; 3064 u32 head; 3065 3066 /* As this request likely depends on state from the lost 3067 * context, clear out all the user operations leaving the 3068 * breadcrumb at the end (so we get the fence notifications). 
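	 *
	 * Note the two memset()s below: when the payload wraps past the end
	 * of the ring (request->postfix < head) we zero from head to the
	 * end of the ring first and then from the start of the ring up to
	 * the postfix.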
3069 */ 3070 head = request->head; 3071 if (request->postfix < head) { 3072 memset(vaddr + head, 0, request->ring->size - head); 3073 head = 0; 3074 } 3075 memset(vaddr + head, 0, request->postfix - head); 3076 3077 dma_fence_set_error(&request->fence, -EIO); 3078 } 3079 3080 static void engine_skip_context(struct drm_i915_gem_request *request) 3081 { 3082 struct intel_engine_cs *engine = request->engine; 3083 struct i915_gem_context *hung_ctx = request->ctx; 3084 struct intel_timeline *timeline; 3085 unsigned long flags; 3086 3087 timeline = i915_gem_context_lookup_timeline(hung_ctx, engine); 3088 3089 spin_lock_irqsave(&engine->timeline->lock, flags); 3090 lockmgr(&timeline->lock, LK_EXCLUSIVE); 3091 3092 list_for_each_entry_continue(request, &engine->timeline->requests, link) 3093 if (request->ctx == hung_ctx) 3094 skip_request(request); 3095 3096 list_for_each_entry(request, &timeline->requests, link) 3097 skip_request(request); 3098 3099 lockmgr(&timeline->lock, LK_RELEASE); 3100 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3101 } 3102 3103 /* Returns true if the request was guilty of hang */ 3104 static bool i915_gem_reset_request(struct drm_i915_gem_request *request) 3105 { 3106 /* Read once and return the resolution */ 3107 const bool guilty = engine_stalled(request->engine); 3108 3109 /* The guilty request will get skipped on a hung engine. 3110 * 3111 * Users of client default contexts do not rely on logical 3112 * state preserved between batches so it is safe to execute 3113 * queued requests following the hang. Non default contexts 3114 * rely on preserved state, so skipping a batch loses the 3115 * evolution of the state and it needs to be considered corrupted. 3116 * Executing more queued batches on top of corrupted state is 3117 * risky. But we take the risk by trying to advance through 3118 * the queued requests in order to make the client behaviour 3119 * more predictable around resets, by not throwing away random 3120 * amount of batches it has prepared for execution. Sophisticated 3121 * clients can use gem_reset_stats_ioctl and dma fence status 3122 * (exported via sync_file info ioctl on explicit fences) to observe 3123 * when it loses the context state and should rebuild accordingly. 3124 * 3125 * The context ban, and ultimately the client ban, mechanism are safety 3126 * valves if client submission ends up resulting in nothing more than 3127 * subsequent hangs. 3128 */ 3129 3130 if (guilty) { 3131 i915_gem_context_mark_guilty(request->ctx); 3132 skip_request(request); 3133 } else { 3134 i915_gem_context_mark_innocent(request->ctx); 3135 dma_fence_set_error(&request->fence, -EAGAIN); 3136 } 3137 3138 return guilty; 3139 } 3140 3141 static void i915_gem_reset_engine(struct intel_engine_cs *engine) 3142 { 3143 struct drm_i915_gem_request *request; 3144 3145 request = i915_gem_find_active_request(engine); 3146 if (request && i915_gem_reset_request(request)) { 3147 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", 3148 engine->name, request->global_seqno); 3149 3150 /* If this context is now banned, skip all pending requests. 
*/ 3151 if (i915_gem_context_is_banned(request->ctx)) 3152 engine_skip_context(request); 3153 } 3154 3155 /* Setup the CS to resume from the breadcrumb of the hung request */ 3156 engine->reset_hw(engine, request); 3157 } 3158 3159 void i915_gem_reset(struct drm_i915_private *dev_priv) 3160 { 3161 struct intel_engine_cs *engine; 3162 enum intel_engine_id id; 3163 3164 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3165 3166 i915_gem_retire_requests(dev_priv); 3167 3168 for_each_engine(engine, dev_priv, id) { 3169 struct i915_gem_context *ctx; 3170 3171 i915_gem_reset_engine(engine); 3172 ctx = fetch_and_zero(&engine->last_retired_context); 3173 if (ctx) 3174 engine->context_unpin(engine, ctx); 3175 } 3176 3177 i915_gem_restore_fences(dev_priv); 3178 3179 if (dev_priv->gt.awake) { 3180 intel_sanitize_gt_powersave(dev_priv); 3181 intel_enable_gt_powersave(dev_priv); 3182 if (INTEL_GEN(dev_priv) >= 6) 3183 gen6_rps_busy(dev_priv); 3184 } 3185 } 3186 3187 void i915_gem_reset_finish(struct drm_i915_private *dev_priv) 3188 { 3189 struct intel_engine_cs *engine; 3190 enum intel_engine_id id; 3191 3192 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3193 3194 for_each_engine(engine, dev_priv, id) { 3195 tasklet_enable(&engine->irq_tasklet); 3196 kthread_unpark(engine->breadcrumbs.signaler); 3197 } 3198 } 3199 3200 static void nop_submit_request(struct drm_i915_gem_request *request) 3201 { 3202 dma_fence_set_error(&request->fence, -EIO); 3203 i915_gem_request_submit(request); 3204 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3205 } 3206 3207 static void engine_set_wedged(struct intel_engine_cs *engine) 3208 { 3209 struct drm_i915_gem_request *request; 3210 unsigned long flags; 3211 3212 /* We need to be sure that no thread is running the old callback as 3213 * we install the nop handler (otherwise we would submit a request 3214 * to hardware that will never complete). In order to prevent this 3215 * race, we wait until the machine is idle before making the swap 3216 * (using stop_machine()). 3217 */ 3218 engine->submit_request = nop_submit_request; 3219 3220 /* Mark all executing requests as skipped */ 3221 spin_lock_irqsave(&engine->timeline->lock, flags); 3222 list_for_each_entry(request, &engine->timeline->requests, link) 3223 dma_fence_set_error(&request->fence, -EIO); 3224 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3225 3226 /* Mark all pending requests as complete so that any concurrent 3227 * (lockless) lookup doesn't try and wait upon the request as we 3228 * reset it. 3229 */ 3230 intel_engine_init_global_seqno(engine, 3231 intel_engine_last_submit(engine)); 3232 3233 /* 3234 * Clear the execlists queue up before freeing the requests, as those 3235 * are the ones that keep the context and ringbuffer backing objects 3236 * pinned in place. 
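	 *
	 * Concretely that means dropping the references held by both
	 * execlist ports and resetting the execlist queue/first pointers
	 * under the timeline lock, as done just below.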
3237 */ 3238 3239 if (i915.enable_execlists) { 3240 unsigned long flags; 3241 3242 spin_lock_irqsave(&engine->timeline->lock, flags); 3243 3244 i915_gem_request_put(engine->execlist_port[0].request); 3245 i915_gem_request_put(engine->execlist_port[1].request); 3246 memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); 3247 engine->execlist_queue = LINUX_RB_ROOT; 3248 engine->execlist_first = NULL; 3249 3250 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3251 } 3252 } 3253 3254 static int __i915_gem_set_wedged_BKL(void *data) 3255 { 3256 struct drm_i915_private *i915 = data; 3257 struct intel_engine_cs *engine; 3258 enum intel_engine_id id; 3259 3260 for_each_engine(engine, i915, id) 3261 engine_set_wedged(engine); 3262 3263 return 0; 3264 } 3265 3266 void i915_gem_set_wedged(struct drm_i915_private *dev_priv) 3267 { 3268 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3269 set_bit(I915_WEDGED, &dev_priv->gpu_error.flags); 3270 3271 /* Retire completed requests first so the list of inflight/incomplete 3272 * requests is accurate and we don't try and mark successful requests 3273 * as in error during __i915_gem_set_wedged_BKL(). 3274 */ 3275 i915_gem_retire_requests(dev_priv); 3276 3277 stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL); 3278 3279 i915_gem_context_lost(dev_priv); 3280 3281 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); 3282 } 3283 3284 bool i915_gem_unset_wedged(struct drm_i915_private *i915) 3285 { 3286 struct i915_gem_timeline *tl; 3287 int i; 3288 3289 lockdep_assert_held(&i915->drm.struct_mutex); 3290 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) 3291 return true; 3292 3293 /* Before unwedging, make sure that all pending operations 3294 * are flushed and errored out - we may have requests waiting upon 3295 * third party fences. We marked all inflight requests as EIO, and 3296 * every execbuf since returned EIO, for consistency we want all 3297 * the currently pending requests to also be marked as EIO, which 3298 * is done inside our nop_submit_request - and so we must wait. 3299 * 3300 * No more can be submitted until we reset the wedged bit. 3301 */ 3302 list_for_each_entry(tl, &i915->gt.timelines, link) { 3303 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3304 struct drm_i915_gem_request *rq; 3305 3306 rq = i915_gem_active_peek(&tl->engine[i].last_request, 3307 &i915->drm.struct_mutex); 3308 if (!rq) 3309 continue; 3310 3311 /* We can't use our normal waiter as we want to 3312 * avoid recursively trying to handle the current 3313 * reset. The basic dma_fence_default_wait() installs 3314 * a callback for dma_fence_signal(), which is 3315 * triggered by our nop handler (indirectly, the 3316 * callback enables the signaler thread which is 3317 * woken by the nop_submit_request() advancing the seqno 3318 * and when the seqno passes the fence, the signaler 3319 * then signals the fence waking us up). 3320 */ 3321 if (dma_fence_default_wait(&rq->fence, true, 3322 MAX_SCHEDULE_TIMEOUT) < 0) 3323 return false; 3324 } 3325 } 3326 3327 /* Undo nop_submit_request. We prevent all new i915 requests from 3328 * being queued (by disallowing execbuf whilst wedged) so having 3329 * waited for all active requests above, we know the system is idle 3330 * and do not have to worry about a thread being inside 3331 * engine->submit_request() as we swap over. So unlike installing 3332 * the nop_submit_request on reset, we can do this from normal 3333 * context and do not require stop_machine(). 
3334 */ 3335 intel_engines_reset_default_submission(i915); 3336 3337 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ 3338 clear_bit(I915_WEDGED, &i915->gpu_error.flags); 3339 3340 return true; 3341 } 3342 3343 static void 3344 i915_gem_retire_work_handler(struct work_struct *work) 3345 { 3346 struct drm_i915_private *dev_priv = 3347 container_of(work, typeof(*dev_priv), gt.retire_work.work); 3348 struct drm_device *dev = &dev_priv->drm; 3349 3350 /* Come back later if the device is busy... */ 3351 if (mutex_trylock(&dev->struct_mutex)) { 3352 i915_gem_retire_requests(dev_priv); 3353 mutex_unlock(&dev->struct_mutex); 3354 } 3355 3356 /* Keep the retire handler running until we are finally idle. 3357 * We do not need to do this test under locking as in the worst-case 3358 * we queue the retire worker once too often. 3359 */ 3360 if (READ_ONCE(dev_priv->gt.awake)) { 3361 i915_queue_hangcheck(dev_priv); 3362 queue_delayed_work(dev_priv->wq, 3363 &dev_priv->gt.retire_work, 3364 round_jiffies_up_relative(HZ)); 3365 } 3366 } 3367 3368 static void 3369 i915_gem_idle_work_handler(struct work_struct *work) 3370 { 3371 struct drm_i915_private *dev_priv = 3372 container_of(work, typeof(*dev_priv), gt.idle_work.work); 3373 struct drm_device *dev = &dev_priv->drm; 3374 struct intel_engine_cs *engine; 3375 enum intel_engine_id id; 3376 bool rearm_hangcheck; 3377 3378 if (!READ_ONCE(dev_priv->gt.awake)) 3379 return; 3380 3381 /* 3382 * Wait for last execlists context complete, but bail out in case a 3383 * new request is submitted. 3384 */ 3385 wait_for(intel_engines_are_idle(dev_priv), 10); 3386 if (READ_ONCE(dev_priv->gt.active_requests)) 3387 return; 3388 3389 rearm_hangcheck = 3390 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 3391 3392 if (!mutex_trylock(&dev->struct_mutex)) { 3393 /* Currently busy, come back later */ 3394 mod_delayed_work(dev_priv->wq, 3395 &dev_priv->gt.idle_work, 3396 msecs_to_jiffies(50)); 3397 goto out_rearm; 3398 } 3399 3400 /* 3401 * New request retired after this work handler started, extend active 3402 * period until next instance of the work. 
3403 */ 3404 if (work_pending(work)) 3405 goto out_unlock; 3406 3407 if (dev_priv->gt.active_requests) 3408 goto out_unlock; 3409 3410 if (wait_for(intel_engines_are_idle(dev_priv), 10)) 3411 DRM_ERROR("Timeout waiting for engines to idle\n"); 3412 3413 for_each_engine(engine, dev_priv, id) { 3414 intel_engine_disarm_breadcrumbs(engine); 3415 i915_gem_batch_pool_fini(&engine->batch_pool); 3416 } 3417 3418 GEM_BUG_ON(!dev_priv->gt.awake); 3419 dev_priv->gt.awake = false; 3420 rearm_hangcheck = false; 3421 3422 if (INTEL_GEN(dev_priv) >= 6) 3423 gen6_rps_idle(dev_priv); 3424 intel_runtime_pm_put(dev_priv); 3425 out_unlock: 3426 mutex_unlock(&dev->struct_mutex); 3427 3428 out_rearm: 3429 if (rearm_hangcheck) { 3430 GEM_BUG_ON(!dev_priv->gt.awake); 3431 i915_queue_hangcheck(dev_priv); 3432 } 3433 } 3434 3435 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) 3436 { 3437 struct drm_i915_gem_object *obj = to_intel_bo(gem); 3438 struct drm_i915_file_private *fpriv = file->driver_priv; 3439 struct i915_vma *vma, *vn; 3440 3441 mutex_lock(&obj->base.dev->struct_mutex); 3442 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link) 3443 if (vma->vm->file == fpriv) 3444 i915_vma_close(vma); 3445 3446 if (i915_gem_object_is_active(obj) && 3447 !i915_gem_object_has_active_reference(obj)) { 3448 i915_gem_object_set_active_reference(obj); 3449 i915_gem_object_get(obj); 3450 } 3451 mutex_unlock(&obj->base.dev->struct_mutex); 3452 } 3453 3454 static unsigned long to_wait_timeout(s64 timeout_ns) 3455 { 3456 if (timeout_ns < 0) 3457 return MAX_SCHEDULE_TIMEOUT; 3458 3459 if (timeout_ns == 0) 3460 return 0; 3461 3462 return nsecs_to_jiffies_timeout(timeout_ns); 3463 } 3464 3465 /** 3466 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 3467 * @dev: drm device pointer 3468 * @data: ioctl data blob 3469 * @file: drm file pointer 3470 * 3471 * Returns 0 if successful, else an error is returned with the remaining time in 3472 * the timeout parameter. 3473 * -ETIME: object is still busy after timeout 3474 * -ERESTARTSYS: signal interrupted the wait 3475 * -ENONENT: object doesn't exist 3476 * Also possible, but rare: 3477 * -EAGAIN: GPU wedged 3478 * -ENOMEM: damn 3479 * -ENODEV: Internal IRQ fail 3480 * -E?: The add request failed 3481 * 3482 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any 3483 * non-zero timeout parameter the wait ioctl will wait for the given number of 3484 * nanoseconds on an object becoming unbusy. Since the wait itself does so 3485 * without holding struct_mutex the object may become re-busied before this 3486 * function completes. 
A similar but shorter * race condition exists in the busy 3487 * ioctl 3488 */ 3489 int 3490 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 3491 { 3492 struct drm_i915_gem_wait *args = data; 3493 struct drm_i915_gem_object *obj; 3494 ktime_t start; 3495 long ret; 3496 3497 if (args->flags != 0) 3498 return -EINVAL; 3499 3500 obj = i915_gem_object_lookup(file, args->bo_handle); 3501 if (!obj) 3502 return -ENOENT; 3503 3504 start = ktime_get(); 3505 3506 ret = i915_gem_object_wait(obj, 3507 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, 3508 to_wait_timeout(args->timeout_ns), 3509 to_rps_client(file)); 3510 3511 if (args->timeout_ns > 0) { 3512 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); 3513 if (args->timeout_ns < 0) 3514 args->timeout_ns = 0; 3515 3516 /* 3517 * Apparently ktime isn't accurate enough and occasionally has a 3518 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch 3519 * things up to make the test happy. We allow up to 1 jiffy. 3520 * 3521 * This is a regression from the timespec->ktime conversion. 3522 */ 3523 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) 3524 args->timeout_ns = 0; 3525 3526 /* 3527 * Apparently ktime isn't accurate enough and occasionally has a 3528 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch 3529 * things up to make the test happy. We allow up to 1 jiffy. 3530 * 3531 * This is a regression from the timespec->ktime conversion. 3532 */ 3533 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) 3534 args->timeout_ns = 0; 3535 } 3536 3537 i915_gem_object_put(obj); 3538 return ret; 3539 } 3540 3541 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) 3542 { 3543 int ret, i; 3544 3545 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3546 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags); 3547 if (ret) 3548 return ret; 3549 } 3550 3551 return 0; 3552 } 3553 3554 static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms) 3555 { 3556 return wait_for(intel_engine_is_idle(engine), timeout_ms); 3557 } 3558 3559 static int wait_for_engines(struct drm_i915_private *i915) 3560 { 3561 struct intel_engine_cs *engine; 3562 enum intel_engine_id id; 3563 3564 for_each_engine(engine, i915, id) { 3565 if (GEM_WARN_ON(wait_for_engine(engine, 50))) { 3566 i915_gem_set_wedged(i915); 3567 return -EIO; 3568 } 3569 3570 GEM_BUG_ON(intel_engine_get_seqno(engine) != 3571 intel_engine_last_submit(engine)); 3572 } 3573 3574 return 0; 3575 } 3576 3577 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) 3578 { 3579 int ret; 3580 3581 /* If the device is asleep, we have no requests outstanding */ 3582 if (!READ_ONCE(i915->gt.awake)) 3583 return 0; 3584 3585 if (flags & I915_WAIT_LOCKED) { 3586 struct i915_gem_timeline *tl; 3587 3588 lockdep_assert_held(&i915->drm.struct_mutex); 3589 3590 list_for_each_entry(tl, &i915->gt.timelines, link) { 3591 ret = wait_for_timeline(tl, flags); 3592 if (ret) 3593 return ret; 3594 } 3595 3596 i915_gem_retire_requests(i915); 3597 GEM_BUG_ON(i915->gt.active_requests); 3598 3599 ret = wait_for_engines(i915); 3600 } else { 3601 ret = wait_for_timeline(&i915->gt.global_timeline, flags); 3602 } 3603 3604 return ret; 3605 } 3606 3607 /** Flushes the GTT write domain for the object if it's dirty. 
*/ 3608 static void 3609 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) 3610 { 3611 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3612 3613 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) 3614 return; 3615 3616 /* No actual flushing is required for the GTT write domain. Writes 3617 * to it "immediately" go to main memory as far as we know, so there's 3618 * no chipset flush. It also doesn't land in render cache. 3619 * 3620 * However, we do have to enforce the order so that all writes through 3621 * the GTT land before any writes to the device, such as updates to 3622 * the GATT itself. 3623 * 3624 * We also have to wait a bit for the writes to land from the GTT. 3625 * An uncached read (i.e. mmio) seems to be ideal for the round-trip 3626 * timing. This issue has only been observed when switching quickly 3627 * between GTT writes and CPU reads from inside the kernel on recent hw, 3628 * and it appears to only affect discrete GTT blocks (i.e. on LLC 3629 * system agents we cannot reproduce this behaviour). 3630 */ 3631 wmb(); 3632 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) { 3633 if (intel_runtime_pm_get_if_in_use(dev_priv)) { 3634 spin_lock_irq(&dev_priv->uncore.lock); 3635 POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); 3636 spin_unlock_irq(&dev_priv->uncore.lock); 3637 intel_runtime_pm_put(dev_priv); 3638 } 3639 } 3640 3641 intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT)); 3642 3643 obj->base.write_domain = 0; 3644 } 3645 3646 /** Flushes the CPU write domain for the object if it's dirty. */ 3647 static void 3648 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) 3649 { 3650 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) 3651 return; 3652 3653 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 3654 obj->base.write_domain = 0; 3655 } 3656 3657 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) 3658 { 3659 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty) 3660 return; 3661 3662 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); 3663 obj->base.write_domain = 0; 3664 } 3665 3666 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) 3667 { 3668 if (!READ_ONCE(obj->pin_display)) 3669 return; 3670 3671 mutex_lock(&obj->base.dev->struct_mutex); 3672 __i915_gem_object_flush_for_display(obj); 3673 mutex_unlock(&obj->base.dev->struct_mutex); 3674 } 3675 3676 /** 3677 * Moves a single object to the GTT read, and possibly write domain. 3678 * @obj: object to act on 3679 * @write: ask for write access or read only 3680 * 3681 * This function returns when the move is complete, including waiting on 3682 * flushes to occur. 3683 */ 3684 int 3685 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3686 { 3687 int ret; 3688 3689 lockdep_assert_held(&obj->base.dev->struct_mutex); 3690 3691 ret = i915_gem_object_wait(obj, 3692 I915_WAIT_INTERRUPTIBLE | 3693 I915_WAIT_LOCKED | 3694 (write ? I915_WAIT_ALL : 0), 3695 MAX_SCHEDULE_TIMEOUT, 3696 NULL); 3697 if (ret) 3698 return ret; 3699 3700 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3701 return 0; 3702 3703 /* Flush and acquire obj->pages so that we are coherent through 3704 * direct access in memory with previous cached writes through 3705 * shmemfs and that our cache domain tracking remains valid. 
3706 * For example, if the obj->filp was moved to swap without us 3707 * being notified and releasing the pages, we would mistakenly 3708 * continue to assume that the obj remained out of the CPU cached 3709 * domain. 3710 */ 3711 ret = i915_gem_object_pin_pages(obj); 3712 if (ret) 3713 return ret; 3714 3715 i915_gem_object_flush_cpu_write_domain(obj); 3716 3717 /* Serialise direct access to this object with the barriers for 3718 * coherent writes from the GPU, by effectively invalidating the 3719 * GTT domain upon first access. 3720 */ 3721 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3722 mb(); 3723 3724 /* It should now be out of any other write domains, and we can update 3725 * the domain values for our changes. 3726 */ 3727 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3728 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3729 if (write) { 3730 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3731 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3732 obj->mm.dirty = true; 3733 } 3734 3735 i915_gem_object_unpin_pages(obj); 3736 return 0; 3737 } 3738 3739 /** 3740 * Changes the cache-level of an object across all VMA. 3741 * @obj: object to act on 3742 * @cache_level: new cache level to set for the object 3743 * 3744 * After this function returns, the object will be in the new cache-level 3745 * across all GTT and the contents of the backing storage will be coherent, 3746 * with respect to the new cache-level. In order to keep the backing storage 3747 * coherent for all users, we only allow a single cache level to be set 3748 * globally on the object and prevent it from being changed whilst the 3749 * hardware is reading from the object. That is if the object is currently 3750 * on the scanout it will be set to uncached (or equivalent display 3751 * cache coherency) and all non-MOCS GPU access will also be uncached so 3752 * that all direct access to the scanout remains coherent. 3753 */ 3754 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3755 enum i915_cache_level cache_level) 3756 { 3757 struct i915_vma *vma; 3758 int ret; 3759 3760 lockdep_assert_held(&obj->base.dev->struct_mutex); 3761 3762 if (obj->cache_level == cache_level) 3763 return 0; 3764 3765 /* Inspect the list of currently bound VMA and unbind any that would 3766 * be invalid given the new cache-level. This is principally to 3767 * catch the issue of the CS prefetch crossing page boundaries and 3768 * reading an invalid PTE on older architectures. 3769 */ 3770 restart: 3771 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3772 if (!drm_mm_node_allocated(&vma->node)) 3773 continue; 3774 3775 if (i915_vma_is_pinned(vma)) { 3776 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3777 return -EBUSY; 3778 } 3779 3780 if (i915_gem_valid_gtt_space(vma, cache_level)) 3781 continue; 3782 3783 ret = i915_vma_unbind(vma); 3784 if (ret) 3785 return ret; 3786 3787 /* As unbinding may affect other elements in the 3788 * obj->vma_list (due to side-effects from retiring 3789 * an active vma), play safe and restart the iterator. 3790 */ 3791 goto restart; 3792 } 3793 3794 /* We can reuse the existing drm_mm nodes but need to change the 3795 * cache-level on the PTE. We could simply unbind them all and 3796 * rebind with the correct cache-level on next use. However since 3797 * we already have a valid slot, dma mapping, pages etc, we may as 3798 * rewrite the PTE in the belief that doing so tramples upon less 3799 * state and so involves less work. 
3800 */ 3801 if (obj->bind_count) { 3802 /* Before we change the PTE, the GPU must not be accessing it. 3803 * If we wait upon the object, we know that all the bound 3804 * VMA are no longer active. 3805 */ 3806 ret = i915_gem_object_wait(obj, 3807 I915_WAIT_INTERRUPTIBLE | 3808 I915_WAIT_LOCKED | 3809 I915_WAIT_ALL, 3810 MAX_SCHEDULE_TIMEOUT, 3811 NULL); 3812 if (ret) 3813 return ret; 3814 3815 if (!HAS_LLC(to_i915(obj->base.dev)) && 3816 cache_level != I915_CACHE_NONE) { 3817 /* Access to snoopable pages through the GTT is 3818 * incoherent and on some machines causes a hard 3819 * lockup. Relinquish the CPU mmaping to force 3820 * userspace to refault in the pages and we can 3821 * then double check if the GTT mapping is still 3822 * valid for that pointer access. 3823 */ 3824 i915_gem_release_mmap(obj); 3825 3826 /* As we no longer need a fence for GTT access, 3827 * we can relinquish it now (and so prevent having 3828 * to steal a fence from someone else on the next 3829 * fence request). Note GPU activity would have 3830 * dropped the fence as all snoopable access is 3831 * supposed to be linear. 3832 */ 3833 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3834 ret = i915_vma_put_fence(vma); 3835 if (ret) 3836 return ret; 3837 } 3838 } else { 3839 /* We either have incoherent backing store and 3840 * so no GTT access or the architecture is fully 3841 * coherent. In such cases, existing GTT mmaps 3842 * ignore the cache bit in the PTE and we can 3843 * rewrite it without confusing the GPU or having 3844 * to force userspace to fault back in its mmaps. 3845 */ 3846 } 3847 3848 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3849 if (!drm_mm_node_allocated(&vma->node)) 3850 continue; 3851 3852 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); 3853 if (ret) 3854 return ret; 3855 } 3856 } 3857 3858 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU && 3859 i915_gem_object_is_coherent(obj)) 3860 obj->cache_dirty = true; 3861 3862 list_for_each_entry(vma, &obj->vma_list, obj_link) 3863 vma->node.color = cache_level; 3864 obj->cache_level = cache_level; 3865 3866 return 0; 3867 } 3868 3869 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 3870 struct drm_file *file) 3871 { 3872 struct drm_i915_gem_caching *args = data; 3873 struct drm_i915_gem_object *obj; 3874 int err = 0; 3875 3876 rcu_read_lock(); 3877 obj = i915_gem_object_lookup_rcu(file, args->handle); 3878 if (!obj) { 3879 err = -ENOENT; 3880 goto out; 3881 } 3882 3883 switch (obj->cache_level) { 3884 case I915_CACHE_LLC: 3885 case I915_CACHE_L3_LLC: 3886 args->caching = I915_CACHING_CACHED; 3887 break; 3888 3889 case I915_CACHE_WT: 3890 args->caching = I915_CACHING_DISPLAY; 3891 break; 3892 3893 default: 3894 args->caching = I915_CACHING_NONE; 3895 break; 3896 } 3897 out: 3898 rcu_read_unlock(); 3899 return err; 3900 } 3901 3902 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 3903 struct drm_file *file) 3904 { 3905 struct drm_i915_private *i915 = to_i915(dev); 3906 struct drm_i915_gem_caching *args = data; 3907 struct drm_i915_gem_object *obj; 3908 enum i915_cache_level level; 3909 int ret = 0; 3910 3911 switch (args->caching) { 3912 case I915_CACHING_NONE: 3913 level = I915_CACHE_NONE; 3914 break; 3915 case I915_CACHING_CACHED: 3916 /* 3917 * Due to a HW issue on BXT A stepping, GPU stores via a 3918 * snooped mapping may leave stale data in a corresponding CPU 3919 * cacheline, whereas normally such cachelines would get 3920 * invalidated. 
3921 */ 3922 if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) 3923 return -ENODEV; 3924 3925 level = I915_CACHE_LLC; 3926 break; 3927 case I915_CACHING_DISPLAY: 3928 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE; 3929 break; 3930 default: 3931 return -EINVAL; 3932 } 3933 3934 obj = i915_gem_object_lookup(file, args->handle); 3935 if (!obj) 3936 return -ENOENT; 3937 3938 if (obj->cache_level == level) 3939 goto out; 3940 3941 ret = i915_gem_object_wait(obj, 3942 I915_WAIT_INTERRUPTIBLE, 3943 MAX_SCHEDULE_TIMEOUT, 3944 to_rps_client(file)); 3945 if (ret) 3946 goto out; 3947 3948 ret = i915_mutex_lock_interruptible(dev); 3949 if (ret) 3950 goto out; 3951 3952 ret = i915_gem_object_set_cache_level(obj, level); 3953 mutex_unlock(&dev->struct_mutex); 3954 3955 out: 3956 i915_gem_object_put(obj); 3957 return ret; 3958 } 3959 3960 /* 3961 * Prepare buffer for display plane (scanout, cursors, etc). 3962 * Can be called from an uninterruptible phase (modesetting) and allows 3963 * any flushes to be pipelined (for pageflips). 3964 */ 3965 struct i915_vma * 3966 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3967 u32 alignment, 3968 const struct i915_ggtt_view *view) 3969 { 3970 struct i915_vma *vma; 3971 int ret; 3972 3973 lockdep_assert_held(&obj->base.dev->struct_mutex); 3974 3975 /* Mark the pin_display early so that we account for the 3976 * display coherency whilst setting up the cache domains. 3977 */ 3978 obj->pin_display++; 3979 3980 /* The display engine is not coherent with the LLC cache on gen6. As 3981 * a result, we make sure that the pinning that is about to occur is 3982 * done with uncached PTEs. This is lowest common denominator for all 3983 * chipsets. 3984 * 3985 * However for gen6+, we could do better by using the GFDT bit instead 3986 * of uncaching, which would allow us to flush all the LLC-cached data 3987 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 3988 */ 3989 ret = i915_gem_object_set_cache_level(obj, 3990 HAS_WT(to_i915(obj->base.dev)) ? 3991 I915_CACHE_WT : I915_CACHE_NONE); 3992 if (ret) { 3993 vma = ERR_PTR(ret); 3994 goto err_unpin_display; 3995 } 3996 3997 /* As the user may map the buffer once pinned in the display plane 3998 * (e.g. libkms for the bootup splash), we have to ensure that we 3999 * always use map_and_fenceable for all scanout buffers. However, 4000 * it may simply be too big to fit into mappable, in which case 4001 * put it anyway and hope that userspace can cope (but always first 4002 * try to preserve the existing ABI). 4003 */ 4004 vma = ERR_PTR(-ENOSPC); 4005 if (!view || view->type == I915_GGTT_VIEW_NORMAL) 4006 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 4007 PIN_MAPPABLE | PIN_NONBLOCK); 4008 if (IS_ERR(vma)) { 4009 struct drm_i915_private *i915 = to_i915(obj->base.dev); 4010 unsigned int flags; 4011 4012 /* Valleyview is definitely limited to scanning out the first 4013 * 512MiB. Lets presume this behaviour was inherited from the 4014 * g4x display engine and that all earlier gen are similarly 4015 * limited. Testing suggests that it is a little more 4016 * complicated than this. For example, Cherryview appears quite 4017 * happy to scanout from anywhere within its global aperture. 
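 *
 * Independent of where the pin ends up, the expected pairing from a
 * modesetting path is (illustrative sketch, error handling elided,
 * struct_mutex held):
 *
 *	vma = i915_gem_object_pin_to_display_plane(obj, align, view);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... scan out from i915_ggtt_offset(vma) ...
 *	i915_gem_object_unpin_from_display_plane(vma);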
4018 */ 4019 flags = 0; 4020 if (HAS_GMCH_DISPLAY(i915)) 4021 flags = PIN_MAPPABLE; 4022 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); 4023 } 4024 if (IS_ERR(vma)) 4025 goto err_unpin_display; 4026 4027 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 4028 4029 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ 4030 __i915_gem_object_flush_for_display(obj); 4031 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 4032 4033 /* It should now be out of any other write domains, and we can update 4034 * the domain values for our changes. 4035 */ 4036 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 4037 4038 return vma; 4039 4040 err_unpin_display: 4041 obj->pin_display--; 4042 return vma; 4043 } 4044 4045 void 4046 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) 4047 { 4048 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 4049 4050 if (WARN_ON(vma->obj->pin_display == 0)) 4051 return; 4052 4053 if (--vma->obj->pin_display == 0) 4054 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 4055 4056 /* Bump the LRU to try and avoid premature eviction whilst flipping */ 4057 i915_gem_object_bump_inactive_ggtt(vma->obj); 4058 4059 i915_vma_unpin(vma); 4060 } 4061 4062 /** 4063 * Moves a single object to the CPU read, and possibly write domain. 4064 * @obj: object to act on 4065 * @write: requesting write or read-only access 4066 * 4067 * This function returns when the move is complete, including waiting on 4068 * flushes to occur. 4069 */ 4070 int 4071 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 4072 { 4073 int ret; 4074 4075 lockdep_assert_held(&obj->base.dev->struct_mutex); 4076 4077 ret = i915_gem_object_wait(obj, 4078 I915_WAIT_INTERRUPTIBLE | 4079 I915_WAIT_LOCKED | 4080 (write ? I915_WAIT_ALL : 0), 4081 MAX_SCHEDULE_TIMEOUT, 4082 NULL); 4083 if (ret) 4084 return ret; 4085 4086 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) 4087 return 0; 4088 4089 i915_gem_object_flush_gtt_write_domain(obj); 4090 4091 /* Flush the CPU cache if it's still invalid. */ 4092 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 4093 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 4094 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 4095 } 4096 4097 /* It should now be out of any other write domains, and we can update 4098 * the domain values for our changes. 4099 */ 4100 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 4101 4102 /* If we're writing through the CPU, then the GPU read domains will 4103 * need to be invalidated at next use. 4104 */ 4105 if (write) { 4106 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4107 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4108 } 4109 4110 return 0; 4111 } 4112 4113 /* Throttle our rendering by waiting until the ring has completed our requests 4114 * emitted over 20 msec ago. 4115 * 4116 * Note that if we were to use the current jiffies each time around the loop, 4117 * we wouldn't escape the function with any frames outstanding if the time to 4118 * render a frame was over 20ms. 4119 * 4120 * This should get us reasonable parallelism between CPU and GPU but also 4121 * relatively low latency when blocking on a particular request to finish. 
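 *
 * Userspace reaches this path via the argument-less throttle ioctl,
 * e.g. (illustrative):
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
 *
 * which blocks until the most recent request this client emitted more
 * than DRM_I915_THROTTLE_JIFFIES (20ms) ago has completed.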
4122  */
4123 static int
4124 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4125 {
4126 	struct drm_i915_private *dev_priv = to_i915(dev);
4127 	struct drm_i915_file_private *file_priv = file->driver_priv;
4128 	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4129 	struct drm_i915_gem_request *request, *target = NULL;
4130 	long ret;
4131 
4132 	/* ABI: return -EIO if already wedged */
4133 	if (i915_terminally_wedged(&dev_priv->gpu_error))
4134 		return -EIO;
4135 
4136 	lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE);
4137 	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
4138 		if (time_after_eq(request->emitted_jiffies, recent_enough))
4139 			break;
4140 
4141 		if (target) {
4142 			list_del(&target->client_link);
4143 			target->file_priv = NULL;
4144 		}
4145 
4146 		target = request;
4147 	}
4148 	if (target)
4149 		i915_gem_request_get(target);
4150 	lockmgr(&file_priv->mm.lock, LK_RELEASE);
4151 
4152 	if (target == NULL)
4153 		return 0;
4154 
4155 	ret = i915_wait_request(target,
4156 				I915_WAIT_INTERRUPTIBLE,
4157 				MAX_SCHEDULE_TIMEOUT);
4158 	i915_gem_request_put(target);
4159 
4160 	return ret < 0 ? ret : 0;
4161 }
4162 
4163 struct i915_vma *
4164 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4165 			 const struct i915_ggtt_view *view,
4166 			 u64 size,
4167 			 u64 alignment,
4168 			 u64 flags)
4169 {
4170 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
4171 	struct i915_address_space *vm = &dev_priv->ggtt.base;
4172 	struct i915_vma *vma;
4173 	int ret;
4174 
4175 	lockdep_assert_held(&obj->base.dev->struct_mutex);
4176 
4177 	vma = i915_vma_instance(obj, vm, view);
4178 	if (unlikely(IS_ERR(vma)))
4179 		return vma;
4180 
4181 	if (i915_vma_misplaced(vma, size, alignment, flags)) {
4182 		if (flags & PIN_NONBLOCK &&
4183 		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
4184 			return ERR_PTR(-ENOSPC);
4185 
4186 		if (flags & PIN_MAPPABLE) {
4187 			/* If the required space is larger than the available
4188 			 * aperture, we will not be able to find a slot for the
4189 			 * object and unbinding the object now will be in
4190 			 * vain. Worse, doing so may cause us to ping-pong
4191 			 * the object in and out of the Global GTT and
4192 			 * waste a lot of cycles under the mutex.
4193 			 */
4194 			if (vma->fence_size > dev_priv->ggtt.mappable_end)
4195 				return ERR_PTR(-E2BIG);
4196 
4197 			/* If NONBLOCK is set the caller is optimistically
4198 			 * trying to cache the full object within the mappable
4199 			 * aperture, and *must* have a fallback in place for
4200 			 * situations where we cannot bind the object. We
4201 			 * can be a little more lax here and use the fallback
4202 			 * more often to avoid costly migrations of ourselves
4203 			 * and other objects within the aperture.
4204 			 *
4205 			 * Half-the-aperture is used as a simple heuristic.
4206 			 * More interesting would be to do a search for a free
4207 			 * block prior to making the commitment to unbind.
4208 			 * That caters for the self-harm case, and with a
4209 			 * little more heuristics (e.g. NOFAULT, NOEVICT)
4210 			 * we could try to minimise harm to others.
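 *
 * Worked example (numbers purely illustrative): with a 256 MiB
 * mappable aperture, a PIN_MAPPABLE | PIN_NONBLOCK request for an
 * object whose fence_size is 192 MiB fails the check below
 * (192 MiB > 256 MiB / 2) and returns -ENOSPC, leaving the caller
 * to fall back to a less restrictive pin, as
 * i915_gem_object_pin_to_display_plane() does above.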
4211 */ 4212 if (flags & PIN_NONBLOCK && 4213 vma->fence_size > dev_priv->ggtt.mappable_end / 2) 4214 return ERR_PTR(-ENOSPC); 4215 } 4216 4217 WARN(i915_vma_is_pinned(vma), 4218 "bo is already pinned in ggtt with incorrect alignment:" 4219 " offset=%08x, req.alignment=%llx," 4220 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", 4221 i915_ggtt_offset(vma), alignment, 4222 !!(flags & PIN_MAPPABLE), 4223 i915_vma_is_map_and_fenceable(vma)); 4224 ret = i915_vma_unbind(vma); 4225 if (ret) 4226 return ERR_PTR(ret); 4227 } 4228 4229 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); 4230 if (ret) 4231 return ERR_PTR(ret); 4232 4233 return vma; 4234 } 4235 4236 static __always_inline unsigned int __busy_read_flag(unsigned int id) 4237 { 4238 /* Note that we could alias engines in the execbuf API, but 4239 * that would be very unwise as it prevents userspace from 4240 * fine control over engine selection. Ahem. 4241 * 4242 * This should be something like EXEC_MAX_ENGINE instead of 4243 * I915_NUM_ENGINES. 4244 */ 4245 BUILD_BUG_ON(I915_NUM_ENGINES > 16); 4246 return 0x10000 << id; 4247 } 4248 4249 static __always_inline unsigned int __busy_write_id(unsigned int id) 4250 { 4251 /* The uABI guarantees an active writer is also amongst the read 4252 * engines. This would be true if we accessed the activity tracking 4253 * under the lock, but as we perform the lookup of the object and 4254 * its activity locklessly we can not guarantee that the last_write 4255 * being active implies that we have set the same engine flag from 4256 * last_read - hence we always set both read and write busy for 4257 * last_write. 4258 */ 4259 return id | __busy_read_flag(id); 4260 } 4261 4262 #pragma GCC diagnostic push 4263 #pragma GCC diagnostic ignored "-Wdiscarded-qualifiers" 4264 4265 static __always_inline unsigned int 4266 __busy_set_if_active(const struct dma_fence *fence, 4267 unsigned int (*flag)(unsigned int id)) 4268 { 4269 struct drm_i915_gem_request *rq; 4270 4271 /* We have to check the current hw status of the fence as the uABI 4272 * guarantees forward progress. We could rely on the idle worker 4273 * to eventually flush us, but to minimise latency just ask the 4274 * hardware. 4275 * 4276 * Note we only report on the status of native fences. 4277 */ 4278 if (!dma_fence_is_i915(fence)) 4279 return 0; 4280 4281 /* opencode to_request() in order to avoid const warnings */ 4282 rq = container_of(fence, struct drm_i915_gem_request, fence); 4283 if (i915_gem_request_completed(rq)) 4284 return 0; 4285 4286 return flag(rq->engine->exec_id); 4287 } 4288 #pragma GCC diagnostic pop 4289 4290 static __always_inline unsigned int 4291 busy_check_reader(const struct dma_fence *fence) 4292 { 4293 return __busy_set_if_active(fence, __busy_read_flag); 4294 } 4295 4296 static __always_inline unsigned int 4297 busy_check_writer(const struct dma_fence *fence) 4298 { 4299 if (!fence) 4300 return 0; 4301 4302 return __busy_set_if_active(fence, __busy_write_id); 4303 } 4304 4305 int 4306 i915_gem_busy_ioctl(struct drm_device *dev, void *data, 4307 struct drm_file *file) 4308 { 4309 struct drm_i915_gem_busy *args = data; 4310 struct drm_i915_gem_object *obj; 4311 struct reservation_object_list *list; 4312 unsigned int seq; 4313 int err; 4314 4315 err = -ENOENT; 4316 rcu_read_lock(); 4317 obj = i915_gem_object_lookup_rcu(file, args->handle); 4318 if (!obj) 4319 goto out; 4320 4321 /* A discrepancy here is that we do not report the status of 4322 * non-i915 fences, i.e. 
even though we may report the object as idle, 4323 * a call to set-domain may still stall waiting for foreign rendering. 4324 * This also means that wait-ioctl may report an object as busy, 4325 * where busy-ioctl considers it idle. 4326 * 4327 * We trade the ability to warn of foreign fences to report on which 4328 * i915 engines are active for the object. 4329 * 4330 * Alternatively, we can trade that extra information on read/write 4331 * activity with 4332 * args->busy = 4333 * !reservation_object_test_signaled_rcu(obj->resv, true); 4334 * to report the overall busyness. This is what the wait-ioctl does. 4335 * 4336 */ 4337 retry: 4338 seq = raw_read_seqcount(&obj->resv->seq); 4339 4340 /* Translate the exclusive fence to the READ *and* WRITE engine */ 4341 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); 4342 4343 /* Translate shared fences to READ set of engines */ 4344 list = rcu_dereference(obj->resv->fence); 4345 if (list) { 4346 unsigned int shared_count = list->shared_count, i; 4347 4348 for (i = 0; i < shared_count; ++i) { 4349 struct dma_fence *fence = 4350 rcu_dereference(list->shared[i]); 4351 4352 args->busy |= busy_check_reader(fence); 4353 } 4354 } 4355 4356 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) 4357 goto retry; 4358 4359 err = 0; 4360 out: 4361 rcu_read_unlock(); 4362 return err; 4363 } 4364 4365 int 4366 i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 4367 struct drm_file *file_priv) 4368 { 4369 return i915_gem_ring_throttle(dev, file_priv); 4370 } 4371 4372 int 4373 i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4374 struct drm_file *file_priv) 4375 { 4376 struct drm_i915_private *dev_priv = to_i915(dev); 4377 struct drm_i915_gem_madvise *args = data; 4378 struct drm_i915_gem_object *obj; 4379 int err; 4380 4381 switch (args->madv) { 4382 case I915_MADV_DONTNEED: 4383 case I915_MADV_WILLNEED: 4384 break; 4385 default: 4386 return -EINVAL; 4387 } 4388 4389 obj = i915_gem_object_lookup(file_priv, args->handle); 4390 if (!obj) 4391 return -ENOENT; 4392 4393 err = mutex_lock_interruptible(&obj->mm.lock); 4394 if (err) 4395 goto out; 4396 4397 if (obj->mm.pages && 4398 i915_gem_object_is_tiled(obj) && 4399 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 4400 if (obj->mm.madv == I915_MADV_WILLNEED) { 4401 GEM_BUG_ON(!obj->mm.quirked); 4402 __i915_gem_object_unpin_pages(obj); 4403 obj->mm.quirked = false; 4404 } 4405 if (args->madv == I915_MADV_WILLNEED) { 4406 GEM_BUG_ON(obj->mm.quirked); 4407 __i915_gem_object_pin_pages(obj); 4408 obj->mm.quirked = true; 4409 } 4410 } 4411 4412 if (obj->mm.madv != __I915_MADV_PURGED) 4413 obj->mm.madv = args->madv; 4414 4415 /* if the object is no longer attached, discard its backing storage */ 4416 if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages) 4417 i915_gem_object_truncate(obj); 4418 4419 args->retained = obj->mm.madv != __I915_MADV_PURGED; 4420 mutex_unlock(&obj->mm.lock); 4421 4422 out: 4423 i915_gem_object_put(obj); 4424 return err; 4425 } 4426 4427 static void 4428 frontbuffer_retire(struct i915_gem_active *active, 4429 struct drm_i915_gem_request *request) 4430 { 4431 struct drm_i915_gem_object *obj = 4432 container_of(active, typeof(*obj), frontbuffer_write); 4433 4434 intel_fb_obj_flush(obj, ORIGIN_CS); 4435 } 4436 4437 void i915_gem_object_init(struct drm_i915_gem_object *obj, 4438 const struct drm_i915_gem_object_ops *ops) 4439 { 4440 lockinit(&obj->mm.lock, "i9goml", 0, LK_CANRECURSE); 4441 4442 INIT_LIST_HEAD(&obj->global_link); 4443 
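	/* These list heads all start empty; the object is only linked onto
	 * the corresponding driver lists later, as it acquires pages, GGTT
	 * bindings, mmaps and so on.
	 */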
INIT_LIST_HEAD(&obj->userfault_link); 4444 INIT_LIST_HEAD(&obj->obj_exec_link); 4445 INIT_LIST_HEAD(&obj->vma_list); 4446 INIT_LIST_HEAD(&obj->batch_pool_link); 4447 4448 obj->ops = ops; 4449 4450 reservation_object_init(&obj->__builtin_resv); 4451 obj->resv = &obj->__builtin_resv; 4452 4453 obj->frontbuffer_ggtt_origin = ORIGIN_GTT; 4454 init_request_active(&obj->frontbuffer_write, frontbuffer_retire); 4455 4456 obj->mm.madv = I915_MADV_WILLNEED; 4457 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); 4458 lockinit(&obj->mm.get_page.lock, "i915ogpl", 0, LK_CANRECURSE); 4459 4460 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); 4461 } 4462 4463 static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4464 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | 4465 I915_GEM_OBJECT_IS_SHRINKABLE, 4466 4467 .get_pages = i915_gem_object_get_pages_gtt, 4468 .put_pages = i915_gem_object_put_pages_gtt, 4469 4470 .pwrite = i915_gem_object_pwrite_gtt, 4471 }; 4472 4473 struct drm_i915_gem_object * 4474 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) 4475 { 4476 struct drm_i915_gem_object *obj; 4477 #if 0 4478 struct address_space *mapping; 4479 #endif 4480 gfp_t mask; 4481 int ret; 4482 4483 /* There is a prevalence of the assumption that we fit the object's 4484 * page count inside a 32bit _signed_ variable. Let's document this and 4485 * catch if we ever need to fix it. In the meantime, if you do spot 4486 * such a local variable, please consider fixing! 4487 */ 4488 if (WARN_ON(size >> PAGE_SHIFT > INT_MAX)) 4489 return ERR_PTR(-E2BIG); 4490 4491 if (overflows_type(size, obj->base.size)) 4492 return ERR_PTR(-E2BIG); 4493 4494 obj = i915_gem_object_alloc(dev_priv); 4495 if (obj == NULL) 4496 return ERR_PTR(-ENOMEM); 4497 4498 ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size); 4499 if (ret) 4500 goto fail; 4501 4502 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 4503 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { 4504 /* 965gm cannot relocate objects above 4GiB. */ 4505 mask &= ~__GFP_HIGHMEM; 4506 mask |= __GFP_DMA32; 4507 } 4508 4509 #if 0 4510 mapping = obj->base.filp->f_mapping; 4511 mapping_set_gfp_mask(mapping, mask); 4512 #endif 4513 4514 i915_gem_object_init(obj, &i915_gem_object_ops); 4515 4516 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4517 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4518 4519 if (HAS_LLC(dev_priv)) { 4520 /* On some devices, we can have the GPU use the LLC (the CPU 4521 * cache) for about a 10% performance improvement 4522 * compared to uncached. Graphics requests other than 4523 * display scanout are coherent with the CPU in 4524 * accessing this cache. This means in this mode we 4525 * don't need to clflush on the CPU side, and on the 4526 * GPU side we only need to flush internal caches to 4527 * get data visible to the CPU. 4528 * 4529 * However, we maintain the display planes as UC, and so 4530 * need to rebind when first used as such. 4531 */ 4532 obj->cache_level = I915_CACHE_LLC; 4533 } else 4534 obj->cache_level = I915_CACHE_NONE; 4535 4536 trace_i915_gem_object_create(obj); 4537 4538 return obj; 4539 4540 fail: 4541 i915_gem_object_free(obj); 4542 return ERR_PTR(ret); 4543 } 4544 4545 static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4546 { 4547 /* If we are the last user of the backing storage (be it shmemfs 4548 * pages or stolen etc), we know that the pages are going to be 4549 * immediately released. In this case, we can then skip copying 4550 * back the contents from the GPU. 
4551 */ 4552 4553 if (obj->mm.madv != I915_MADV_WILLNEED) 4554 return false; 4555 4556 if (obj->base.filp == NULL) 4557 return true; 4558 4559 /* At first glance, this looks racy, but then again so would be 4560 * userspace racing mmap against close. However, the first external 4561 * reference to the filp can only be obtained through the 4562 * i915_gem_mmap_ioctl() which safeguards us against the user 4563 * acquiring such a reference whilst we are in the middle of 4564 * freeing the object. 4565 */ 4566 #if 0 4567 return atomic_long_read(&obj->base.filp->f_count) == 1; 4568 #else 4569 return false; 4570 #endif 4571 } 4572 4573 static void __i915_gem_free_objects(struct drm_i915_private *i915, 4574 struct llist_node *freed) 4575 { 4576 struct drm_i915_gem_object *obj, *on; 4577 4578 mutex_lock(&i915->drm.struct_mutex); 4579 intel_runtime_pm_get(i915); 4580 llist_for_each_entry_safe(obj, on, freed, freed) { 4581 struct i915_vma *vma, *vn; 4582 4583 trace_i915_gem_object_destroy(obj); 4584 4585 GEM_BUG_ON(i915_gem_object_is_active(obj)); 4586 list_for_each_entry_safe(vma, vn, 4587 &obj->vma_list, obj_link) { 4588 GEM_BUG_ON(!i915_vma_is_ggtt(vma)); 4589 GEM_BUG_ON(i915_vma_is_active(vma)); 4590 vma->flags &= ~I915_VMA_PIN_MASK; 4591 i915_vma_close(vma); 4592 } 4593 GEM_BUG_ON(!list_empty(&obj->vma_list)); 4594 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree)); 4595 4596 list_del(&obj->global_link); 4597 } 4598 intel_runtime_pm_put(i915); 4599 mutex_unlock(&i915->drm.struct_mutex); 4600 4601 llist_for_each_entry_safe(obj, on, freed, freed) { 4602 GEM_BUG_ON(obj->bind_count); 4603 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); 4604 4605 if (obj->ops->release) 4606 obj->ops->release(obj); 4607 4608 #if 0 4609 if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) 4610 #else 4611 if (i915_gem_object_has_pinned_pages(obj)) 4612 #endif 4613 atomic_set(&obj->mm.pages_pin_count, 0); 4614 __i915_gem_object_put_pages(obj, I915_MM_NORMAL); 4615 GEM_BUG_ON(obj->mm.pages); 4616 4617 if (obj->base.import_attach) 4618 drm_prime_gem_destroy(&obj->base, NULL); 4619 4620 reservation_object_fini(&obj->__builtin_resv); 4621 drm_gem_object_release(&obj->base); 4622 i915_gem_info_remove_obj(i915, obj->base.size); 4623 4624 kfree(obj->bit_17); 4625 i915_gem_object_free(obj); 4626 } 4627 } 4628 4629 static void i915_gem_flush_free_objects(struct drm_i915_private *i915) 4630 { 4631 struct llist_node *freed; 4632 4633 freed = llist_del_all(&i915->mm.free_list); 4634 if (unlikely(freed)) 4635 __i915_gem_free_objects(i915, freed); 4636 } 4637 4638 static void __i915_gem_free_work(struct work_struct *work) 4639 { 4640 struct drm_i915_private *i915 = 4641 container_of(work, struct drm_i915_private, mm.free_work); 4642 struct llist_node *freed; 4643 4644 /* All file-owned VMA should have been released by this point through 4645 * i915_gem_close_object(), or earlier by i915_gem_context_close(). 4646 * However, the object may also be bound into the global GTT (e.g. 4647 * older GPUs without per-process support, or for direct access through 4648 * the GTT either for the user or for scanout). Those VMA still need to 4649 * unbound now. 
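 *
 * The overall teardown pipeline is therefore: i915_gem_free_object()
 * (below) queues an RCU callback; after the grace period,
 * __i915_gem_free_object_rcu() pushes the object onto mm.free_list and
 * kicks this worker, which then takes struct_mutex, unbinds the
 * remaining VMA and releases the object's pages and backing storage.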
4650 */ 4651 4652 while ((freed = llist_del_all(&i915->mm.free_list))) 4653 __i915_gem_free_objects(i915, freed); 4654 } 4655 4656 static void __i915_gem_free_object_rcu(struct rcu_head *head) 4657 { 4658 struct drm_i915_gem_object *obj = 4659 container_of(head, typeof(*obj), rcu); 4660 struct drm_i915_private *i915 = to_i915(obj->base.dev); 4661 4662 /* We can't simply use call_rcu() from i915_gem_free_object() 4663 * as we need to block whilst unbinding, and the call_rcu 4664 * task may be called from softirq context. So we take a 4665 * detour through a worker. 4666 */ 4667 if (llist_add(&obj->freed, &i915->mm.free_list)) 4668 schedule_work(&i915->mm.free_work); 4669 } 4670 4671 void i915_gem_free_object(struct drm_gem_object *gem_obj) 4672 { 4673 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4674 4675 if (obj->mm.quirked) 4676 __i915_gem_object_unpin_pages(obj); 4677 4678 if (discard_backing_storage(obj)) 4679 obj->mm.madv = I915_MADV_DONTNEED; 4680 4681 /* Before we free the object, make sure any pure RCU-only 4682 * read-side critical sections are complete, e.g. 4683 * i915_gem_busy_ioctl(). For the corresponding synchronized 4684 * lookup see i915_gem_object_lookup_rcu(). 4685 */ 4686 call_rcu(&obj->rcu, __i915_gem_free_object_rcu); 4687 } 4688 4689 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) 4690 { 4691 lockdep_assert_held(&obj->base.dev->struct_mutex); 4692 4693 GEM_BUG_ON(i915_gem_object_has_active_reference(obj)); 4694 if (i915_gem_object_is_active(obj)) 4695 i915_gem_object_set_active_reference(obj); 4696 else 4697 i915_gem_object_put(obj); 4698 } 4699 4700 static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv) 4701 { 4702 struct intel_engine_cs *engine; 4703 enum intel_engine_id id; 4704 4705 for_each_engine(engine, dev_priv, id) 4706 GEM_BUG_ON(engine->last_retired_context && 4707 !i915_gem_context_is_kernel(engine->last_retired_context)); 4708 } 4709 4710 void i915_gem_sanitize(struct drm_i915_private *i915) 4711 { 4712 /* 4713 * If we inherit context state from the BIOS or earlier occupants 4714 * of the GPU, the GPU may be in an inconsistent state when we 4715 * try to take over. The only way to remove the earlier state 4716 * is by resetting. However, resetting on earlier gen is tricky as 4717 * it may impact the display and we are uncertain about the stability 4718 * of the reset, so we only reset recent machines with logical 4719 * context support (that must be reset to remove any stray contexts). 4720 */ 4721 if (HAS_HW_CONTEXTS(i915)) { 4722 int reset = intel_gpu_reset(i915, ALL_ENGINES); 4723 WARN_ON(reset && reset != -ENODEV); 4724 } 4725 } 4726 4727 int i915_gem_suspend(struct drm_i915_private *dev_priv) 4728 { 4729 struct drm_device *dev = &dev_priv->drm; 4730 int ret; 4731 4732 intel_runtime_pm_get(dev_priv); 4733 intel_suspend_gt_powersave(dev_priv); 4734 4735 mutex_lock(&dev->struct_mutex); 4736 4737 /* We have to flush all the executing contexts to main memory so 4738 * that they can saved in the hibernation image. To ensure the last 4739 * context image is coherent, we have to switch away from it. That 4740 * leaves the dev_priv->kernel_context still active when 4741 * we actually suspend, and its image in memory may not match the GPU 4742 * state. Fortunately, the kernel_context is disposable and we do 4743 * not rely on its state. 
4744  */
4745 	ret = i915_gem_switch_to_kernel_context(dev_priv);
4746 	if (ret)
4747 		goto err_unlock;
4748 
4749 	ret = i915_gem_wait_for_idle(dev_priv,
4750 				     I915_WAIT_INTERRUPTIBLE |
4751 				     I915_WAIT_LOCKED);
4752 	if (ret)
4753 		goto err_unlock;
4754 
4755 	assert_kernel_context_is_current(dev_priv);
4756 	i915_gem_context_lost(dev_priv);
4757 	mutex_unlock(&dev->struct_mutex);
4758 
4759 	intel_guc_suspend(dev_priv);
4760 
4761 	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4762 	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4763 
4764 	/* As the idle_work is rearming if it detects a race, play safe and
4765 	 * repeat the flush until it is definitely idle.
4766 	 */
4767 	while (flush_delayed_work(&dev_priv->gt.idle_work))
4768 		;
4769 
4770 	i915_gem_drain_freed_objects(dev_priv);
4771 
4772 	/* Assert that we successfully flushed all the work and
4773 	 * reset the GPU back to its idle, low power state.
4774 	 */
4775 	WARN_ON(dev_priv->gt.awake);
4776 	WARN_ON(!intel_engines_are_idle(dev_priv));
4777 
4778 	/*
4779 	 * Neither the BIOS, ourselves nor any other kernel
4780 	 * expects the system to be in execlists mode on startup,
4781 	 * so we need to reset the GPU back to legacy mode. And the only
4782 	 * known way to disable logical contexts is through a GPU reset.
4783 	 *
4784 	 * So in order to leave the system in a known default configuration,
4785 	 * always reset the GPU upon unload and suspend. Afterwards we then
4786 	 * clean up the GEM state tracking, flushing off the requests and
4787 	 * leaving the system in a known idle state.
4788 	 *
4789 	 * Note that it is of the utmost importance that the GPU is idle and
4790 	 * all stray writes are flushed *before* we dismantle the backing
4791 	 * storage for the pinned objects.
4792 	 *
4793 	 * However, since we are uncertain that resetting the GPU on older
4794 	 * machines is a good idea, we don't - just in case it leaves the
4795 	 * machine in an unusable condition.
4796 	 */
4797 	i915_gem_sanitize(dev_priv);
4798 	goto out_rpm_put;
4799 
4800 err_unlock:
4801 	mutex_unlock(&dev->struct_mutex);
4802 out_rpm_put:
4803 	intel_runtime_pm_put(dev_priv);
4804 	return ret;
4805 }
4806 
4807 void i915_gem_resume(struct drm_i915_private *dev_priv)
4808 {
4809 	struct drm_device *dev = &dev_priv->drm;
4810 
4811 	WARN_ON(dev_priv->gt.awake);
4812 
4813 	mutex_lock(&dev->struct_mutex);
4814 	i915_gem_restore_gtt_mappings(dev_priv);
4815 
4816 	/* As we didn't flush the kernel context before suspend, we cannot
4817 	 * guarantee that the context image is complete. So let's just reset
4818 	 * it and start again.
4819 */ 4820 dev_priv->gt.resume(dev_priv); 4821 4822 mutex_unlock(&dev->struct_mutex); 4823 } 4824 4825 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) 4826 { 4827 if (INTEL_GEN(dev_priv) < 5 || 4828 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 4829 return; 4830 4831 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 4832 DISP_TILE_SURFACE_SWIZZLING); 4833 4834 if (IS_GEN5(dev_priv)) 4835 return; 4836 4837 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 4838 if (IS_GEN6(dev_priv)) 4839 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 4840 else if (IS_GEN7(dev_priv)) 4841 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 4842 else if (IS_GEN8(dev_priv)) 4843 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); 4844 else 4845 BUG(); 4846 } 4847 4848 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) 4849 { 4850 I915_WRITE(RING_CTL(base), 0); 4851 I915_WRITE(RING_HEAD(base), 0); 4852 I915_WRITE(RING_TAIL(base), 0); 4853 I915_WRITE(RING_START(base), 0); 4854 } 4855 4856 static void init_unused_rings(struct drm_i915_private *dev_priv) 4857 { 4858 if (IS_I830(dev_priv)) { 4859 init_unused_ring(dev_priv, PRB1_BASE); 4860 init_unused_ring(dev_priv, SRB0_BASE); 4861 init_unused_ring(dev_priv, SRB1_BASE); 4862 init_unused_ring(dev_priv, SRB2_BASE); 4863 init_unused_ring(dev_priv, SRB3_BASE); 4864 } else if (IS_GEN2(dev_priv)) { 4865 init_unused_ring(dev_priv, SRB0_BASE); 4866 init_unused_ring(dev_priv, SRB1_BASE); 4867 } else if (IS_GEN3(dev_priv)) { 4868 init_unused_ring(dev_priv, PRB1_BASE); 4869 init_unused_ring(dev_priv, PRB2_BASE); 4870 } 4871 } 4872 4873 static int __i915_gem_restart_engines(void *data) 4874 { 4875 struct drm_i915_private *i915 = data; 4876 struct intel_engine_cs *engine; 4877 enum intel_engine_id id; 4878 int err; 4879 4880 for_each_engine(engine, i915, id) { 4881 err = engine->init_hw(engine); 4882 if (err) 4883 return err; 4884 } 4885 4886 return 0; 4887 } 4888 4889 int i915_gem_init_hw(struct drm_i915_private *dev_priv) 4890 { 4891 int ret; 4892 4893 dev_priv->gt.last_init_time = ktime_get(); 4894 4895 /* Double layer security blanket, see i915_gem_init() */ 4896 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4897 4898 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) 4899 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 4900 4901 if (IS_HASWELL(dev_priv)) 4902 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? 4903 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 4904 4905 if (HAS_PCH_NOP(dev_priv)) { 4906 if (IS_IVYBRIDGE(dev_priv)) { 4907 u32 temp = I915_READ(GEN7_MSG_CTL); 4908 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 4909 I915_WRITE(GEN7_MSG_CTL, temp); 4910 } else if (INTEL_GEN(dev_priv) >= 7) { 4911 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); 4912 temp &= ~RESET_PCH_HANDSHAKE_ENABLE; 4913 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); 4914 } 4915 } 4916 4917 i915_gem_init_swizzling(dev_priv); 4918 4919 /* 4920 * At least 830 can leave some of the unused rings 4921 * "active" (ie. head != tail) after resume which 4922 * will prevent c3 entry. Makes sure all unused rings 4923 * are totally idle. 
4924 */ 4925 init_unused_rings(dev_priv); 4926 4927 BUG_ON(!dev_priv->kernel_context); 4928 4929 ret = i915_ppgtt_init_hw(dev_priv); 4930 if (ret) { 4931 DRM_ERROR("PPGTT enable HW failed %d\n", ret); 4932 goto out; 4933 } 4934 4935 /* Need to do basic initialisation of all rings first: */ 4936 ret = __i915_gem_restart_engines(dev_priv); 4937 if (ret) 4938 goto out; 4939 4940 intel_mocs_init_l3cc_table(dev_priv); 4941 4942 /* We can't enable contexts until all firmware is loaded */ 4943 ret = intel_uc_init_hw(dev_priv); 4944 if (ret) 4945 goto out; 4946 4947 out: 4948 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4949 return ret; 4950 } 4951 4952 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) 4953 { 4954 if (INTEL_INFO(dev_priv)->gen < 6) 4955 return false; 4956 4957 /* TODO: make semaphores and Execlists play nicely together */ 4958 if (i915.enable_execlists) 4959 return false; 4960 4961 if (value >= 0) 4962 return value; 4963 4964 #ifdef CONFIG_INTEL_IOMMU 4965 /* Enable semaphores on SNB when IO remapping is off */ 4966 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped) 4967 return false; 4968 #endif 4969 4970 return true; 4971 } 4972 4973 int i915_gem_init(struct drm_i915_private *dev_priv) 4974 { 4975 int ret; 4976 4977 mutex_lock(&dev_priv->drm.struct_mutex); 4978 4979 i915_gem_clflush_init(dev_priv); 4980 4981 if (!i915.enable_execlists) { 4982 dev_priv->gt.resume = intel_legacy_submission_resume; 4983 dev_priv->gt.cleanup_engine = intel_engine_cleanup; 4984 } else { 4985 dev_priv->gt.resume = intel_lr_context_resume; 4986 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; 4987 } 4988 4989 /* This is just a security blanket to placate dragons. 4990 * On some systems, we very sporadically observe that the first TLBs 4991 * used by the CS may be stale, despite us poking the TLB reset. If 4992 * we hold the forcewake during initialisation these problems 4993 * just magically go away. 4994 */ 4995 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4996 4997 i915_gem_init_userptr(dev_priv); 4998 4999 ret = i915_gem_init_ggtt(dev_priv); 5000 if (ret) 5001 goto out_unlock; 5002 5003 ret = i915_gem_context_init(dev_priv); 5004 if (ret) 5005 goto out_unlock; 5006 5007 ret = intel_engines_init(dev_priv); 5008 if (ret) 5009 goto out_unlock; 5010 5011 ret = i915_gem_init_hw(dev_priv); 5012 if (ret == -EIO) { 5013 /* Allow engine initialisation to fail by marking the GPU as 5014 * wedged. But we only want to do this where the GPU is angry, 5015 * for all other failure, such as an allocation failure, bail. 
5016 */ 5017 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); 5018 i915_gem_set_wedged(dev_priv); 5019 ret = 0; 5020 } 5021 5022 out_unlock: 5023 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5024 mutex_unlock(&dev_priv->drm.struct_mutex); 5025 5026 return ret; 5027 } 5028 5029 void i915_gem_init_mmio(struct drm_i915_private *i915) 5030 { 5031 i915_gem_sanitize(i915); 5032 } 5033 5034 void 5035 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) 5036 { 5037 struct intel_engine_cs *engine; 5038 enum intel_engine_id id; 5039 5040 for_each_engine(engine, dev_priv, id) 5041 dev_priv->gt.cleanup_engine(engine); 5042 } 5043 5044 void 5045 i915_gem_load_init_fences(struct drm_i915_private *dev_priv) 5046 { 5047 int i; 5048 5049 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 5050 !IS_CHERRYVIEW(dev_priv)) 5051 dev_priv->num_fence_regs = 32; 5052 else if (INTEL_INFO(dev_priv)->gen >= 4 || 5053 IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 5054 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 5055 dev_priv->num_fence_regs = 16; 5056 else 5057 dev_priv->num_fence_regs = 8; 5058 5059 if (intel_vgpu_active(dev_priv)) 5060 dev_priv->num_fence_regs = 5061 I915_READ(vgtif_reg(avail_rs.fence_num)); 5062 5063 /* Initialize fence registers to zero */ 5064 for (i = 0; i < dev_priv->num_fence_regs; i++) { 5065 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; 5066 5067 fence->i915 = dev_priv; 5068 fence->id = i; 5069 list_add_tail(&fence->link, &dev_priv->mm.fence_list); 5070 } 5071 i915_gem_restore_fences(dev_priv); 5072 5073 i915_gem_detect_bit_6_swizzle(dev_priv); 5074 } 5075 5076 int 5077 i915_gem_load_init(struct drm_i915_private *dev_priv) 5078 { 5079 int err = -ENOMEM; 5080 5081 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); 5082 if (!dev_priv->objects) 5083 goto err_out; 5084 5085 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); 5086 if (!dev_priv->vmas) 5087 goto err_objects; 5088 5089 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, 5090 SLAB_HWCACHE_ALIGN | 5091 SLAB_RECLAIM_ACCOUNT | 5092 SLAB_TYPESAFE_BY_RCU); 5093 if (!dev_priv->requests) 5094 goto err_vmas; 5095 5096 dev_priv->dependencies = KMEM_CACHE(i915_dependency, 5097 SLAB_HWCACHE_ALIGN | 5098 SLAB_RECLAIM_ACCOUNT); 5099 if (!dev_priv->dependencies) 5100 goto err_requests; 5101 5102 mutex_lock(&dev_priv->drm.struct_mutex); 5103 INIT_LIST_HEAD(&dev_priv->gt.timelines); 5104 err = i915_gem_timeline_init__global(dev_priv); 5105 mutex_unlock(&dev_priv->drm.struct_mutex); 5106 if (err) 5107 goto err_dependencies; 5108 5109 INIT_LIST_HEAD(&dev_priv->context_list); 5110 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); 5111 init_llist_head(&dev_priv->mm.free_list); 5112 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 5113 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 5114 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 5115 INIT_LIST_HEAD(&dev_priv->mm.userfault_list); 5116 INIT_DELAYED_WORK(&dev_priv->gt.retire_work, 5117 i915_gem_retire_work_handler); 5118 INIT_DELAYED_WORK(&dev_priv->gt.idle_work, 5119 i915_gem_idle_work_handler); 5120 init_waitqueue_head(&dev_priv->gpu_error.wait_queue); 5121 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 5122 5123 init_waitqueue_head(&dev_priv->pending_flip_queue); 5124 5125 dev_priv->mm.interruptible = true; 5126 5127 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); 5128 5129 lockinit(&dev_priv->fb_tracking.lock, "drmftl", 0, 0); 5130 5131 return 0; 5132 5133 err_dependencies: 5134 
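	/* Unwind the slab caches in the reverse order of their creation. */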
kmem_cache_destroy(dev_priv->dependencies); 5135 err_requests: 5136 kmem_cache_destroy(dev_priv->requests); 5137 err_vmas: 5138 kmem_cache_destroy(dev_priv->vmas); 5139 err_objects: 5140 kmem_cache_destroy(dev_priv->objects); 5141 err_out: 5142 return err; 5143 } 5144 5145 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) 5146 { 5147 i915_gem_drain_freed_objects(dev_priv); 5148 WARN_ON(!llist_empty(&dev_priv->mm.free_list)); 5149 WARN_ON(dev_priv->mm.object_count); 5150 5151 mutex_lock(&dev_priv->drm.struct_mutex); 5152 i915_gem_timeline_fini(&dev_priv->gt.global_timeline); 5153 WARN_ON(!list_empty(&dev_priv->gt.timelines)); 5154 mutex_unlock(&dev_priv->drm.struct_mutex); 5155 5156 kmem_cache_destroy(dev_priv->dependencies); 5157 kmem_cache_destroy(dev_priv->requests); 5158 kmem_cache_destroy(dev_priv->vmas); 5159 kmem_cache_destroy(dev_priv->objects); 5160 5161 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ 5162 rcu_barrier(); 5163 } 5164 5165 int i915_gem_freeze(struct drm_i915_private *dev_priv) 5166 { 5167 mutex_lock(&dev_priv->drm.struct_mutex); 5168 i915_gem_shrink_all(dev_priv); 5169 mutex_unlock(&dev_priv->drm.struct_mutex); 5170 5171 return 0; 5172 } 5173 5174 int i915_gem_freeze_late(struct drm_i915_private *dev_priv) 5175 { 5176 struct drm_i915_gem_object *obj; 5177 struct list_head *phases[] = { 5178 &dev_priv->mm.unbound_list, 5179 &dev_priv->mm.bound_list, 5180 NULL 5181 }, **p; 5182 5183 /* Called just before we write the hibernation image. 5184 * 5185 * We need to update the domain tracking to reflect that the CPU 5186 * will be accessing all the pages to create and restore from the 5187 * hibernation, and so upon restoration those pages will be in the 5188 * CPU domain. 5189 * 5190 * To make sure the hibernation image contains the latest state, 5191 * we update that state just before writing out the image. 5192 * 5193 * To try and reduce the hibernation image, we manually shrink 5194 * the objects as well. 5195 */ 5196 5197 mutex_lock(&dev_priv->drm.struct_mutex); 5198 i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND); 5199 5200 for (p = phases; *p; p++) { 5201 list_for_each_entry(obj, *p, global_link) { 5202 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 5203 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 5204 } 5205 } 5206 mutex_unlock(&dev_priv->drm.struct_mutex); 5207 5208 return 0; 5209 } 5210 5211 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5212 { 5213 struct drm_i915_file_private *file_priv = file->driver_priv; 5214 struct drm_i915_gem_request *request; 5215 5216 /* Clean up our request list when the client is going away, so that 5217 * later retire_requests won't dereference our soon-to-be-gone 5218 * file_priv. 
5219 */ 5220 lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE); 5221 list_for_each_entry(request, &file_priv->mm.request_list, client_link) 5222 request->file_priv = NULL; 5223 lockmgr(&file_priv->mm.lock, LK_RELEASE); 5224 5225 if (!list_empty(&file_priv->rps.link)) { 5226 lockmgr(&to_i915(dev)->rps.client_lock, LK_EXCLUSIVE); 5227 list_del(&file_priv->rps.link); 5228 lockmgr(&to_i915(dev)->rps.client_lock, LK_RELEASE); 5229 } 5230 } 5231 5232 #ifdef __DragonFly__ 5233 int 5234 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 5235 vm_ooffset_t foff, struct ucred *cred, u_short *color) 5236 { 5237 *color = 0; /* XXXKIB */ 5238 return (0); 5239 } 5240 5241 void 5242 i915_gem_pager_dtor(void *handle) 5243 { 5244 struct drm_gem_object *obj = handle; 5245 struct drm_device *dev = obj->dev; 5246 5247 drm_gem_free_mmap_offset(obj); 5248 mutex_lock(&dev->struct_mutex); 5249 i915_gem_release_mmap(to_intel_bo(obj)); 5250 drm_gem_object_unreference(obj); 5251 mutex_unlock(&dev->struct_mutex); 5252 } 5253 #endif 5254 5255 int i915_gem_open(struct drm_device *dev, struct drm_file *file) 5256 { 5257 struct drm_i915_file_private *file_priv; 5258 int ret; 5259 5260 DRM_DEBUG("\n"); 5261 5262 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 5263 if (!file_priv) 5264 return -ENOMEM; 5265 5266 file->driver_priv = file_priv; 5267 file_priv->dev_priv = to_i915(dev); 5268 file_priv->file = file; 5269 INIT_LIST_HEAD(&file_priv->rps.link); 5270 5271 lockinit(&file_priv->mm.lock, "i915_priv", 0, 0); 5272 INIT_LIST_HEAD(&file_priv->mm.request_list); 5273 5274 file_priv->bsd_engine = -1; 5275 5276 ret = i915_gem_context_open(dev, file); 5277 if (ret) 5278 kfree(file_priv); 5279 5280 return ret; 5281 } 5282 5283 /** 5284 * i915_gem_track_fb - update frontbuffer tracking 5285 * @old: current GEM buffer for the frontbuffer slots 5286 * @new: new GEM buffer for the frontbuffer slots 5287 * @frontbuffer_bits: bitmask of frontbuffer slots 5288 * 5289 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them 5290 * from @old and setting them in @new. Both @old and @new can be NULL. 5291 */ 5292 void i915_gem_track_fb(struct drm_i915_gem_object *old, 5293 struct drm_i915_gem_object *new, 5294 unsigned frontbuffer_bits) 5295 { 5296 /* Control of individual bits within the mask are guarded by 5297 * the owning plane->mutex, i.e. we can never see concurrent 5298 * manipulation of individual bits. But since the bitfield as a whole 5299 * is updated using RMW, we need to use atomics in order to update 5300 * the bits. 
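 *
 * A typical (illustrative) call site from a plane update is simply
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj, frontbuffer_bits);
 *
 * where frontbuffer_bits is the bit (or bits) owned by that plane, so
 * concurrent callers only ever modify disjoint bits of the shared
 * bitfield via the atomics below.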
5301 */ 5302 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 5303 sizeof(atomic_t) * BITS_PER_BYTE); 5304 5305 if (old) { 5306 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); 5307 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); 5308 } 5309 5310 if (new) { 5311 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); 5312 atomic_or(frontbuffer_bits, &new->frontbuffer_bits); 5313 } 5314 } 5315 5316 /* XXX */ 5317 static int 5318 pagecache_write_begin(struct vm_object *obj, struct address_space *mapping, 5319 loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) 5320 { 5321 *pagep = shmem_read_mapping_page(obj, OFF_TO_IDX(pos)); 5322 5323 return 0; 5324 } 5325 5326 #if 0 5327 static int 5328 pagecache_write_end(struct file *, struct address_space *mapping, 5329 loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) 5330 { 5331 } 5332 #endif 5333 5334 /* Allocate a new GEM object and fill it with the supplied data */ 5335 struct drm_i915_gem_object * 5336 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, 5337 const void *data, size_t size) 5338 { 5339 struct drm_i915_gem_object *obj; 5340 struct vm_object *file; 5341 size_t offset; 5342 int err; 5343 5344 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); 5345 if (IS_ERR(obj)) 5346 return obj; 5347 5348 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 5349 5350 file = obj->base.filp; 5351 offset = 0; 5352 do { 5353 unsigned int len = min_t(typeof(size), size, PAGE_SIZE); 5354 struct page *page; 5355 void *pgdata, *vaddr; 5356 5357 err = pagecache_write_begin(file, NULL, 5358 offset, len, 0, 5359 &page, &pgdata); 5360 if (err < 0) 5361 goto fail; 5362 5363 vaddr = kmap(page); 5364 memcpy(vaddr, data, len); 5365 kunmap(page); 5366 5367 #ifndef __DragonFly__ 5368 err = pagecache_write_end(file, file->f_mapping, 5369 offset, len, len, 5370 page, pgdata); 5371 if (err < 0) 5372 goto fail; 5373 #else 5374 put_page(page); 5375 #endif 5376 5377 size -= len; 5378 data += len; 5379 offset += len; 5380 } while (size); 5381 5382 return obj; 5383 5384 fail: 5385 i915_gem_object_put(obj); 5386 return ERR_PTR(err); 5387 } 5388 5389 struct scatterlist * 5390 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 5391 unsigned int n, 5392 unsigned int *offset) 5393 { 5394 struct i915_gem_object_page_iter *iter = &obj->mm.get_page; 5395 struct scatterlist *sg; 5396 unsigned int idx, count; 5397 5398 might_sleep(); 5399 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); 5400 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 5401 5402 /* As we iterate forward through the sg, we record each entry in a 5403 * radixtree for quick repeated (backwards) lookups. If we have seen 5404 * this index previously, we will have an entry for it. 5405 * 5406 * Initial lookup is O(N), but this is amortized to O(1) for 5407 * sequential page access (where each new request is consecutive 5408 * to the previous one). Repeated lookups are O(lg(obj->base.size)), 5409 * i.e. O(1) with a large constant! 5410 */ 5411 if (n < READ_ONCE(iter->sg_idx)) 5412 goto lookup; 5413 5414 mutex_lock(&iter->lock); 5415 5416 /* We prefer to reuse the last sg so that repeated lookup of this 5417 * (or the subsequent) sg are fast - comparing against the last 5418 * sg is faster than going through the radixtree. 
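 *
 * Concretely (illustrative): a caller walking the object in order with
 * i915_gem_object_get_page(obj, 0), get_page(obj, 1), ... advances
 * iter->sg_pos/sg_idx one step at a time and pays the linear walk only
 * once, while a later random lookup of an index that has already been
 * visited is served by the radixtree path below.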
5419 */ 5420 5421 sg = iter->sg_pos; 5422 idx = iter->sg_idx; 5423 count = __sg_page_count(sg); 5424 5425 while (idx + count <= n) { 5426 unsigned long exception, i; 5427 int ret; 5428 5429 /* If we cannot allocate and insert this entry, or the 5430 * individual pages from this range, cancel updating the 5431 * sg_idx so that on this lookup we are forced to linearly 5432 * scan onwards, but on future lookups we will try the 5433 * insertion again (in which case we need to be careful of 5434 * the error return reporting that we have already inserted 5435 * this index). 5436 */ 5437 ret = radix_tree_insert(&iter->radix, idx, sg); 5438 if (ret && ret != -EEXIST) 5439 goto scan; 5440 5441 exception = 5442 RADIX_TREE_EXCEPTIONAL_ENTRY | 5443 idx << RADIX_TREE_EXCEPTIONAL_SHIFT; 5444 for (i = 1; i < count; i++) { 5445 ret = radix_tree_insert(&iter->radix, idx + i, 5446 (void *)exception); 5447 if (ret && ret != -EEXIST) 5448 goto scan; 5449 } 5450 5451 idx += count; 5452 sg = ____sg_next(sg); 5453 count = __sg_page_count(sg); 5454 } 5455 5456 scan: 5457 iter->sg_pos = sg; 5458 iter->sg_idx = idx; 5459 5460 mutex_unlock(&iter->lock); 5461 5462 if (unlikely(n < idx)) /* insertion completed by another thread */ 5463 goto lookup; 5464 5465 /* In case we failed to insert the entry into the radixtree, we need 5466 * to look beyond the current sg. 5467 */ 5468 while (idx + count <= n) { 5469 idx += count; 5470 sg = ____sg_next(sg); 5471 count = __sg_page_count(sg); 5472 } 5473 5474 *offset = n - idx; 5475 return sg; 5476 5477 lookup: 5478 rcu_read_lock(); 5479 5480 sg = radix_tree_lookup(&iter->radix, n); 5481 GEM_BUG_ON(!sg); 5482 5483 /* If this index is in the middle of multi-page sg entry, 5484 * the radixtree will contain an exceptional entry that points 5485 * to the start of that range. We will return the pointer to 5486 * the base page and the offset of this page within the 5487 * sg entry's range. 5488 */ 5489 *offset = 0; 5490 if (unlikely(radix_tree_exception(sg))) { 5491 unsigned long base = 5492 (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT; 5493 5494 sg = radix_tree_lookup(&iter->radix, base); 5495 GEM_BUG_ON(!sg); 5496 5497 *offset = n - base; 5498 } 5499 5500 rcu_read_unlock(); 5501 5502 return sg; 5503 } 5504 5505 struct page * 5506 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) 5507 { 5508 struct scatterlist *sg; 5509 unsigned int offset; 5510 5511 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 5512 5513 sg = i915_gem_object_get_sg(obj, n, &offset); 5514 return nth_page(sg_page(sg), offset); 5515 } 5516 5517 /* Like i915_gem_object_get_page(), but mark the returned page dirty */ 5518 struct page * 5519 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 5520 unsigned int n) 5521 { 5522 struct page *page; 5523 5524 page = i915_gem_object_get_page(obj, n); 5525 if (!obj->mm.dirty) 5526 set_page_dirty(page); 5527 5528 return page; 5529 } 5530 5531 dma_addr_t 5532 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 5533 unsigned long n) 5534 { 5535 struct scatterlist *sg; 5536 unsigned int offset; 5537 5538 sg = i915_gem_object_get_sg(obj, n, &offset); 5539 return sg_dma_address(sg) + (offset << PAGE_SHIFT); 5540 } 5541 5542 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 5543 #include "selftests/scatterlist.c" 5544 #include "selftests/mock_gem_device.c" 5545 #include "selftests/huge_gem_object.c" 5546 #include "selftests/i915_gem_object.c" 5547 #include "selftests/i915_gem_coherency.c" 5548 #endif 5549