/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

static void
i915_vma_retire(struct i915_gem_active *active,
		struct drm_i915_gem_request *rq)
{
	const unsigned int idx = rq->engine->id;
	struct i915_vma *vma =
		container_of(active, struct i915_vma, last_read[idx]);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));

	i915_vma_clear_active(vma, idx);
	if (i915_vma_is_active(vma))
		return;

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
		WARN_ON(i915_vma_unbind(vma));

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
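	 *
	 * Only objects that still have at least one vma bound into an
	 * address space (bind_count != 0) live on the bound_list, hence
	 * the check below.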
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);

	obj->mm.dirty = true; /* be paranoid */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

static struct i915_vma *
__i915_vma_create(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;
	int i;

	GEM_BUG_ON(vm->closed);

	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->exec_list);
	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
		init_request_active(&vma->last_read[i], i915_vma_retire);
	init_request_active(&vma->last_fence, NULL);
	list_add(&vma->vm_link, &vm->unbound_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->size = obj->base.size;

	if (view) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			vma->size = view->params.partial.size;
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size =
				intel_rotation_info_size(&view->params.rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (i915_is_ggtt(vm)) {
		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);

	return vma;
}

struct i915_vma *
i915_vma_create(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));

	return __i915_vma_create(obj, vm, view);
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
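 *
 * PIN_GLOBAL requests a binding in the global GTT (I915_VMA_GLOBAL_BIND),
 * PIN_USER a binding through the per-process GTT (I915_VMA_LOCAL_BIND);
 * with PIN_UPDATE, bindings that are already present are rewritten rather
 * than skipped.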
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	if (vma_flags == 0 && vma->vm->allocate_va_range) {
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		if (ret)
			return ret;
	}

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(to_i915(vma->vm->dev));

	lockdep_assert_held(&vma->vm->dev->struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);
	return ptr;
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	__i915_gem_object_release_unless_active(obj);
}

bool
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

	fence_size = i915_gem_get_ggtt_size(dev_priv,
					    vma->size,
					    i915_gem_object_get_tiling(obj));
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
						      vma->size,
						      i915_gem_object_get_tiling(obj),
						      true);

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
		    dev_priv->ggtt.mappable_end);

	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
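	 *
	 * ("fenceable" above means the node is exactly fence-sized and
	 * fence-aligned for the object's tiling mode; "mappable" that it
	 * lies entirely within the CPU-visible portion of the global GTT.)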
	 */
	if (mappable && fenceable &&
	    vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma,
			      unsigned long cache_level)
{
	struct drm_mm_node *gtt_space = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	if (!drm_mm_node_allocated(gtt_space))
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, we evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
	struct drm_i915_gem_object *obj = vma->obj;
	u64 start, end;
	int ret;

	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
		size = i915_gem_get_ggtt_size(dev_priv, size,
					      i915_gem_object_get_tiling(obj));

	alignment = max(max(alignment, vma->display_alignment),
			i915_gem_get_ggtt_alignment(dev_priv, size,
						    i915_gem_object_get_tiling(obj),
						    flags & PIN_MAPPABLE));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
			  size, obj->base.size,
			  flags & PIN_MAPPABLE ?
"mappable" : "total", 385 end); 386 return -E2BIG; 387 } 388 389 ret = i915_gem_object_pin_pages(obj); 390 if (ret) 391 return ret; 392 393 if (flags & PIN_OFFSET_FIXED) { 394 u64 offset = flags & PIN_OFFSET_MASK; 395 if (offset & (alignment - 1) || offset > end - size) { 396 ret = -EINVAL; 397 goto err_unpin; 398 } 399 400 vma->node.start = offset; 401 vma->node.size = size; 402 vma->node.color = obj->cache_level; 403 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); 404 if (ret) { 405 ret = i915_gem_evict_for_vma(vma); 406 if (ret == 0) 407 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node); 408 if (ret) 409 goto err_unpin; 410 } 411 } else { 412 u32 search_flag, alloc_flag; 413 414 if (flags & PIN_HIGH) { 415 search_flag = DRM_MM_SEARCH_BELOW; 416 alloc_flag = DRM_MM_CREATE_TOP; 417 } else { 418 search_flag = DRM_MM_SEARCH_DEFAULT; 419 alloc_flag = DRM_MM_CREATE_DEFAULT; 420 } 421 422 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks, 423 * so we know that we always have a minimum alignment of 4096. 424 * The drm_mm range manager is optimised to return results 425 * with zero alignment, so where possible use the optimal 426 * path. 427 */ 428 if (alignment <= 4096) 429 alignment = 0; 430 431 search_free: 432 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm, 433 &vma->node, 434 size, alignment, 435 obj->cache_level, 436 start, end, 437 search_flag, 438 alloc_flag); 439 if (ret) { 440 ret = i915_gem_evict_something(vma->vm, size, alignment, 441 obj->cache_level, 442 start, end, 443 flags); 444 if (ret == 0) 445 goto search_free; 446 447 goto err_unpin; 448 } 449 450 GEM_BUG_ON(vma->node.start < start); 451 GEM_BUG_ON(vma->node.start + vma->node.size > end); 452 } 453 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level)); 454 455 list_move_tail(&obj->global_link, &dev_priv->mm.bound_list); 456 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 457 obj->bind_count++; 458 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); 459 460 return 0; 461 462 err_unpin: 463 i915_gem_object_unpin_pages(obj); 464 return ret; 465 } 466 467 int __i915_vma_do_pin(struct i915_vma *vma, 468 u64 size, u64 alignment, u64 flags) 469 { 470 unsigned int bound = vma->flags; 471 int ret; 472 473 lockdep_assert_held(&vma->vm->dev->struct_mutex); 474 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); 475 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); 476 477 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { 478 ret = -EBUSY; 479 goto err; 480 } 481 482 if ((bound & I915_VMA_BIND_MASK) == 0) { 483 ret = i915_vma_insert(vma, size, alignment, flags); 484 if (ret) 485 goto err; 486 } 487 488 ret = i915_vma_bind(vma, vma->obj->cache_level, flags); 489 if (ret) 490 goto err; 491 492 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) 493 __i915_vma_set_map_and_fenceable(vma); 494 495 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); 496 return 0; 497 498 err: 499 __i915_vma_unpin(vma); 500 return ret; 501 } 502 503 void i915_vma_destroy(struct i915_vma *vma) 504 { 505 GEM_BUG_ON(vma->node.allocated); 506 GEM_BUG_ON(i915_vma_is_active(vma)); 507 GEM_BUG_ON(!i915_vma_is_closed(vma)); 508 GEM_BUG_ON(vma->fence); 509 510 list_del(&vma->vm_link); 511 if (!i915_vma_is_ggtt(vma)) 512 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); 513 514 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); 515 } 516 517 void i915_vma_close(struct i915_vma *vma) 518 { 519 GEM_BUG_ON(i915_vma_is_closed(vma)); 520 vma->flags |= I915_VMA_CLOSED; 521 522 list_del(&vma->obj_link); 523 
	rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
		WARN_ON(i915_vma_unbind(vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	unsigned long active;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
	if (active) {
		int idx;

		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						     &vma->vm->dev->struct_mutex);
			if (ret)
				break;
		}

		__i915_vma_unpin(vma);
		if (ret)
			return ret;

		GEM_BUG_ON(i915_vma_is_active(vma));
	}

	if (i915_vma_is_pinned(vma))
		return -EBUSY;

	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;

	GEM_BUG_ON(obj->bind_count == 0);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	if (vma->pages != obj->mm.pages) {
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist.
	 */
	if (--obj->bind_count == 0)
		list_move_tail(&obj->global_link,
			       &to_i915(obj->base.dev)->mm.unbound_list);

	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

destroy:
	if (unlikely(i915_vma_is_closed(vma)))
		i915_vma_destroy(vma);

	return 0;
}