/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/bitmap.h>

#include <sys/mplock2.h>

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view, a
 * globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions, the ones not taking the view parameter, operate on, or
 * with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_ggtt_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_ggtt_vma_pages() function. This table is stored in the
 * vma.ggtt_view and exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that a passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, base);
}

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

const struct i915_ggtt_view i915_ggtt_view_normal = {
	.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
	.type = I915_GGTT_VIEW_ROTATED,
};

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
	has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
	has_full_48bit_ppgtt =
		IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;

	if (intel_vgpu_active(dev_priv))
		has_full_ppgtt = false; /* emulation is too hard */

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags = 0;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}

static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid)
{
	gen8_pte_t pte = valid ?
_PAGE_PRESENT | _PAGE_RW : 0; 200 pte |= addr; 201 202 switch (level) { 203 case I915_CACHE_NONE: 204 pte |= PPAT_UNCACHED_INDEX; 205 break; 206 case I915_CACHE_WT: 207 pte |= PPAT_DISPLAY_ELLC_INDEX; 208 break; 209 default: 210 pte |= PPAT_CACHED_INDEX; 211 break; 212 } 213 214 return pte; 215 } 216 217 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, 218 const enum i915_cache_level level) 219 { 220 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; 221 pde |= addr; 222 if (level != I915_CACHE_NONE) 223 pde |= PPAT_CACHED_PDE_INDEX; 224 else 225 pde |= PPAT_UNCACHED_INDEX; 226 return pde; 227 } 228 229 #define gen8_pdpe_encode gen8_pde_encode 230 #define gen8_pml4e_encode gen8_pde_encode 231 232 static gen6_pte_t snb_pte_encode(dma_addr_t addr, 233 enum i915_cache_level level, 234 bool valid, u32 unused) 235 { 236 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; 237 pte |= GEN6_PTE_ADDR_ENCODE(addr); 238 239 switch (level) { 240 case I915_CACHE_L3_LLC: 241 case I915_CACHE_LLC: 242 pte |= GEN6_PTE_CACHE_LLC; 243 break; 244 case I915_CACHE_NONE: 245 pte |= GEN6_PTE_UNCACHED; 246 break; 247 default: 248 MISSING_CASE(level); 249 } 250 251 return pte; 252 } 253 254 static gen6_pte_t ivb_pte_encode(dma_addr_t addr, 255 enum i915_cache_level level, 256 bool valid, u32 unused) 257 { 258 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; 259 pte |= GEN6_PTE_ADDR_ENCODE(addr); 260 261 switch (level) { 262 case I915_CACHE_L3_LLC: 263 pte |= GEN7_PTE_CACHE_L3_LLC; 264 break; 265 case I915_CACHE_LLC: 266 pte |= GEN6_PTE_CACHE_LLC; 267 break; 268 case I915_CACHE_NONE: 269 pte |= GEN6_PTE_UNCACHED; 270 break; 271 default: 272 MISSING_CASE(level); 273 } 274 275 return pte; 276 } 277 278 static gen6_pte_t byt_pte_encode(dma_addr_t addr, 279 enum i915_cache_level level, 280 bool valid, u32 flags) 281 { 282 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; 283 pte |= GEN6_PTE_ADDR_ENCODE(addr); 284 285 if (!(flags & PTE_READ_ONLY)) 286 pte |= BYT_PTE_WRITEABLE; 287 288 if (level != I915_CACHE_NONE) 289 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; 290 291 return pte; 292 } 293 294 static gen6_pte_t hsw_pte_encode(dma_addr_t addr, 295 enum i915_cache_level level, 296 bool valid, u32 unused) 297 { 298 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; 299 pte |= HSW_PTE_ADDR_ENCODE(addr); 300 301 if (level != I915_CACHE_NONE) 302 pte |= HSW_WB_LLC_AGE3; 303 304 return pte; 305 } 306 307 static gen6_pte_t iris_pte_encode(dma_addr_t addr, 308 enum i915_cache_level level, 309 bool valid, u32 unused) 310 { 311 gen6_pte_t pte = valid ? 
GEN6_PTE_VALID : 0; 312 pte |= HSW_PTE_ADDR_ENCODE(addr); 313 314 switch (level) { 315 case I915_CACHE_NONE: 316 break; 317 case I915_CACHE_WT: 318 pte |= HSW_WT_ELLC_LLC_AGE3; 319 break; 320 default: 321 pte |= HSW_WB_ELLC_LLC_AGE3; 322 break; 323 } 324 325 return pte; 326 } 327 328 static int __setup_page_dma(struct drm_device *dev, 329 struct i915_page_dma *p, gfp_t flags) 330 { 331 struct device *device = &dev->pdev->dev; 332 333 p->page = alloc_page(flags); 334 if (!p->page) 335 return -ENOMEM; 336 337 p->daddr = dma_map_page(device, 338 p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); 339 340 if (dma_mapping_error(device, p->daddr)) { 341 __free_page(p->page); 342 return -EINVAL; 343 } 344 345 return 0; 346 } 347 348 static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p) 349 { 350 return __setup_page_dma(dev, p, GFP_KERNEL); 351 } 352 353 static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p) 354 { 355 if (WARN_ON(!p->page)) 356 return; 357 358 dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL); 359 __free_page(p->page); 360 memset(p, 0, sizeof(*p)); 361 } 362 363 static void *kmap_page_dma(struct i915_page_dma *p) 364 { 365 return kmap_atomic(p->page); 366 } 367 368 /* We use the flushing unmap only with ppgtt structures: 369 * page directories, page tables and scratch pages. 370 */ 371 static void kunmap_page_dma(struct drm_device *dev, void *vaddr) 372 { 373 /* There are only few exceptions for gen >=6. chv and bxt. 374 * And we are not sure about the latter so play safe for now. 375 */ 376 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) 377 drm_clflush_virt_range(vaddr, PAGE_SIZE); 378 379 kunmap_atomic(vaddr); 380 } 381 382 #define kmap_px(px) kmap_page_dma(px_base(px)) 383 #define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr)) 384 385 #define setup_px(dev, px) setup_page_dma((dev), px_base(px)) 386 #define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px)) 387 #define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v)) 388 #define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v)) 389 390 static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p, 391 const uint64_t val) 392 { 393 int i; 394 uint64_t * const vaddr = kmap_page_dma(p); 395 396 for (i = 0; i < 512; i++) 397 vaddr[i] = val; 398 399 kunmap_page_dma(dev, vaddr); 400 } 401 402 static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p, 403 const uint32_t val32) 404 { 405 uint64_t v = val32; 406 407 v = v << 32 | val32; 408 409 fill_page_dma(dev, p, v); 410 } 411 412 static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev) 413 { 414 struct i915_page_scratch *sp; 415 int ret; 416 417 sp = kzalloc(sizeof(*sp), GFP_KERNEL); 418 if (sp == NULL) 419 return ERR_PTR(-ENOMEM); 420 421 ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO); 422 if (ret) { 423 kfree(sp); 424 return ERR_PTR(ret); 425 } 426 427 set_pages_uc(px_page(sp), 1); 428 429 return sp; 430 } 431 432 static void free_scratch_page(struct drm_device *dev, 433 struct i915_page_scratch *sp) 434 { 435 set_pages_wb(px_page(sp), 1); 436 437 cleanup_px(dev, sp); 438 kfree(sp); 439 } 440 441 static struct i915_page_table *alloc_pt(struct drm_device *dev) 442 { 443 struct i915_page_table *pt; 444 const size_t count = INTEL_INFO(dev)->gen >= 8 ? 
445 GEN8_PTES : GEN6_PTES; 446 int ret = -ENOMEM; 447 448 pt = kzalloc(sizeof(*pt), GFP_KERNEL); 449 if (!pt) 450 return ERR_PTR(-ENOMEM); 451 452 pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes), 453 GFP_KERNEL); 454 455 if (!pt->used_ptes) 456 goto fail_bitmap; 457 458 ret = setup_px(dev, pt); 459 if (ret) 460 goto fail_page_m; 461 462 return pt; 463 464 fail_page_m: 465 kfree(pt->used_ptes); 466 fail_bitmap: 467 kfree(pt); 468 469 return ERR_PTR(ret); 470 } 471 472 static void free_pt(struct drm_device *dev, struct i915_page_table *pt) 473 { 474 cleanup_px(dev, pt); 475 kfree(pt->used_ptes); 476 kfree(pt); 477 } 478 479 static void gen8_initialize_pt(struct i915_address_space *vm, 480 struct i915_page_table *pt) 481 { 482 gen8_pte_t scratch_pte; 483 484 scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), 485 I915_CACHE_LLC, true); 486 487 fill_px(vm->dev, pt, scratch_pte); 488 } 489 490 static void gen6_initialize_pt(struct i915_address_space *vm, 491 struct i915_page_table *pt) 492 { 493 gen6_pte_t scratch_pte; 494 495 WARN_ON(px_dma(vm->scratch_page) == 0); 496 497 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), 498 I915_CACHE_LLC, true, 0); 499 500 fill32_px(vm->dev, pt, scratch_pte); 501 } 502 503 static struct i915_page_directory *alloc_pd(struct drm_device *dev) 504 { 505 struct i915_page_directory *pd; 506 int ret = -ENOMEM; 507 508 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 509 if (!pd) 510 return ERR_PTR(-ENOMEM); 511 512 pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES), 513 sizeof(*pd->used_pdes), GFP_KERNEL); 514 if (!pd->used_pdes) 515 goto fail_bitmap; 516 517 ret = setup_px(dev, pd); 518 if (ret) 519 goto fail_page_m; 520 521 return pd; 522 523 fail_page_m: 524 kfree(pd->used_pdes); 525 fail_bitmap: 526 kfree(pd); 527 528 return ERR_PTR(ret); 529 } 530 531 static void free_pd(struct drm_device *dev, struct i915_page_directory *pd) 532 { 533 if (px_page(pd)) { 534 cleanup_px(dev, pd); 535 kfree(pd->used_pdes); 536 kfree(pd); 537 } 538 } 539 540 static void gen8_initialize_pd(struct i915_address_space *vm, 541 struct i915_page_directory *pd) 542 { 543 gen8_pde_t scratch_pde; 544 545 scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC); 546 547 fill_px(vm->dev, pd, scratch_pde); 548 } 549 550 static int __pdp_init(struct drm_device *dev, 551 struct i915_page_directory_pointer *pdp) 552 { 553 size_t pdpes = I915_PDPES_PER_PDP(dev); 554 555 pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes), 556 sizeof(unsigned long), 557 GFP_KERNEL); 558 if (!pdp->used_pdpes) 559 return -ENOMEM; 560 561 pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory), 562 GFP_KERNEL); 563 if (!pdp->page_directory) { 564 kfree(pdp->used_pdpes); 565 /* the PDP might be the statically allocated top level. 
Keep it 566 * as clean as possible */ 567 pdp->used_pdpes = NULL; 568 return -ENOMEM; 569 } 570 571 return 0; 572 } 573 574 static void __pdp_fini(struct i915_page_directory_pointer *pdp) 575 { 576 kfree(pdp->used_pdpes); 577 kfree(pdp->page_directory); 578 pdp->page_directory = NULL; 579 } 580 581 static struct 582 i915_page_directory_pointer *alloc_pdp(struct drm_device *dev) 583 { 584 struct i915_page_directory_pointer *pdp; 585 int ret = -ENOMEM; 586 587 WARN_ON(!USES_FULL_48BIT_PPGTT(dev)); 588 589 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); 590 if (!pdp) 591 return ERR_PTR(-ENOMEM); 592 593 ret = __pdp_init(dev, pdp); 594 if (ret) 595 goto fail_bitmap; 596 597 ret = setup_px(dev, pdp); 598 if (ret) 599 goto fail_page_m; 600 601 return pdp; 602 603 fail_page_m: 604 __pdp_fini(pdp); 605 fail_bitmap: 606 kfree(pdp); 607 608 return ERR_PTR(ret); 609 } 610 611 static void free_pdp(struct drm_device *dev, 612 struct i915_page_directory_pointer *pdp) 613 { 614 __pdp_fini(pdp); 615 if (USES_FULL_48BIT_PPGTT(dev)) { 616 cleanup_px(dev, pdp); 617 kfree(pdp); 618 } 619 } 620 621 static void gen8_initialize_pdp(struct i915_address_space *vm, 622 struct i915_page_directory_pointer *pdp) 623 { 624 gen8_ppgtt_pdpe_t scratch_pdpe; 625 626 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC); 627 628 fill_px(vm->dev, pdp, scratch_pdpe); 629 } 630 631 static void gen8_initialize_pml4(struct i915_address_space *vm, 632 struct i915_pml4 *pml4) 633 { 634 gen8_ppgtt_pml4e_t scratch_pml4e; 635 636 scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp), 637 I915_CACHE_LLC); 638 639 fill_px(vm->dev, pml4, scratch_pml4e); 640 } 641 642 static void 643 gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt, 644 struct i915_page_directory_pointer *pdp, 645 struct i915_page_directory *pd, 646 int index) 647 { 648 gen8_ppgtt_pdpe_t *page_directorypo; 649 650 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) 651 return; 652 653 page_directorypo = kmap_px(pdp); 654 page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC); 655 kunmap_px(ppgtt, page_directorypo); 656 } 657 658 static void 659 gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt, 660 struct i915_pml4 *pml4, 661 struct i915_page_directory_pointer *pdp, 662 int index) 663 { 664 gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4); 665 666 WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)); 667 pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC); 668 kunmap_px(ppgtt, pagemap); 669 } 670 671 /* Broadwell Page Directory Pointer Descriptors */ 672 static int gen8_write_pdp(struct drm_i915_gem_request *req, 673 unsigned entry, 674 dma_addr_t addr) 675 { 676 struct intel_engine_cs *engine = req->engine; 677 int ret; 678 679 BUG_ON(entry >= 4); 680 681 ret = intel_ring_begin(req, 6); 682 if (ret) 683 return ret; 684 685 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); 686 intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry)); 687 intel_ring_emit(engine, upper_32_bits(addr)); 688 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); 689 intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry)); 690 intel_ring_emit(engine, lower_32_bits(addr)); 691 intel_ring_advance(engine); 692 693 return 0; 694 } 695 696 static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt, 697 struct drm_i915_gem_request *req) 698 { 699 int i, ret; 700 701 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { 702 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 703 704 ret = gen8_write_pdp(req, i, pd_daddr); 705 if (ret) 706 return 
ret; 707 } 708 709 return 0; 710 } 711 712 static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt, 713 struct drm_i915_gem_request *req) 714 { 715 return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4)); 716 } 717 718 static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm, 719 struct i915_page_directory_pointer *pdp, 720 uint64_t start, 721 uint64_t length, 722 gen8_pte_t scratch_pte) 723 { 724 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 725 gen8_pte_t *pt_vaddr; 726 unsigned pdpe = gen8_pdpe_index(start); 727 unsigned pde = gen8_pde_index(start); 728 unsigned pte = gen8_pte_index(start); 729 unsigned num_entries = length >> PAGE_SHIFT; 730 unsigned last_pte, i; 731 732 if (WARN_ON(!pdp)) 733 return; 734 735 while (num_entries) { 736 struct i915_page_directory *pd; 737 struct i915_page_table *pt; 738 739 if (WARN_ON(!pdp->page_directory[pdpe])) 740 break; 741 742 pd = pdp->page_directory[pdpe]; 743 744 if (WARN_ON(!pd->page_table[pde])) 745 break; 746 747 pt = pd->page_table[pde]; 748 749 if (WARN_ON(!px_page(pt))) 750 break; 751 752 last_pte = pte + num_entries; 753 if (last_pte > GEN8_PTES) 754 last_pte = GEN8_PTES; 755 756 pt_vaddr = kmap_px(pt); 757 758 for (i = pte; i < last_pte; i++) { 759 pt_vaddr[i] = scratch_pte; 760 num_entries--; 761 } 762 763 kunmap_px(ppgtt, pt_vaddr); 764 765 pte = 0; 766 if (++pde == I915_PDES) { 767 if (++pdpe == I915_PDPES_PER_PDP(vm->dev)) 768 break; 769 pde = 0; 770 } 771 } 772 } 773 774 static void gen8_ppgtt_clear_range(struct i915_address_space *vm, 775 uint64_t start, 776 uint64_t length, 777 bool use_scratch) 778 { 779 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 780 gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), 781 I915_CACHE_LLC, use_scratch); 782 783 if (!USES_FULL_48BIT_PPGTT(vm->dev)) { 784 gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, 785 scratch_pte); 786 } else { 787 uint64_t pml4e; 788 struct i915_page_directory_pointer *pdp; 789 790 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) { 791 gen8_ppgtt_clear_pte_range(vm, pdp, start, length, 792 scratch_pte); 793 } 794 } 795 } 796 797 static void 798 gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, 799 struct i915_page_directory_pointer *pdp, 800 struct sg_page_iter *sg_iter, 801 uint64_t start, 802 enum i915_cache_level cache_level) 803 { 804 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 805 gen8_pte_t *pt_vaddr; 806 unsigned pdpe = gen8_pdpe_index(start); 807 unsigned pde = gen8_pde_index(start); 808 unsigned pte = gen8_pte_index(start); 809 810 pt_vaddr = NULL; 811 812 while (__sg_page_iter_next(sg_iter)) { 813 if (pt_vaddr == NULL) { 814 struct i915_page_directory *pd = pdp->page_directory[pdpe]; 815 struct i915_page_table *pt = pd->page_table[pde]; 816 pt_vaddr = kmap_px(pt); 817 } 818 819 pt_vaddr[pte] = 820 gen8_pte_encode(sg_page_iter_dma_address(sg_iter), 821 cache_level, true); 822 if (++pte == GEN8_PTES) { 823 kunmap_px(ppgtt, pt_vaddr); 824 pt_vaddr = NULL; 825 if (++pde == I915_PDES) { 826 if (++pdpe == I915_PDPES_PER_PDP(vm->dev)) 827 break; 828 pde = 0; 829 } 830 pte = 0; 831 } 832 } 833 834 if (pt_vaddr) 835 kunmap_px(ppgtt, pt_vaddr); 836 } 837 838 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, 839 struct sg_table *pages, 840 uint64_t start, 841 enum i915_cache_level cache_level, 842 u32 unused) 843 { 844 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 845 struct sg_page_iter sg_iter; 846 847 __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0); 
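	/* Legacy 32b PPGTT has a single, statically embedded PDP, so the whole
	 * range is written through it.  With full 48b PPGTT we instead walk
	 * every PML4 entry covered by the request and let
	 * gen8_ppgtt_insert_pte_entries() advance the shared sg iterator
	 * across PDP boundaries.
	 */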
848 849 if (!USES_FULL_48BIT_PPGTT(vm->dev)) { 850 gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start, 851 cache_level); 852 } else { 853 struct i915_page_directory_pointer *pdp; 854 uint64_t pml4e; 855 uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT; 856 857 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) { 858 gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, 859 start, cache_level); 860 } 861 } 862 } 863 864 static void gen8_free_page_tables(struct drm_device *dev, 865 struct i915_page_directory *pd) 866 { 867 int i; 868 869 if (!px_page(pd)) 870 return; 871 872 for_each_set_bit(i, pd->used_pdes, I915_PDES) { 873 if (WARN_ON(!pd->page_table[i])) 874 continue; 875 876 free_pt(dev, pd->page_table[i]); 877 pd->page_table[i] = NULL; 878 } 879 } 880 881 static int gen8_init_scratch(struct i915_address_space *vm) 882 { 883 struct drm_device *dev = vm->dev; 884 int ret; 885 886 vm->scratch_page = alloc_scratch_page(dev); 887 if (IS_ERR(vm->scratch_page)) 888 return PTR_ERR(vm->scratch_page); 889 890 vm->scratch_pt = alloc_pt(dev); 891 if (IS_ERR(vm->scratch_pt)) { 892 ret = PTR_ERR(vm->scratch_pt); 893 goto free_scratch_page; 894 } 895 896 vm->scratch_pd = alloc_pd(dev); 897 if (IS_ERR(vm->scratch_pd)) { 898 ret = PTR_ERR(vm->scratch_pd); 899 goto free_pt; 900 } 901 902 if (USES_FULL_48BIT_PPGTT(dev)) { 903 vm->scratch_pdp = alloc_pdp(dev); 904 if (IS_ERR(vm->scratch_pdp)) { 905 ret = PTR_ERR(vm->scratch_pdp); 906 goto free_pd; 907 } 908 } 909 910 gen8_initialize_pt(vm, vm->scratch_pt); 911 gen8_initialize_pd(vm, vm->scratch_pd); 912 if (USES_FULL_48BIT_PPGTT(dev)) 913 gen8_initialize_pdp(vm, vm->scratch_pdp); 914 915 return 0; 916 917 free_pd: 918 free_pd(dev, vm->scratch_pd); 919 free_pt: 920 free_pt(dev, vm->scratch_pt); 921 free_scratch_page: 922 free_scratch_page(dev, vm->scratch_page); 923 924 return ret; 925 } 926 927 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) 928 { 929 enum vgt_g2v_type msg; 930 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); 931 int i; 932 933 if (USES_FULL_48BIT_PPGTT(dev_priv)) { 934 u64 daddr = px_dma(&ppgtt->pml4); 935 936 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); 937 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); 938 939 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : 940 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); 941 } else { 942 for (i = 0; i < GEN8_LEGACY_PDPES; i++) { 943 u64 daddr = i915_page_dir_dma_addr(ppgtt, i); 944 945 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); 946 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); 947 } 948 949 msg = (create ? 
VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : 950 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); 951 } 952 953 I915_WRITE(vgtif_reg(g2v_notify), msg); 954 955 return 0; 956 } 957 958 static void gen8_free_scratch(struct i915_address_space *vm) 959 { 960 struct drm_device *dev = vm->dev; 961 962 if (USES_FULL_48BIT_PPGTT(dev)) 963 free_pdp(dev, vm->scratch_pdp); 964 free_pd(dev, vm->scratch_pd); 965 free_pt(dev, vm->scratch_pt); 966 free_scratch_page(dev, vm->scratch_page); 967 } 968 969 static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev, 970 struct i915_page_directory_pointer *pdp) 971 { 972 int i; 973 974 for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) { 975 if (WARN_ON(!pdp->page_directory[i])) 976 continue; 977 978 gen8_free_page_tables(dev, pdp->page_directory[i]); 979 free_pd(dev, pdp->page_directory[i]); 980 } 981 982 free_pdp(dev, pdp); 983 } 984 985 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) 986 { 987 int i; 988 989 for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) { 990 if (WARN_ON(!ppgtt->pml4.pdps[i])) 991 continue; 992 993 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]); 994 } 995 996 cleanup_px(ppgtt->base.dev, &ppgtt->pml4); 997 } 998 999 static void gen8_ppgtt_cleanup(struct i915_address_space *vm) 1000 { 1001 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1002 1003 if (intel_vgpu_active(to_i915(vm->dev))) 1004 gen8_ppgtt_notify_vgt(ppgtt, false); 1005 1006 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) 1007 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp); 1008 else 1009 gen8_ppgtt_cleanup_4lvl(ppgtt); 1010 1011 gen8_free_scratch(vm); 1012 } 1013 1014 /** 1015 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range. 1016 * @vm: Master vm structure. 1017 * @pd: Page directory for this address range. 1018 * @start: Starting virtual address to begin allocations. 1019 * @length: Size of the allocations. 1020 * @new_pts: Bitmap set by function with new allocations. Likely used by the 1021 * caller to free on error. 1022 * 1023 * Allocate the required number of page tables. Extremely similar to 1024 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by 1025 * the page directory boundary (instead of the page directory pointer). That 1026 * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is 1027 * possible, and likely that the caller will need to use multiple calls of this 1028 * function to achieve the appropriate allocation. 1029 * 1030 * Return: 0 if success; negative error code otherwise. 
1031 */ 1032 static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm, 1033 struct i915_page_directory *pd, 1034 uint64_t start, 1035 uint64_t length, 1036 unsigned long *new_pts) 1037 { 1038 struct drm_device *dev = vm->dev; 1039 struct i915_page_table *pt; 1040 uint32_t pde; 1041 1042 gen8_for_each_pde(pt, pd, start, length, pde) { 1043 /* Don't reallocate page tables */ 1044 if (test_bit(pde, pd->used_pdes)) { 1045 /* Scratch is never allocated this way */ 1046 WARN_ON(pt == vm->scratch_pt); 1047 continue; 1048 } 1049 1050 pt = alloc_pt(dev); 1051 if (IS_ERR(pt)) 1052 goto unwind_out; 1053 1054 gen8_initialize_pt(vm, pt); 1055 pd->page_table[pde] = pt; 1056 __set_bit(pde, new_pts); 1057 trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT); 1058 } 1059 1060 return 0; 1061 1062 unwind_out: 1063 for_each_set_bit(pde, new_pts, I915_PDES) 1064 free_pt(dev, pd->page_table[pde]); 1065 1066 return -ENOMEM; 1067 } 1068 1069 /** 1070 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range. 1071 * @vm: Master vm structure. 1072 * @pdp: Page directory pointer for this address range. 1073 * @start: Starting virtual address to begin allocations. 1074 * @length: Size of the allocations. 1075 * @new_pds: Bitmap set by function with new allocations. Likely used by the 1076 * caller to free on error. 1077 * 1078 * Allocate the required number of page directories starting at the pde index of 1079 * @start, and ending at the pde index @start + @length. This function will skip 1080 * over already allocated page directories within the range, and only allocate 1081 * new ones, setting the appropriate pointer within the pdp as well as the 1082 * correct position in the bitmap @new_pds. 1083 * 1084 * The function will only allocate the pages within the range for a give page 1085 * directory pointer. In other words, if @start + @length straddles a virtually 1086 * addressed PDP boundary (512GB for 4k pages), there will be more allocations 1087 * required by the caller, This is not currently possible, and the BUG in the 1088 * code will prevent it. 1089 * 1090 * Return: 0 if success; negative error code otherwise. 1091 */ 1092 static int 1093 gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm, 1094 struct i915_page_directory_pointer *pdp, 1095 uint64_t start, 1096 uint64_t length, 1097 unsigned long *new_pds) 1098 { 1099 struct drm_device *dev = vm->dev; 1100 struct i915_page_directory *pd; 1101 uint32_t pdpe; 1102 uint32_t pdpes = I915_PDPES_PER_PDP(dev); 1103 1104 WARN_ON(!bitmap_empty(new_pds, pdpes)); 1105 1106 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 1107 if (test_bit(pdpe, pdp->used_pdpes)) 1108 continue; 1109 1110 pd = alloc_pd(dev); 1111 if (IS_ERR(pd)) 1112 goto unwind_out; 1113 1114 gen8_initialize_pd(vm, pd); 1115 pdp->page_directory[pdpe] = pd; 1116 __set_bit(pdpe, new_pds); 1117 trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT); 1118 } 1119 1120 return 0; 1121 1122 unwind_out: 1123 for_each_set_bit(pdpe, new_pds, pdpes) 1124 free_pd(dev, pdp->page_directory[pdpe]); 1125 1126 return -ENOMEM; 1127 } 1128 1129 /** 1130 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range. 1131 * @vm: Master vm structure. 1132 * @pml4: Page map level 4 for this address range. 1133 * @start: Starting virtual address to begin allocations. 1134 * @length: Size of the allocations. 1135 * @new_pdps: Bitmap set by function with new allocations. Likely used by the 1136 * caller to free on error. 
1137 * 1138 * Allocate the required number of page directory pointers. Extremely similar to 1139 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs(). 1140 * The main difference is here we are limited by the pml4 boundary (instead of 1141 * the page directory pointer). 1142 * 1143 * Return: 0 if success; negative error code otherwise. 1144 */ 1145 static int 1146 gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm, 1147 struct i915_pml4 *pml4, 1148 uint64_t start, 1149 uint64_t length, 1150 unsigned long *new_pdps) 1151 { 1152 struct drm_device *dev = vm->dev; 1153 struct i915_page_directory_pointer *pdp; 1154 uint32_t pml4e; 1155 1156 WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4)); 1157 1158 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { 1159 if (!test_bit(pml4e, pml4->used_pml4es)) { 1160 pdp = alloc_pdp(dev); 1161 if (IS_ERR(pdp)) 1162 goto unwind_out; 1163 1164 gen8_initialize_pdp(vm, pdp); 1165 pml4->pdps[pml4e] = pdp; 1166 __set_bit(pml4e, new_pdps); 1167 trace_i915_page_directory_pointer_entry_alloc(vm, 1168 pml4e, 1169 start, 1170 GEN8_PML4E_SHIFT); 1171 } 1172 } 1173 1174 return 0; 1175 1176 unwind_out: 1177 for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4) 1178 free_pdp(dev, pml4->pdps[pml4e]); 1179 1180 return -ENOMEM; 1181 } 1182 1183 static void 1184 free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts) 1185 { 1186 kfree(new_pts); 1187 kfree(new_pds); 1188 } 1189 1190 /* Fills in the page directory bitmap, and the array of page tables bitmap. Both 1191 * of these are based on the number of PDPEs in the system. 1192 */ 1193 static 1194 int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds, 1195 unsigned long **new_pts, 1196 uint32_t pdpes) 1197 { 1198 unsigned long *pds; 1199 unsigned long *pts; 1200 1201 pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY); 1202 if (!pds) 1203 return -ENOMEM; 1204 1205 pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long), 1206 GFP_TEMPORARY); 1207 if (!pts) 1208 goto err_out; 1209 1210 *new_pds = pds; 1211 *new_pts = pts; 1212 1213 return 0; 1214 1215 err_out: 1216 free_gen8_temp_bitmaps(pds, pts); 1217 return -ENOMEM; 1218 } 1219 1220 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify 1221 * the page table structures, we mark them dirty so that 1222 * context switching/execlist queuing code takes extra steps 1223 * to ensure that tlbs are flushed. 1224 */ 1225 static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) 1226 { 1227 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; 1228 } 1229 1230 static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, 1231 struct i915_page_directory_pointer *pdp, 1232 uint64_t start, 1233 uint64_t length) 1234 { 1235 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1236 unsigned long *new_page_dirs, *new_page_tables; 1237 struct drm_device *dev = vm->dev; 1238 struct i915_page_directory *pd; 1239 const uint64_t orig_start = start; 1240 const uint64_t orig_length = length; 1241 uint32_t pdpe; 1242 uint32_t pdpes = I915_PDPES_PER_PDP(dev); 1243 int ret; 1244 1245 /* Wrap is never okay since we can only represent 48b, and we don't 1246 * actually use the other side of the canonical address space. 
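	 * A range whose start + length overflows, or which reaches past
	 * vm->total, is therefore rejected outright below.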
1247 */ 1248 if (WARN_ON(start + length < start)) 1249 return -ENODEV; 1250 1251 if (WARN_ON(start + length > vm->total)) 1252 return -ENODEV; 1253 1254 ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes); 1255 if (ret) 1256 return ret; 1257 1258 /* Do the allocations first so we can easily bail out */ 1259 ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length, 1260 new_page_dirs); 1261 if (ret) { 1262 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); 1263 return ret; 1264 } 1265 1266 /* For every page directory referenced, allocate page tables */ 1267 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 1268 ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, 1269 new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES)); 1270 if (ret) 1271 goto err_out; 1272 } 1273 1274 start = orig_start; 1275 length = orig_length; 1276 1277 /* Allocations have completed successfully, so set the bitmaps, and do 1278 * the mappings. */ 1279 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 1280 gen8_pde_t *const page_directory = kmap_px(pd); 1281 struct i915_page_table *pt; 1282 uint64_t pd_len = length; 1283 uint64_t pd_start = start; 1284 uint32_t pde; 1285 1286 /* Every pd should be allocated, we just did that above. */ 1287 WARN_ON(!pd); 1288 1289 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { 1290 /* Same reasoning as pd */ 1291 WARN_ON(!pt); 1292 WARN_ON(!pd_len); 1293 WARN_ON(!gen8_pte_count(pd_start, pd_len)); 1294 1295 /* Set our used ptes within the page table */ 1296 bitmap_set(pt->used_ptes, 1297 gen8_pte_index(pd_start), 1298 gen8_pte_count(pd_start, pd_len)); 1299 1300 /* Our pde is now pointing to the pagetable, pt */ 1301 __set_bit(pde, pd->used_pdes); 1302 1303 /* Map the PDE to the page table */ 1304 page_directory[pde] = gen8_pde_encode(px_dma(pt), 1305 I915_CACHE_LLC); 1306 trace_i915_page_table_entry_map(&ppgtt->base, pde, pt, 1307 gen8_pte_index(start), 1308 gen8_pte_count(start, length), 1309 GEN8_PTES); 1310 1311 /* NB: We haven't yet mapped ptes to pages. At this 1312 * point we're still relying on insert_entries() */ 1313 } 1314 1315 kunmap_px(ppgtt, page_directory); 1316 __set_bit(pdpe, pdp->used_pdpes); 1317 gen8_setup_page_directory(ppgtt, pdp, pd, pdpe); 1318 } 1319 1320 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); 1321 mark_tlbs_dirty(ppgtt); 1322 return 0; 1323 1324 err_out: 1325 while (pdpe--) { 1326 unsigned long temp; 1327 1328 for_each_set_bit(temp, new_page_tables + pdpe * 1329 BITS_TO_LONGS(I915_PDES), I915_PDES) 1330 free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]); 1331 } 1332 1333 for_each_set_bit(pdpe, new_page_dirs, pdpes) 1334 free_pd(dev, pdp->page_directory[pdpe]); 1335 1336 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables); 1337 mark_tlbs_dirty(ppgtt); 1338 return ret; 1339 } 1340 1341 static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, 1342 struct i915_pml4 *pml4, 1343 uint64_t start, 1344 uint64_t length) 1345 { 1346 DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4); 1347 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1348 struct i915_page_directory_pointer *pdp; 1349 uint64_t pml4e; 1350 int ret = 0; 1351 1352 /* Do the pml4 allocations first, so we don't need to track the newly 1353 * allocated tables below the pdp */ 1354 bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4); 1355 1356 /* The pagedirectory and pagetable allocations are done in the shared 3 1357 * and 4 level code. Just allocate the pdps. 
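	 * Newly allocated pdps are remembered in new_pdps so that, should a
	 * later 3-level allocation fail, only those are torn down again via
	 * gen8_ppgtt_cleanup_3lvl() in the err_out path.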
1358 */ 1359 ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length, 1360 new_pdps); 1361 if (ret) 1362 return ret; 1363 1364 WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2, 1365 "The allocation has spanned more than 512GB. " 1366 "It is highly likely this is incorrect."); 1367 1368 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { 1369 WARN_ON(!pdp); 1370 1371 ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length); 1372 if (ret) 1373 goto err_out; 1374 1375 gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e); 1376 } 1377 1378 bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es, 1379 GEN8_PML4ES_PER_PML4); 1380 1381 return 0; 1382 1383 err_out: 1384 for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4) 1385 gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]); 1386 1387 return ret; 1388 } 1389 1390 static int gen8_alloc_va_range(struct i915_address_space *vm, 1391 uint64_t start, uint64_t length) 1392 { 1393 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1394 1395 if (USES_FULL_48BIT_PPGTT(vm->dev)) 1396 return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length); 1397 else 1398 return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length); 1399 } 1400 1401 static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp, 1402 uint64_t start, uint64_t length, 1403 gen8_pte_t scratch_pte, 1404 struct seq_file *m) 1405 { 1406 struct i915_page_directory *pd; 1407 uint32_t pdpe; 1408 1409 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { 1410 struct i915_page_table *pt; 1411 uint64_t pd_len = length; 1412 uint64_t pd_start = start; 1413 uint32_t pde; 1414 1415 if (!test_bit(pdpe, pdp->used_pdpes)) 1416 continue; 1417 1418 seq_printf(m, "\tPDPE #%d\n", pdpe); 1419 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) { 1420 uint32_t pte; 1421 gen8_pte_t *pt_vaddr; 1422 1423 if (!test_bit(pde, pd->used_pdes)) 1424 continue; 1425 1426 pt_vaddr = kmap_px(pt); 1427 for (pte = 0; pte < GEN8_PTES; pte += 4) { 1428 uint64_t va = 1429 (pdpe << GEN8_PDPE_SHIFT) | 1430 (pde << GEN8_PDE_SHIFT) | 1431 (pte << GEN8_PTE_SHIFT); 1432 int i; 1433 bool found = false; 1434 1435 for (i = 0; i < 4; i++) 1436 if (pt_vaddr[pte + i] != scratch_pte) 1437 found = true; 1438 if (!found) 1439 continue; 1440 1441 seq_printf(m, "\t\t0x%lx [%03d,%03d,%04d]: =", va, pdpe, pde, pte); 1442 for (i = 0; i < 4; i++) { 1443 if (pt_vaddr[pte + i] != scratch_pte) 1444 seq_printf(m, " %lx", pt_vaddr[pte + i]); 1445 else 1446 seq_puts(m, " SCRATCH "); 1447 } 1448 seq_puts(m, "\n"); 1449 } 1450 /* don't use kunmap_px, it could trigger 1451 * an unnecessary flush. 
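			 * (kunmap_px() clflushes the page on CHV/BXT, see
			 * kunmap_page_dma(); a plain kunmap_atomic() is
			 * enough for this read-only dump.)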
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, true);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
		uint64_t pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, " PML4E #%lu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}

static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
	unsigned long *new_page_dirs, *new_page_tables;
	uint32_t pdpes = I915_PDPES_PER_PDP(ppgtt->base.dev);
	int ret;

	/* We allocate temp bitmaps for the page tables for no real gain,
	 * but as this is for init only, let's keep things simple.
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);

	return ret;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
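 * In detail: 4 PDP entries x 512 PDEs x 512 PTEs x 4096-byte pages gives the
 * 4GiB of address space.  With full 48b PPGTT a fourth level (the PML4, 512
 * entries of 512GiB each) extends this to 256TiB, i.e. 1ULL << 48.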
1515 * 1516 */ 1517 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 1518 { 1519 int ret; 1520 1521 ret = gen8_init_scratch(&ppgtt->base); 1522 if (ret) 1523 return ret; 1524 1525 ppgtt->base.start = 0; 1526 ppgtt->base.cleanup = gen8_ppgtt_cleanup; 1527 ppgtt->base.allocate_va_range = gen8_alloc_va_range; 1528 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; 1529 ppgtt->base.clear_range = gen8_ppgtt_clear_range; 1530 ppgtt->base.unbind_vma = ppgtt_unbind_vma; 1531 ppgtt->base.bind_vma = ppgtt_bind_vma; 1532 ppgtt->debug_dump = gen8_dump_ppgtt; 1533 1534 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { 1535 ret = setup_px(ppgtt->base.dev, &ppgtt->pml4); 1536 if (ret) 1537 goto free_scratch; 1538 1539 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4); 1540 1541 ppgtt->base.total = 1ULL << 48; 1542 ppgtt->switch_mm = gen8_48b_mm_switch; 1543 } else { 1544 ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp); 1545 if (ret) 1546 goto free_scratch; 1547 1548 ppgtt->base.total = 1ULL << 32; 1549 ppgtt->switch_mm = gen8_legacy_mm_switch; 1550 trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base, 1551 0, 0, 1552 GEN8_PML4E_SHIFT); 1553 1554 if (intel_vgpu_active(to_i915(ppgtt->base.dev))) { 1555 ret = gen8_preallocate_top_level_pdps(ppgtt); 1556 if (ret) 1557 goto free_scratch; 1558 } 1559 } 1560 1561 if (intel_vgpu_active(to_i915(ppgtt->base.dev))) 1562 gen8_ppgtt_notify_vgt(ppgtt, true); 1563 1564 return 0; 1565 1566 free_scratch: 1567 gen8_free_scratch(&ppgtt->base); 1568 return ret; 1569 } 1570 1571 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) 1572 { 1573 struct i915_address_space *vm = &ppgtt->base; 1574 struct i915_page_table *unused; 1575 gen6_pte_t scratch_pte; 1576 uint32_t pd_entry; 1577 uint32_t pte, pde; 1578 uint32_t start = ppgtt->base.start, length = ppgtt->base.total; 1579 1580 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), 1581 I915_CACHE_LLC, true, 0); 1582 1583 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) { 1584 u32 expected; 1585 gen6_pte_t *pt_vaddr; 1586 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]); 1587 pd_entry = readl(ppgtt->pd_addr + pde); 1588 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); 1589 1590 if (pd_entry != expected) 1591 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n", 1592 pde, 1593 pd_entry, 1594 expected); 1595 seq_printf(m, "\tPDE: %x\n", pd_entry); 1596 1597 pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]); 1598 1599 for (pte = 0; pte < GEN6_PTES; pte+=4) { 1600 unsigned long va = 1601 (pde * PAGE_SIZE * GEN6_PTES) + 1602 (pte * PAGE_SIZE); 1603 int i; 1604 bool found = false; 1605 for (i = 0; i < 4; i++) 1606 if (pt_vaddr[pte + i] != scratch_pte) 1607 found = true; 1608 if (!found) 1609 continue; 1610 1611 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte); 1612 for (i = 0; i < 4; i++) { 1613 if (pt_vaddr[pte + i] != scratch_pte) 1614 seq_printf(m, " %08x", pt_vaddr[pte + i]); 1615 else 1616 seq_puts(m, " SCRATCH "); 1617 } 1618 seq_puts(m, "\n"); 1619 } 1620 kunmap_px(ppgtt, pt_vaddr); 1621 } 1622 } 1623 1624 /* Write pde (index) from the page directory @pd to the page table @pt */ 1625 static void gen6_write_pde(struct i915_page_directory *pd, 1626 const int pde, struct i915_page_table *pt) 1627 { 1628 /* Caller needs to make sure the write completes if necessary */ 1629 struct i915_hw_ppgtt *ppgtt = 1630 container_of(pd, struct i915_hw_ppgtt, pd); 1631 u32 pd_entry; 1632 1633 pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt)); 1634 pd_entry |= 
GEN6_PDE_VALID; 1635 1636 writel(pd_entry, ppgtt->pd_addr + pde); 1637 } 1638 1639 /* Write all the page tables found in the ppgtt structure to incrementing page 1640 * directories. */ 1641 static void gen6_write_page_range(struct drm_i915_private *dev_priv, 1642 struct i915_page_directory *pd, 1643 uint32_t start, uint32_t length) 1644 { 1645 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1646 struct i915_page_table *pt; 1647 uint32_t pde; 1648 1649 gen6_for_each_pde(pt, pd, start, length, pde) 1650 gen6_write_pde(pd, pde, pt); 1651 1652 /* Make sure write is complete before other code can use this page 1653 * table. Also require for WC mapped PTEs */ 1654 readl(ggtt->gsm); 1655 } 1656 1657 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) 1658 { 1659 BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f); 1660 1661 return (ppgtt->pd.base.ggtt_offset / 64) << 16; 1662 } 1663 1664 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, 1665 struct drm_i915_gem_request *req) 1666 { 1667 struct intel_engine_cs *engine = req->engine; 1668 int ret; 1669 1670 /* NB: TLBs must be flushed and invalidated before a switch */ 1671 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 1672 if (ret) 1673 return ret; 1674 1675 ret = intel_ring_begin(req, 6); 1676 if (ret) 1677 return ret; 1678 1679 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2)); 1680 intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine)); 1681 intel_ring_emit(engine, PP_DIR_DCLV_2G); 1682 intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine)); 1683 intel_ring_emit(engine, get_pd_offset(ppgtt)); 1684 intel_ring_emit(engine, MI_NOOP); 1685 intel_ring_advance(engine); 1686 1687 return 0; 1688 } 1689 1690 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt, 1691 struct drm_i915_gem_request *req) 1692 { 1693 struct intel_engine_cs *engine = req->engine; 1694 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); 1695 1696 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); 1697 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); 1698 return 0; 1699 } 1700 1701 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 1702 struct drm_i915_gem_request *req) 1703 { 1704 struct intel_engine_cs *engine = req->engine; 1705 int ret; 1706 1707 /* NB: TLBs must be flushed and invalidated before a switch */ 1708 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 1709 if (ret) 1710 return ret; 1711 1712 ret = intel_ring_begin(req, 6); 1713 if (ret) 1714 return ret; 1715 1716 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2)); 1717 intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine)); 1718 intel_ring_emit(engine, PP_DIR_DCLV_2G); 1719 intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine)); 1720 intel_ring_emit(engine, get_pd_offset(ppgtt)); 1721 intel_ring_emit(engine, MI_NOOP); 1722 intel_ring_advance(engine); 1723 1724 /* XXX: RCS is the only one to auto invalidate the TLBs? 
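	 * Play it safe on the non-render rings and issue an explicit extra
	 * flush after the switch.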
	 */
	if (engine->id != RCS) {
		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));

	POSTING_READ(RING_PP_DIR_DCLV(engine));

	return 0;
}

static void gen8_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv) {
		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
	}
}

static void gen7_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *engine;
	uint32_t ecochk, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_engine(engine, dev_priv) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen6_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned first_pte = first_entry % GEN6_PTES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > GEN6_PTES)
			last_pte = GEN6_PTES;

		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_px(ppgtt, pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
enum i915_cache_level cache_level, u32 flags) 1844 { 1845 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1846 unsigned first_entry = start >> PAGE_SHIFT; 1847 unsigned act_pt = first_entry / GEN6_PTES; 1848 unsigned act_pte = first_entry % GEN6_PTES; 1849 gen6_pte_t *pt_vaddr = NULL; 1850 struct sgt_iter sgt_iter; 1851 dma_addr_t addr; 1852 1853 for_each_sgt_dma(addr, sgt_iter, pages) { 1854 if (pt_vaddr == NULL) 1855 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); 1856 1857 pt_vaddr[act_pte] = 1858 vm->pte_encode(addr, cache_level, true, flags); 1859 1860 if (++act_pte == GEN6_PTES) { 1861 kunmap_px(ppgtt, pt_vaddr); 1862 pt_vaddr = NULL; 1863 act_pt++; 1864 act_pte = 0; 1865 } 1866 } 1867 1868 if (pt_vaddr) 1869 kunmap_px(ppgtt, pt_vaddr); 1870 } 1871 1872 static int gen6_alloc_va_range(struct i915_address_space *vm, 1873 uint64_t start_in, uint64_t length_in) 1874 { 1875 DECLARE_BITMAP(new_page_tables, I915_PDES); 1876 struct drm_device *dev = vm->dev; 1877 struct drm_i915_private *dev_priv = to_i915(dev); 1878 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1879 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1880 struct i915_page_table *pt; 1881 uint32_t start, length, start_save, length_save; 1882 uint32_t pde; 1883 int ret; 1884 1885 if (WARN_ON(start_in + length_in > ppgtt->base.total)) 1886 return -ENODEV; 1887 1888 start = start_save = start_in; 1889 length = length_save = length_in; 1890 1891 bitmap_zero(new_page_tables, I915_PDES); 1892 1893 /* The allocation is done in two stages so that we can bail out with 1894 * minimal amount of pain. The first stage finds new page tables that 1895 * need allocation. The second stage marks use ptes within the page 1896 * tables. 1897 */ 1898 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) { 1899 if (pt != vm->scratch_pt) { 1900 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); 1901 continue; 1902 } 1903 1904 /* We've already allocated a page table */ 1905 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES)); 1906 1907 pt = alloc_pt(dev); 1908 if (IS_ERR(pt)) { 1909 ret = PTR_ERR(pt); 1910 goto unwind_out; 1911 } 1912 1913 gen6_initialize_pt(vm, pt); 1914 1915 ppgtt->pd.page_table[pde] = pt; 1916 __set_bit(pde, new_page_tables); 1917 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT); 1918 } 1919 1920 start = start_save; 1921 length = length_save; 1922 1923 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) { 1924 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES); 1925 1926 bitmap_zero(tmp_bitmap, GEN6_PTES); 1927 bitmap_set(tmp_bitmap, gen6_pte_index(start), 1928 gen6_pte_count(start, length)); 1929 1930 if (__test_and_clear_bit(pde, new_page_tables)) 1931 gen6_write_pde(&ppgtt->pd, pde, pt); 1932 1933 trace_i915_page_table_entry_map(vm, pde, pt, 1934 gen6_pte_index(start), 1935 gen6_pte_count(start, length), 1936 GEN6_PTES); 1937 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes, 1938 GEN6_PTES); 1939 } 1940 1941 WARN_ON(!bitmap_empty(new_page_tables, I915_PDES)); 1942 1943 /* Make sure write is complete before other code can use this page 1944 * table. 
Also require for WC mapped PTEs */ 1945 readl(ggtt->gsm); 1946 1947 mark_tlbs_dirty(ppgtt); 1948 return 0; 1949 1950 unwind_out: 1951 for_each_set_bit(pde, new_page_tables, I915_PDES) { 1952 struct i915_page_table *pt = ppgtt->pd.page_table[pde]; 1953 1954 ppgtt->pd.page_table[pde] = vm->scratch_pt; 1955 free_pt(vm->dev, pt); 1956 } 1957 1958 mark_tlbs_dirty(ppgtt); 1959 return ret; 1960 } 1961 1962 static int gen6_init_scratch(struct i915_address_space *vm) 1963 { 1964 struct drm_device *dev = vm->dev; 1965 1966 vm->scratch_page = alloc_scratch_page(dev); 1967 if (IS_ERR(vm->scratch_page)) 1968 return PTR_ERR(vm->scratch_page); 1969 1970 vm->scratch_pt = alloc_pt(dev); 1971 if (IS_ERR(vm->scratch_pt)) { 1972 free_scratch_page(dev, vm->scratch_page); 1973 return PTR_ERR(vm->scratch_pt); 1974 } 1975 1976 gen6_initialize_pt(vm, vm->scratch_pt); 1977 1978 return 0; 1979 } 1980 1981 static void gen6_free_scratch(struct i915_address_space *vm) 1982 { 1983 struct drm_device *dev = vm->dev; 1984 1985 free_pt(dev, vm->scratch_pt); 1986 free_scratch_page(dev, vm->scratch_page); 1987 } 1988 1989 static void gen6_ppgtt_cleanup(struct i915_address_space *vm) 1990 { 1991 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1992 struct i915_page_directory *pd = &ppgtt->pd; 1993 struct drm_device *dev = vm->dev; 1994 struct i915_page_table *pt; 1995 uint32_t pde; 1996 1997 drm_mm_remove_node(&ppgtt->node); 1998 1999 gen6_for_all_pdes(pt, pd, pde) 2000 if (pt != vm->scratch_pt) 2001 free_pt(dev, pt); 2002 2003 gen6_free_scratch(vm); 2004 } 2005 2006 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) 2007 { 2008 struct i915_address_space *vm = &ppgtt->base; 2009 struct drm_device *dev = ppgtt->base.dev; 2010 struct drm_i915_private *dev_priv = to_i915(dev); 2011 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2012 bool retried = false; 2013 int ret; 2014 2015 /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The 2016 * allocator works in address space sizes, so it's multiplied by page 2017 * size. We allocate at the top of the GTT to avoid fragmentation. 
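	 * The node is requested with DRM_MM_TOPDOWN; if the GGTT is too full
	 * to hold it we evict something once and retry (see the alloc: label
	 * below).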
2018 */ 2019 BUG_ON(!drm_mm_initialized(&ggtt->base.mm)); 2020 2021 ret = gen6_init_scratch(vm); 2022 if (ret) 2023 return ret; 2024 2025 alloc: 2026 ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, 2027 &ppgtt->node, GEN6_PD_SIZE, 2028 GEN6_PD_ALIGN, 0, 2029 0, ggtt->base.total, 2030 DRM_MM_TOPDOWN); 2031 if (ret == -ENOSPC && !retried) { 2032 ret = i915_gem_evict_something(dev, &ggtt->base, 2033 GEN6_PD_SIZE, GEN6_PD_ALIGN, 2034 I915_CACHE_NONE, 2035 0, ggtt->base.total, 2036 0); 2037 if (ret) 2038 goto err_out; 2039 2040 retried = true; 2041 goto alloc; 2042 } 2043 2044 if (ret) 2045 goto err_out; 2046 2047 2048 if (ppgtt->node.start < ggtt->mappable_end) 2049 DRM_DEBUG("Forced to use aperture for PDEs\n"); 2050 2051 return 0; 2052 2053 err_out: 2054 gen6_free_scratch(vm); 2055 return ret; 2056 } 2057 2058 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) 2059 { 2060 return gen6_ppgtt_allocate_page_directories(ppgtt); 2061 } 2062 2063 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, 2064 uint64_t start, uint64_t length) 2065 { 2066 struct i915_page_table *unused; 2067 uint32_t pde; 2068 2069 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) 2070 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; 2071 } 2072 2073 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 2074 { 2075 struct drm_device *dev = ppgtt->base.dev; 2076 struct drm_i915_private *dev_priv = to_i915(dev); 2077 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2078 int ret; 2079 2080 ppgtt->base.pte_encode = ggtt->base.pte_encode; 2081 if (IS_GEN6(dev)) { 2082 ppgtt->switch_mm = gen6_mm_switch; 2083 } else if (IS_HASWELL(dev)) { 2084 ppgtt->switch_mm = hsw_mm_switch; 2085 } else if (IS_GEN7(dev)) { 2086 ppgtt->switch_mm = gen7_mm_switch; 2087 } else 2088 BUG(); 2089 2090 if (intel_vgpu_active(dev_priv)) 2091 ppgtt->switch_mm = vgpu_mm_switch; 2092 2093 ret = gen6_ppgtt_alloc(ppgtt); 2094 if (ret) 2095 return ret; 2096 2097 ppgtt->base.allocate_va_range = gen6_alloc_va_range; 2098 ppgtt->base.clear_range = gen6_ppgtt_clear_range; 2099 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 2100 ppgtt->base.unbind_vma = ppgtt_unbind_vma; 2101 ppgtt->base.bind_vma = ppgtt_bind_vma; 2102 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 2103 ppgtt->base.start = 0; 2104 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE; 2105 ppgtt->debug_dump = gen6_dump_ppgtt; 2106 2107 ppgtt->pd.base.ggtt_offset = 2108 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); 2109 2110 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + 2111 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t); 2112 2113 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); 2114 2115 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total); 2116 2117 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", 2118 ppgtt->node.size >> 20, 2119 ppgtt->node.start / PAGE_SIZE); 2120 2121 DRM_DEBUG("Adding PPGTT at offset %x\n", 2122 ppgtt->pd.base.ggtt_offset << 10); 2123 2124 return 0; 2125 } 2126 2127 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 2128 { 2129 ppgtt->base.dev = dev; 2130 2131 if (INTEL_INFO(dev)->gen < 8) 2132 return gen6_ppgtt_init(ppgtt); 2133 else 2134 return gen8_ppgtt_init(ppgtt); 2135 } 2136 2137 static void i915_address_space_init(struct i915_address_space *vm, 2138 struct drm_i915_private *dev_priv) 2139 { 2140 drm_mm_init(&vm->mm, vm->start, vm->total); 2141 vm->dev = dev_priv->dev; 2142 INIT_LIST_HEAD(&vm->active_list); 2143 INIT_LIST_HEAD(&vm->inactive_list); 2144 
list_add_tail(&vm->global_link, &dev_priv->vm_list); 2145 } 2146 2147 static void gtt_write_workarounds(struct drm_device *dev) 2148 { 2149 struct drm_i915_private *dev_priv = dev->dev_private; 2150 2151 /* This function is for gtt related workarounds. This function is 2152 * called on driver load and after a GPU reset, so you can place 2153 * workarounds here even if they get overwritten by GPU reset. 2154 */ 2155 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */ 2156 if (IS_BROADWELL(dev)) 2157 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); 2158 else if (IS_CHERRYVIEW(dev)) 2159 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); 2160 else if (IS_SKYLAKE(dev)) 2161 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); 2162 else if (IS_BROXTON(dev)) 2163 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); 2164 } 2165 2166 static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 2167 { 2168 struct drm_i915_private *dev_priv = dev->dev_private; 2169 int ret = 0; 2170 2171 ret = __hw_ppgtt_init(dev, ppgtt); 2172 if (ret == 0) { 2173 kref_init(&ppgtt->ref); 2174 i915_address_space_init(&ppgtt->base, dev_priv); 2175 } 2176 2177 return ret; 2178 } 2179 2180 int i915_ppgtt_init_hw(struct drm_device *dev) 2181 { 2182 gtt_write_workarounds(dev); 2183 2184 /* In the case of execlists, PPGTT is enabled by the context descriptor 2185 * and the PDPs are contained within the context itself. We don't 2186 * need to do anything here. */ 2187 if (i915.enable_execlists) 2188 return 0; 2189 2190 if (!USES_PPGTT(dev)) 2191 return 0; 2192 2193 if (IS_GEN6(dev)) 2194 gen6_ppgtt_enable(dev); 2195 else if (IS_GEN7(dev)) 2196 gen7_ppgtt_enable(dev); 2197 else if (INTEL_INFO(dev)->gen >= 8) 2198 gen8_ppgtt_enable(dev); 2199 else 2200 MISSING_CASE(INTEL_INFO(dev)->gen); 2201 2202 return 0; 2203 } 2204 2205 struct i915_hw_ppgtt * 2206 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) 2207 { 2208 struct i915_hw_ppgtt *ppgtt; 2209 int ret; 2210 2211 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 2212 if (!ppgtt) 2213 return ERR_PTR(-ENOMEM); 2214 2215 ret = i915_ppgtt_init(dev, ppgtt); 2216 if (ret) { 2217 kfree(ppgtt); 2218 return ERR_PTR(ret); 2219 } 2220 2221 ppgtt->file_priv = fpriv; 2222 2223 trace_i915_ppgtt_create(&ppgtt->base); 2224 2225 return ppgtt; 2226 } 2227 2228 void i915_ppgtt_release(struct kref *kref) 2229 { 2230 struct i915_hw_ppgtt *ppgtt = 2231 container_of(kref, struct i915_hw_ppgtt, ref); 2232 2233 trace_i915_ppgtt_release(&ppgtt->base); 2234 2235 /* vmas should already be unbound */ 2236 WARN_ON(!list_empty(&ppgtt->base.active_list)); 2237 WARN_ON(!list_empty(&ppgtt->base.inactive_list)); 2238 2239 list_del(&ppgtt->base.global_link); 2240 drm_mm_takedown(&ppgtt->base.mm); 2241 2242 ppgtt->base.cleanup(&ppgtt->base); 2243 kfree(ppgtt); 2244 } 2245 2246 extern int intel_iommu_gfx_mapped; 2247 /* Certain Gen5 chipsets require idling the GPU before 2248 * unmapping anything from the GTT when VT-d is enabled. 2249 */ 2250 static bool needs_idle_maps(struct drm_device *dev) 2251 { 2252 #ifdef CONFIG_INTEL_IOMMU 2253 /* Query intel_iommu to see if we need the workaround. Presumably that 2254 * was loaded first.
2255 */ 2256 if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped) 2257 return true; 2258 #endif 2259 return false; 2260 } 2261 2262 static bool do_idling(struct drm_i915_private *dev_priv) 2263 { 2264 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2265 bool ret = dev_priv->mm.interruptible; 2266 2267 if (unlikely(ggtt->do_idle_maps)) { 2268 dev_priv->mm.interruptible = false; 2269 if (i915_gem_wait_for_idle(dev_priv)) { 2270 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); 2271 /* Wait a bit, in hopes it avoids the hang */ 2272 udelay(10); 2273 } 2274 } 2275 2276 return ret; 2277 } 2278 2279 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) 2280 { 2281 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2282 2283 if (unlikely(ggtt->do_idle_maps)) 2284 dev_priv->mm.interruptible = interruptible; 2285 } 2286 2287 void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) 2288 { 2289 struct intel_engine_cs *engine; 2290 2291 if (INTEL_INFO(dev_priv)->gen < 6) 2292 return; 2293 2294 for_each_engine(engine, dev_priv) { 2295 u32 fault_reg; 2296 fault_reg = I915_READ(RING_FAULT_REG(engine)); 2297 if (fault_reg & RING_FAULT_VALID) { 2298 DRM_DEBUG_DRIVER("Unexpected fault\n" 2299 "\tAddr: 0x%08ux\n" 2300 "\tAddress space: %s\n" 2301 "\tSource ID: %d\n" 2302 "\tType: %d\n", 2303 fault_reg & LINUX_PAGE_MASK, 2304 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", 2305 RING_FAULT_SRCID(fault_reg), 2306 RING_FAULT_FAULT_TYPE(fault_reg)); 2307 I915_WRITE(RING_FAULT_REG(engine), 2308 fault_reg & ~RING_FAULT_VALID); 2309 } 2310 } 2311 POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS])); 2312 } 2313 2314 static void i915_ggtt_flush(struct drm_i915_private *dev_priv) 2315 { 2316 if (INTEL_INFO(dev_priv)->gen < 6) { 2317 intel_gtt_chipset_flush(); 2318 } else { 2319 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 2320 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2321 } 2322 } 2323 2324 void i915_gem_suspend_gtt_mappings(struct drm_device *dev) 2325 { 2326 struct drm_i915_private *dev_priv = to_i915(dev); 2327 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2328 2329 /* Don't bother messing with faults pre GEN6 as we have little 2330 * documentation supporting that it's a good idea. 
2331 */ 2332 if (INTEL_INFO(dev)->gen < 6) 2333 return; 2334 2335 i915_check_and_clear_faults(dev_priv); 2336 2337 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, 2338 true); 2339 2340 i915_ggtt_flush(dev_priv); 2341 } 2342 2343 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) 2344 { 2345 if (!dma_map_sg(&obj->base.dev->pdev->dev, 2346 obj->pages->sgl, obj->pages->nents, 2347 PCI_DMA_BIDIRECTIONAL)) 2348 return -ENOSPC; 2349 2350 return 0; 2351 } 2352 2353 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) 2354 { 2355 #ifdef writeq 2356 writeq(pte, addr); 2357 #else 2358 iowrite32((u32)pte, addr); 2359 iowrite32(pte >> 32, addr + 4); 2360 #endif 2361 } 2362 2363 static void gen8_ggtt_insert_page(struct i915_address_space *vm, 2364 dma_addr_t addr, 2365 uint64_t offset, 2366 enum i915_cache_level level, 2367 u32 unused) 2368 { 2369 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2370 gen8_pte_t __iomem *pte = 2371 (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + 2372 (offset >> PAGE_SHIFT); 2373 int rpm_atomic_seq; 2374 2375 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2376 2377 gen8_set_pte(pte, gen8_pte_encode(addr, level, true)); 2378 2379 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 2380 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2381 2382 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2383 } 2384 2385 static void gen8_ggtt_insert_entries(struct i915_address_space *vm, 2386 struct sg_table *st, 2387 uint64_t start, 2388 enum i915_cache_level level, u32 unused) 2389 { 2390 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2391 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2392 struct sgt_iter sgt_iter; 2393 gen8_pte_t __iomem *gtt_entries; 2394 gen8_pte_t gtt_entry; 2395 dma_addr_t addr; 2396 int rpm_atomic_seq; 2397 int i = 0; 2398 2399 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2400 2401 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); 2402 2403 for_each_sgt_dma(addr, sgt_iter, st) { 2404 gtt_entry = gen8_pte_encode(addr, level, true); 2405 gen8_set_pte(&gtt_entries[i++], gtt_entry); 2406 } 2407 2408 /* 2409 * XXX: This serves as a posting read to make sure that the PTE has 2410 * actually been updated. There is some concern that even though 2411 * registers and PTEs are within the same BAR that they are potentially 2412 * of NUMA access patterns. Therefore, even with the way we assume 2413 * hardware should work, we must keep this posting read for paranoia. 2414 */ 2415 if (i != 0) 2416 WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry); 2417 2418 /* This next bit makes the above posting read even more important. We 2419 * want to flush the TLBs only after we're certain all the PTE updates 2420 * have finished.
2421 */ 2422 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 2423 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2424 2425 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2426 } 2427 2428 struct insert_entries { 2429 struct i915_address_space *vm; 2430 struct sg_table *st; 2431 uint64_t start; 2432 enum i915_cache_level level; 2433 u32 flags; 2434 }; 2435 2436 static int gen8_ggtt_insert_entries__cb(void *_arg) 2437 { 2438 struct insert_entries *arg = _arg; 2439 gen8_ggtt_insert_entries(arg->vm, arg->st, 2440 arg->start, arg->level, arg->flags); 2441 return 0; 2442 } 2443 2444 static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm, 2445 struct sg_table *st, 2446 uint64_t start, 2447 enum i915_cache_level level, 2448 u32 flags) 2449 { 2450 struct insert_entries arg = { vm, st, start, level, flags }; 2451 #ifndef __DragonFly__ 2452 stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL); 2453 #else 2454 /* XXX: is this enough ? 2455 * See Linux commit 5bab6f60cb4d1417ad7c599166bcfec87529c1a2 */ 2456 get_mplock(); 2457 gen8_ggtt_insert_entries__cb(&arg); 2458 rel_mplock(); 2459 #endif 2460 } 2461 2462 static void gen6_ggtt_insert_page(struct i915_address_space *vm, 2463 dma_addr_t addr, 2464 uint64_t offset, 2465 enum i915_cache_level level, 2466 u32 flags) 2467 { 2468 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2469 gen6_pte_t __iomem *pte = 2470 (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + 2471 (offset >> PAGE_SHIFT); 2472 int rpm_atomic_seq; 2473 2474 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2475 2476 iowrite32(vm->pte_encode(addr, level, true, flags), pte); 2477 2478 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 2479 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2480 2481 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2482 } 2483 2484 /* 2485 * Binds an object into the global gtt with the specified cache level. The object 2486 * will be accessible to the GPU via commands whose operands reference offsets 2487 * within the global GTT as well as accessible by the GPU through the GMADR 2488 * mapped BAR (dev_priv->mm.gtt->gtt). 2489 */ 2490 static void gen6_ggtt_insert_entries(struct i915_address_space *vm, 2491 struct sg_table *st, 2492 uint64_t start, 2493 enum i915_cache_level level, u32 flags) 2494 { 2495 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2496 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2497 struct sgt_iter sgt_iter; 2498 gen6_pte_t __iomem *gtt_entries; 2499 gen6_pte_t gtt_entry; 2500 dma_addr_t addr; 2501 int rpm_atomic_seq; 2502 int i = 0; 2503 2504 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2505 2506 gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); 2507 2508 for_each_sgt_dma(addr, sgt_iter, st) { 2509 gtt_entry = vm->pte_encode(addr, level, true, flags); 2510 iowrite32(gtt_entry, &gtt_entries[i++]); 2511 } 2512 2513 /* XXX: This serves as a posting read to make sure that the PTE has 2514 * actually been updated. There is some concern that even though 2515 * registers and PTEs are within the same BAR that they are potentially 2516 * of NUMA access patterns. Therefore, even with the way we assume 2517 * hardware should work, we must keep this posting read for paranoia. 2518 */ 2519 if (i != 0) 2520 WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry); 2521 2522 /* This next bit makes the above posting read even more important. We 2523 * want to flush the TLBs only after we're certain all the PTE updates 2524 * have finished.
2525 */ 2526 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 2527 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2528 2529 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2530 } 2531 2532 static void nop_clear_range(struct i915_address_space *vm, 2533 uint64_t start, 2534 uint64_t length, 2535 bool use_scratch) 2536 { 2537 } 2538 2539 static void gen8_ggtt_clear_range(struct i915_address_space *vm, 2540 uint64_t start, 2541 uint64_t length, 2542 bool use_scratch) 2543 { 2544 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2545 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2546 unsigned first_entry = start >> PAGE_SHIFT; 2547 unsigned num_entries = length >> PAGE_SHIFT; 2548 gen8_pte_t scratch_pte, __iomem *gtt_base = 2549 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; 2550 const int max_entries = ggtt_total_entries(ggtt) - first_entry; 2551 int i; 2552 int rpm_atomic_seq; 2553 2554 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2555 2556 if (WARN(num_entries > max_entries, 2557 "First entry = %d; Num entries = %d (max=%d)\n", 2558 first_entry, num_entries, max_entries)) 2559 num_entries = max_entries; 2560 2561 scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), 2562 I915_CACHE_LLC, 2563 use_scratch); 2564 for (i = 0; i < num_entries; i++) 2565 gen8_set_pte(&gtt_base[i], scratch_pte); 2566 readl(gtt_base); 2567 2568 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2569 } 2570 2571 static void gen6_ggtt_clear_range(struct i915_address_space *vm, 2572 uint64_t start, 2573 uint64_t length, 2574 bool use_scratch) 2575 { 2576 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2577 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); 2578 unsigned first_entry = start >> PAGE_SHIFT; 2579 unsigned num_entries = length >> PAGE_SHIFT; 2580 gen6_pte_t scratch_pte, __iomem *gtt_base = 2581 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; 2582 const int max_entries = ggtt_total_entries(ggtt) - first_entry; 2583 int i; 2584 int rpm_atomic_seq; 2585 2586 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2587 2588 if (WARN(num_entries > max_entries, 2589 "First entry = %d; Num entries = %d (max=%d)\n", 2590 first_entry, num_entries, max_entries)) 2591 num_entries = max_entries; 2592 2593 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), 2594 I915_CACHE_LLC, use_scratch, 0); 2595 2596 for (i = 0; i < num_entries; i++) 2597 iowrite32(scratch_pte, &gtt_base[i]); 2598 readl(gtt_base); 2599 2600 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2601 } 2602 2603 static void i915_ggtt_insert_page(struct i915_address_space *vm, 2604 dma_addr_t addr, 2605 uint64_t offset, 2606 enum i915_cache_level cache_level, 2607 u32 unused) 2608 { 2609 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2610 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 2611 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 2612 int rpm_atomic_seq; 2613 2614 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2615 2616 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); 2617 2618 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2619 } 2620 2621 static void i915_ggtt_insert_entries(struct i915_address_space *vm, 2622 struct sg_table *pages, 2623 uint64_t start, 2624 enum i915_cache_level cache_level, u32 unused) 2625 { 2626 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2627 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2628 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 2629 int rpm_atomic_seq; 2630 2631 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2632 2633 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags); 2634 2635 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2636 2637 } 2638 2639 static void i915_ggtt_clear_range(struct i915_address_space *vm, 2640 uint64_t start, 2641 uint64_t length, 2642 bool unused) 2643 { 2644 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2645 unsigned first_entry = start >> PAGE_SHIFT; 2646 unsigned num_entries = length >> PAGE_SHIFT; 2647 int rpm_atomic_seq; 2648 2649 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2650 2651 intel_gtt_clear_range(first_entry, num_entries); 2652 2653 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2654 } 2655 2656 static int ggtt_bind_vma(struct i915_vma *vma, 2657 enum i915_cache_level cache_level, 2658 u32 flags) 2659 { 2660 struct drm_i915_gem_object *obj = vma->obj; 2661 u32 pte_flags = 0; 2662 int ret; 2663 2664 ret = i915_get_ggtt_vma_pages(vma); 2665 if (ret) 2666 return ret; 2667 2668 /* Currently applicable only to VLV */ 2669 if (obj->gt_ro) 2670 pte_flags |= PTE_READ_ONLY; 2671 2672 vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages, 2673 vma->node.start, 2674 cache_level, pte_flags); 2675 2676 /* 2677 * Without aliasing PPGTT there's no difference between 2678 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally 2679 * upgrade to both bound if we bind either to avoid double-binding. 2680 */ 2681 vma->bound |= GLOBAL_BIND | LOCAL_BIND; 2682 2683 return 0; 2684 } 2685 2686 static int aliasing_gtt_bind_vma(struct i915_vma *vma, 2687 enum i915_cache_level cache_level, 2688 u32 flags) 2689 { 2690 u32 pte_flags; 2691 int ret; 2692 2693 ret = i915_get_ggtt_vma_pages(vma); 2694 if (ret) 2695 return ret; 2696 2697 /* Currently applicable only to VLV */ 2698 pte_flags = 0; 2699 if (vma->obj->gt_ro) 2700 pte_flags |= PTE_READ_ONLY; 2701 2702 2703 if (flags & GLOBAL_BIND) { 2704 vma->vm->insert_entries(vma->vm, 2705 vma->ggtt_view.pages, 2706 vma->node.start, 2707 cache_level, pte_flags); 2708 } 2709 2710 if (flags & LOCAL_BIND) { 2711 struct i915_hw_ppgtt *appgtt = 2712 to_i915(vma->vm->dev)->mm.aliasing_ppgtt; 2713 appgtt->base.insert_entries(&appgtt->base, 2714 vma->ggtt_view.pages, 2715 vma->node.start, 2716 cache_level, pte_flags); 2717 } 2718 2719 return 0; 2720 } 2721 2722 static void ggtt_unbind_vma(struct i915_vma *vma) 2723 { 2724 struct drm_device *dev = vma->vm->dev; 2725 struct drm_i915_private *dev_priv = dev->dev_private; 2726 struct drm_i915_gem_object *obj = vma->obj; 2727 const uint64_t size = min_t(uint64_t, 2728 obj->base.size, 2729 vma->node.size); 2730 2731 if (vma->bound & GLOBAL_BIND) { 2732 vma->vm->clear_range(vma->vm, 2733 vma->node.start, 2734 size, 2735 true); 2736 } 2737 2738 if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) { 2739 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 2740 2741 appgtt->base.clear_range(&appgtt->base, 2742 vma->node.start, 2743 size, 2744 true); 2745 } 2746 } 2747 2748 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) 2749 { 2750 struct drm_device *dev = obj->base.dev; 2751 struct drm_i915_private *dev_priv = dev->dev_private; 2752 bool interruptible; 2753 2754 interruptible = do_idling(dev_priv); 2755 2756 dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents, 2757 PCI_DMA_BIDIRECTIONAL); 2758 2759 undo_idling(dev_priv, interruptible); 2760 } 2761 2762 static void 
i915_gtt_color_adjust(struct drm_mm_node *node, 2763 unsigned long color, 2764 u64 *start, 2765 u64 *end) 2766 { 2767 if (node->color != color) 2768 *start += 4096; 2769 2770 if (!list_empty(&node->node_list)) { 2771 node = list_entry(node->node_list.next, 2772 struct drm_mm_node, 2773 node_list); 2774 if (node->allocated && node->color != color) 2775 *end -= 4096; 2776 } 2777 } 2778 2779 static int i915_gem_setup_global_gtt(struct drm_device *dev, 2780 u64 start, 2781 u64 mappable_end, 2782 u64 end) 2783 { 2784 /* Let GEM Manage all of the aperture. 2785 * 2786 * However, leave one page at the end still bound to the scratch page. 2787 * There are a number of places where the hardware apparently prefetches 2788 * past the end of the object, and we've seen multiple hangs with the 2789 * GPU head pointer stuck in a batchbuffer bound at the last page of the 2790 * aperture. One page should be enough to keep any prefetching inside 2791 * of the aperture. 2792 */ 2793 struct drm_i915_private *dev_priv = to_i915(dev); 2794 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2795 struct drm_mm_node *entry; 2796 struct drm_i915_gem_object *obj; 2797 unsigned long hole_start, hole_end; 2798 int ret; 2799 unsigned long mappable; 2800 int error; 2801 2802 mappable = min(end, mappable_end) - start; 2803 BUG_ON(mappable_end > end); 2804 2805 ggtt->base.start = start; 2806 2807 /* Subtract the guard page before address space initialization to 2808 * shrink the range used by drm_mm */ 2809 ggtt->base.total = end - start - PAGE_SIZE; 2810 i915_address_space_init(&ggtt->base, dev_priv); 2811 ggtt->base.total += PAGE_SIZE; 2812 2813 ret = intel_vgt_balloon(dev_priv); 2814 if (ret) 2815 return ret; 2816 2817 if (!HAS_LLC(dev)) 2818 ggtt->base.mm.color_adjust = i915_gtt_color_adjust; 2819 2820 /* Mark any preallocated objects as occupied */ 2821 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 2822 struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base); 2823 2824 DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n", 2825 i915_gem_obj_ggtt_offset(obj), obj->base.size); 2826 2827 WARN_ON(i915_gem_obj_ggtt_bound(obj)); 2828 ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node); 2829 if (ret) { 2830 DRM_DEBUG_KMS("Reservation failed: %i\n", ret); 2831 return ret; 2832 } 2833 vma->bound |= GLOBAL_BIND; 2834 __i915_vma_set_map_and_fenceable(vma); 2835 list_add_tail(&vma->vm_link, &ggtt->base.inactive_list); 2836 } 2837 2838 /* Clear any non-preallocated blocks */ 2839 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) { 2840 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 2841 hole_start, hole_end); 2842 ggtt->base.clear_range(&ggtt->base, hole_start, 2843 hole_end - hole_start, true); 2844 } 2845 2846 #ifdef __DragonFly__ 2847 device_printf(dev->dev->bsddev, 2848 "taking over the fictitious range 0x%llx-0x%llx\n", 2849 dev_priv->ggtt.mappable_base + start, dev_priv->ggtt.mappable_base + start + mappable); 2850 error = -vm_phys_fictitious_reg_range(dev_priv->ggtt.mappable_base + start, 2851 dev_priv->ggtt.mappable_base + start + mappable, VM_MEMATTR_WRITE_COMBINING); 2852 #endif 2853 2854 /* And finally clear the reserved guard page */ 2855 ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true); 2856 2857 if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) { 2858 struct i915_hw_ppgtt *ppgtt; 2859 2860 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 2861 if (!ppgtt) 2862 return -ENOMEM; 2863 2864 ret = __hw_ppgtt_init(dev, ppgtt); 2865 if (ret) { 2866 
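/* Aliasing PPGTT setup failed: tear down whatever __hw_ppgtt_init() managed to initialize and drop the allocation. */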
ppgtt->base.cleanup(&ppgtt->base); 2867 kfree(ppgtt); 2868 return ret; 2869 } 2870 2871 if (ppgtt->base.allocate_va_range) 2872 ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0, 2873 ppgtt->base.total); 2874 if (ret) { 2875 ppgtt->base.cleanup(&ppgtt->base); 2876 kfree(ppgtt); 2877 return ret; 2878 } 2879 2880 ppgtt->base.clear_range(&ppgtt->base, 2881 ppgtt->base.start, 2882 ppgtt->base.total, 2883 true); 2884 2885 dev_priv->mm.aliasing_ppgtt = ppgtt; 2886 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma); 2887 ggtt->base.bind_vma = aliasing_gtt_bind_vma; 2888 } 2889 2890 return 0; 2891 } 2892 2893 /** 2894 * i915_gem_init_ggtt - Initialize GEM for Global GTT 2895 * @dev: DRM device 2896 */ 2897 void i915_gem_init_ggtt(struct drm_device *dev) 2898 { 2899 struct drm_i915_private *dev_priv = to_i915(dev); 2900 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2901 2902 i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total); 2903 } 2904 2905 /** 2906 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization 2907 * @dev: DRM device 2908 */ 2909 void i915_ggtt_cleanup_hw(struct drm_device *dev) 2910 { 2911 struct drm_i915_private *dev_priv = to_i915(dev); 2912 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2913 2914 if (dev_priv->mm.aliasing_ppgtt) { 2915 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2916 2917 ppgtt->base.cleanup(&ppgtt->base); 2918 kfree(ppgtt); 2919 } 2920 2921 i915_gem_cleanup_stolen(dev); 2922 2923 if (drm_mm_initialized(&ggtt->base.mm)) { 2924 intel_vgt_deballoon(dev_priv); 2925 2926 drm_mm_takedown(&ggtt->base.mm); 2927 list_del(&ggtt->base.global_link); 2928 } 2929 2930 ggtt->base.cleanup(&ggtt->base); 2931 } 2932 2933 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) 2934 { 2935 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; 2936 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; 2937 return snb_gmch_ctl << 20; 2938 } 2939 2940 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) 2941 { 2942 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; 2943 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; 2944 if (bdw_gmch_ctl) 2945 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 2946 2947 #ifdef CONFIG_X86_32 2948 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */ 2949 if (bdw_gmch_ctl > 4) 2950 bdw_gmch_ctl = 4; 2951 #endif 2952 2953 return bdw_gmch_ctl << 20; 2954 } 2955 2956 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) 2957 { 2958 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; 2959 gmch_ctrl &= SNB_GMCH_GGMS_MASK; 2960 2961 if (gmch_ctrl) 2962 return 1 << (20 + gmch_ctrl); 2963 2964 return 0; 2965 } 2966 2967 static size_t gen6_get_stolen_size(u16 snb_gmch_ctl) 2968 { 2969 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; 2970 snb_gmch_ctl &= SNB_GMCH_GMS_MASK; 2971 return snb_gmch_ctl << 25; /* 32 MB units */ 2972 } 2973 2974 static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) 2975 { 2976 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; 2977 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK; 2978 return bdw_gmch_ctl << 25; /* 32 MB units */ 2979 } 2980 2981 static size_t chv_get_stolen_size(u16 gmch_ctrl) 2982 { 2983 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; 2984 gmch_ctrl &= SNB_GMCH_GMS_MASK; 2985 2986 /* 2987 * 0x0 to 0x10: 32MB increments starting at 0MB 2988 * 0x11 to 0x16: 4MB increments starting at 8MB 2989 * 0x17 to 0x1d: 4MB increments start at 36MB 2990 */ 2991 if (gmch_ctrl < 0x11) 2992 return gmch_ctrl << 25; 2993 else if (gmch_ctrl < 0x17) 2994 return (gmch_ctrl - 0x11 + 2) << 22; 2995 else 2996 return (gmch_ctrl - 0x17 + 9) << 22; 2997 } 2998 2999 static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) 3000 { 
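/* GEN9: GMS values below 0xf0 encode stolen memory in 32 MB units; values of 0xf0 and above encode 4 MB units. */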
3001 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; 3002 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK; 3003 3004 if (gen9_gmch_ctl < 0xf0) 3005 return gen9_gmch_ctl << 25; /* 32 MB units */ 3006 else 3007 /* 4MB increments starting at 0xf0 for 4MB */ 3008 return (gen9_gmch_ctl - 0xf0 + 1) << 22; 3009 } 3010 3011 static int ggtt_probe_common(struct drm_device *dev, 3012 size_t gtt_size) 3013 { 3014 struct drm_i915_private *dev_priv = to_i915(dev); 3015 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3016 struct i915_page_scratch *scratch_page; 3017 phys_addr_t ggtt_phys_addr; 3018 3019 /* For Modern GENs the PTEs and register space are split in the BAR */ 3020 ggtt_phys_addr = pci_resource_start(dev->pdev, 0) + 3021 (pci_resource_len(dev->pdev, 0) / 2); 3022 3023 /* 3024 * On BXT writes larger than 64 bit to the GTT pagetable range will be 3025 * dropped. For WC mappings in general we have 64 byte burst writes 3026 * when the WC buffer is flushed, so we can't use it, but have to 3027 * resort to an uncached mapping. The WC issue is easily caught by the 3028 * readback check when writing GTT PTE entries. 3029 */ 3030 if (IS_BROXTON(dev)) 3031 ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size); 3032 else 3033 ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size); 3034 if (!ggtt->gsm) { 3035 DRM_ERROR("Failed to map the gtt page table\n"); 3036 return -ENOMEM; 3037 } 3038 3039 scratch_page = alloc_scratch_page(dev); 3040 if (IS_ERR(scratch_page)) { 3041 DRM_ERROR("Scratch setup failed\n"); 3042 /* iounmap will also get called at remove, but meh */ 3043 iounmap(ggtt->gsm); 3044 return PTR_ERR(scratch_page); 3045 } 3046 3047 ggtt->base.scratch_page = scratch_page; 3048 3049 return 0; 3050 } 3051 3052 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability 3053 * bits. When using advanced contexts each context stores its own PAT, but 3054 * writing this data shouldn't be harmful even in those cases. */ 3055 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) 3056 { 3057 uint64_t pat; 3058 3059 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ 3060 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ 3061 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ 3062 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ 3063 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | 3064 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | 3065 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | 3066 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 3067 3068 if (!USES_PPGTT(dev_priv)) 3069 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, 3070 * so RTL will always use the value corresponding to 3071 * pat_sel = 000". 3072 * So let's disable cache for GGTT to avoid screen corruptions. 3073 * MOCS still can be used though. 3074 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work 3075 * before this patch, i.e. the same uncached + snooping access 3076 * like on gen6/7 seems to be in effect. 3077 * - So this just fixes blitter/render access. Again it looks 3078 * like it's not just uncached access, but uncached + snooping. 3079 * So we can still hold onto all our assumptions wrt cpu 3080 * clflushing on LLC machines. 3081 */ 3082 pat = GEN8_PPAT(0, GEN8_PPAT_UC); 3083 3084 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b 3085 * write would work. 
*/ 3086 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); 3087 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); 3088 } 3089 3090 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) 3091 { 3092 uint64_t pat; 3093 3094 /* 3095 * Map WB on BDW to snooped on CHV. 3096 * 3097 * Only the snoop bit has meaning for CHV, the rest is 3098 * ignored. 3099 * 3100 * The hardware will never snoop for certain types of accesses: 3101 * - CPU GTT (GMADR->GGTT->no snoop->memory) 3102 * - PPGTT page tables 3103 * - some other special cycles 3104 * 3105 * As with BDW, we also need to consider the following for GT accesses: 3106 * "For GGTT, there is NO pat_sel[2:0] from the entry, 3107 * so RTL will always use the value corresponding to 3108 * pat_sel = 000". 3109 * Which means we must set the snoop bit in PAT entry 0 3110 * in order to keep the global status page working. 3111 */ 3112 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | 3113 GEN8_PPAT(1, 0) | 3114 GEN8_PPAT(2, 0) | 3115 GEN8_PPAT(3, 0) | 3116 GEN8_PPAT(4, CHV_PPAT_SNOOP) | 3117 GEN8_PPAT(5, CHV_PPAT_SNOOP) | 3118 GEN8_PPAT(6, CHV_PPAT_SNOOP) | 3119 GEN8_PPAT(7, CHV_PPAT_SNOOP); 3120 3121 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); 3122 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); 3123 } 3124 3125 static int gen8_gmch_probe(struct i915_ggtt *ggtt) 3126 { 3127 struct drm_device *dev = ggtt->base.dev; 3128 struct drm_i915_private *dev_priv = to_i915(dev); 3129 u16 snb_gmch_ctl; 3130 int ret; 3131 3132 /* TODO: We're not aware of mappable constraints on gen8 yet */ 3133 ggtt->mappable_base = pci_resource_start(dev->pdev, 2); 3134 ggtt->mappable_end = pci_resource_len(dev->pdev, 2); 3135 3136 #if 0 3137 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39))) 3138 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39)); 3139 #endif 3140 3141 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 3142 3143 if (INTEL_INFO(dev)->gen >= 9) { 3144 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl); 3145 ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl); 3146 } else if (IS_CHERRYVIEW(dev)) { 3147 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl); 3148 ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl); 3149 } else { 3150 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl); 3151 ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl); 3152 } 3153 3154 ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT; 3155 3156 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) 3157 chv_setup_private_ppat(dev_priv); 3158 else 3159 bdw_setup_private_ppat(dev_priv); 3160 3161 ret = ggtt_probe_common(dev, ggtt->size); 3162 3163 ggtt->base.bind_vma = ggtt_bind_vma; 3164 ggtt->base.unbind_vma = ggtt_unbind_vma; 3165 ggtt->base.insert_page = gen8_ggtt_insert_page; 3166 ggtt->base.clear_range = nop_clear_range; 3167 if (!USES_FULL_PPGTT(dev_priv)) 3168 ggtt->base.clear_range = gen8_ggtt_clear_range; 3169 3170 ggtt->base.insert_entries = gen8_ggtt_insert_entries; 3171 if (IS_CHERRYVIEW(dev_priv)) 3172 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL; 3173 3174 return ret; 3175 } 3176 3177 static int gen6_gmch_probe(struct i915_ggtt *ggtt) 3178 { 3179 struct drm_device *dev = ggtt->base.dev; 3180 u16 snb_gmch_ctl; 3181 int ret; 3182 3183 ggtt->mappable_base = pci_resource_start(dev->pdev, 2); 3184 ggtt->mappable_end = pci_resource_len(dev->pdev, 2); 3185 3186 /* 64/512MB is the current min/max we actually know of, but this is just 3187 * a coarse sanity check. 
3188 */ 3189 if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) { 3190 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end); 3191 return -ENXIO; 3192 } 3193 3194 #if 0 3195 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) 3196 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); 3197 #endif 3198 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 3199 3200 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); 3201 ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl); 3202 ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT; 3203 3204 ret = ggtt_probe_common(dev, ggtt->size); 3205 3206 ggtt->base.clear_range = gen6_ggtt_clear_range; 3207 ggtt->base.insert_page = gen6_ggtt_insert_page; 3208 ggtt->base.insert_entries = gen6_ggtt_insert_entries; 3209 ggtt->base.bind_vma = ggtt_bind_vma; 3210 ggtt->base.unbind_vma = ggtt_unbind_vma; 3211 3212 return ret; 3213 } 3214 3215 static void gen6_gmch_remove(struct i915_address_space *vm) 3216 { 3217 struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base); 3218 3219 iounmap(ggtt->gsm); 3220 free_scratch_page(vm->dev, vm->scratch_page); 3221 } 3222 3223 static int i915_gmch_probe(struct i915_ggtt *ggtt) 3224 { 3225 struct drm_device *dev = ggtt->base.dev; 3226 struct drm_i915_private *dev_priv = to_i915(dev); 3227 #if 0 3228 int ret; 3229 3230 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); 3231 if (!ret) { 3232 DRM_ERROR("failed to set up gmch\n"); 3233 return -EIO; 3234 } 3235 #endif 3236 3237 intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size, 3238 &ggtt->mappable_base, &ggtt->mappable_end); 3239 3240 ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); 3241 ggtt->base.insert_page = i915_ggtt_insert_page; 3242 ggtt->base.insert_entries = i915_ggtt_insert_entries; 3243 ggtt->base.clear_range = i915_ggtt_clear_range; 3244 ggtt->base.bind_vma = ggtt_bind_vma; 3245 ggtt->base.unbind_vma = ggtt_unbind_vma; 3246 3247 if (unlikely(ggtt->do_idle_maps)) 3248 DRM_INFO("applying Ironlake quirks for intel_iommu\n"); 3249 3250 return 0; 3251 } 3252 3253 static void i915_gmch_remove(struct i915_address_space *vm) 3254 { 3255 intel_gmch_remove(); 3256 } 3257 3258 /** 3259 * i915_ggtt_init_hw - Initialize GGTT hardware 3260 * @dev: DRM device 3261 */ 3262 int i915_ggtt_init_hw(struct drm_device *dev) 3263 { 3264 struct drm_i915_private *dev_priv = to_i915(dev); 3265 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3266 int ret; 3267 3268 if (INTEL_INFO(dev)->gen <= 5) { 3269 ggtt->probe = i915_gmch_probe; 3270 ggtt->base.cleanup = i915_gmch_remove; 3271 } else if (INTEL_INFO(dev)->gen < 8) { 3272 ggtt->probe = gen6_gmch_probe; 3273 ggtt->base.cleanup = gen6_gmch_remove; 3274 3275 if (HAS_EDRAM(dev)) 3276 ggtt->base.pte_encode = iris_pte_encode; 3277 else if (IS_HASWELL(dev)) 3278 ggtt->base.pte_encode = hsw_pte_encode; 3279 else if (IS_VALLEYVIEW(dev)) 3280 ggtt->base.pte_encode = byt_pte_encode; 3281 else if (INTEL_INFO(dev)->gen >= 7) 3282 ggtt->base.pte_encode = ivb_pte_encode; 3283 else 3284 ggtt->base.pte_encode = snb_pte_encode; 3285 } else { 3286 ggtt->probe = gen8_gmch_probe; 3287 ggtt->base.cleanup = gen6_gmch_remove; 3288 } 3289 3290 ggtt->base.dev = dev; 3291 ggtt->base.is_ggtt = true; 3292 3293 ret = ggtt->probe(ggtt); 3294 if (ret) 3295 return ret; 3296 3297 if ((ggtt->base.total - 1) >> 32) { 3298 DRM_ERROR("We never expected a Global GTT with more than 32 bits " 3299 "of address space!
Found %lldM!\n", 3300 ggtt->base.total >> 20); 3301 ggtt->base.total = 1ULL << 32; 3302 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); 3303 } 3304 3305 /* 3306 * Initialise stolen early so that we may reserve preallocated 3307 * objects for the BIOS to KMS transition. 3308 */ 3309 ret = i915_gem_init_stolen(dev); 3310 if (ret) 3311 goto out_gtt_cleanup; 3312 3313 /* GMADR is the PCI mmio aperture into the global GTT. */ 3314 DRM_INFO("Memory usable by graphics device = %lluM\n", 3315 ggtt->base.total >> 20); 3316 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20); 3317 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20); 3318 #ifdef CONFIG_INTEL_IOMMU 3319 if (intel_iommu_gfx_mapped) 3320 DRM_INFO("VT-d active for gfx access\n"); 3321 #endif 3322 3323 return 0; 3324 3325 out_gtt_cleanup: 3326 ggtt->base.cleanup(&ggtt->base); 3327 3328 return ret; 3329 } 3330 3331 int i915_ggtt_enable_hw(struct drm_device *dev) 3332 { 3333 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 3334 return -EIO; 3335 3336 return 0; 3337 } 3338 3339 void i915_gem_restore_gtt_mappings(struct drm_device *dev) 3340 { 3341 struct drm_i915_private *dev_priv = to_i915(dev); 3342 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3343 struct drm_i915_gem_object *obj; 3344 struct i915_vma *vma; 3345 3346 i915_check_and_clear_faults(dev_priv); 3347 3348 /* First fill our portion of the GTT with scratch pages */ 3349 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, 3350 true); 3351 3352 /* Cache flush objects bound into GGTT and rebind them. */ 3353 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 3354 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3355 if (vma->vm != &ggtt->base) 3356 continue; 3357 3358 WARN_ON(i915_vma_bind(vma, obj->cache_level, 3359 PIN_UPDATE)); 3360 } 3361 3362 if (obj->pin_display) 3363 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); 3364 } 3365 3366 if (INTEL_INFO(dev)->gen >= 8) { 3367 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) 3368 chv_setup_private_ppat(dev_priv); 3369 else 3370 bdw_setup_private_ppat(dev_priv); 3371 3372 return; 3373 } 3374 3375 if (USES_PPGTT(dev)) { 3376 struct i915_address_space *vm; 3377 3378 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 3379 /* TODO: Perhaps it shouldn't be gen6 specific */ 3380 3381 struct i915_hw_ppgtt *ppgtt; 3382 3383 if (vm->is_ggtt) 3384 ppgtt = dev_priv->mm.aliasing_ppgtt; 3385 else 3386 ppgtt = i915_vm_to_ppgtt(vm); 3387 3388 gen6_write_page_range(dev_priv, &ppgtt->pd, 3389 0, ppgtt->base.total); 3390 } 3391 } 3392 3393 i915_ggtt_flush(dev_priv); 3394 } 3395 3396 static struct i915_vma * 3397 __i915_gem_vma_create(struct drm_i915_gem_object *obj, 3398 struct i915_address_space *vm, 3399 const struct i915_ggtt_view *ggtt_view) 3400 { 3401 struct i915_vma *vma; 3402 3403 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) 3404 return ERR_PTR(-EINVAL); 3405 3406 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 3407 if (vma == NULL) 3408 return ERR_PTR(-ENOMEM); 3409 3410 INIT_LIST_HEAD(&vma->vm_link); 3411 INIT_LIST_HEAD(&vma->obj_link); 3412 INIT_LIST_HEAD(&vma->exec_list); 3413 vma->vm = vm; 3414 vma->obj = obj; 3415 vma->is_ggtt = i915_is_ggtt(vm); 3416 3417 if (i915_is_ggtt(vm)) 3418 vma->ggtt_view = *ggtt_view; 3419 else 3420 i915_ppgtt_get(i915_vm_to_ppgtt(vm)); 3421 3422 list_add_tail(&vma->obj_link, &obj->vma_list); 3423 3424 return vma; 3425 } 3426 3427 struct i915_vma * 3428 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3429 
struct i915_address_space *vm) 3430 { 3431 struct i915_vma *vma; 3432 3433 vma = i915_gem_obj_to_vma(obj, vm); 3434 if (!vma) 3435 vma = __i915_gem_vma_create(obj, vm, 3436 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL); 3437 3438 return vma; 3439 } 3440 3441 struct i915_vma * 3442 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, 3443 const struct i915_ggtt_view *view) 3444 { 3445 struct drm_device *dev = obj->base.dev; 3446 struct drm_i915_private *dev_priv = to_i915(dev); 3447 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3448 struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); 3449 3450 if (!vma) 3451 vma = __i915_gem_vma_create(obj, &ggtt->base, view); 3452 3453 return vma; 3454 3455 } 3456 3457 static struct scatterlist * 3458 rotate_pages(const dma_addr_t *in, unsigned int offset, 3459 unsigned int width, unsigned int height, 3460 unsigned int stride, 3461 struct sg_table *st, struct scatterlist *sg) 3462 { 3463 unsigned int column, row; 3464 unsigned int src_idx; 3465 3466 for (column = 0; column < width; column++) { 3467 src_idx = stride * (height - 1) + column; 3468 for (row = 0; row < height; row++) { 3469 st->nents++; 3470 /* We don't need the pages, but need to initialize 3471 * the entries so the sg list can be happily traversed. 3472 * The only thing we need are DMA addresses. 3473 */ 3474 sg_set_page(sg, NULL, PAGE_SIZE, 0); 3475 sg_dma_address(sg) = in[offset + src_idx]; 3476 sg_dma_len(sg) = PAGE_SIZE; 3477 sg = sg_next(sg); 3478 src_idx -= stride; 3479 } 3480 } 3481 3482 return sg; 3483 } 3484 3485 static struct sg_table * 3486 intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, 3487 struct drm_i915_gem_object *obj) 3488 { 3489 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; 3490 unsigned int size_pages_uv; 3491 struct sgt_iter sgt_iter; 3492 dma_addr_t dma_addr; 3493 unsigned long i; 3494 dma_addr_t *page_addr_list; 3495 struct sg_table *st; 3496 unsigned int uv_start_page; 3497 struct scatterlist *sg; 3498 int ret = -ENOMEM; 3499 3500 /* Allocate a temporary list of source pages for random access. */ 3501 page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE, 3502 sizeof(dma_addr_t)); 3503 if (!page_addr_list) 3504 return ERR_PTR(ret); 3505 3506 /* Account for UV plane with NV12. */ 3507 if (rot_info->pixel_format == DRM_FORMAT_NV12) 3508 size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height; 3509 else 3510 size_pages_uv = 0; 3511 3512 /* Allocate target SG list. */ 3513 st = kmalloc(sizeof(*st), M_DRM, M_WAITOK); 3514 if (!st) 3515 goto err_st_alloc; 3516 3517 ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL); 3518 if (ret) 3519 goto err_sg_alloc; 3520 3521 /* Populate source page list from the object. */ 3522 i = 0; 3523 for_each_sgt_dma(dma_addr, sgt_iter, obj->pages) 3524 page_addr_list[i++] = dma_addr; 3525 3526 GEM_BUG_ON(i != obj->base.size / PAGE_SIZE); 3527 st->nents = 0; 3528 sg = st->sgl; 3529 3530 /* Rotate the pages. */ 3531 sg = rotate_pages(page_addr_list, 0, 3532 rot_info->plane[0].width, rot_info->plane[0].height, 3533 rot_info->plane[0].width, 3534 st, sg); 3535 3536 /* Append the UV plane if NV12. */ 3537 if (rot_info->pixel_format == DRM_FORMAT_NV12) { 3538 uv_start_page = size_pages; 3539 3540 /* Check for tile-row un-alignment.
*/ 3541 if (offset_in_page(rot_info->uv_offset)) 3542 uv_start_page--; 3543 3544 rot_info->uv_start_page = uv_start_page; 3545 3546 sg = rotate_pages(page_addr_list, rot_info->uv_start_page, 3547 rot_info->plane[1].width, rot_info->plane[1].height, 3548 rot_info->plane[1].width, 3549 st, sg); 3550 } 3551 3552 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n", 3553 obj->base.size, rot_info->plane[0].width, 3554 rot_info->plane[0].height, size_pages + size_pages_uv, 3555 size_pages); 3556 3557 drm_free_large(page_addr_list); 3558 3559 return st; 3560 3561 err_sg_alloc: 3562 kfree(st); 3563 err_st_alloc: 3564 drm_free_large(page_addr_list); 3565 3566 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n", 3567 obj->base.size, ret, rot_info->plane[0].width, 3568 rot_info->plane[0].height, size_pages + size_pages_uv, 3569 size_pages); 3570 return ERR_PTR(ret); 3571 } 3572 3573 static struct sg_table * 3574 intel_partial_pages(const struct i915_ggtt_view *view, 3575 struct drm_i915_gem_object *obj) 3576 { 3577 struct sg_table *st; 3578 struct scatterlist *sg; 3579 struct sg_page_iter obj_sg_iter; 3580 int ret = -ENOMEM; 3581 3582 st = kmalloc(sizeof(*st), M_DRM, M_WAITOK); 3583 if (!st) 3584 goto err_st_alloc; 3585 3586 ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL); 3587 if (ret) 3588 goto err_sg_alloc; 3589 3590 sg = st->sgl; 3591 st->nents = 0; 3592 for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents, 3593 view->params.partial.offset) 3594 { 3595 if (st->nents >= view->params.partial.size) 3596 break; 3597 3598 sg_set_page(sg, NULL, PAGE_SIZE, 0); 3599 sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter); 3600 sg_dma_len(sg) = PAGE_SIZE; 3601 3602 sg = sg_next(sg); 3603 st->nents++; 3604 } 3605 3606 return st; 3607 3608 err_sg_alloc: 3609 kfree(st); 3610 err_st_alloc: 3611 return ERR_PTR(ret); 3612 } 3613 3614 static int 3615 i915_get_ggtt_vma_pages(struct i915_vma *vma) 3616 { 3617 int ret = 0; 3618 3619 if (vma->ggtt_view.pages) 3620 return 0; 3621 3622 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) 3623 vma->ggtt_view.pages = vma->obj->pages; 3624 else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) 3625 vma->ggtt_view.pages = 3626 intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj); 3627 else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL) 3628 vma->ggtt_view.pages = 3629 intel_partial_pages(&vma->ggtt_view, vma->obj); 3630 else 3631 WARN_ONCE(1, "GGTT view %u not implemented!\n", 3632 vma->ggtt_view.type); 3633 3634 if (!vma->ggtt_view.pages) { 3635 DRM_ERROR("Failed to get pages for GGTT view type %u!\n", 3636 vma->ggtt_view.type); 3637 ret = -EINVAL; 3638 } else if (IS_ERR(vma->ggtt_view.pages)) { 3639 ret = PTR_ERR(vma->ggtt_view.pages); 3640 vma->ggtt_view.pages = NULL; 3641 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", 3642 vma->ggtt_view.type, ret); 3643 } 3644 3645 return ret; 3646 } 3647 3648 /** 3649 * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. 3650 * @vma: VMA to map 3651 * @cache_level: mapping cache level 3652 * @flags: flags like global or local mapping 3653 * 3654 * DMA addresses are taken from the scatter-gather table of this object (or of 3655 * this VMA in case of non-default GGTT views) and PTE entries set up. 3656 * Note that DMA addresses are also the only part of the SG table we care about. 
3657 */ 3658 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 3659 u32 flags) 3660 { 3661 int ret; 3662 u32 bind_flags; 3663 3664 if (WARN_ON(flags == 0)) 3665 return -EINVAL; 3666 3667 bind_flags = 0; 3668 if (flags & PIN_GLOBAL) 3669 bind_flags |= GLOBAL_BIND; 3670 if (flags & PIN_USER) 3671 bind_flags |= LOCAL_BIND; 3672 3673 if (flags & PIN_UPDATE) 3674 bind_flags |= vma->bound; 3675 else 3676 bind_flags &= ~vma->bound; 3677 3678 if (bind_flags == 0) 3679 return 0; 3680 3681 if (vma->bound == 0 && vma->vm->allocate_va_range) { 3682 /* XXX: i915_vma_pin() will fix this +- hack */ 3683 vma->pin_count++; 3684 trace_i915_va_alloc(vma); 3685 ret = vma->vm->allocate_va_range(vma->vm, 3686 vma->node.start, 3687 vma->node.size); 3688 vma->pin_count--; 3689 if (ret) 3690 return ret; 3691 } 3692 3693 ret = vma->vm->bind_vma(vma, cache_level, bind_flags); 3694 if (ret) 3695 return ret; 3696 3697 vma->bound |= bind_flags; 3698 3699 return 0; 3700 } 3701 3702 /** 3703 * i915_ggtt_view_size - Get the size of a GGTT view. 3704 * @obj: Object the view is of. 3705 * @view: The view in question. 3706 * 3707 * @return The size of the GGTT view in bytes. 3708 */ 3709 size_t 3710 i915_ggtt_view_size(struct drm_i915_gem_object *obj, 3711 const struct i915_ggtt_view *view) 3712 { 3713 if (view->type == I915_GGTT_VIEW_NORMAL) { 3714 return obj->base.size; 3715 } else if (view->type == I915_GGTT_VIEW_ROTATED) { 3716 return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT; 3717 } else if (view->type == I915_GGTT_VIEW_PARTIAL) { 3718 return view->params.partial.size << PAGE_SHIFT; 3719 } else { 3720 WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type); 3721 return obj->base.size; 3722 } 3723 } 3724 3725 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) 3726 { 3727 void __iomem *ptr; 3728 3729 lockdep_assert_held(&vma->vm->dev->struct_mutex); 3730 if (WARN_ON(!vma->obj->map_and_fenceable)) 3731 return ERR_PTR(-ENODEV); 3732 3733 GEM_BUG_ON(!vma->is_ggtt); 3734 GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0); 3735 3736 ptr = vma->iomap; 3737 if (ptr == NULL) { 3738 ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable, 3739 vma->node.start, 3740 vma->node.size); 3741 if (ptr == NULL) 3742 return ERR_PTR(-ENOMEM); 3743 3744 vma->iomap = ptr; 3745 } 3746 3747 vma->pin_count++; 3748 return ptr; 3749 } 3750