/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/bitmap.h>
#include <linux/highmem.h>

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to that of the backing store, or where the layout of the
 * pages is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * The above would represent a normal GGTT view as normally mapped for GPU or
 * CPU rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view, the
 * globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3.
Add support to i915_get_vma_pages(). 87 * 88 * New views are required to build a scatter-gather table from within the 89 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and 90 * exists for the lifetime of an VMA. 91 * 92 * Core API is designed to have copy semantics which means that passed in 93 * struct i915_ggtt_view does not need to be persistent (left around after 94 * calling the core API functions). 95 * 96 */ 97 98 const struct i915_ggtt_view i915_ggtt_view_normal; 99 const struct i915_ggtt_view i915_ggtt_view_rotated = { 100 .type = I915_GGTT_VIEW_ROTATED 101 }; 102 103 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); 104 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); 105 106 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) 107 { 108 bool has_aliasing_ppgtt; 109 bool has_full_ppgtt; 110 111 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; 112 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; 113 114 if (intel_vgpu_active(dev)) 115 has_full_ppgtt = false; /* emulation is too hard */ 116 117 /* 118 * We don't allow disabling PPGTT for gen9+ as it's a requirement for 119 * execlists, the sole mechanism available to submit work. 120 */ 121 if (INTEL_INFO(dev)->gen < 9 && 122 (enable_ppgtt == 0 || !has_aliasing_ppgtt)) 123 return 0; 124 125 if (enable_ppgtt == 1) 126 return 1; 127 128 if (enable_ppgtt == 2 && has_full_ppgtt) 129 return 2; 130 131 #ifdef CONFIG_INTEL_IOMMU 132 /* Disable ppgtt on SNB if VT-d is on. */ 133 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { 134 DRM_INFO("Disabling PPGTT because VT-d is on\n"); 135 return 0; 136 } 137 #endif 138 139 /* Early VLV doesn't have this */ 140 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && 141 dev->pdev->revision < 0xb) { 142 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); 143 return 0; 144 } 145 146 if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) 147 return 2; 148 else 149 return has_aliasing_ppgtt ? 1 : 0; 150 } 151 152 static void ppgtt_bind_vma(struct i915_vma *vma, 153 enum i915_cache_level cache_level, 154 u32 flags); 155 static void ppgtt_unbind_vma(struct i915_vma *vma); 156 157 static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr, 158 enum i915_cache_level level, 159 bool valid) 160 { 161 gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; 162 pte |= addr; 163 164 switch (level) { 165 case I915_CACHE_NONE: 166 pte |= PPAT_UNCACHED_INDEX; 167 break; 168 case I915_CACHE_WT: 169 pte |= PPAT_DISPLAY_ELLC_INDEX; 170 break; 171 default: 172 pte |= PPAT_CACHED_INDEX; 173 break; 174 } 175 176 return pte; 177 } 178 179 static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev, 180 dma_addr_t addr, 181 enum i915_cache_level level) 182 { 183 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; 184 pde |= addr; 185 if (level != I915_CACHE_NONE) 186 pde |= PPAT_CACHED_PDE_INDEX; 187 else 188 pde |= PPAT_UNCACHED_INDEX; 189 return pde; 190 } 191 192 static gen6_pte_t snb_pte_encode(dma_addr_t addr, 193 enum i915_cache_level level, 194 bool valid, u32 unused) 195 { 196 gen6_pte_t pte = valid ? 
GEN6_PTE_VALID : 0; 197 pte |= GEN6_PTE_ADDR_ENCODE(addr); 198 199 switch (level) { 200 case I915_CACHE_L3_LLC: 201 case I915_CACHE_LLC: 202 pte |= GEN6_PTE_CACHE_LLC; 203 break; 204 case I915_CACHE_NONE: 205 pte |= GEN6_PTE_UNCACHED; 206 break; 207 default: 208 MISSING_CASE(level); 209 } 210 211 return pte; 212 } 213 214 static gen6_pte_t ivb_pte_encode(dma_addr_t addr, 215 enum i915_cache_level level, 216 bool valid, u32 unused) 217 { 218 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; 219 pte |= GEN6_PTE_ADDR_ENCODE(addr); 220 221 switch (level) { 222 case I915_CACHE_L3_LLC: 223 pte |= GEN7_PTE_CACHE_L3_LLC; 224 break; 225 case I915_CACHE_LLC: 226 pte |= GEN6_PTE_CACHE_LLC; 227 break; 228 case I915_CACHE_NONE: 229 pte |= GEN6_PTE_UNCACHED; 230 break; 231 default: 232 MISSING_CASE(level); 233 } 234 235 return pte; 236 } 237 238 static gen6_pte_t byt_pte_encode(dma_addr_t addr, 239 enum i915_cache_level level, 240 bool valid, u32 flags) 241 { 242 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; 243 pte |= GEN6_PTE_ADDR_ENCODE(addr); 244 245 if (!(flags & PTE_READ_ONLY)) 246 pte |= BYT_PTE_WRITEABLE; 247 248 if (level != I915_CACHE_NONE) 249 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; 250 251 return pte; 252 } 253 254 static gen6_pte_t hsw_pte_encode(dma_addr_t addr, 255 enum i915_cache_level level, 256 bool valid, u32 unused) 257 { 258 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; 259 pte |= HSW_PTE_ADDR_ENCODE(addr); 260 261 if (level != I915_CACHE_NONE) 262 pte |= HSW_WB_LLC_AGE3; 263 264 return pte; 265 } 266 267 static gen6_pte_t iris_pte_encode(dma_addr_t addr, 268 enum i915_cache_level level, 269 bool valid, u32 unused) 270 { 271 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; 272 pte |= HSW_PTE_ADDR_ENCODE(addr); 273 274 switch (level) { 275 case I915_CACHE_NONE: 276 break; 277 case I915_CACHE_WT: 278 pte |= HSW_WT_ELLC_LLC_AGE3; 279 break; 280 default: 281 pte |= HSW_WB_ELLC_LLC_AGE3; 282 break; 283 } 284 285 return pte; 286 } 287 288 #define i915_dma_unmap_single(px, dev) \ 289 __i915_dma_unmap_single((px)->daddr, dev) 290 291 static inline void __i915_dma_unmap_single(dma_addr_t daddr, 292 struct drm_device *dev) 293 { 294 #if 0 295 struct device *device = &dev->pdev->dev; 296 297 dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL); 298 #endif 299 } 300 301 /** 302 * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc. 303 * @px: Page table/dir/etc to get a DMA map for 304 * @dev: drm device 305 * 306 * Page table allocations are unified across all gens. They always require a 307 * single 4k allocation, as well as a DMA mapping. If we keep the structs 308 * symmetric here, the simple macro covers us for every page table type. 309 * 310 * Return: 0 if success. 
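 *
 * Illustrative use, mirroring alloc_pt_single() below:
 *
 *	ret = i915_dma_map_single(pt, dev);
 *	if (ret)
 *		goto fail_dma;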
 */
#define i915_dma_map_single(px, dev) \
	i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)

static inline int i915_dma_map_page_single(struct vm_page *page,
					   struct drm_device *dev,
					   dma_addr_t *daddr)
{
	struct device *device = &dev->pdev->dev;

	*daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, *daddr))
		return -ENOMEM;

	return 0;
}

static void unmap_and_free_pt(struct i915_page_table_entry *pt,
			      struct drm_device *dev)
{
	if (WARN_ON(!pt->page))
		return;

	i915_dma_unmap_single(pt, dev);
	__free_page(pt->page);
	kfree(pt->used_ptes);
	kfree(pt);
}

static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
{
	struct i915_page_table_entry *pt;
	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
		GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);

	if (!pt->used_ptes)
		goto fail_bitmap;

	pt->page = alloc_page(GFP_KERNEL);
	if (!pt->page)
		goto fail_page;

	ret = i915_dma_map_single(pt, dev);
	if (ret)
		goto fail_dma;

	return pt;

fail_dma:
	__free_page(pt->page);
fail_page:
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
}

/**
 * alloc_pt_range() - Allocate multiple page tables
 * @pd:		The page directory which will have at least @count entries
 *		available to point to the allocated page tables.
 * @pde:	First page directory entry for which we are allocating.
 * @count:	Number of page tables to allocate.
 * @dev:	DRM device.
 *
 * Allocates multiple page table pages and sets the appropriate entries in the
 * page table structure within the page directory. Function cleans up after
 * itself on any failures.
 *
 * Return: 0 if allocation succeeded.
 */
static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
			  struct drm_device *dev)
{
	int i, ret;

	/* 512 is the max page tables per page_directory on any platform.
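	 * (Illustratively, with 8-byte gen8 PDEs that is exactly one 4 KiB page
	 * worth of entries: 4096 / 8 = 512 == I915_PDES.)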
*/ 397 if (WARN_ON(pde + count > I915_PDES)) 398 return -EINVAL; 399 400 for (i = pde; i < pde + count; i++) { 401 struct i915_page_table_entry *pt = alloc_pt_single(dev); 402 403 if (IS_ERR(pt)) { 404 ret = PTR_ERR(pt); 405 goto err_out; 406 } 407 WARN(pd->page_table[i], 408 "Leaking page directory entry %d (%p)\n", 409 i, pd->page_table[i]); 410 pd->page_table[i] = pt; 411 } 412 413 return 0; 414 415 err_out: 416 while (i-- > pde) 417 unmap_and_free_pt(pd->page_table[i], dev); 418 return ret; 419 } 420 421 static void unmap_and_free_pd(struct i915_page_directory_entry *pd) 422 { 423 if (pd->page) { 424 __free_page(pd->page); 425 kfree(pd); 426 } 427 } 428 429 static struct i915_page_directory_entry *alloc_pd_single(void) 430 { 431 struct i915_page_directory_entry *pd; 432 433 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 434 if (!pd) 435 return ERR_PTR(-ENOMEM); 436 437 pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO); 438 if (!pd->page) { 439 kfree(pd); 440 return ERR_PTR(-ENOMEM); 441 } 442 443 return pd; 444 } 445 446 /* Broadwell Page Directory Pointer Descriptors */ 447 static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry, 448 uint64_t val) 449 { 450 int ret; 451 452 BUG_ON(entry >= 4); 453 454 ret = intel_ring_begin(ring, 6); 455 if (ret) 456 return ret; 457 458 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 459 intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry)); 460 intel_ring_emit(ring, (u32)(val >> 32)); 461 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 462 intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry)); 463 intel_ring_emit(ring, (u32)(val)); 464 intel_ring_advance(ring); 465 466 return 0; 467 } 468 469 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, 470 struct intel_engine_cs *ring) 471 { 472 int i, ret; 473 474 /* bit of a hack to find the actual last used pd */ 475 int used_pd = ppgtt->num_pd_entries / I915_PDES; 476 477 for (i = used_pd - 1; i >= 0; i--) { 478 dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr; 479 ret = gen8_write_pdp(ring, i, addr); 480 if (ret) 481 return ret; 482 } 483 484 return 0; 485 } 486 487 static void gen8_ppgtt_clear_range(struct i915_address_space *vm, 488 uint64_t start, 489 uint64_t length, 490 bool use_scratch) 491 { 492 struct i915_hw_ppgtt *ppgtt = 493 container_of(vm, struct i915_hw_ppgtt, base); 494 gen8_pte_t *pt_vaddr, scratch_pte; 495 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; 496 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; 497 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; 498 unsigned num_entries = length >> PAGE_SHIFT; 499 unsigned last_pte, i; 500 501 scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr, 502 I915_CACHE_LLC, use_scratch); 503 504 while (num_entries) { 505 struct i915_page_directory_entry *pd; 506 struct i915_page_table_entry *pt; 507 struct vm_page *page_table; 508 509 if (WARN_ON(!ppgtt->pdp.page_directory[pdpe])) 510 continue; 511 512 pd = ppgtt->pdp.page_directory[pdpe]; 513 514 if (WARN_ON(!pd->page_table[pde])) 515 continue; 516 517 pt = pd->page_table[pde]; 518 519 if (WARN_ON(!pt->page)) 520 continue; 521 522 page_table = pt->page; 523 524 last_pte = pte + num_entries; 525 if (last_pte > GEN8_PTES) 526 last_pte = GEN8_PTES; 527 528 pt_vaddr = kmap_atomic(page_table); 529 530 for (i = pte; i < last_pte; i++) { 531 pt_vaddr[i] = scratch_pte; 532 num_entries--; 533 } 534 535 if (!HAS_LLC(ppgtt->base.dev)) 536 drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); 537 kunmap_atomic(pt_vaddr); 538 539 pte = 0; 540 if (++pde == I915_PDES) { 541 pdpe++; 
542 pde = 0; 543 } 544 } 545 } 546 547 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, 548 vm_page_t *pages, 549 uint64_t start, 550 unsigned int num_entries, 551 enum i915_cache_level cache_level, u32 unused) 552 { 553 struct i915_hw_ppgtt *ppgtt = 554 container_of(vm, struct i915_hw_ppgtt, base); 555 gen8_pte_t *pt_vaddr; 556 unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; 557 unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; 558 unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; 559 int i; 560 561 pt_vaddr = NULL; 562 563 for (i=0;i<num_entries;i++) { 564 if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES)) 565 break; 566 567 if (pt_vaddr == NULL) { 568 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe]; 569 struct i915_page_table_entry *pt = pd->page_table[pde]; 570 struct vm_page *page_table = pt->page; 571 572 pt_vaddr = kmap_atomic(page_table); 573 } 574 575 pt_vaddr[pte] = 576 gen8_pte_encode(VM_PAGE_TO_PHYS(pages[i]), 577 cache_level, true); 578 if (++pte == GEN8_PTES) { 579 if (!HAS_LLC(ppgtt->base.dev)) 580 drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); 581 kunmap_atomic(pt_vaddr); 582 pt_vaddr = NULL; 583 if (++pde == I915_PDES) { 584 pdpe++; 585 pde = 0; 586 } 587 pte = 0; 588 } 589 } 590 if (pt_vaddr) { 591 if (!HAS_LLC(ppgtt->base.dev)) 592 drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); 593 kunmap_atomic(pt_vaddr); 594 } 595 } 596 597 static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev) 598 { 599 int i; 600 601 if (!pd->page) 602 return; 603 604 for (i = 0; i < I915_PDES; i++) { 605 if (WARN_ON(!pd->page_table[i])) 606 continue; 607 608 unmap_and_free_pt(pd->page_table[i], dev); 609 pd->page_table[i] = NULL; 610 } 611 } 612 613 static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt) 614 { 615 int i; 616 617 for (i = 0; i < ppgtt->num_pd_pages; i++) { 618 if (WARN_ON(!ppgtt->pdp.page_directory[i])) 619 continue; 620 621 gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev); 622 unmap_and_free_pd(ppgtt->pdp.page_directory[i]); 623 } 624 } 625 626 static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) 627 { 628 struct pci_dev *hwdev = ppgtt->base.dev->pdev; 629 int i, j; 630 631 for (i = 0; i < ppgtt->num_pd_pages; i++) { 632 /* TODO: In the future we'll support sparse mappings, so this 633 * will have to change. 
*/ 634 if (!ppgtt->pdp.page_directory[i]->daddr) 635 continue; 636 637 pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE, 638 PCI_DMA_BIDIRECTIONAL); 639 640 for (j = 0; j < I915_PDES; j++) { 641 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; 642 struct i915_page_table_entry *pt; 643 dma_addr_t addr; 644 645 if (WARN_ON(!pd->page_table[j])) 646 continue; 647 648 pt = pd->page_table[j]; 649 addr = pt->daddr; 650 651 if (addr) 652 pci_unmap_page(hwdev, addr, PAGE_SIZE, 653 PCI_DMA_BIDIRECTIONAL); 654 } 655 } 656 } 657 658 static void gen8_ppgtt_cleanup(struct i915_address_space *vm) 659 { 660 struct i915_hw_ppgtt *ppgtt = 661 container_of(vm, struct i915_hw_ppgtt, base); 662 663 gen8_ppgtt_unmap_pages(ppgtt); 664 gen8_ppgtt_free(ppgtt); 665 } 666 667 static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt) 668 { 669 int i, ret; 670 671 for (i = 0; i < ppgtt->num_pd_pages; i++) { 672 ret = alloc_pt_range(ppgtt->pdp.page_directory[i], 673 0, I915_PDES, ppgtt->base.dev); 674 if (ret) 675 goto unwind_out; 676 } 677 678 return 0; 679 680 unwind_out: 681 while (i--) 682 gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev); 683 684 return -ENOMEM; 685 } 686 687 static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt, 688 const int max_pdp) 689 { 690 int i; 691 692 for (i = 0; i < max_pdp; i++) { 693 ppgtt->pdp.page_directory[i] = alloc_pd_single(); 694 if (IS_ERR(ppgtt->pdp.page_directory[i])) 695 goto unwind_out; 696 } 697 698 ppgtt->num_pd_pages = max_pdp; 699 BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES); 700 701 return 0; 702 703 unwind_out: 704 while (i--) 705 unmap_and_free_pd(ppgtt->pdp.page_directory[i]); 706 707 return -ENOMEM; 708 } 709 710 static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt, 711 const int max_pdp) 712 { 713 int ret; 714 715 ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp); 716 if (ret) 717 return ret; 718 719 ret = gen8_ppgtt_allocate_page_tables(ppgtt); 720 if (ret) 721 goto err_out; 722 723 ppgtt->num_pd_entries = max_pdp * I915_PDES; 724 725 return 0; 726 727 err_out: 728 gen8_ppgtt_free(ppgtt); 729 return ret; 730 } 731 732 static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt, 733 const int pd) 734 { 735 dma_addr_t pd_addr; 736 int ret; 737 738 pd_addr = pci_map_page(ppgtt->base.dev->pdev, 739 ppgtt->pdp.page_directory[pd]->page, 0, 740 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 741 742 ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr); 743 if (ret) 744 return ret; 745 746 ppgtt->pdp.page_directory[pd]->daddr = pd_addr; 747 748 return 0; 749 } 750 751 static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, 752 const int pd, 753 const int pt) 754 { 755 dma_addr_t pt_addr; 756 struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd]; 757 struct i915_page_table_entry *ptab = pdir->page_table[pt]; 758 struct vm_page *p = ptab->page; 759 int ret; 760 761 pt_addr = pci_map_page(ppgtt->base.dev->pdev, 762 p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 763 ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr); 764 if (ret) 765 return ret; 766 767 ptab->daddr = pt_addr; 768 769 return 0; 770 } 771 772 /* 773 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers 774 * with a net effect resembling a 2-level page table in normal x86 terms. Each 775 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address 776 * space. 777 * 778 * FIXME: split allocation into smaller pieces. 
For now we only ever do this 779 * once, but with full PPGTT, the multiple contiguous allocations will be bad. 780 * TODO: Do something with the size parameter 781 */ 782 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) 783 { 784 const int max_pdp = DIV_ROUND_UP(size, 1 << 30); 785 const int min_pt_pages = I915_PDES * max_pdp; 786 int i, j, ret; 787 788 if (size % (1<<30)) 789 DRM_INFO("Pages will be wasted unless GTT size (%lu) is divisible by 1GB\n", size); 790 791 /* 1. Do all our allocations for page directories and page tables. 792 * We allocate more than was asked so that we can point the unused parts 793 * to valid entries that point to scratch page. Dynamic page tables 794 * will fix this eventually. 795 */ 796 ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES); 797 if (ret) 798 return ret; 799 800 /* 801 * 2. Create DMA mappings for the page directories and page tables. 802 */ 803 for (i = 0; i < GEN8_LEGACY_PDPES; i++) { 804 ret = gen8_ppgtt_setup_page_directories(ppgtt, i); 805 if (ret) 806 goto bail; 807 808 for (j = 0; j < I915_PDES; j++) { 809 ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j); 810 if (ret) 811 goto bail; 812 } 813 } 814 815 /* 816 * 3. Map all the page directory entires to point to the page tables 817 * we've allocated. 818 * 819 * For now, the PPGTT helper functions all require that the PDEs are 820 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we 821 * will never need to touch the PDEs again. 822 */ 823 for (i = 0; i < GEN8_LEGACY_PDPES; i++) { 824 struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; 825 gen8_pde_t *pd_vaddr; 826 pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page); 827 for (j = 0; j < I915_PDES; j++) { 828 struct i915_page_table_entry *pt = pd->page_table[j]; 829 dma_addr_t addr = pt->daddr; 830 pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr, 831 I915_CACHE_LLC); 832 } 833 if (!HAS_LLC(ppgtt->base.dev)) 834 drm_clflush_virt_range(pd_vaddr, PAGE_SIZE); 835 kunmap_atomic(pd_vaddr); 836 } 837 838 ppgtt->switch_mm = gen8_mm_switch; 839 ppgtt->base.clear_range = gen8_ppgtt_clear_range; 840 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; 841 ppgtt->base.cleanup = gen8_ppgtt_cleanup; 842 ppgtt->base.start = 0; 843 844 /* This is the area that we advertise as usable for the caller */ 845 ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE; 846 847 /* Set all ptes to a valid scratch page. 
Also above requested space */ 848 ppgtt->base.clear_range(&ppgtt->base, 0, 849 ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE, 850 true); 851 852 DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", 853 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); 854 DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%ld wasted)\n", 855 ppgtt->num_pd_entries, 856 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30)); 857 return 0; 858 859 bail: 860 gen8_ppgtt_unmap_pages(ppgtt); 861 gen8_ppgtt_free(ppgtt); 862 return ret; 863 } 864 865 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) 866 { 867 struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; 868 struct i915_address_space *vm = &ppgtt->base; 869 gen6_pte_t __iomem *pd_addr; 870 gen6_pte_t scratch_pte; 871 uint32_t pd_entry; 872 int pte, pde; 873 874 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); 875 876 pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + 877 ppgtt->pd.pd_offset / sizeof(gen6_pte_t); 878 879 seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm, 880 ppgtt->pd.pd_offset, 881 ppgtt->pd.pd_offset + ppgtt->num_pd_entries); 882 for (pde = 0; pde < ppgtt->num_pd_entries; pde++) { 883 u32 expected; 884 gen6_pte_t *pt_vaddr; 885 dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr; 886 pd_entry = readl(pd_addr + pde); 887 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); 888 889 if (pd_entry != expected) 890 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n", 891 pde, 892 pd_entry, 893 expected); 894 seq_printf(m, "\tPDE: %x\n", pd_entry); 895 896 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page); 897 for (pte = 0; pte < GEN6_PTES; pte+=4) { 898 unsigned long va = 899 (pde * PAGE_SIZE * GEN6_PTES) + 900 (pte * PAGE_SIZE); 901 int i; 902 bool found = false; 903 for (i = 0; i < 4; i++) 904 if (pt_vaddr[pte + i] != scratch_pte) 905 found = true; 906 if (!found) 907 continue; 908 909 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte); 910 for (i = 0; i < 4; i++) { 911 if (pt_vaddr[pte + i] != scratch_pte) 912 seq_printf(m, " %08x", pt_vaddr[pte + i]); 913 else 914 seq_puts(m, " SCRATCH "); 915 } 916 seq_puts(m, "\n"); 917 } 918 kunmap_atomic(pt_vaddr); 919 } 920 } 921 922 /* Write pde (index) from the page directory @pd to the page table @pt */ 923 static void gen6_write_pde(struct i915_page_directory_entry *pd, 924 const int pde, struct i915_page_table_entry *pt) 925 { 926 /* Caller needs to make sure the write completes if necessary */ 927 struct i915_hw_ppgtt *ppgtt = 928 container_of(pd, struct i915_hw_ppgtt, pd); 929 u32 pd_entry; 930 931 pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr); 932 pd_entry |= GEN6_PDE_VALID; 933 934 writel(pd_entry, ppgtt->pd_addr + pde); 935 } 936 937 /* Write all the page tables found in the ppgtt structure to incrementing page 938 * directories. */ 939 static void gen6_write_page_range(struct drm_i915_private *dev_priv, 940 struct i915_page_directory_entry *pd, 941 uint32_t start, uint32_t length) 942 { 943 struct i915_page_table_entry *pt; 944 uint32_t pde, temp; 945 946 gen6_for_each_pde(pt, pd, start, length, temp, pde) 947 gen6_write_pde(pd, pde, pt); 948 949 /* Make sure write is complete before other code can use this page 950 * table. 
Also require for WC mapped PTEs */ 951 readl(dev_priv->gtt.gsm); 952 } 953 954 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) 955 { 956 BUG_ON(ppgtt->pd.pd_offset & 0x3f); 957 958 return (ppgtt->pd.pd_offset / 64) << 16; 959 } 960 961 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, 962 struct intel_engine_cs *ring) 963 { 964 int ret; 965 966 /* NB: TLBs must be flushed and invalidated before a switch */ 967 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 968 if (ret) 969 return ret; 970 971 ret = intel_ring_begin(ring, 6); 972 if (ret) 973 return ret; 974 975 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); 976 intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); 977 intel_ring_emit(ring, PP_DIR_DCLV_2G); 978 intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); 979 intel_ring_emit(ring, get_pd_offset(ppgtt)); 980 intel_ring_emit(ring, MI_NOOP); 981 intel_ring_advance(ring); 982 983 return 0; 984 } 985 986 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt, 987 struct intel_engine_cs *ring) 988 { 989 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); 990 991 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 992 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); 993 return 0; 994 } 995 996 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 997 struct intel_engine_cs *ring) 998 { 999 int ret; 1000 1001 /* NB: TLBs must be flushed and invalidated before a switch */ 1002 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 1003 if (ret) 1004 return ret; 1005 1006 ret = intel_ring_begin(ring, 6); 1007 if (ret) 1008 return ret; 1009 1010 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); 1011 intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); 1012 intel_ring_emit(ring, PP_DIR_DCLV_2G); 1013 intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); 1014 intel_ring_emit(ring, get_pd_offset(ppgtt)); 1015 intel_ring_emit(ring, MI_NOOP); 1016 intel_ring_advance(ring); 1017 1018 /* XXX: RCS is the only one to auto invalidate the TLBs? 
*/ 1019 if (ring->id != RCS) { 1020 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 1021 if (ret) 1022 return ret; 1023 } 1024 1025 return 0; 1026 } 1027 1028 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, 1029 struct intel_engine_cs *ring) 1030 { 1031 struct drm_device *dev = ppgtt->base.dev; 1032 struct drm_i915_private *dev_priv = dev->dev_private; 1033 1034 1035 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 1036 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); 1037 1038 POSTING_READ(RING_PP_DIR_DCLV(ring)); 1039 1040 return 0; 1041 } 1042 1043 static void gen8_ppgtt_enable(struct drm_device *dev) 1044 { 1045 struct drm_i915_private *dev_priv = dev->dev_private; 1046 struct intel_engine_cs *ring; 1047 int j; 1048 1049 for_each_ring(ring, dev_priv, j) { 1050 I915_WRITE(RING_MODE_GEN7(ring), 1051 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1052 } 1053 } 1054 1055 static void gen7_ppgtt_enable(struct drm_device *dev) 1056 { 1057 struct drm_i915_private *dev_priv = dev->dev_private; 1058 struct intel_engine_cs *ring; 1059 uint32_t ecochk, ecobits; 1060 int i; 1061 1062 ecobits = I915_READ(GAC_ECO_BITS); 1063 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); 1064 1065 ecochk = I915_READ(GAM_ECOCHK); 1066 if (IS_HASWELL(dev)) { 1067 ecochk |= ECOCHK_PPGTT_WB_HSW; 1068 } else { 1069 ecochk |= ECOCHK_PPGTT_LLC_IVB; 1070 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; 1071 } 1072 I915_WRITE(GAM_ECOCHK, ecochk); 1073 1074 for_each_ring(ring, dev_priv, i) { 1075 /* GFX_MODE is per-ring on gen7+ */ 1076 I915_WRITE(RING_MODE_GEN7(ring), 1077 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1078 } 1079 } 1080 1081 static void gen6_ppgtt_enable(struct drm_device *dev) 1082 { 1083 struct drm_i915_private *dev_priv = dev->dev_private; 1084 uint32_t ecochk, gab_ctl, ecobits; 1085 1086 ecobits = I915_READ(GAC_ECO_BITS); 1087 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | 1088 ECOBITS_PPGTT_CACHE64B); 1089 1090 gab_ctl = I915_READ(GAB_CTL); 1091 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); 1092 1093 ecochk = I915_READ(GAM_ECOCHK); 1094 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); 1095 1096 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 1097 } 1098 1099 /* PPGTT support for Sandybdrige/Gen6 and later */ 1100 static void gen6_ppgtt_clear_range(struct i915_address_space *vm, 1101 uint64_t start, 1102 uint64_t length, 1103 bool use_scratch) 1104 { 1105 struct i915_hw_ppgtt *ppgtt = 1106 container_of(vm, struct i915_hw_ppgtt, base); 1107 gen6_pte_t *pt_vaddr, scratch_pte; 1108 unsigned first_entry = start >> PAGE_SHIFT; 1109 unsigned num_entries = length >> PAGE_SHIFT; 1110 unsigned act_pt = first_entry / GEN6_PTES; 1111 unsigned first_pte = first_entry % GEN6_PTES; 1112 unsigned last_pte, i; 1113 1114 scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); 1115 1116 while (num_entries) { 1117 last_pte = first_pte + num_entries; 1118 if (last_pte > GEN6_PTES) 1119 last_pte = GEN6_PTES; 1120 1121 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page); 1122 1123 for (i = first_pte; i < last_pte; i++) 1124 pt_vaddr[i] = scratch_pte; 1125 1126 kunmap_atomic(pt_vaddr); 1127 1128 num_entries -= last_pte - first_pte; 1129 first_pte = 0; 1130 act_pt++; 1131 } 1132 } 1133 1134 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, 1135 vm_page_t *pages, 1136 uint64_t start, 1137 unsigned num_entries, 1138 enum i915_cache_level cache_level, u32 flags) 1139 { 1140 struct i915_hw_ppgtt *ppgtt = 1141 
container_of(vm, struct i915_hw_ppgtt, base); 1142 gen6_pte_t *pt_vaddr; 1143 unsigned first_entry = start >> PAGE_SHIFT; 1144 unsigned act_pt = first_entry / GEN6_PTES; 1145 unsigned act_pte = first_entry % GEN6_PTES; 1146 1147 pt_vaddr = NULL; 1148 for (int i=0;i<num_entries;i++) { 1149 if (pt_vaddr == NULL) 1150 pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page); 1151 1152 pt_vaddr[act_pte] = 1153 vm->pte_encode(VM_PAGE_TO_PHYS(pages[i]), 1154 cache_level, true, flags); 1155 1156 if (++act_pte == GEN6_PTES) { 1157 kunmap_atomic(pt_vaddr); 1158 pt_vaddr = NULL; 1159 act_pt++; 1160 act_pte = 0; 1161 } 1162 } 1163 if (pt_vaddr) 1164 kunmap_atomic(pt_vaddr); 1165 } 1166 1167 /* PDE TLBs are a pain invalidate pre GEN8. It requires a context reload. If we 1168 * are switching between contexts with the same LRCA, we also must do a force 1169 * restore. 1170 */ 1171 static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) 1172 { 1173 /* If current vm != vm, */ 1174 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; 1175 } 1176 1177 static void gen6_initialize_pt(struct i915_address_space *vm, 1178 struct i915_page_table_entry *pt) 1179 { 1180 gen6_pte_t *pt_vaddr, scratch_pte; 1181 int i; 1182 1183 WARN_ON(vm->scratch.addr == 0); 1184 1185 scratch_pte = vm->pte_encode(vm->scratch.addr, 1186 I915_CACHE_LLC, true, 0); 1187 1188 pt_vaddr = kmap_atomic(pt->page); 1189 1190 for (i = 0; i < GEN6_PTES; i++) 1191 pt_vaddr[i] = scratch_pte; 1192 1193 kunmap_atomic(pt_vaddr); 1194 } 1195 1196 static int gen6_alloc_va_range(struct i915_address_space *vm, 1197 uint64_t start, uint64_t length) 1198 { 1199 DECLARE_BITMAP(new_page_tables, I915_PDES); 1200 struct drm_device *dev = vm->dev; 1201 struct drm_i915_private *dev_priv = dev->dev_private; 1202 struct i915_hw_ppgtt *ppgtt = 1203 container_of(vm, struct i915_hw_ppgtt, base); 1204 struct i915_page_table_entry *pt; 1205 const uint32_t start_save = start, length_save = length; 1206 uint32_t pde, temp; 1207 int ret; 1208 1209 WARN_ON(upper_32_bits(start)); 1210 1211 bitmap_zero(new_page_tables, I915_PDES); 1212 1213 /* The allocation is done in two stages so that we can bail out with 1214 * minimal amount of pain. The first stage finds new page tables that 1215 * need allocation. The second stage marks use ptes within the page 1216 * tables. 
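	 * On failure the first stage can be unwound precisely: only the page
	 * tables tracked in new_page_tables are freed and pointed back at the
	 * scratch page table, leaving pre-existing tables and their used_ptes
	 * untouched.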
1217 */ 1218 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { 1219 if (pt != ppgtt->scratch_pt) { 1220 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); 1221 continue; 1222 } 1223 1224 /* We've already allocated a page table */ 1225 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES)); 1226 1227 pt = alloc_pt_single(dev); 1228 if (IS_ERR(pt)) { 1229 ret = PTR_ERR(pt); 1230 goto unwind_out; 1231 } 1232 1233 gen6_initialize_pt(vm, pt); 1234 1235 ppgtt->pd.page_table[pde] = pt; 1236 set_bit(pde, new_page_tables); 1237 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT); 1238 } 1239 1240 start = start_save; 1241 length = length_save; 1242 1243 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { 1244 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES); 1245 1246 bitmap_zero(tmp_bitmap, GEN6_PTES); 1247 bitmap_set(tmp_bitmap, gen6_pte_index(start), 1248 gen6_pte_count(start, length)); 1249 1250 if (test_and_clear_bit(pde, new_page_tables)) 1251 gen6_write_pde(&ppgtt->pd, pde, pt); 1252 1253 trace_i915_page_table_entry_map(vm, pde, pt, 1254 gen6_pte_index(start), 1255 gen6_pte_count(start, length), 1256 GEN6_PTES); 1257 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes, 1258 GEN6_PTES); 1259 } 1260 1261 WARN_ON(!bitmap_empty(new_page_tables, I915_PDES)); 1262 1263 /* Make sure write is complete before other code can use this page 1264 * table. Also require for WC mapped PTEs */ 1265 readl(dev_priv->gtt.gsm); 1266 1267 mark_tlbs_dirty(ppgtt); 1268 return 0; 1269 1270 unwind_out: 1271 for_each_set_bit(pde, new_page_tables, I915_PDES) { 1272 struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde]; 1273 1274 ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; 1275 unmap_and_free_pt(pt, vm->dev); 1276 } 1277 1278 mark_tlbs_dirty(ppgtt); 1279 return ret; 1280 } 1281 1282 static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt) 1283 { 1284 int i; 1285 1286 for (i = 0; i < ppgtt->num_pd_entries; i++) { 1287 struct i915_page_table_entry *pt = ppgtt->pd.page_table[i]; 1288 1289 if (pt != ppgtt->scratch_pt) 1290 unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev); 1291 } 1292 1293 unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); 1294 unmap_and_free_pd(&ppgtt->pd); 1295 } 1296 1297 static void gen6_ppgtt_cleanup(struct i915_address_space *vm) 1298 { 1299 struct i915_hw_ppgtt *ppgtt = 1300 container_of(vm, struct i915_hw_ppgtt, base); 1301 1302 drm_mm_remove_node(&ppgtt->node); 1303 1304 gen6_ppgtt_free(ppgtt); 1305 } 1306 1307 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) 1308 { 1309 struct drm_device *dev = ppgtt->base.dev; 1310 struct drm_i915_private *dev_priv = dev->dev_private; 1311 bool retried = false; 1312 int ret; 1313 1314 /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The 1315 * allocator works in address space sizes, so it's multiplied by page 1316 * size. We allocate at the top of the GTT to avoid fragmentation. 
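	 * (Illustratively, assuming GEN6_PD_SIZE is I915_PDES * PAGE_SIZE, that
	 * works out to 512 * 4096 bytes = 2 MiB of GGTT space per PPGTT.)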
1317 */ 1318 BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); 1319 ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev); 1320 if (IS_ERR(ppgtt->scratch_pt)) 1321 return PTR_ERR(ppgtt->scratch_pt); 1322 1323 gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt); 1324 1325 alloc: 1326 ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, 1327 &ppgtt->node, GEN6_PD_SIZE, 1328 GEN6_PD_ALIGN, 0, 1329 0, dev_priv->gtt.base.total, 1330 DRM_MM_TOPDOWN); 1331 if (ret == -ENOSPC && !retried) { 1332 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, 1333 GEN6_PD_SIZE, GEN6_PD_ALIGN, 1334 I915_CACHE_NONE, 1335 0, dev_priv->gtt.base.total, 1336 0); 1337 if (ret) 1338 goto err_out; 1339 1340 retried = true; 1341 goto alloc; 1342 } 1343 1344 if (ret) 1345 goto err_out; 1346 1347 1348 if (ppgtt->node.start < dev_priv->gtt.mappable_end) 1349 DRM_DEBUG("Forced to use aperture for PDEs\n"); 1350 1351 ppgtt->num_pd_entries = I915_PDES; 1352 return 0; 1353 1354 err_out: 1355 unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); 1356 return ret; 1357 } 1358 1359 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) 1360 { 1361 return gen6_ppgtt_allocate_page_directories(ppgtt); 1362 } 1363 1364 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, 1365 uint64_t start, uint64_t length) 1366 { 1367 struct i915_page_table_entry *unused; 1368 uint32_t pde, temp; 1369 1370 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) 1371 ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; 1372 } 1373 1374 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing) 1375 { 1376 struct drm_device *dev = ppgtt->base.dev; 1377 struct drm_i915_private *dev_priv = dev->dev_private; 1378 int ret; 1379 1380 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; 1381 if (IS_GEN6(dev)) { 1382 ppgtt->switch_mm = gen6_mm_switch; 1383 } else if (IS_HASWELL(dev)) { 1384 ppgtt->switch_mm = hsw_mm_switch; 1385 } else if (IS_GEN7(dev)) { 1386 ppgtt->switch_mm = gen7_mm_switch; 1387 } else 1388 BUG(); 1389 1390 if (intel_vgpu_active(dev)) 1391 ppgtt->switch_mm = vgpu_mm_switch; 1392 1393 ret = gen6_ppgtt_alloc(ppgtt); 1394 if (ret) 1395 return ret; 1396 1397 if (aliasing) { 1398 /* preallocate all pts */ 1399 ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries, 1400 ppgtt->base.dev); 1401 1402 if (ret) { 1403 gen6_ppgtt_cleanup(&ppgtt->base); 1404 return ret; 1405 } 1406 } 1407 1408 ppgtt->base.allocate_va_range = gen6_alloc_va_range; 1409 ppgtt->base.clear_range = gen6_ppgtt_clear_range; 1410 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 1411 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 1412 ppgtt->base.start = 0; 1413 ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE; 1414 ppgtt->debug_dump = gen6_dump_ppgtt; 1415 1416 ppgtt->pd.pd_offset = 1417 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); 1418 1419 ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + 1420 ppgtt->pd.pd_offset / sizeof(gen6_pte_t); 1421 1422 if (aliasing) 1423 ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); 1424 else 1425 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); 1426 1427 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total); 1428 1429 DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", 1430 ppgtt->node.size >> 20, 1431 ppgtt->node.start / PAGE_SIZE); 1432 1433 DRM_DEBUG("Adding PPGTT at offset %x\n", 1434 ppgtt->pd.pd_offset << 10); 1435 1436 return 0; 1437 } 1438 1439 static int __hw_ppgtt_init(struct drm_device *dev, struct 
i915_hw_ppgtt *ppgtt, 1440 bool aliasing) 1441 { 1442 struct drm_i915_private *dev_priv = dev->dev_private; 1443 1444 ppgtt->base.dev = dev; 1445 ppgtt->base.scratch = dev_priv->gtt.base.scratch; 1446 1447 if (INTEL_INFO(dev)->gen < 8) 1448 return gen6_ppgtt_init(ppgtt, aliasing); 1449 else 1450 return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); 1451 } 1452 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 1453 { 1454 struct drm_i915_private *dev_priv = dev->dev_private; 1455 int ret = 0; 1456 1457 ret = __hw_ppgtt_init(dev, ppgtt, false); 1458 if (ret == 0) { 1459 kref_init(&ppgtt->ref); 1460 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, 1461 ppgtt->base.total); 1462 i915_init_vm(dev_priv, &ppgtt->base); 1463 } 1464 1465 return ret; 1466 } 1467 1468 int i915_ppgtt_init_hw(struct drm_device *dev) 1469 { 1470 struct drm_i915_private *dev_priv = dev->dev_private; 1471 struct intel_engine_cs *ring; 1472 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 1473 int i, ret = 0; 1474 1475 /* In the case of execlists, PPGTT is enabled by the context descriptor 1476 * and the PDPs are contained within the context itself. We don't 1477 * need to do anything here. */ 1478 if (i915.enable_execlists) 1479 return 0; 1480 1481 if (!USES_PPGTT(dev)) 1482 return 0; 1483 1484 if (IS_GEN6(dev)) 1485 gen6_ppgtt_enable(dev); 1486 else if (IS_GEN7(dev)) 1487 gen7_ppgtt_enable(dev); 1488 else if (INTEL_INFO(dev)->gen >= 8) 1489 gen8_ppgtt_enable(dev); 1490 else 1491 MISSING_CASE(INTEL_INFO(dev)->gen); 1492 1493 if (ppgtt) { 1494 for_each_ring(ring, dev_priv, i) { 1495 ret = ppgtt->switch_mm(ppgtt, ring); 1496 if (ret != 0) 1497 return ret; 1498 } 1499 } 1500 1501 return ret; 1502 } 1503 struct i915_hw_ppgtt * 1504 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) 1505 { 1506 struct i915_hw_ppgtt *ppgtt; 1507 int ret; 1508 1509 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 1510 if (!ppgtt) 1511 return ERR_PTR(-ENOMEM); 1512 1513 ret = i915_ppgtt_init(dev, ppgtt); 1514 if (ret) { 1515 kfree(ppgtt); 1516 return ERR_PTR(ret); 1517 } 1518 1519 ppgtt->file_priv = fpriv; 1520 1521 trace_i915_ppgtt_create(&ppgtt->base); 1522 1523 return ppgtt; 1524 } 1525 1526 void i915_ppgtt_release(struct kref *kref) 1527 { 1528 struct i915_hw_ppgtt *ppgtt = 1529 container_of(kref, struct i915_hw_ppgtt, ref); 1530 1531 trace_i915_ppgtt_release(&ppgtt->base); 1532 1533 /* vmas should already be unbound */ 1534 WARN_ON(!list_empty(&ppgtt->base.active_list)); 1535 WARN_ON(!list_empty(&ppgtt->base.inactive_list)); 1536 1537 list_del(&ppgtt->base.global_link); 1538 drm_mm_takedown(&ppgtt->base.mm); 1539 1540 ppgtt->base.cleanup(&ppgtt->base); 1541 kfree(ppgtt); 1542 } 1543 1544 static void 1545 ppgtt_bind_vma(struct i915_vma *vma, 1546 enum i915_cache_level cache_level, 1547 u32 flags) 1548 { 1549 const unsigned int num_entries = vma->obj->base.size >> PAGE_SHIFT; 1550 1551 /* Currently applicable only to VLV */ 1552 if (vma->obj->gt_ro) 1553 flags |= PTE_READ_ONLY; 1554 1555 vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, 1556 num_entries, 1557 cache_level, flags); 1558 } 1559 1560 static void ppgtt_unbind_vma(struct i915_vma *vma) 1561 { 1562 vma->vm->clear_range(vma->vm, 1563 vma->node.start, 1564 vma->obj->base.size, 1565 true); 1566 } 1567 1568 extern int intel_iommu_gfx_mapped; 1569 /* Certain Gen5 chipsets require require idling the GPU before 1570 * unmapping anything from the GTT when VT-d is enabled. 
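 *
 * The usual pattern, as in i915_gem_gtt_finish_object() below, is:
 *
 *	interruptible = do_idling(dev_priv);
 *	... unmap from the GTT ...
 *	undo_idling(dev_priv, interruptible);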
1571 */ 1572 static inline bool needs_idle_maps(struct drm_device *dev) 1573 { 1574 #ifdef CONFIG_INTEL_IOMMU 1575 /* Query intel_iommu to see if we need the workaround. Presumably that 1576 * was loaded first. 1577 */ 1578 if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped) 1579 return true; 1580 #endif 1581 return false; 1582 } 1583 1584 static bool do_idling(struct drm_i915_private *dev_priv) 1585 { 1586 bool ret = dev_priv->mm.interruptible; 1587 1588 if (unlikely(dev_priv->gtt.do_idle_maps)) { 1589 dev_priv->mm.interruptible = false; 1590 if (i915_gpu_idle(dev_priv->dev)) { 1591 DRM_ERROR("Couldn't idle GPU\n"); 1592 /* Wait a bit, in hopes it avoids the hang */ 1593 udelay(10); 1594 } 1595 } 1596 1597 return ret; 1598 } 1599 1600 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) 1601 { 1602 if (unlikely(dev_priv->gtt.do_idle_maps)) 1603 dev_priv->mm.interruptible = interruptible; 1604 } 1605 1606 void i915_check_and_clear_faults(struct drm_device *dev) 1607 { 1608 struct drm_i915_private *dev_priv = dev->dev_private; 1609 struct intel_engine_cs *ring; 1610 int i; 1611 1612 if (INTEL_INFO(dev)->gen < 6) 1613 return; 1614 1615 for_each_ring(ring, dev_priv, i) { 1616 u32 fault_reg; 1617 fault_reg = I915_READ(RING_FAULT_REG(ring)); 1618 if (fault_reg & RING_FAULT_VALID) { 1619 #if 0 1620 DRM_DEBUG_DRIVER("Unexpected fault\n" 1621 "\tAddr: 0x%08lx\n" 1622 "\tAddress space: %s\n" 1623 "\tSource ID: %d\n" 1624 "\tType: %d\n", 1625 fault_reg & PAGE_MASK, 1626 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", 1627 RING_FAULT_SRCID(fault_reg), 1628 RING_FAULT_FAULT_TYPE(fault_reg)); 1629 #endif 1630 I915_WRITE(RING_FAULT_REG(ring), 1631 fault_reg & ~RING_FAULT_VALID); 1632 } 1633 } 1634 POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); 1635 } 1636 1637 static void i915_ggtt_flush(struct drm_i915_private *dev_priv) 1638 { 1639 if (INTEL_INFO(dev_priv->dev)->gen < 6) { 1640 intel_gtt_chipset_flush(); 1641 } else { 1642 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 1643 POSTING_READ(GFX_FLSH_CNTL_GEN6); 1644 } 1645 } 1646 1647 void i915_gem_suspend_gtt_mappings(struct drm_device *dev) 1648 { 1649 struct drm_i915_private *dev_priv = dev->dev_private; 1650 1651 /* Don't bother messing with faults pre GEN6 as we have little 1652 * documentation supporting that it's a good idea. 1653 */ 1654 if (INTEL_INFO(dev)->gen < 6) 1655 return; 1656 1657 i915_check_and_clear_faults(dev); 1658 1659 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 1660 dev_priv->gtt.base.start, 1661 dev_priv->gtt.base.total, 1662 true); 1663 1664 i915_ggtt_flush(dev_priv); 1665 } 1666 1667 void i915_gem_restore_gtt_mappings(struct drm_device *dev) 1668 { 1669 struct drm_i915_private *dev_priv = dev->dev_private; 1670 struct drm_i915_gem_object *obj; 1671 struct i915_address_space *vm; 1672 1673 i915_check_and_clear_faults(dev); 1674 1675 /* First fill our portion of the GTT with scratch pages */ 1676 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 1677 dev_priv->gtt.base.start, 1678 dev_priv->gtt.base.total, 1679 true); 1680 1681 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 1682 struct i915_vma *vma = i915_gem_obj_to_vma(obj, 1683 &dev_priv->gtt.base); 1684 if (!vma) 1685 continue; 1686 1687 i915_gem_clflush_object(obj, obj->pin_display); 1688 /* The bind_vma code tries to be smart about tracking mappings. 1689 * Unfortunately above, we've just wiped out the mappings 1690 * without telling our object about it. So we need to fake it. 
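		 * (Hence GLOBAL_BIND is cleared from vma->bound below before
		 * re-binding, so that i915_vma_bind() rewrites the global GTT
		 * PTEs.)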
		 *
		 * Bind is not expected to fail since this is only called on
		 * resume and assumption is all requirements exist already.
		 */
		vma->bound &= ~GLOBAL_BIND;
		WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
	}

	if (INTEL_INFO(dev)->gen >= 8) {
		if (IS_CHERRYVIEW(dev))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev)) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			/* TODO: Perhaps it shouldn't be gen6 specific */

			struct i915_hw_ppgtt *ppgtt =
					container_of(vm, struct i915_hw_ppgtt,
						     base);

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;

			gen6_write_page_range(dev_priv, &ppgtt->pd,
					      0, ppgtt->base.total);
		}
	}

	i915_ggtt_flush(dev_priv);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

#if 0
	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;
#endif

	return 0;
}

static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#if 0
	writeq(pte, addr);
#else
	iowrite32((u32)pte, addr);
	iowrite32(pte >> 32, addr + 4);
#endif
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     vm_page_t *pages,
				     uint64_t start,
				     unsigned int num_entries,
				     enum i915_cache_level level, u32 unused)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen8_pte_t __iomem *gtt_entries =
		(gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	dma_addr_t addr = 0;

	for (i = 0; i < num_entries; i++) {
		addr = VM_PAGE_TO_PHYS(pages[i]);
		gen8_set_pte(&gtt_entries[i],
			     gen8_pte_encode(addr, level, true));
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1])
			!= gen8_pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     vm_page_t *pages,
				     uint64_t start,
				     unsigned int num_entries,
				     enum i915_cache_level level, u32 flags)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen6_pte_t __iomem *gtt_entries =
		(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	dma_addr_t addr = 0; /* shut up gcc */

	for (i = 0; i < num_entries; i++) {
		addr = VM_PAGE_TO_PHYS(pages[i]);
		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0) {
		unsigned long gtt = readl(&gtt_entries[i-1]);
		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
	}

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(vm->scratch.addr,
				      I915_CACHE_LLC,
				      use_scratch);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

static void i915_ggtt_bind_vma(struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 unused)
{
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
	const unsigned int num_entries = vma->obj->base.size >> PAGE_SHIFT;
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
1892 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 1893 1894 BUG_ON(!i915_is_ggtt(vma->vm)); 1895 intel_gtt_insert_pages(entry, num_entries, vma->ggtt_view.pages, flags); 1896 vma->bound = GLOBAL_BIND; 1897 } 1898 1899 static void i915_ggtt_clear_range(struct i915_address_space *vm, 1900 uint64_t start, 1901 uint64_t length, 1902 bool unused) 1903 { 1904 unsigned first_entry = start >> PAGE_SHIFT; 1905 unsigned num_entries = length >> PAGE_SHIFT; 1906 intel_gtt_clear_range(first_entry, num_entries); 1907 } 1908 1909 static void i915_ggtt_unbind_vma(struct i915_vma *vma) 1910 { 1911 const unsigned int first = vma->node.start >> PAGE_SHIFT; 1912 const unsigned int size = vma->obj->base.size >> PAGE_SHIFT; 1913 1914 BUG_ON(!i915_is_ggtt(vma->vm)); 1915 vma->bound = 0; 1916 intel_gtt_clear_range(first, size); 1917 } 1918 1919 static void ggtt_bind_vma(struct i915_vma *vma, 1920 enum i915_cache_level cache_level, 1921 u32 flags) 1922 { 1923 struct drm_device *dev = vma->vm->dev; 1924 struct drm_i915_private *dev_priv = dev->dev_private; 1925 struct drm_i915_gem_object *obj = vma->obj; 1926 struct vm_page **pages = obj->pages; 1927 1928 /* Currently applicable only to VLV */ 1929 if (obj->gt_ro) 1930 flags |= PTE_READ_ONLY; 1931 1932 if (i915_is_ggtt(vma->vm)) 1933 pages = vma->ggtt_view.pages; 1934 1935 /* If there is no aliasing PPGTT, or the caller needs a global mapping, 1936 * or we have a global mapping already but the cacheability flags have 1937 * changed, set the global PTEs. 1938 * 1939 * If there is an aliasing PPGTT it is anecdotally faster, so use that 1940 * instead if none of the above hold true. 1941 * 1942 * NB: A global mapping should only be needed for special regions like 1943 * "gtt mappable", SNB errata, or if specified via special execbuf 1944 * flags. At all other times, the GPU will use the aliasing PPGTT. 
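	 *
	 * Summarising the checks below: with no aliasing PPGTT, or when
	 * GLOBAL_BIND is requested, the global GTT PTEs are (re)written unless
	 * the object is already globally bound with the same cache level; when
	 * an aliasing PPGTT exists, its PTEs are likewise (re)written unless
	 * the object is already locally bound with the same cache level.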
1945 */ 1946 if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { 1947 if (!(vma->bound & GLOBAL_BIND) || 1948 (cache_level != obj->cache_level)) { 1949 vma->vm->insert_entries(vma->vm, pages, 1950 vma->node.start, 1951 obj->base.size >> PAGE_SHIFT, 1952 cache_level, flags); 1953 vma->bound |= GLOBAL_BIND; 1954 } 1955 } 1956 1957 if (dev_priv->mm.aliasing_ppgtt && 1958 (!(vma->bound & LOCAL_BIND) || 1959 (cache_level != obj->cache_level))) { 1960 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 1961 appgtt->base.insert_entries(&appgtt->base, pages, 1962 vma->node.start, 1963 obj->base.size >> PAGE_SHIFT, 1964 cache_level, flags); 1965 vma->bound |= LOCAL_BIND; 1966 } 1967 } 1968 1969 static void ggtt_unbind_vma(struct i915_vma *vma) 1970 { 1971 struct drm_device *dev = vma->vm->dev; 1972 struct drm_i915_private *dev_priv = dev->dev_private; 1973 struct drm_i915_gem_object *obj = vma->obj; 1974 1975 if (vma->bound & GLOBAL_BIND) { 1976 vma->vm->clear_range(vma->vm, 1977 vma->node.start, 1978 obj->base.size, 1979 true); 1980 vma->bound &= ~GLOBAL_BIND; 1981 } 1982 1983 if (vma->bound & LOCAL_BIND) { 1984 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 1985 appgtt->base.clear_range(&appgtt->base, 1986 vma->node.start, 1987 obj->base.size, 1988 true); 1989 vma->bound &= ~LOCAL_BIND; 1990 } 1991 } 1992 1993 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) 1994 { 1995 struct drm_device *dev = obj->base.dev; 1996 struct drm_i915_private *dev_priv = dev->dev_private; 1997 bool interruptible; 1998 1999 interruptible = do_idling(dev_priv); 2000 2001 #if 0 2002 if (!obj->has_dma_mapping) 2003 dma_unmap_sg(&dev->pdev->dev, 2004 obj->pages->sgl, obj->pages->nents, 2005 PCI_DMA_BIDIRECTIONAL); 2006 #endif 2007 2008 undo_idling(dev_priv, interruptible); 2009 } 2010 2011 static void i915_gtt_color_adjust(struct drm_mm_node *node, 2012 unsigned long color, 2013 u64 *start, 2014 u64 *end) 2015 { 2016 if (node->color != color) 2017 *start += 4096; 2018 2019 if (!list_empty(&node->node_list)) { 2020 node = list_entry(node->node_list.next, 2021 struct drm_mm_node, 2022 node_list); 2023 if (node->allocated && node->color != color) 2024 *end -= 4096; 2025 } 2026 } 2027 2028 static int i915_gem_setup_global_gtt(struct drm_device *dev, 2029 unsigned long start, 2030 unsigned long mappable_end, 2031 unsigned long end) 2032 { 2033 /* Let GEM Manage all of the aperture. 2034 * 2035 * However, leave one page at the end still bound to the scratch page. 2036 * There are a number of places where the hardware apparently prefetches 2037 * past the end of the object, and we've seen multiple hangs with the 2038 * GPU head pointer stuck in a batchbuffer bound at the last page of the 2039 * aperture. One page should be enough to keep any prefetching inside 2040 * of the aperture. 2041 */ 2042 struct drm_i915_private *dev_priv = dev->dev_private; 2043 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; 2044 unsigned long mappable; 2045 int error; 2046 struct drm_mm_node *entry; 2047 struct drm_i915_gem_object *obj; 2048 unsigned long hole_start, hole_end; 2049 int ret; 2050 2051 kprintf("MAPPABLE_END VS END %016jx %016jx\n", mappable_end, end); 2052 tsleep(&mappable_end, 0, "DELAY", hz); /* for kprintf */ 2053 /*BUG_ON(mappable_end > end);*/ 2054 2055 mappable = min(end, mappable_end) - start; 2056 2057 /* Subtract the guard page ... 
*/ 2058 drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE); 2059 2060 dev_priv->gtt.base.start = start; 2061 dev_priv->gtt.base.total = end - start; 2062 2063 if (intel_vgpu_active(dev)) { 2064 ret = intel_vgt_balloon(dev); 2065 if (ret) 2066 return ret; 2067 } 2068 2069 if (!HAS_LLC(dev)) 2070 dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; 2071 2072 /* Mark any preallocated objects as occupied */ 2073 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 2074 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); 2075 2076 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", 2077 i915_gem_obj_ggtt_offset(obj), obj->base.size); 2078 2079 WARN_ON(i915_gem_obj_ggtt_bound(obj)); 2080 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node); 2081 if (ret) { 2082 DRM_DEBUG_KMS("Reservation failed: %i\n", ret); 2083 return ret; 2084 } 2085 vma->bound |= GLOBAL_BIND; 2086 } 2087 2088 /* Clear any non-preallocated blocks */ 2089 drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { 2090 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 2091 hole_start, hole_end); 2092 ggtt_vm->clear_range(ggtt_vm, hole_start, 2093 hole_end - hole_start, true); 2094 } 2095 2096 #ifdef __DragonFly__ 2097 device_printf(dev->dev, 2098 "taking over the fictitious range 0x%lx-0x%lx\n", 2099 dev_priv->gtt.mappable_base + start, dev_priv->gtt.mappable_base + start + mappable); 2100 error = -vm_phys_fictitious_reg_range(dev_priv->gtt.mappable_base + start, 2101 dev_priv->gtt.mappable_base + start + mappable, VM_MEMATTR_WRITE_COMBINING); 2102 #endif 2103 2104 /* And finally clear the reserved guard page */ 2105 ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true); 2106 2107 if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) { 2108 struct i915_hw_ppgtt *ppgtt; 2109 2110 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 2111 if (!ppgtt) 2112 return -ENOMEM; 2113 2114 ret = __hw_ppgtt_init(dev, ppgtt, true); 2115 if (ret) { 2116 kfree(ppgtt); 2117 return ret; 2118 } 2119 2120 dev_priv->mm.aliasing_ppgtt = ppgtt; 2121 } 2122 2123 return 0; 2124 } 2125 2126 void i915_gem_init_global_gtt(struct drm_device *dev) 2127 { 2128 struct drm_i915_private *dev_priv = dev->dev_private; 2129 unsigned long gtt_size, mappable_size; 2130 2131 gtt_size = dev_priv->gtt.base.total; 2132 mappable_size = dev_priv->gtt.mappable_end; 2133 2134 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 2135 } 2136 2137 void i915_global_gtt_cleanup(struct drm_device *dev) 2138 { 2139 struct drm_i915_private *dev_priv = dev->dev_private; 2140 struct i915_address_space *vm = &dev_priv->gtt.base; 2141 2142 if (dev_priv->mm.aliasing_ppgtt) { 2143 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2144 2145 ppgtt->base.cleanup(&ppgtt->base); 2146 } 2147 2148 if (drm_mm_initialized(&vm->mm)) { 2149 if (intel_vgpu_active(dev)) 2150 intel_vgt_deballoon(); 2151 2152 drm_mm_takedown(&vm->mm); 2153 list_del(&vm->global_link); 2154 } 2155 2156 vm->cleanup(vm); 2157 } 2158 2159 static int setup_scratch_page(struct drm_device *dev) 2160 { 2161 struct drm_i915_private *dev_priv = dev->dev_private; 2162 struct vm_page *page; 2163 dma_addr_t dma_addr; 2164 2165 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); 2166 if (page == NULL) 2167 return -ENOMEM; 2168 set_pages_uc(page, 1); 2169 2170 #ifdef CONFIG_INTEL_IOMMU 2171 dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, 2172 PCI_DMA_BIDIRECTIONAL); 2173 if (pci_dma_mapping_error(dev->pdev, dma_addr)) 2174 return -EINVAL; 2175 #else 2176 
dma_addr = page_to_phys(page); 2177 #endif 2178 dev_priv->gtt.base.scratch.page = page; 2179 dev_priv->gtt.base.scratch.addr = dma_addr; 2180 2181 return 0; 2182 } 2183 2184 #if 0 2185 static void teardown_scratch_page(struct drm_device *dev) 2186 { 2187 struct drm_i915_private *dev_priv = dev->dev_private; 2188 struct vm_page *page = dev_priv->gtt.base.scratch.page; 2189 2190 set_pages_wb(page, 1); 2191 pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr, 2192 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 2193 __free_page(page); 2194 } 2195 #endif 2196 2197 static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) 2198 { 2199 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; 2200 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; 2201 return snb_gmch_ctl << 20; 2202 } 2203 2204 static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) 2205 { 2206 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; 2207 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; 2208 if (bdw_gmch_ctl) 2209 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 2210 2211 #ifdef CONFIG_X86_32 2212 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */ 2213 if (bdw_gmch_ctl > 4) 2214 bdw_gmch_ctl = 4; 2215 #endif 2216 2217 return bdw_gmch_ctl << 20; 2218 } 2219 2220 static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) 2221 { 2222 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; 2223 gmch_ctrl &= SNB_GMCH_GGMS_MASK; 2224 2225 if (gmch_ctrl) 2226 return 1 << (20 + gmch_ctrl); 2227 2228 return 0; 2229 } 2230 2231 static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) 2232 { 2233 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; 2234 snb_gmch_ctl &= SNB_GMCH_GMS_MASK; 2235 return snb_gmch_ctl << 25; /* 32 MB units */ 2236 } 2237 2238 static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) 2239 { 2240 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; 2241 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK; 2242 return bdw_gmch_ctl << 25; /* 32 MB units */ 2243 } 2244 2245 static size_t chv_get_stolen_size(u16 gmch_ctrl) 2246 { 2247 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; 2248 gmch_ctrl &= SNB_GMCH_GMS_MASK; 2249 2250 /* 2251 * 0x0 to 0x10: 32MB increments starting at 0MB 2252 * 0x11 to 0x16: 4MB increments starting at 8MB 2253 * 0x17 to 0x1d: 4MB increments start at 36MB 2254 */ 2255 if (gmch_ctrl < 0x11) 2256 return gmch_ctrl << 25; 2257 else if (gmch_ctrl < 0x17) 2258 return (gmch_ctrl - 0x11 + 2) << 22; 2259 else 2260 return (gmch_ctrl - 0x17 + 9) << 22; 2261 } 2262 2263 static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) 2264 { 2265 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; 2266 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK; 2267 2268 if (gen9_gmch_ctl < 0xf0) 2269 return gen9_gmch_ctl << 25; /* 32 MB units */ 2270 else 2271 /* 4MB increments starting at 0xf0 for 4MB */ 2272 return (gen9_gmch_ctl - 0xf0 + 1) << 22; 2273 } 2274 2275 static int ggtt_probe_common(struct drm_device *dev, 2276 size_t gtt_size) 2277 { 2278 struct drm_i915_private *dev_priv = dev->dev_private; 2279 phys_addr_t gtt_phys_addr; 2280 int ret; 2281 2282 /* For Modern GENs the PTEs and register space are split in the BAR */ 2283 gtt_phys_addr = pci_resource_start(dev->pdev, 0) + 2284 (pci_resource_len(dev->pdev, 0) / 2); 2285 2286 dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size); 2287 if (!dev_priv->gtt.gsm) { 2288 DRM_ERROR("Failed to map the gtt page table\n"); 2289 return -ENOMEM; 2290 } 2291 2292 ret = setup_scratch_page(dev); 2293 if (ret) { 2294 DRM_ERROR("Scratch setup failed\n"); 2295 /* iounmap will also get called at remove, but meh */ 2296 #if 0 2297 iounmap(dev_priv->gtt.gsm); 2298 #endif 2299 } 2300 2301 return ret; 2302 } 
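/* Worked example (illustrative only; the BAR and GSM sizes here are
 * assumptions, not taken from this file): with an 8MB GTTMMADR BAR,
 * ggtt_probe_common() above maps the GSM starting at BAR0 + 4MB. A
 * gtt_size of 2MB holds 2MB / 8 bytes = 256K gen8 PTEs, which the gen8
 * probe path below turns into
 * (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT = 1GB of GGTT space.
 */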
2303 2304 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability 2305 * bits. When using advanced contexts each context stores its own PAT, but 2306 * writing this data shouldn't be harmful even in those cases. */ 2307 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) 2308 { 2309 uint64_t pat; 2310 2311 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ 2312 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ 2313 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ 2314 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ 2315 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | 2316 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | 2317 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | 2318 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 2319 2320 if (!USES_PPGTT(dev_priv->dev)) 2321 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, 2322 * so RTL will always use the value corresponding to 2323 * pat_sel = 000". 2324 * So let's disable cache for GGTT to avoid screen corruptions. 2325 * MOCS still can be used though. 2326 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work 2327 * before this patch, i.e. the same uncached + snooping access 2328 * like on gen6/7 seems to be in effect. 2329 * - So this just fixes blitter/render access. Again it looks 2330 * like it's not just uncached access, but uncached + snooping. 2331 * So we can still hold onto all our assumptions wrt cpu 2332 * clflushing on LLC machines. 2333 */ 2334 pat = GEN8_PPAT(0, GEN8_PPAT_UC); 2335 2336 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b 2337 * write would work. */ 2338 I915_WRITE(GEN8_PRIVATE_PAT, pat); 2339 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); 2340 } 2341 2342 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) 2343 { 2344 uint64_t pat; 2345 2346 /* 2347 * Map WB on BDW to snooped on CHV. 2348 * 2349 * Only the snoop bit has meaning for CHV, the rest is 2350 * ignored. 2351 * 2352 * The hardware will never snoop for certain types of accesses: 2353 * - CPU GTT (GMADR->GGTT->no snoop->memory) 2354 * - PPGTT page tables 2355 * - some other special cycles 2356 * 2357 * As with BDW, we also need to consider the following for GT accesses: 2358 * "For GGTT, there is NO pat_sel[2:0] from the entry, 2359 * so RTL will always use the value corresponding to 2360 * pat_sel = 000". 2361 * Which means we must set the snoop bit in PAT entry 0 2362 * in order to keep the global status page working. 
2363 */ 2364 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | 2365 GEN8_PPAT(1, 0) | 2366 GEN8_PPAT(2, 0) | 2367 GEN8_PPAT(3, 0) | 2368 GEN8_PPAT(4, CHV_PPAT_SNOOP) | 2369 GEN8_PPAT(5, CHV_PPAT_SNOOP) | 2370 GEN8_PPAT(6, CHV_PPAT_SNOOP) | 2371 GEN8_PPAT(7, CHV_PPAT_SNOOP); 2372 2373 I915_WRITE(GEN8_PRIVATE_PAT, pat); 2374 I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); 2375 } 2376 2377 static int gen8_gmch_probe(struct drm_device *dev, 2378 size_t *gtt_total, 2379 size_t *stolen, 2380 phys_addr_t *mappable_base, 2381 unsigned long *mappable_end) 2382 { 2383 struct drm_i915_private *dev_priv = dev->dev_private; 2384 unsigned int gtt_size; 2385 u16 snb_gmch_ctl; 2386 int ret; 2387 2388 /* TODO: We're not aware of mappable constraints on gen8 yet */ 2389 *mappable_base = pci_resource_start(dev->pdev, 2); 2390 *mappable_end = pci_resource_len(dev->pdev, 2); 2391 2392 #if 0 2393 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39))) 2394 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39)); 2395 #endif 2396 2397 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 2398 2399 if (INTEL_INFO(dev)->gen >= 9) { 2400 *stolen = gen9_get_stolen_size(snb_gmch_ctl); 2401 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); 2402 } else if (IS_CHERRYVIEW(dev)) { 2403 *stolen = chv_get_stolen_size(snb_gmch_ctl); 2404 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl); 2405 } else { 2406 *stolen = gen8_get_stolen_size(snb_gmch_ctl); 2407 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); 2408 } 2409 2410 *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT; 2411 2412 if (IS_CHERRYVIEW(dev)) 2413 chv_setup_private_ppat(dev_priv); 2414 else 2415 bdw_setup_private_ppat(dev_priv); 2416 2417 ret = ggtt_probe_common(dev, gtt_size); 2418 2419 dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range; 2420 dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries; 2421 2422 return ret; 2423 } 2424 2425 static int gen6_gmch_probe(struct drm_device *dev, 2426 size_t *gtt_total, 2427 size_t *stolen, 2428 phys_addr_t *mappable_base, 2429 unsigned long *mappable_end) 2430 { 2431 struct drm_i915_private *dev_priv = dev->dev_private; 2432 unsigned int gtt_size; 2433 u16 snb_gmch_ctl; 2434 int ret; 2435 2436 *mappable_base = pci_resource_start(dev->pdev, 2); 2437 *mappable_end = pci_resource_len(dev->pdev, 2); 2438 2439 /* 64/512MB is the current min/max we actually know of, but this is just 2440 * a coarse sanity check. 
2441 */ 2442 if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) { 2443 DRM_ERROR("Unknown GMADR size (%lx)\n", 2444 dev_priv->gtt.mappable_end); 2445 return -ENXIO; 2446 } 2447 2448 #if 0 2449 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) 2450 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); 2451 #endif 2452 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 2453 2454 *stolen = gen6_get_stolen_size(snb_gmch_ctl); 2455 2456 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 2457 *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT; 2458 2459 ret = ggtt_probe_common(dev, gtt_size); 2460 2461 dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; 2462 dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; 2463 2464 return ret; 2465 } 2466 2467 static void gen6_gmch_remove(struct i915_address_space *vm) 2468 { 2469 #if 0 2470 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); 2471 2472 iounmap(gtt->gsm); 2473 teardown_scratch_page(vm->dev); 2474 #endif 2475 } 2476 2477 static int i915_gmch_probe(struct drm_device *dev, 2478 size_t *gtt_total, 2479 size_t *stolen, 2480 phys_addr_t *mappable_base, 2481 unsigned long *mappable_end) 2482 { 2483 struct drm_i915_private *dev_priv = dev->dev_private; 2484 #if 0 2485 int ret; 2486 2487 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); 2488 if (!ret) { 2489 DRM_ERROR("failed to set up gmch\n"); 2490 return -EIO; 2491 } 2492 #endif 2493 2494 intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); 2495 2496 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); 2497 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; 2498 2499 if (unlikely(dev_priv->gtt.do_idle_maps)) 2500 DRM_INFO("applying Ironlake quirks for intel_iommu\n"); 2501 2502 return 0; 2503 } 2504 2505 static void i915_gmch_remove(struct i915_address_space *vm) 2506 { 2507 intel_gmch_remove(); 2508 } 2509 2510 int i915_gem_gtt_init(struct drm_device *dev) 2511 { 2512 struct drm_i915_private *dev_priv = dev->dev_private; 2513 struct i915_gtt *gtt = &dev_priv->gtt; 2514 int ret; 2515 2516 if (INTEL_INFO(dev)->gen <= 5) { 2517 gtt->gtt_probe = i915_gmch_probe; 2518 gtt->base.cleanup = i915_gmch_remove; 2519 } else if (INTEL_INFO(dev)->gen < 8) { 2520 gtt->gtt_probe = gen6_gmch_probe; 2521 gtt->base.cleanup = gen6_gmch_remove; 2522 if (IS_HASWELL(dev) && dev_priv->ellc_size) 2523 gtt->base.pte_encode = iris_pte_encode; 2524 else if (IS_HASWELL(dev)) 2525 gtt->base.pte_encode = hsw_pte_encode; 2526 else if (IS_VALLEYVIEW(dev)) 2527 gtt->base.pte_encode = byt_pte_encode; 2528 else if (INTEL_INFO(dev)->gen >= 7) 2529 gtt->base.pte_encode = ivb_pte_encode; 2530 else 2531 gtt->base.pte_encode = snb_pte_encode; 2532 } else { 2533 dev_priv->gtt.gtt_probe = gen8_gmch_probe; 2534 dev_priv->gtt.base.cleanup = gen6_gmch_remove; 2535 } 2536 2537 ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, 2538 &gtt->mappable_base, &gtt->mappable_end); 2539 if (ret) 2540 return ret; 2541 2542 gtt->base.dev = dev; 2543 2544 /* GMADR is the PCI mmio aperture into the global GTT.
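 * On gen6+ this aperture is PCI BAR 2, probed above as mappable_base and
 * mappable_end; CPU accesses through it are translated by the GGTT PTEs,
 * which is why its size is reported separately from the total GTT below.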
*/ 2545 DRM_INFO("Memory usable by graphics device = %zdM\n", 2546 gtt->base.total >> 20); 2547 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); 2548 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); 2549 #ifdef CONFIG_INTEL_IOMMU 2550 if (intel_iommu_gfx_mapped) 2551 DRM_INFO("VT-d active for gfx access\n"); 2552 #endif 2553 /* 2554 * i915.enable_ppgtt is read-only, so do an early pass to validate the 2555 * user's requested state against the hardware/driver capabilities. We 2556 * do this now so that we can print out any log messages once rather 2557 * than every time we check intel_enable_ppgtt(). 2558 */ 2559 i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); 2560 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); 2561 2562 return 0; 2563 } 2564 2565 static struct i915_vma * 2566 __i915_gem_vma_create(struct drm_i915_gem_object *obj, 2567 struct i915_address_space *vm, 2568 const struct i915_ggtt_view *ggtt_view) 2569 { 2570 struct i915_vma *vma; 2571 2572 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) 2573 return ERR_PTR(-EINVAL); 2574 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 2575 if (vma == NULL) 2576 return ERR_PTR(-ENOMEM); 2577 2578 INIT_LIST_HEAD(&vma->vma_link); 2579 INIT_LIST_HEAD(&vma->mm_list); 2580 INIT_LIST_HEAD(&vma->exec_list); 2581 vma->vm = vm; 2582 vma->obj = obj; 2583 2584 if (INTEL_INFO(vm->dev)->gen >= 6) { 2585 if (i915_is_ggtt(vm)) { 2586 vma->ggtt_view = *ggtt_view; 2587 2588 vma->unbind_vma = ggtt_unbind_vma; 2589 vma->bind_vma = ggtt_bind_vma; 2590 } else { 2591 vma->unbind_vma = ppgtt_unbind_vma; 2592 vma->bind_vma = ppgtt_bind_vma; 2593 } 2594 } else { 2595 BUG_ON(!i915_is_ggtt(vm)); 2596 vma->ggtt_view = *ggtt_view; 2597 vma->unbind_vma = i915_ggtt_unbind_vma; 2598 vma->bind_vma = i915_ggtt_bind_vma; 2599 } 2600 2601 list_add_tail(&vma->vma_link, &obj->vma_list); 2602 if (!i915_is_ggtt(vm)) 2603 i915_ppgtt_get(i915_vm_to_ppgtt(vm)); 2604 2605 return vma; 2606 } 2607 2608 struct i915_vma * 2609 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 2610 struct i915_address_space *vm) 2611 { 2612 struct i915_vma *vma; 2613 2614 vma = i915_gem_obj_to_vma(obj, vm); 2615 if (!vma) 2616 vma = __i915_gem_vma_create(obj, vm, 2617 i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL); 2618 2619 return vma; 2620 } 2621 2622 struct i915_vma * 2623 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, 2624 const struct i915_ggtt_view *view) 2625 { 2626 struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); 2627 struct i915_vma *vma; 2628 2629 if (WARN_ON(!view)) 2630 return ERR_PTR(-EINVAL); 2631 2632 vma = i915_gem_obj_to_ggtt_view(obj, view); 2633 2634 if (IS_ERR(vma)) 2635 return vma; 2636 2637 if (!vma) 2638 vma = __i915_gem_vma_create(obj, ggtt, view); 2639 2640 return vma; 2641 2642 } 2643 2644 #if 0 2645 static void 2646 rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height, 2647 struct sg_table *st) 2648 { 2649 unsigned int column, row; 2650 unsigned int src_idx; 2651 struct scatterlist *sg = st->sgl; 2652 2653 st->nents = 0; 2654 2655 for (column = 0; column < width; column++) { 2656 src_idx = width * (height - 1) + column; 2657 for (row = 0; row < height; row++) { 2658 st->nents++; 2659 /* We don't need the pages, but need to initialize 2660 * the entries so the sg list can be happily traversed. 2661 * The only thing we need are DMA addresses. 
2662 */ 2663 sg_set_page(sg, NULL, PAGE_SIZE, 0); 2664 sg_dma_address(sg) = in[src_idx]; 2665 sg_dma_len(sg) = PAGE_SIZE; 2666 sg = sg_next(sg); 2667 src_idx -= width; 2668 } 2669 } 2670 } 2671 2672 static struct sg_table * 2673 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, 2674 struct drm_i915_gem_object *obj) 2675 { 2676 struct drm_device *dev = obj->base.dev; 2677 struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; 2678 unsigned long size, pages, rot_pages; 2679 struct sg_page_iter sg_iter; 2680 unsigned long i; 2681 dma_addr_t *page_addr_list; 2682 struct sg_table *st; 2683 unsigned int tile_pitch, tile_height; 2684 unsigned int width_pages, height_pages; 2685 int ret = -ENOMEM; 2686 2687 pages = obj->base.size / PAGE_SIZE; 2688 2689 /* Calculate tiling geometry. */ 2690 tile_height = intel_tile_height(dev, rot_info->pixel_format, 2691 rot_info->fb_modifier); 2692 tile_pitch = PAGE_SIZE / tile_height; 2693 width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch); 2694 height_pages = DIV_ROUND_UP(rot_info->height, tile_height); 2695 rot_pages = width_pages * height_pages; 2696 size = rot_pages * PAGE_SIZE; 2697 2698 /* Allocate a temporary list of source pages for random access. */ 2699 page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t)); 2700 if (!page_addr_list) 2701 return ERR_PTR(ret); 2702 2703 /* Allocate target SG list. */ 2704 st = kmalloc(sizeof(*st), GFP_KERNEL); 2705 if (!st) 2706 goto err_st_alloc; 2707 2708 ret = sg_alloc_table(st, rot_pages, GFP_KERNEL); 2709 if (ret) 2710 goto err_sg_alloc; 2711 2712 /* Populate source page list from the object. */ 2713 i = 0; 2714 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 2715 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); 2716 i++; 2717 } 2718 2719 /* Rotate the pages. */ 2720 rotate_pages(page_addr_list, width_pages, height_pages, st); 2721 2722 DRM_DEBUG_KMS( 2723 "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n", 2724 size, rot_info->pitch, rot_info->height, 2725 rot_info->pixel_format, width_pages, height_pages, 2726 rot_pages); 2727 2728 drm_free_large(page_addr_list); 2729 2730 return st; 2731 2732 err_sg_alloc: 2733 kfree(st); 2734 err_st_alloc: 2735 drm_free_large(page_addr_list); 2736 2737 DRM_DEBUG_KMS( 2738 "Failed to create rotated mapping for object size %lu! 
(%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n", 2739 size, ret, rot_info->pitch, rot_info->height, 2740 rot_info->pixel_format, width_pages, height_pages, 2741 rot_pages); 2742 return ERR_PTR(ret); 2743 } 2744 #endif 2745 2746 static inline int 2747 i915_get_ggtt_vma_pages(struct i915_vma *vma) 2748 { 2749 int ret = 0; 2750 2751 if (vma->ggtt_view.pages) 2752 return 0; 2753 2754 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) 2755 vma->ggtt_view.pages = vma->obj->pages; 2756 #if 0 2757 else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) 2758 vma->ggtt_view.pages = 2759 intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj); 2760 #endif 2761 else 2762 WARN_ONCE(1, "GGTT view %u not implemented!\n", 2763 vma->ggtt_view.type); 2764 2765 if (!vma->ggtt_view.pages) { 2766 DRM_ERROR("Failed to get pages for GGTT view type %u!\n", 2767 vma->ggtt_view.type); 2768 ret = -EINVAL; 2769 } else if (IS_ERR(vma->ggtt_view.pages)) { 2770 ret = PTR_ERR(vma->ggtt_view.pages); 2771 vma->ggtt_view.pages = NULL; 2772 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", 2773 vma->ggtt_view.type, ret); 2774 } 2775 2776 return ret; 2777 } 2778 2779 /** 2780 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space. 2781 * @vma: VMA to map 2782 * @cache_level: mapping cache level 2783 * @flags: flags like global or local mapping 2784 * 2785 * DMA addresses are taken from the scatter-gather table of this object (or of 2786 * this VMA in the case of non-default GGTT views) and the PTE entries are set up. 2787 * Note that DMA addresses are also the only part of the SG table we care about. 2788 */ 2789 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2790 u32 flags) 2791 { 2792 if (i915_is_ggtt(vma->vm)) { 2793 int ret = i915_get_ggtt_vma_pages(vma); 2794 2795 if (ret) 2796 return ret; 2797 } 2798 2799 vma->bind_vma(vma, cache_level, flags); 2800 2801 return 0; 2802 } 2803
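#if 0
/* Minimal usage sketch (illustrative only, not compiled): how the helpers
 * above are typically combined by a caller. The function name is
 * hypothetical, and node reservation/pinning, normally done by the caller
 * before binding, is elided.
 */
static int example_bind_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	/* Look up (or create) the normal-view VMA of this object in the
	 * global GTT. */
	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj,
						     &i915_ggtt_view_normal);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* Write PTEs for the object's pages at vma->node.start. */
	return i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
}
#endif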