/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/bitmap.h>

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing
 * pages in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view, the
 * globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics, which means that the passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */
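/*
 * Illustrative sketch only, kept out of the build: how a caller might pin
 * two views of the same object. The i915_gem_object_ggtt_pin() call shape
 * and PIN_MAPPABLE flag are assumed to match the rest of this tree; per the
 * copy semantics above, passing the global view singletons is fine.
 */
#if 0
static int example_pin_both_views(struct drm_i915_gem_object *obj)
{
	int ret;

	/* The normal, linear view that all old-style callers get. */
	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
				       4096, PIN_MAPPABLE);
	if (ret)
		return ret;

	/* The rotated view, e.g. for the display engine. */
	return i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_rotated,
					4096, PIN_MAPPABLE);
}
#endif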
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

const struct i915_ggtt_view i915_ggtt_view_normal;
const struct i915_ggtt_view i915_ggtt_view_rotated = {
	.type = I915_GGTT_VIEW_ROTATED
};

static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;

	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;

	if (intel_vgpu_active(dev))
		has_full_ppgtt = false; /* emulation is too hard */

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (enable_ppgtt == 0 || !has_aliasing_ppgtt))
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
	    dev->pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
		return 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags = 0;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}

static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid)
{
	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode
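/*
 * Worked example (illustrative, kept out of the build): for a page at bus
 * address 0x12345000 mapped uncached and valid, gen8_pte_encode() above
 * yields 0x12345000 | _PAGE_PRESENT | _PAGE_RW | PPAT_UNCACHED_INDEX, i.e.
 * the x86-style present/write bits plus the PPAT selector in the low bits,
 * with the page address untouched above them.
 */
#if 0
static void example_gen8_pte_encode(void)
{
	gen8_pte_t pte = gen8_pte_encode(0x12345000ULL, I915_CACHE_NONE, true);

	/* All control bits live below bit 12; the address is preserved. */
	WARN_ON((pte & ~0xfffULL) != 0x12345000ULL);
}
#endif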
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static int __setup_page_dma(struct drm_device *dev,
			    struct i915_page_dma *p, gfp_t flags)
{
	struct device *device = dev->pdev->dev;

	p->page = alloc_page(flags);
	if (!p->page)
		return -ENOMEM;

	p->daddr = dma_map_page(device,
				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);

	if (dma_mapping_error(device, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}

	return 0;
}

static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
	return __setup_page_dma(dev, p, GFP_KERNEL);
}

static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
	if (WARN_ON(!p->page))
		return;

	dma_unmap_page(dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

static void *kmap_page_dma(struct i915_page_dma *p)
{
	return kmap_atomic(p->page);
}

/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
{
	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter, so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))

#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))

static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
			  const uint64_t val)
{
	int i;
	uint64_t * const vaddr = kmap_page_dma(p);

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_page_dma(dev, vaddr);
}

static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
			     const uint32_t val32)
{
	uint64_t v = val32;

	v = v << 32 | val32;

	fill_page_dma(dev, p, v);
}

static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
{
	struct i915_page_scratch *sp;
	int ret;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (sp == NULL)
		return ERR_PTR(-ENOMEM);

	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
	if (ret) {
		kfree(sp);
		return ERR_PTR(ret);
	}

	set_pages_uc(px_page(sp), 1);

	return sp;
}

static void free_scratch_page(struct drm_device *dev,
			      struct i915_page_scratch *sp)
{
	set_pages_wb(px_page(sp), 1);

	cleanup_px(dev, sp);
	kfree(sp);
}

static struct i915_page_table *alloc_pt(struct drm_device *dev)
{
	struct i915_page_table *pt;
	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
		GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);

	if (!pt->used_ptes)
		goto fail_bitmap;

	ret = setup_px(dev, pt);
	if (ret)
		goto fail_page_m;

	return pt;

fail_page_m:
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
}

static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
{
	cleanup_px(dev, pt);
	kfree(pt->used_ptes);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen8_pte_t scratch_pte;

	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
				      I915_CACHE_LLC, true);

	fill_px(vm->dev, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen6_pte_t scratch_pte;

	WARN_ON(px_dma(vm->scratch_page) == 0);

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	fill32_px(vm->dev, pt, scratch_pte);
}

static struct i915_page_directory *alloc_pd(struct drm_device *dev)
{
	struct i915_page_directory *pd;
	int ret = -ENOMEM;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
				sizeof(*pd->used_pdes), GFP_KERNEL);
	if (!pd->used_pdes)
		goto fail_bitmap;

	ret = setup_px(dev, pd);
	if (ret)
		goto fail_page_m;

	return pd;

fail_page_m:
	kfree(pd->used_pdes);
fail_bitmap:
	kfree(pd);

	return ERR_PTR(ret);
}

static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
{
	if (px_page(pd)) {
		cleanup_px(dev, pd);
		kfree(pd->used_pdes);
		kfree(pd);
	}
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	gen8_pde_t scratch_pde;

	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

	fill_px(vm->dev, pd, scratch_pde);
}

static int __pdp_init(struct drm_device *dev,
		      struct i915_page_directory_pointer *pdp)
{
	size_t pdpes = I915_PDPES_PER_PDP(dev);

	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
				  sizeof(unsigned long),
				  GFP_KERNEL);
	if (!pdp->used_pdpes)
		return -ENOMEM;

	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
				      GFP_KERNEL);
	if (!pdp->page_directory) {
		kfree(pdp->used_pdpes);
		/* the PDP might be the statically allocated top level.
		 * Keep it as clean as possible. */
		pdp->used_pdpes = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->used_pdpes);
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static struct
i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!USES_FULL_48BIT_PPGTT(dev));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(dev, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(dev, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct drm_device *dev,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);
	if (USES_FULL_48BIT_PPGTT(dev)) {
		cleanup_px(dev, pdp);
		kfree(pdp);
	}
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm->dev, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	gen8_ppgtt_pml4e_t scratch_pml4e;

	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
					  I915_CACHE_LLC);

	fill_px(vm->dev, pml4, scratch_pml4e);
}

static void
gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  struct i915_page_directory *pd,
			  int index)
{
	gen8_ppgtt_pdpe_t *page_directorypo;

	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		return;

	page_directorypo = kmap_px(pdp);
	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_px(ppgtt, page_directorypo);
}

static void
gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
				  struct i915_pml4 *pml4,
				  struct i915_page_directory_pointer *pdp,
				  int index)
{
	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);

	WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_px(ppgtt, pagemap);
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	BUG_ON(entry >= 4);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
	intel_ring_emit(ring, upper_32_bits(addr));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
	intel_ring_emit(ring, lower_32_bits(addr));
	intel_ring_advance(ring);

	return 0;
}

static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
				 struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
				       struct i915_page_directory_pointer *pdp,
				       uint64_t start,
				       uint64_t length,
				       gen8_pte_t scratch_pte)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned last_pte, i;

	if (WARN_ON(!pdp))
		return;

	while (num_entries) {
		struct i915_page_directory *pd;
		struct i915_page_table *pt;

		if (WARN_ON(!pdp->page_directory[pdpe]))
			break;

		pd = pdp->page_directory[pdpe];

		if (WARN_ON(!pd->page_table[pde]))
			break;

		pt = pd->page_table[pde];

		if (WARN_ON(!px_page(pt)))
			break;

		last_pte = pte + num_entries;
		if (last_pte > GEN8_PTES)
			last_pte = GEN8_PTES;

		pt_vaddr = kmap_px(pt);

		for (i = pte; i < last_pte; i++) {
			pt_vaddr[i] = scratch_pte;
			num_entries--;
		}

		kunmap_px(ppgtt, pt_vaddr); /* XXX dillon, out of order
					     * patch from linux
					     * 44a71024 12-Apr-2016
					     */

		pte = 0;
		if (++pde == I915_PDES) {
			if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
				break;
			pde = 0;
		}
	}
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, use_scratch);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
					   scratch_pte);
	} else {
		uint64_t templ4, pml4e;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
			gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
						   scratch_pte);
		}
	}
}

static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
			      struct i915_page_directory_pointer *pdp,
			      struct sg_page_iter *sg_iter,
			      uint64_t start,
			      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);

	pt_vaddr = NULL;

	while (__sg_page_iter_next(sg_iter)) {
		if (pt_vaddr == NULL) {
			struct i915_page_directory *pd = pdp->page_directory[pdpe];
			while (pd == NULL) {
				kprintf("PD NULL pdp %p pdpe %u\n", pdp, pdpe);
				tsleep(&pd, 0, "froze", hz);
			}
			struct i915_page_table *pt = pd->page_table[pde];
			while (pt == NULL) {
				kprintf("PT NULL pd %p pde %u\n", pd, pde);
				tsleep(&pd, 0, "froze", hz);
			}

			pt_vaddr = kmap_px(pt);
		}

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
					cache_level, true);
		if (++pte == GEN8_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == I915_PDES) {
				if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
					break;
				pde = 0;
			}
			pte = 0;
		}
	}

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level,
				      u32 unused)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	struct sg_page_iter sg_iter;

	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
					      cache_level);
	} else {
		struct i915_page_directory_pointer *pdp;
		uint64_t templ4, pml4e;
		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;

		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
						      start, cache_level);
		}
	}
}

static void gen8_free_page_tables(struct drm_device *dev,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
		if (WARN_ON(!pd->page_table[i]))
			continue;

		free_pt(dev, pd->page_table[i]);
		pd->page_table[i] = NULL;
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	vm->scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(vm->scratch_page))
		return PTR_ERR(vm->scratch_page);

	vm->scratch_pt = alloc_pt(dev);
	if (IS_ERR(vm->scratch_pt)) {
		free_scratch_page(dev, vm->scratch_page);
		return PTR_ERR(vm->scratch_pt);
	}

	vm->scratch_pd = alloc_pd(dev);
	if (IS_ERR(vm->scratch_pd)) {
		free_pt(dev, vm->scratch_pt);
		free_scratch_page(dev, vm->scratch_page);
		return PTR_ERR(vm->scratch_pd);
	}

	if (USES_FULL_48BIT_PPGTT(dev)) {
		vm->scratch_pdp = alloc_pdp(dev);
		if (IS_ERR(vm->scratch_pdp)) {
			free_pd(dev, vm->scratch_pd);
			free_pt(dev, vm->scratch_pt);
			free_scratch_page(dev, vm->scratch_page);
			return PTR_ERR(vm->scratch_pdp);
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (USES_FULL_48BIT_PPGTT(dev))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;
}
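/*
 * The scratch structures built above chain into one another so that any
 * unallocated part of the address space resolves to the scratch page: an
 * unused PML4E points at scratch_pdp, whose PDPEs point at scratch_pd,
 * whose PDEs point at scratch_pt, whose PTEs point at scratch_page. A
 * stray read through any unbacked VA therefore lands on the scratch page
 * at every level of the walk.
 */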

static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	enum vgt_g2v_type msg;
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int offset = vgtif_reg(pdp0_lo);
	int i;

	if (USES_FULL_48BIT_PPGTT(dev)) {
		u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(offset, lower_32_bits(daddr));
		I915_WRITE(offset + 4, upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(offset, lower_32_bits(daddr));
			I915_WRITE(offset + 4, upper_32_bits(daddr));

			offset += 8;
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	if (USES_FULL_48BIT_PPGTT(dev))
		free_pdp(dev, vm->scratch_pdp);
	free_pd(dev, vm->scratch_pd);
	free_pt(dev, vm->scratch_pt);
	free_scratch_page(dev, vm->scratch_page);
}

static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
				    struct i915_page_directory_pointer *pdp)
{
	int i;

	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
		if (WARN_ON(!pdp->page_directory[i]))
			continue;

		gen8_free_page_tables(dev, pdp->page_directory[i]);
		free_pd(dev, pdp->page_directory[i]);
	}

	free_pdp(dev, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
		if (WARN_ON(!ppgtt->pml4.pdps[i]))
			continue;

		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	if (intel_vgpu_active(vm->dev))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
	else
		gen8_ppgtt_cleanup_4lvl(ppgtt);

	gen8_free_scratch(vm);
}

/**
 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
 * @vm:	Master vm structure.
 * @pd:	Page directory for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pts:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page tables. Extremely similar to
 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are
 * limited by the page directory boundary (instead of the page directory
 * pointer). That boundary is 1GB virtual. Therefore, unlike
 * gen8_ppgtt_alloc_page_directories(), it is possible, and likely, that the
 * caller will need to use multiple calls of this function to achieve the
 * appropriate allocation.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
				     struct i915_page_directory *pd,
				     uint64_t start,
				     uint64_t length,
				     unsigned long *new_pts)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_table *pt;
	uint64_t temp;
	uint32_t pde;

	gen8_for_each_pde(pt, pd, start, length, temp, pde) {
		/* Don't reallocate page tables */
		if (test_bit(pde, pd->used_pdes)) {
			/* Scratch is never allocated this way */
			WARN_ON(pt == vm->scratch_pt);
			continue;
		}

		pt = alloc_pt(dev);
		if (IS_ERR(pt))
			goto unwind_out;

		gen8_initialize_pt(vm, pt);
		pd->page_table[pde] = pt;
		__set_bit(pde, new_pts);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pde, new_pts, I915_PDES)
		free_pt(dev, pd->page_table[pde]);

	return -ENOMEM;
}

/**
 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
 * @vm:	Master vm structure.
 * @pdp:	Page directory pointer for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pds:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directories starting at the pde index
 * of @start, and ending at the pde index @start + @length. This function will
 * skip over already allocated page directories within the range, and only
 * allocate new ones, setting the appropriate pointer within the pdp as well as
 * the correct position in the bitmap @new_pds.
 *
 * The function will only allocate the pages within the range for a given page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller. This is not currently possible, and the BUG in the
 * code will prevent it.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_directory *pd;
	uint64_t temp;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);

	WARN_ON(!bitmap_empty(new_pds, pdpes));

	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
		if (test_bit(pdpe, pdp->used_pdpes))
			continue;

		pd = alloc_pd(dev);
		if (IS_ERR(pd))
			goto unwind_out;

		gen8_initialize_pd(vm, pd);
		pdp->page_directory[pdpe] = pd;
		__set_bit(pdpe, new_pds);
		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pdpe, new_pds, pdpes)
		free_pd(dev, pdp->page_directory[pdpe]);

	return -ENOMEM;
}

/**
 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
 * @vm:	Master vm structure.
 * @pml4:	Page map level 4 for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pdps:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directory pointers. Extremely similar
 * to gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
 * The main difference is here we are limited by the pml4 boundary (instead of
 * the page directory pointer).
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pdps)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_directory_pointer *pdp;
	uint64_t temp;
	uint32_t pml4e;

	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));

	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
		if (!test_bit(pml4e, pml4->used_pml4es)) {
			pdp = alloc_pdp(dev);
			if (IS_ERR(pdp))
				goto unwind_out;

			gen8_initialize_pdp(vm, pdp);
			pml4->pdps[pml4e] = pdp;
			__set_bit(pml4e, new_pdps);
			trace_i915_page_directory_pointer_entry_alloc(vm,
								      pml4e,
								      start,
								      GEN8_PML4E_SHIFT);
		}
	}

	return 0;

unwind_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		free_pdp(dev, pml4->pdps[pml4e]);

	return -ENOMEM;
}

static void
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
{
	kfree(new_pts);
	kfree(new_pds);
}

/* Fills in the page directory bitmap, and the array of page tables bitmap.
 * Both of these are based on the number of PDPEs in the system.
 */
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
					 unsigned long **new_pts,
					 uint32_t pdpes)
{
	unsigned long *pds;
	unsigned long *pts;

	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
	if (!pds)
		return -ENOMEM;

	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
		      GFP_TEMPORARY);
	if (!pts)
		goto err_out;

	*new_pds = pds;
	*new_pts = pts;

	return 0;

err_out:
	free_gen8_temp_bitmaps(pds, pts);
	return -ENOMEM;
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}
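/*
 * Layout note (illustrative): alloc_gen8_temp_bitmaps() above hands back one
 * bitmap of pdpes bits for newly allocated page directories, plus a second
 * area holding one I915_PDES-bit bitmap per possible pdpe, laid out back to
 * back. The per-directory bitmap for a given pdpe therefore starts at
 *
 *	new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES)
 *
 * which is exactly how gen8_alloc_va_range_3lvl() below indexes it, both
 * when allocating and when unwinding on error.
 */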

static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp,
				    uint64_t start,
				    uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	unsigned long *new_page_dirs, *new_page_tables;
	struct drm_device *dev = vm->dev;
	struct i915_page_directory *pd;
	const uint64_t orig_start = start;
	const uint64_t orig_length = length;
	uint64_t temp;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
	int ret;

	/* Wrap is never okay since we can only represent 48b, and we don't
	 * actually use the other side of the canonical address space.
	 */
	if (WARN_ON(start + length < start))
		return -ENODEV;

	if (WARN_ON(start + length > vm->total))
		return -ENODEV;

	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Do the allocations first so we can easily bail out */
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
	if (ret) {
		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
		return ret;
	}

	/* For every page directory referenced, allocate page tables */
	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
		if (ret)
			goto err_out;
	}

	start = orig_start;
	length = orig_length;

	/* Allocations have completed successfully, so set the bitmaps, and do
	 * the mappings. */
	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
		gen8_pde_t *const page_directory = kmap_px(pd);
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		/* Every pd should be allocated, we just did that above. */
		WARN_ON(!pd);

		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
			/* Same reasoning as pd */
			WARN_ON(!pt);
			if (pt == NULL) /* XXX dillon hack */
				continue; /* XXX dillon hack */
			WARN_ON(!pd_len);
			WARN_ON(!gen8_pte_count(pd_start, pd_len));

			/* Set our used ptes within the page table */
			bitmap_set(pt->used_ptes,
				   gen8_pte_index(pd_start),
				   gen8_pte_count(pd_start, pd_len));

			/* Our pde is now pointing to the pagetable, pt */
			__set_bit(pde, pd->used_pdes);

			/* Map the PDE to the page table */
			page_directory[pde] = gen8_pde_encode(px_dma(pt),
							      I915_CACHE_LLC);
			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
							gen8_pte_index(start),
							gen8_pte_count(start, length),
							GEN8_PTES);

			/* NB: We haven't yet mapped ptes to pages. At this
			 * point we're still relying on insert_entries() */
		}

		kunmap_px(ppgtt, page_directory);
		__set_bit(pdpe, pdp->used_pdpes);
		gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
	}

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return 0;

err_out:
	while (pdpe--) {
		for_each_set_bit(temp, new_page_tables + pdpe *
				BITS_TO_LONGS(I915_PDES), I915_PDES)
			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
	}

	for_each_set_bit(pdpe, new_page_dirs, pdpes)
		free_pd(dev, pdp->page_directory[pdpe]);

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return ret;
}

static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
				    struct i915_pml4 *pml4,
				    uint64_t start,
				    uint64_t length)
{
	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	struct i915_page_directory_pointer *pdp;
	uint64_t temp, pml4e;
	int ret = 0;

	/* Do the pml4 allocations first, so we don't need to track the newly
	 * allocated tables below the pdp */
	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);

	/* The page directory and page table allocations are done in the
	 * shared 3- and 4-level code. Just allocate the pdps.
	 */
	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
						new_pdps);
	if (ret)
		return ret;

	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
	     "The allocation has spanned more than 512GB. "
	     "It is highly likely this is incorrect.");

	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
		WARN_ON(!pdp);

		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
		if (ret)
			goto err_out;

		gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
	}

	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
		  GEN8_PML4ES_PER_PML4);

	return 0;

err_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);

	return ret;
}

static int gen8_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	if (USES_FULL_48BIT_PPGTT(vm->dev))
		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
	else
		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}

static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
			  uint64_t start, uint64_t length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_page_directory *pd;
	uint64_t temp;
	uint32_t pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		if (!test_bit(pdpe, pdp->used_pdpes))
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
			uint32_t pte;
			gen8_pte_t *pt_vaddr;

			if (!test_bit(pde, pd->used_pdes))
				continue;

			pt_vaddr = kmap_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				uint64_t va =
					(pdpe << GEN8_PDPE_SHIFT) |
					(pde << GEN8_PDE_SHIFT) |
					(pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%lx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %lx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			/* don't use kunmap_px, it could trigger
			 * an unnecessary flush.
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, true);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
		uint64_t templ4, pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) {
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, " PML4E #%lu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}

static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
	unsigned long *new_page_dirs, *new_page_tables;
	uint32_t pdpes = I915_PDPES_PER_PDP(ppgtt->base.dev);
	int ret;

	/* We allocate temp bitmaps for the page tables for no gain,
	 * but as this is for init only, let's keep things simple.
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);

	return ret;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 */
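/*
 * Index arithmetic as a worked example (illustrative, kept out of the
 * build): a 48b VA decomposes into 9 bits each of PML4E/PDPE/PDE/PTE index
 * above the 12-bit page offset. Assuming the usual gen8_*_index() helpers
 * from i915_gem_gtt.h, for va = 0x8080604000:
 */
#if 0
static void example_gen8_va_breakdown(void)
{
	const uint64_t va = 0x8080604000ULL;

	WARN_ON(gen8_pml4e_index(va) != 1);	/* bits 39..47 */
	WARN_ON(gen8_pdpe_index(va) != 2);	/* bits 30..38 */
	WARN_ON(gen8_pde_index(va) != 3);	/* bits 21..29 */
	WARN_ON(gen8_pte_index(va) != 4);	/* bits 12..20 */
}
#endif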
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	int ret;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret)
		return ret;

	ppgtt->base.start = 0;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->base.total = 1ULL << 48;
		ppgtt->switch_mm = gen8_48b_mm_switch;
	} else {
		ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		ppgtt->base.total = 1ULL << 32;
		ppgtt->switch_mm = gen8_legacy_mm_switch;
		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
							      0, 0,
							      GEN8_PML4E_SHIFT);

		if (intel_vgpu_active(ppgtt->base.dev)) {
			ret = gen8_preallocate_top_level_pdps(ppgtt);
			if (ret)
				goto free_scratch;
		}
	}

	if (intel_vgpu_active(ppgtt->base.dev))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	uint32_t pd_entry;
	uint32_t pte, pde, temp;
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, " SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_px(ppgtt, pt_vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static void gen6_write_pde(struct i915_page_directory *pd,
			   const int pde, struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	struct i915_hw_ppgtt *ppgtt =
		container_of(pd, struct i915_hw_ppgtt, pd);
	u32 pd_entry;

	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
	pd_entry |= GEN6_PDE_VALID;

	writel(pd_entry, ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
				  struct i915_page_directory *pd,
				  uint32_t start, uint32_t length)
{
	struct i915_page_table *pt;
	uint32_t pde, temp;

	gen6_for_each_pde(pt, pd, start, length, temp, pde)
		gen6_write_pde(pd, pde, pt);

	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
	readl(dev_priv->gtt.gsm);
}

static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);

	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
}

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* XXX: RCS is the only one to auto invalidate the TLBs? */
	if (ring->id != RCS) {
		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));

	POSTING_READ(RING_PP_DIR_DCLV(ring));

	return 0;
}

static void gen8_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int j;

	for_each_ring(ring, dev_priv, j) {
		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
	}
}

static void gen7_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t ecochk, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_ring(ring, dev_priv, i) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned first_pte = first_entry % GEN6_PTES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > GEN6_PTES)
			last_pte = GEN6_PTES;

		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_px(ppgtt, pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 flags)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_pte_t *pt_vaddr;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		pt_vaddr[act_pte] =
			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
				       cache_level, true, flags);

		if (++act_pte == GEN6_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}
	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static int gen6_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start_in, uint64_t length_in)
{
	DECLARE_BITMAP(new_page_tables, I915_PDES);
	struct drm_device *dev = vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	struct i915_page_table *pt;
	uint32_t start, length, start_save, length_save;
	uint32_t pde, temp;
	int ret;

	if (WARN_ON(start_in + length_in > ppgtt->base.total))
		return -ENODEV;

	start = start_save = start_in;
	length = length_save = length_in;

	bitmap_zero(new_page_tables, I915_PDES);

	/* The allocation is done in two stages so that we can bail out with
	 * minimal amount of pain. The first stage finds new page tables that
	 * need allocation. The second stage marks the used ptes within the
	 * page tables.
	 */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
		if (pt != vm->scratch_pt) {
			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
			continue;
		}

		/* We've already allocated a page table */
		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));

		pt = alloc_pt(dev);
		if (IS_ERR(pt)) {
			ret = PTR_ERR(pt);
			goto unwind_out;
		}

		gen6_initialize_pt(vm, pt);

		ppgtt->pd.page_table[pde] = pt;
		__set_bit(pde, new_page_tables);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
	}

	start = start_save;
	length = length_save;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);

		bitmap_zero(tmp_bitmap, GEN6_PTES);
		bitmap_set(tmp_bitmap, gen6_pte_index(start),
			   gen6_pte_count(start, length));

		if (__test_and_clear_bit(pde, new_page_tables))
			gen6_write_pde(&ppgtt->pd, pde, pt);

		trace_i915_page_table_entry_map(vm, pde, pt,
						gen6_pte_index(start),
						gen6_pte_count(start, length),
						GEN6_PTES);
		bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
			  GEN6_PTES);
	}

	WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));

	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
	readl(dev_priv->gtt.gsm);

	mark_tlbs_dirty(ppgtt);
	return 0;

unwind_out:
	for_each_set_bit(pde, new_page_tables, I915_PDES) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde];

		ppgtt->pd.page_table[pde] = vm->scratch_pt;
		free_pt(vm->dev, pt);
	}

	mark_tlbs_dirty(ppgtt);
	return ret;
}

static int gen6_init_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	vm->scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(vm->scratch_page))
		return PTR_ERR(vm->scratch_page);

	vm->scratch_pt = alloc_pt(dev);
	if (IS_ERR(vm->scratch_pt)) {
		free_scratch_page(dev, vm->scratch_page);
		return PTR_ERR(vm->scratch_pt);
	}

	gen6_initialize_pt(vm, vm->scratch_pt);

	return 0;
}

static void gen6_free_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	free_pt(dev, vm->scratch_pt);
	free_scratch_page(dev, vm->scratch_page);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	struct i915_page_table *pt;
	uint32_t pde;

	drm_mm_remove_node(&ppgtt->node);

	gen6_for_all_pdes(pt, ppgtt, pde) {
		if (pt != vm->scratch_pt)
			free_pt(ppgtt->base.dev, pt);
	}

	gen6_free_scratch(vm);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool retried = false;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
2022 */ 2023 BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); 2024 2025 ret = gen6_init_scratch(vm); 2026 if (ret) 2027 return ret; 2028 2029 alloc: 2030 ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, 2031 &ppgtt->node, GEN6_PD_SIZE, 2032 GEN6_PD_ALIGN, 0, 2033 0, dev_priv->gtt.base.total, 2034 DRM_MM_TOPDOWN); 2035 if (ret == -ENOSPC && !retried) { 2036 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, 2037 GEN6_PD_SIZE, GEN6_PD_ALIGN, 2038 I915_CACHE_NONE, 2039 0, dev_priv->gtt.base.total, 2040 0); 2041 if (ret) 2042 goto err_out; 2043 2044 retried = true; 2045 goto alloc; 2046 } 2047 2048 if (ret) 2049 goto err_out; 2050 2051 2052 if (ppgtt->node.start < dev_priv->gtt.mappable_end) 2053 DRM_DEBUG("Forced to use aperture for PDEs\n"); 2054 2055 return 0; 2056 2057 err_out: 2058 gen6_free_scratch(vm); 2059 return ret; 2060 } 2061 2062 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) 2063 { 2064 return gen6_ppgtt_allocate_page_directories(ppgtt); 2065 } 2066 2067 static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, 2068 uint64_t start, uint64_t length) 2069 { 2070 struct i915_page_table *unused; 2071 uint32_t pde, temp; 2072 2073 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) 2074 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; 2075 } 2076 2077 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 2078 { 2079 struct drm_device *dev = ppgtt->base.dev; 2080 struct drm_i915_private *dev_priv = dev->dev_private; 2081 int ret; 2082 2083 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; 2084 if (IS_GEN6(dev)) { 2085 ppgtt->switch_mm = gen6_mm_switch; 2086 } else if (IS_HASWELL(dev)) { 2087 ppgtt->switch_mm = hsw_mm_switch; 2088 } else if (IS_GEN7(dev)) { 2089 ppgtt->switch_mm = gen7_mm_switch; 2090 } else 2091 BUG(); 2092 2093 if (intel_vgpu_active(dev)) 2094 ppgtt->switch_mm = vgpu_mm_switch; 2095 2096 ret = gen6_ppgtt_alloc(ppgtt); 2097 if (ret) 2098 return ret; 2099 2100 ppgtt->base.allocate_va_range = gen6_alloc_va_range; 2101 ppgtt->base.clear_range = gen6_ppgtt_clear_range; 2102 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 2103 ppgtt->base.unbind_vma = ppgtt_unbind_vma; 2104 ppgtt->base.bind_vma = ppgtt_bind_vma; 2105 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 2106 ppgtt->base.start = 0; 2107 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE; 2108 ppgtt->debug_dump = gen6_dump_ppgtt; 2109 2110 ppgtt->pd.base.ggtt_offset = 2111 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); 2112 2113 ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + 2114 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t); 2115 2116 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); 2117 2118 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total); 2119 2120 DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", 2121 ppgtt->node.size >> 20, 2122 ppgtt->node.start / PAGE_SIZE); 2123 2124 DRM_DEBUG("Adding PPGTT at offset %x\n", 2125 ppgtt->pd.base.ggtt_offset << 10); 2126 2127 return 0; 2128 } 2129 2130 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 2131 { 2132 ppgtt->base.dev = dev; 2133 2134 if (INTEL_INFO(dev)->gen < 8) 2135 return gen6_ppgtt_init(ppgtt); 2136 else 2137 return gen8_ppgtt_init(ppgtt); 2138 } 2139 2140 static void i915_address_space_init(struct i915_address_space *vm, 2141 struct drm_i915_private *dev_priv) 2142 { 2143 drm_mm_init(&vm->mm, vm->start, vm->total); 2144 vm->dev = dev_priv->dev; 2145 INIT_LIST_HEAD(&vm->active_list); 2146 
	INIT_LIST_HEAD(&vm->inactive_list);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	ret = __hw_ppgtt_init(dev, ppgtt);
	if (ret == 0) {
		kref_init(&ppgtt->ref);
		i915_address_space_init(&ppgtt->base, dev_priv);
	}

	return ret;
}

int i915_ppgtt_init_hw(struct drm_device *dev)
{
	/* In the case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself. We don't
	 * need to do anything here. */
	if (i915.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev))
		return 0;

	if (IS_GEN6(dev))
		gen6_ppgtt_enable(dev);
	else if (IS_GEN7(dev))
		gen7_ppgtt_enable(dev);
	else if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_enable(dev);
	else
		MISSING_CASE(INTEL_INFO(dev)->gen);

	return 0;
}

int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
{
	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (i915.enable_execlists)
		return 0;

	if (!ppgtt)
		return 0;

	return ppgtt->switch_mm(ppgtt, req);
}

struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_ppgtt_init(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	ppgtt->file_priv = fpriv;

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}

void i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));

	list_del(&ppgtt->base.global_link);
	drm_mm_takedown(&ppgtt->base.mm);

	ppgtt->base.cleanup(&ppgtt->base);
	kfree(ppgtt);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
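	 * The workaround itself lives in do_idling()/undo_idling() below,
	 * which force the GPU idle around object unmapping (see
	 * i915_gem_gtt_finish_object()).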
2251 */ 2252 if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped) 2253 return true; 2254 #endif 2255 return false; 2256 } 2257 2258 static bool do_idling(struct drm_i915_private *dev_priv) 2259 { 2260 bool ret = dev_priv->mm.interruptible; 2261 2262 if (unlikely(dev_priv->gtt.do_idle_maps)) { 2263 dev_priv->mm.interruptible = false; 2264 if (i915_gpu_idle(dev_priv->dev)) { 2265 DRM_ERROR("Couldn't idle GPU\n"); 2266 /* Wait a bit, in hopes it avoids the hang */ 2267 udelay(10); 2268 } 2269 } 2270 2271 return ret; 2272 } 2273 2274 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) 2275 { 2276 if (unlikely(dev_priv->gtt.do_idle_maps)) 2277 dev_priv->mm.interruptible = interruptible; 2278 } 2279 2280 void i915_check_and_clear_faults(struct drm_device *dev) 2281 { 2282 struct drm_i915_private *dev_priv = dev->dev_private; 2283 struct intel_engine_cs *ring; 2284 int i; 2285 2286 if (INTEL_INFO(dev)->gen < 6) 2287 return; 2288 2289 for_each_ring(ring, dev_priv, i) { 2290 u32 fault_reg; 2291 fault_reg = I915_READ(RING_FAULT_REG(ring)); 2292 if (fault_reg & RING_FAULT_VALID) { 2293 #if 0 2294 DRM_DEBUG_DRIVER("Unexpected fault\n" 2295 "\tAddr: 0x%08lx\n" 2296 "\tAddress space: %s\n" 2297 "\tSource ID: %d\n" 2298 "\tType: %d\n", 2299 fault_reg & PAGE_MASK, 2300 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", 2301 RING_FAULT_SRCID(fault_reg), 2302 RING_FAULT_FAULT_TYPE(fault_reg)); 2303 #endif 2304 I915_WRITE(RING_FAULT_REG(ring), 2305 fault_reg & ~RING_FAULT_VALID); 2306 } 2307 } 2308 POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); 2309 } 2310 2311 static void i915_ggtt_flush(struct drm_i915_private *dev_priv) 2312 { 2313 if (INTEL_INFO(dev_priv->dev)->gen < 6) { 2314 intel_gtt_chipset_flush(); 2315 } else { 2316 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 2317 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2318 } 2319 } 2320 2321 void i915_gem_suspend_gtt_mappings(struct drm_device *dev) 2322 { 2323 struct drm_i915_private *dev_priv = dev->dev_private; 2324 2325 /* Don't bother messing with faults pre GEN6 as we have little 2326 * documentation supporting that it's a good idea. 
2327 */ 2328 if (INTEL_INFO(dev)->gen < 6) 2329 return; 2330 2331 i915_check_and_clear_faults(dev); 2332 2333 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 2334 dev_priv->gtt.base.start, 2335 dev_priv->gtt.base.total, 2336 true); 2337 2338 i915_ggtt_flush(dev_priv); 2339 } 2340 2341 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) 2342 { 2343 if (!dma_map_sg(obj->base.dev->pdev->dev, 2344 obj->pages->sgl, obj->pages->nents, 2345 PCI_DMA_BIDIRECTIONAL)) 2346 return -ENOSPC; 2347 2348 return 0; 2349 } 2350 2351 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) 2352 { 2353 #ifdef writeq 2354 writeq(pte, addr); 2355 #else 2356 iowrite32((u32)pte, addr); 2357 iowrite32(pte >> 32, addr + 4); 2358 #endif 2359 } 2360 2361 static void gen8_ggtt_insert_entries(struct i915_address_space *vm, 2362 struct sg_table *st, 2363 uint64_t start, 2364 enum i915_cache_level level, u32 unused) 2365 { 2366 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2367 unsigned first_entry = start >> PAGE_SHIFT; 2368 gen8_pte_t __iomem *gtt_entries = 2369 (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 2370 int i = 0; 2371 struct sg_page_iter sg_iter; 2372 dma_addr_t addr = 0; /* shut up gcc */ 2373 2374 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 2375 addr = sg_dma_address(sg_iter.sg) + 2376 (sg_iter.sg_pgoffset << PAGE_SHIFT); 2377 gen8_set_pte(>t_entries[i], 2378 gen8_pte_encode(addr, level, true)); 2379 i++; 2380 } 2381 2382 /* 2383 * XXX: This serves as a posting read to make sure that the PTE has 2384 * actually been updated. There is some concern that even though 2385 * registers and PTEs are within the same BAR that they are potentially 2386 * of NUMA access patterns. Therefore, even with the way we assume 2387 * hardware should work, we must keep this posting read for paranoia. 2388 */ 2389 if (i != 0) 2390 WARN_ON(readq(>t_entries[i-1]) 2391 != gen8_pte_encode(addr, level, true)); 2392 2393 /* This next bit makes the above posting read even more important. We 2394 * want to flush the TLBs only after we're certain all the PTE updates 2395 * have finished. 2396 */ 2397 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 2398 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2399 } 2400 2401 /* 2402 * Binds an object into the global gtt with the specified cache level. The object 2403 * will be accessible to the GPU via commands whose operands reference offsets 2404 * within the global GTT as well as accessible by the GPU through the GMADR 2405 * mapped BAR (dev_priv->mm.gtt->gtt). 2406 */ 2407 static void gen6_ggtt_insert_entries(struct i915_address_space *vm, 2408 struct sg_table *st, 2409 uint64_t start, 2410 enum i915_cache_level level, u32 flags) 2411 { 2412 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2413 unsigned first_entry = start >> PAGE_SHIFT; 2414 gen6_pte_t __iomem *gtt_entries = 2415 (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 2416 int i = 0; 2417 struct sg_page_iter sg_iter; 2418 dma_addr_t addr = 0; 2419 2420 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 2421 addr = sg_page_iter_dma_address(&sg_iter); 2422 iowrite32(vm->pte_encode(addr, level, true, flags), >t_entries[i]); 2423 i++; 2424 } 2425 2426 /* XXX: This serves as a posting read to make sure that the PTE has 2427 * actually been updated. There is some concern that even though 2428 * registers and PTEs are within the same BAR that they are potentially 2429 * of NUMA access patterns. 
Therefore, even with the way we assume 2430 * hardware should work, we must keep this posting read for paranoia. 2431 */ 2432 if (i != 0) { 2433 unsigned long gtt = readl(>t_entries[i-1]); 2434 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags)); 2435 } 2436 2437 /* This next bit makes the above posting read even more important. We 2438 * want to flush the TLBs only after we're certain all the PTE updates 2439 * have finished. 2440 */ 2441 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); 2442 POSTING_READ(GFX_FLSH_CNTL_GEN6); 2443 } 2444 2445 static void gen8_ggtt_clear_range(struct i915_address_space *vm, 2446 uint64_t start, 2447 uint64_t length, 2448 bool use_scratch) 2449 { 2450 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2451 unsigned first_entry = start >> PAGE_SHIFT; 2452 unsigned num_entries = length >> PAGE_SHIFT; 2453 gen8_pte_t scratch_pte, __iomem *gtt_base = 2454 (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 2455 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 2456 int i; 2457 2458 if (WARN(num_entries > max_entries, 2459 "First entry = %d; Num entries = %d (max=%d)\n", 2460 first_entry, num_entries, max_entries)) 2461 num_entries = max_entries; 2462 2463 scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), 2464 I915_CACHE_LLC, 2465 use_scratch); 2466 for (i = 0; i < num_entries; i++) 2467 gen8_set_pte(>t_base[i], scratch_pte); 2468 readl(gtt_base); 2469 } 2470 2471 static void gen6_ggtt_clear_range(struct i915_address_space *vm, 2472 uint64_t start, 2473 uint64_t length, 2474 bool use_scratch) 2475 { 2476 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2477 unsigned first_entry = start >> PAGE_SHIFT; 2478 unsigned num_entries = length >> PAGE_SHIFT; 2479 gen6_pte_t scratch_pte, __iomem *gtt_base = 2480 (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 2481 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 2482 int i; 2483 2484 if (WARN(num_entries > max_entries, 2485 "First entry = %d; Num entries = %d (max=%d)\n", 2486 first_entry, num_entries, max_entries)) 2487 num_entries = max_entries; 2488 2489 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), 2490 I915_CACHE_LLC, use_scratch, 0); 2491 2492 for (i = 0; i < num_entries; i++) 2493 iowrite32(scratch_pte, >t_base[i]); 2494 readl(gtt_base); 2495 } 2496 2497 static void i915_ggtt_insert_entries(struct i915_address_space *vm, 2498 struct sg_table *pages, 2499 uint64_t start, 2500 enum i915_cache_level cache_level, u32 unused) 2501 { 2502 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
2503 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 2504 2505 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags); 2506 2507 } 2508 2509 static void i915_ggtt_clear_range(struct i915_address_space *vm, 2510 uint64_t start, 2511 uint64_t length, 2512 bool unused) 2513 { 2514 unsigned first_entry = start >> PAGE_SHIFT; 2515 unsigned num_entries = length >> PAGE_SHIFT; 2516 intel_gtt_clear_range(first_entry, num_entries); 2517 } 2518 2519 static int ggtt_bind_vma(struct i915_vma *vma, 2520 enum i915_cache_level cache_level, 2521 u32 flags) 2522 { 2523 struct drm_i915_gem_object *obj = vma->obj; 2524 u32 pte_flags = 0; 2525 int ret; 2526 2527 ret = i915_get_ggtt_vma_pages(vma); 2528 if (ret) 2529 return ret; 2530 2531 /* Currently applicable only to VLV */ 2532 if (obj->gt_ro) 2533 pte_flags |= PTE_READ_ONLY; 2534 2535 vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages, 2536 vma->node.start, 2537 cache_level, pte_flags); 2538 2539 /* 2540 * Without aliasing PPGTT there's no difference between 2541 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally 2542 * upgrade to both bound if we bind either to avoid double-binding. 2543 */ 2544 vma->bound |= GLOBAL_BIND | LOCAL_BIND; 2545 2546 return 0; 2547 } 2548 2549 static int aliasing_gtt_bind_vma(struct i915_vma *vma, 2550 enum i915_cache_level cache_level, 2551 u32 flags) 2552 { 2553 struct drm_device *dev = vma->vm->dev; 2554 struct drm_i915_private *dev_priv = dev->dev_private; 2555 struct drm_i915_gem_object *obj = vma->obj; 2556 struct sg_table *pages = obj->pages; 2557 u32 pte_flags = 0; 2558 int ret; 2559 2560 ret = i915_get_ggtt_vma_pages(vma); 2561 if (ret) 2562 return ret; 2563 pages = vma->ggtt_view.pages; 2564 2565 /* Currently applicable only to VLV */ 2566 if (obj->gt_ro) 2567 pte_flags |= PTE_READ_ONLY; 2568 2569 2570 if (flags & GLOBAL_BIND) { 2571 vma->vm->insert_entries(vma->vm, pages, 2572 vma->node.start, 2573 cache_level, pte_flags); 2574 } 2575 2576 if (flags & LOCAL_BIND) { 2577 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 2578 appgtt->base.insert_entries(&appgtt->base, pages, 2579 vma->node.start, 2580 cache_level, pte_flags); 2581 } 2582 2583 return 0; 2584 } 2585 2586 static void ggtt_unbind_vma(struct i915_vma *vma) 2587 { 2588 struct drm_device *dev = vma->vm->dev; 2589 struct drm_i915_private *dev_priv = dev->dev_private; 2590 struct drm_i915_gem_object *obj = vma->obj; 2591 const uint64_t size = min_t(uint64_t, 2592 obj->base.size, 2593 vma->node.size); 2594 2595 if (vma->bound & GLOBAL_BIND) { 2596 vma->vm->clear_range(vma->vm, 2597 vma->node.start, 2598 size, 2599 true); 2600 } 2601 2602 if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) { 2603 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 2604 2605 appgtt->base.clear_range(&appgtt->base, 2606 vma->node.start, 2607 size, 2608 true); 2609 } 2610 } 2611 2612 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) 2613 { 2614 struct drm_device *dev = obj->base.dev; 2615 struct drm_i915_private *dev_priv = dev->dev_private; 2616 bool interruptible; 2617 2618 interruptible = do_idling(dev_priv); 2619 2620 dma_unmap_sg(dev->pdev->dev, obj->pages->sgl, obj->pages->nents, 2621 PCI_DMA_BIDIRECTIONAL); 2622 2623 undo_idling(dev_priv, interruptible); 2624 } 2625 2626 static void i915_gtt_color_adjust(struct drm_mm_node *node, 2627 unsigned long color, 2628 u64 *start, 2629 u64 *end) 2630 { 2631 if (node->color != color) 2632 *start += 4096; 2633 2634 if (!list_empty(&node->node_list)) { 2635 node = 
list_entry(node->node_list.next, 2636 struct drm_mm_node, 2637 node_list); 2638 if (node->allocated && node->color != color) 2639 *end -= 4096; 2640 } 2641 } 2642 2643 static int i915_gem_setup_global_gtt(struct drm_device *dev, 2644 u64 start, 2645 u64 mappable_end, 2646 u64 end) 2647 { 2648 /* Let GEM Manage all of the aperture. 2649 * 2650 * However, leave one page at the end still bound to the scratch page. 2651 * There are a number of places where the hardware apparently prefetches 2652 * past the end of the object, and we've seen multiple hangs with the 2653 * GPU head pointer stuck in a batchbuffer bound at the last page of the 2654 * aperture. One page should be enough to keep any prefetching inside 2655 * of the aperture. 2656 */ 2657 struct drm_i915_private *dev_priv = dev->dev_private; 2658 struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; 2659 unsigned long mappable; 2660 int error; 2661 struct drm_mm_node *entry; 2662 struct drm_i915_gem_object *obj; 2663 unsigned long hole_start, hole_end; 2664 int ret; 2665 2666 mappable = min(end, mappable_end) - start; 2667 BUG_ON(mappable_end > end); 2668 2669 ggtt_vm->start = start; 2670 2671 /* Subtract the guard page before address space initialization to 2672 * shrink the range used by drm_mm */ 2673 ggtt_vm->total = end - start - PAGE_SIZE; 2674 i915_address_space_init(ggtt_vm, dev_priv); 2675 ggtt_vm->total += PAGE_SIZE; 2676 2677 if (intel_vgpu_active(dev)) { 2678 ret = intel_vgt_balloon(dev); 2679 if (ret) 2680 return ret; 2681 } 2682 2683 if (!HAS_LLC(dev)) 2684 ggtt_vm->mm.color_adjust = i915_gtt_color_adjust; 2685 2686 /* Mark any preallocated objects as occupied */ 2687 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 2688 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); 2689 2690 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", 2691 i915_gem_obj_ggtt_offset(obj), obj->base.size); 2692 2693 WARN_ON(i915_gem_obj_ggtt_bound(obj)); 2694 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node); 2695 if (ret) { 2696 DRM_DEBUG_KMS("Reservation failed: %i\n", ret); 2697 return ret; 2698 } 2699 vma->bound |= GLOBAL_BIND; 2700 __i915_vma_set_map_and_fenceable(vma); 2701 list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); 2702 } 2703 2704 /* Clear any non-preallocated blocks */ 2705 drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { 2706 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 2707 hole_start, hole_end); 2708 ggtt_vm->clear_range(ggtt_vm, hole_start, 2709 hole_end - hole_start, true); 2710 } 2711 2712 #ifdef __DragonFly__ 2713 device_printf(dev->dev, 2714 "taking over the fictitious range 0x%lx-0x%lx\n", 2715 dev_priv->gtt.mappable_base + start, dev_priv->gtt.mappable_base + start + mappable); 2716 error = -vm_phys_fictitious_reg_range(dev_priv->gtt.mappable_base + start, 2717 dev_priv->gtt.mappable_base + start + mappable, VM_MEMATTR_WRITE_COMBINING); 2718 #endif 2719 2720 /* And finally clear the reserved guard page */ 2721 ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true); 2722 2723 if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) { 2724 struct i915_hw_ppgtt *ppgtt; 2725 2726 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 2727 if (!ppgtt) 2728 return -ENOMEM; 2729 2730 ret = __hw_ppgtt_init(dev, ppgtt); 2731 if (ret) { 2732 ppgtt->base.cleanup(&ppgtt->base); 2733 kfree(ppgtt); 2734 return ret; 2735 } 2736 2737 if (ppgtt->base.allocate_va_range) 2738 ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0, 2739 ppgtt->base.total); 2740 if (ret) { 2741 
ppgtt->base.cleanup(&ppgtt->base); 2742 kfree(ppgtt); 2743 return ret; 2744 } 2745 2746 ppgtt->base.clear_range(&ppgtt->base, 2747 ppgtt->base.start, 2748 ppgtt->base.total, 2749 true); 2750 2751 dev_priv->mm.aliasing_ppgtt = ppgtt; 2752 WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma); 2753 dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma; 2754 } 2755 2756 return 0; 2757 } 2758 2759 void i915_gem_init_global_gtt(struct drm_device *dev) 2760 { 2761 struct drm_i915_private *dev_priv = dev->dev_private; 2762 u64 gtt_size, mappable_size; 2763 2764 gtt_size = dev_priv->gtt.base.total; 2765 mappable_size = dev_priv->gtt.mappable_end; 2766 2767 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 2768 } 2769 2770 void i915_global_gtt_cleanup(struct drm_device *dev) 2771 { 2772 struct drm_i915_private *dev_priv = dev->dev_private; 2773 struct i915_address_space *vm = &dev_priv->gtt.base; 2774 2775 if (dev_priv->mm.aliasing_ppgtt) { 2776 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2777 2778 ppgtt->base.cleanup(&ppgtt->base); 2779 } 2780 2781 if (drm_mm_initialized(&vm->mm)) { 2782 if (intel_vgpu_active(dev)) 2783 intel_vgt_deballoon(); 2784 2785 drm_mm_takedown(&vm->mm); 2786 list_del(&vm->global_link); 2787 } 2788 2789 vm->cleanup(vm); 2790 } 2791 2792 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) 2793 { 2794 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; 2795 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; 2796 return snb_gmch_ctl << 20; 2797 } 2798 2799 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) 2800 { 2801 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; 2802 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; 2803 if (bdw_gmch_ctl) 2804 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 2805 2806 #ifdef CONFIG_X86_32 2807 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */ 2808 if (bdw_gmch_ctl > 4) 2809 bdw_gmch_ctl = 4; 2810 #endif 2811 2812 return bdw_gmch_ctl << 20; 2813 } 2814 2815 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) 2816 { 2817 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; 2818 gmch_ctrl &= SNB_GMCH_GGMS_MASK; 2819 2820 if (gmch_ctrl) 2821 return 1 << (20 + gmch_ctrl); 2822 2823 return 0; 2824 } 2825 2826 static size_t gen6_get_stolen_size(u16 snb_gmch_ctl) 2827 { 2828 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; 2829 snb_gmch_ctl &= SNB_GMCH_GMS_MASK; 2830 return snb_gmch_ctl << 25; /* 32 MB units */ 2831 } 2832 2833 static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) 2834 { 2835 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; 2836 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK; 2837 return bdw_gmch_ctl << 25; /* 32 MB units */ 2838 } 2839 2840 static size_t chv_get_stolen_size(u16 gmch_ctrl) 2841 { 2842 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; 2843 gmch_ctrl &= SNB_GMCH_GMS_MASK; 2844 2845 /* 2846 * 0x0 to 0x10: 32MB increments starting at 0MB 2847 * 0x11 to 0x16: 4MB increments starting at 8MB 2848 * 0x17 to 0x1d: 4MB increments start at 36MB 2849 */ 2850 if (gmch_ctrl < 0x11) 2851 return gmch_ctrl << 25; 2852 else if (gmch_ctrl < 0x17) 2853 return (gmch_ctrl - 0x11 + 2) << 22; 2854 else 2855 return (gmch_ctrl - 0x17 + 9) << 22; 2856 } 2857 2858 static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) 2859 { 2860 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; 2861 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK; 2862 2863 if (gen9_gmch_ctl < 0xf0) 2864 return gen9_gmch_ctl << 25; /* 32 MB units */ 2865 else 2866 /* 4MB increments starting at 0xf0 for 4MB */ 2867 return (gen9_gmch_ctl - 0xf0 + 1) << 22; 2868 } 2869 2870 static int ggtt_probe_common(struct drm_device *dev, 2871 size_t gtt_size) 2872 { 2873 struct 
drm_i915_private *dev_priv = dev->dev_private; 2874 struct i915_page_scratch *scratch_page; 2875 phys_addr_t gtt_phys_addr; 2876 2877 /* For Modern GENs the PTEs and register space are split in the BAR */ 2878 gtt_phys_addr = pci_resource_start(dev->pdev, 0) + 2879 (pci_resource_len(dev->pdev, 0) / 2); 2880 2881 /* 2882 * On BXT writes larger than 64 bit to the GTT pagetable range will be 2883 * dropped. For WC mappings in general we have 64 byte burst writes 2884 * when the WC buffer is flushed, so we can't use it, but have to 2885 * resort to an uncached mapping. The WC issue is easily caught by the 2886 * readback check when writing GTT PTE entries. 2887 */ 2888 if (IS_BROXTON(dev)) 2889 dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size); 2890 else 2891 dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size); 2892 if (!dev_priv->gtt.gsm) { 2893 DRM_ERROR("Failed to map the gtt page table\n"); 2894 return -ENOMEM; 2895 } 2896 2897 scratch_page = alloc_scratch_page(dev); 2898 if (IS_ERR(scratch_page)) { 2899 DRM_ERROR("Scratch setup failed\n"); 2900 /* iounmap will also get called at remove, but meh */ 2901 iounmap(dev_priv->gtt.gsm); 2902 return PTR_ERR(scratch_page); 2903 } 2904 2905 dev_priv->gtt.base.scratch_page = scratch_page; 2906 2907 return 0; 2908 } 2909 2910 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability 2911 * bits. When using advanced contexts each context stores its own PAT, but 2912 * writing this data shouldn't be harmful even in those cases. */ 2913 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) 2914 { 2915 uint64_t pat; 2916 2917 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ 2918 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ 2919 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ 2920 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ 2921 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | 2922 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | 2923 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | 2924 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 2925 2926 if (!USES_PPGTT(dev_priv->dev)) 2927 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, 2928 * so RTL will always use the value corresponding to 2929 * pat_sel = 000". 2930 * So let's disable cache for GGTT to avoid screen corruptions. 2931 * MOCS still can be used though. 2932 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work 2933 * before this patch, i.e. the same uncached + snooping access 2934 * like on gen6/7 seems to be in effect. 2935 * - So this just fixes blitter/render access. Again it looks 2936 * like it's not just uncached access, but uncached + snooping. 2937 * So we can still hold onto all our assumptions wrt cpu 2938 * clflushing on LLC machines. 2939 */ 2940 pat = GEN8_PPAT(0, GEN8_PPAT_UC); 2941 2942 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b 2943 * write would work. */ 2944 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); 2945 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); 2946 } 2947 2948 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) 2949 { 2950 uint64_t pat; 2951 2952 /* 2953 * Map WB on BDW to snooped on CHV. 2954 * 2955 * Only the snoop bit has meaning for CHV, the rest is 2956 * ignored. 
2957 * 2958 * The hardware will never snoop for certain types of accesses: 2959 * - CPU GTT (GMADR->GGTT->no snoop->memory) 2960 * - PPGTT page tables 2961 * - some other special cycles 2962 * 2963 * As with BDW, we also need to consider the following for GT accesses: 2964 * "For GGTT, there is NO pat_sel[2:0] from the entry, 2965 * so RTL will always use the value corresponding to 2966 * pat_sel = 000". 2967 * Which means we must set the snoop bit in PAT entry 0 2968 * in order to keep the global status page working. 2969 */ 2970 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | 2971 GEN8_PPAT(1, 0) | 2972 GEN8_PPAT(2, 0) | 2973 GEN8_PPAT(3, 0) | 2974 GEN8_PPAT(4, CHV_PPAT_SNOOP) | 2975 GEN8_PPAT(5, CHV_PPAT_SNOOP) | 2976 GEN8_PPAT(6, CHV_PPAT_SNOOP) | 2977 GEN8_PPAT(7, CHV_PPAT_SNOOP); 2978 2979 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); 2980 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); 2981 } 2982 2983 static int gen8_gmch_probe(struct drm_device *dev, 2984 u64 *gtt_total, 2985 size_t *stolen, 2986 phys_addr_t *mappable_base, 2987 u64 *mappable_end) 2988 { 2989 struct drm_i915_private *dev_priv = dev->dev_private; 2990 u64 gtt_size; 2991 u16 snb_gmch_ctl; 2992 int ret; 2993 2994 /* TODO: We're not aware of mappable constraints on gen8 yet */ 2995 *mappable_base = pci_resource_start(dev->pdev, 2); 2996 *mappable_end = pci_resource_len(dev->pdev, 2); 2997 2998 #if 0 2999 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39))) 3000 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39)); 3001 #endif 3002 3003 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 3004 3005 if (INTEL_INFO(dev)->gen >= 9) { 3006 *stolen = gen9_get_stolen_size(snb_gmch_ctl); 3007 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); 3008 } else if (IS_CHERRYVIEW(dev)) { 3009 *stolen = chv_get_stolen_size(snb_gmch_ctl); 3010 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl); 3011 } else { 3012 *stolen = gen8_get_stolen_size(snb_gmch_ctl); 3013 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); 3014 } 3015 3016 *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT; 3017 3018 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) 3019 chv_setup_private_ppat(dev_priv); 3020 else 3021 bdw_setup_private_ppat(dev_priv); 3022 3023 ret = ggtt_probe_common(dev, gtt_size); 3024 3025 dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range; 3026 dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries; 3027 dev_priv->gtt.base.bind_vma = ggtt_bind_vma; 3028 dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; 3029 3030 return ret; 3031 } 3032 3033 static int gen6_gmch_probe(struct drm_device *dev, 3034 u64 *gtt_total, 3035 size_t *stolen, 3036 phys_addr_t *mappable_base, 3037 u64 *mappable_end) 3038 { 3039 struct drm_i915_private *dev_priv = dev->dev_private; 3040 unsigned int gtt_size; 3041 u16 snb_gmch_ctl; 3042 int ret; 3043 3044 *mappable_base = pci_resource_start(dev->pdev, 2); 3045 *mappable_end = pci_resource_len(dev->pdev, 2); 3046 3047 /* 64/512MB is the current min/max we actually know of, but this is just 3048 * a coarse sanity check. 
3049 */ 3050 if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) { 3051 DRM_ERROR("Unknown GMADR size (%lx)\n", 3052 dev_priv->gtt.mappable_end); 3053 return -ENXIO; 3054 } 3055 3056 #if 0 3057 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) 3058 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); 3059 #endif 3060 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 3061 3062 *stolen = gen6_get_stolen_size(snb_gmch_ctl); 3063 3064 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 3065 *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT; 3066 3067 ret = ggtt_probe_common(dev, gtt_size); 3068 3069 dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; 3070 dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; 3071 dev_priv->gtt.base.bind_vma = ggtt_bind_vma; 3072 dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; 3073 3074 return ret; 3075 } 3076 3077 static void gen6_gmch_remove(struct i915_address_space *vm) 3078 { 3079 3080 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); 3081 3082 iounmap(gtt->gsm); 3083 free_scratch_page(vm->dev, vm->scratch_page); 3084 } 3085 3086 static int i915_gmch_probe(struct drm_device *dev, 3087 u64 *gtt_total, 3088 size_t *stolen, 3089 phys_addr_t *mappable_base, 3090 u64 *mappable_end) 3091 { 3092 struct drm_i915_private *dev_priv = dev->dev_private; 3093 #if 0 3094 int ret; 3095 3096 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); 3097 if (!ret) { 3098 DRM_ERROR("failed to set up gmch\n"); 3099 return -EIO; 3100 } 3101 #endif 3102 3103 intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); 3104 3105 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); 3106 dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; 3107 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; 3108 dev_priv->gtt.base.bind_vma = ggtt_bind_vma; 3109 dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; 3110 3111 if (unlikely(dev_priv->gtt.do_idle_maps)) 3112 DRM_INFO("applying Ironlake quirks for intel_iommu\n"); 3113 3114 return 0; 3115 } 3116 3117 static void i915_gmch_remove(struct i915_address_space *vm) 3118 { 3119 intel_gmch_remove(); 3120 } 3121 3122 int i915_gem_gtt_init(struct drm_device *dev) 3123 { 3124 struct drm_i915_private *dev_priv = dev->dev_private; 3125 struct i915_gtt *gtt = &dev_priv->gtt; 3126 int ret; 3127 3128 if (INTEL_INFO(dev)->gen <= 5) { 3129 gtt->gtt_probe = i915_gmch_probe; 3130 gtt->base.cleanup = i915_gmch_remove; 3131 } else if (INTEL_INFO(dev)->gen < 8) { 3132 gtt->gtt_probe = gen6_gmch_probe; 3133 gtt->base.cleanup = gen6_gmch_remove; 3134 if (IS_HASWELL(dev) && dev_priv->ellc_size) 3135 gtt->base.pte_encode = iris_pte_encode; 3136 else if (IS_HASWELL(dev)) 3137 gtt->base.pte_encode = hsw_pte_encode; 3138 else if (IS_VALLEYVIEW(dev)) 3139 gtt->base.pte_encode = byt_pte_encode; 3140 else if (INTEL_INFO(dev)->gen >= 7) 3141 gtt->base.pte_encode = ivb_pte_encode; 3142 else 3143 gtt->base.pte_encode = snb_pte_encode; 3144 } else { 3145 dev_priv->gtt.gtt_probe = gen8_gmch_probe; 3146 dev_priv->gtt.base.cleanup = gen6_gmch_remove; 3147 } 3148 3149 gtt->base.dev = dev; 3150 3151 ret = gtt->gtt_probe(dev, >t->base.total, >t->stolen_size, 3152 >t->mappable_base, >t->mappable_end); 3153 if (ret) 3154 return ret; 3155 3156 /* GMADR is the PCI mmio aperture into the global GTT. 
*/ 3157 DRM_INFO("Memory usable by graphics device = %luM\n", 3158 gtt->base.total >> 20); 3159 DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); 3160 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); 3161 #ifdef CONFIG_INTEL_IOMMU 3162 if (intel_iommu_gfx_mapped) 3163 DRM_INFO("VT-d active for gfx access\n"); 3164 #endif 3165 /* 3166 * i915.enable_ppgtt is read-only, so do an early pass to validate the 3167 * user's requested state against the hardware/driver capabilities. We 3168 * do this now so that we can print out any log messages once rather 3169 * than every time we check intel_enable_ppgtt(). 3170 */ 3171 i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); 3172 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); 3173 3174 return 0; 3175 } 3176 3177 void i915_gem_restore_gtt_mappings(struct drm_device *dev) 3178 { 3179 struct drm_i915_private *dev_priv = dev->dev_private; 3180 struct drm_i915_gem_object *obj; 3181 struct i915_address_space *vm; 3182 struct i915_vma *vma; 3183 bool flush; 3184 3185 i915_check_and_clear_faults(dev); 3186 3187 /* First fill our portion of the GTT with scratch pages */ 3188 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 3189 dev_priv->gtt.base.start, 3190 dev_priv->gtt.base.total, 3191 true); 3192 3193 /* Cache flush objects bound into GGTT and rebind them. */ 3194 vm = &dev_priv->gtt.base; 3195 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 3196 flush = false; 3197 list_for_each_entry(vma, &obj->vma_list, vma_link) { 3198 if (vma->vm != vm) 3199 continue; 3200 3201 WARN_ON(i915_vma_bind(vma, obj->cache_level, 3202 PIN_UPDATE)); 3203 3204 flush = true; 3205 } 3206 3207 if (flush) 3208 i915_gem_clflush_object(obj, obj->pin_display); 3209 } 3210 3211 if (INTEL_INFO(dev)->gen >= 8) { 3212 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) 3213 chv_setup_private_ppat(dev_priv); 3214 else 3215 bdw_setup_private_ppat(dev_priv); 3216 3217 return; 3218 } 3219 3220 if (USES_PPGTT(dev)) { 3221 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 3222 /* TODO: Perhaps it shouldn't be gen6 specific */ 3223 3224 struct i915_hw_ppgtt *ppgtt = 3225 container_of(vm, struct i915_hw_ppgtt, 3226 base); 3227 3228 if (i915_is_ggtt(vm)) 3229 ppgtt = dev_priv->mm.aliasing_ppgtt; 3230 3231 gen6_write_page_range(dev_priv, &ppgtt->pd, 3232 0, ppgtt->base.total); 3233 } 3234 } 3235 3236 i915_ggtt_flush(dev_priv); 3237 } 3238 3239 static struct i915_vma * 3240 __i915_gem_vma_create(struct drm_i915_gem_object *obj, 3241 struct i915_address_space *vm, 3242 const struct i915_ggtt_view *ggtt_view) 3243 { 3244 struct i915_vma *vma; 3245 3246 if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) 3247 return ERR_PTR(-EINVAL); 3248 3249 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 3250 if (vma == NULL) 3251 return ERR_PTR(-ENOMEM); 3252 3253 INIT_LIST_HEAD(&vma->vma_link); 3254 INIT_LIST_HEAD(&vma->mm_list); 3255 INIT_LIST_HEAD(&vma->exec_list); 3256 vma->vm = vm; 3257 vma->obj = obj; 3258 3259 if (i915_is_ggtt(vm)) 3260 vma->ggtt_view = *ggtt_view; 3261 3262 list_add_tail(&vma->vma_link, &obj->vma_list); 3263 if (!i915_is_ggtt(vm)) 3264 i915_ppgtt_get(i915_vm_to_ppgtt(vm)); 3265 3266 return vma; 3267 } 3268 3269 struct i915_vma * 3270 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3271 struct i915_address_space *vm) 3272 { 3273 struct i915_vma *vma; 3274 3275 vma = i915_gem_obj_to_vma(obj, vm); 3276 if (!vma) 3277 vma = __i915_gem_vma_create(obj, vm, 3278 i915_is_ggtt(vm) ? 
&i915_ggtt_view_normal : NULL); 3279 3280 return vma; 3281 } 3282 3283 struct i915_vma * 3284 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, 3285 const struct i915_ggtt_view *view) 3286 { 3287 struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); 3288 struct i915_vma *vma; 3289 3290 if (WARN_ON(!view)) 3291 return ERR_PTR(-EINVAL); 3292 3293 vma = i915_gem_obj_to_ggtt_view(obj, view); 3294 3295 if (IS_ERR(vma)) 3296 return vma; 3297 3298 if (!vma) 3299 vma = __i915_gem_vma_create(obj, ggtt, view); 3300 3301 return vma; 3302 3303 } 3304 3305 static struct scatterlist * 3306 rotate_pages(dma_addr_t *in, unsigned int offset, 3307 unsigned int width, unsigned int height, 3308 struct sg_table *st, struct scatterlist *sg) 3309 { 3310 unsigned int column, row; 3311 unsigned int src_idx; 3312 3313 if (!sg) { 3314 st->nents = 0; 3315 sg = st->sgl; 3316 } 3317 3318 for (column = 0; column < width; column++) { 3319 src_idx = width * (height - 1) + column; 3320 for (row = 0; row < height; row++) { 3321 st->nents++; 3322 /* We don't need the pages, but need to initialize 3323 * the entries so the sg list can be happily traversed. 3324 * The only thing we need are DMA addresses. 3325 */ 3326 sg_set_page(sg, NULL, PAGE_SIZE, 0); 3327 sg_dma_address(sg) = in[offset + src_idx]; 3328 sg_dma_len(sg) = PAGE_SIZE; 3329 sg = sg_next(sg); 3330 src_idx -= width; 3331 } 3332 } 3333 3334 return sg; 3335 } 3336 3337 static struct sg_table * 3338 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, 3339 struct drm_i915_gem_object *obj) 3340 { 3341 struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; 3342 unsigned int size_pages = rot_info->size >> PAGE_SHIFT; 3343 unsigned int size_pages_uv; 3344 struct sg_page_iter sg_iter; 3345 unsigned long i; 3346 dma_addr_t *page_addr_list; 3347 struct sg_table *st; 3348 unsigned int uv_start_page; 3349 struct scatterlist *sg; 3350 int ret = -ENOMEM; 3351 3352 /* Allocate a temporary list of source pages for random access. */ 3353 page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE, 3354 sizeof(dma_addr_t)); 3355 if (!page_addr_list) 3356 return ERR_PTR(ret); 3357 3358 /* Account for UV plane with NV12. */ 3359 if (rot_info->pixel_format == DRM_FORMAT_NV12) 3360 size_pages_uv = rot_info->size_uv >> PAGE_SHIFT; 3361 else 3362 size_pages_uv = 0; 3363 3364 /* Allocate target SG list. */ 3365 st = kmalloc(sizeof(*st), M_DRM, M_WAITOK); 3366 if (!st) 3367 goto err_st_alloc; 3368 3369 ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL); 3370 if (ret) 3371 goto err_sg_alloc; 3372 3373 /* Populate source page list from the object. */ 3374 i = 0; 3375 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 3376 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); 3377 i++; 3378 } 3379 3380 /* Rotate the pages. */ 3381 sg = rotate_pages(page_addr_list, 0, 3382 rot_info->width_pages, rot_info->height_pages, 3383 st, NULL); 3384 3385 /* Append the UV plane if NV12. */ 3386 if (rot_info->pixel_format == DRM_FORMAT_NV12) { 3387 uv_start_page = size_pages; 3388 3389 /* Check for tile-row un-alignment. 
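		 * If the UV offset is not page aligned, the first UV page is
		 * shared with the tail of the Y plane, so the UV mapping
		 * below must start one page earlier.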
		 */
		if (offset_in_page(rot_info->uv_offset))
			uv_start_page--;

		rot_info->uv_start_page = uv_start_page;

		rotate_pages(page_addr_list, uv_start_page,
			     rot_info->width_pages_uv,
			     rot_info->height_pages_uv,
			     st, sg);
	}

	DRM_DEBUG_KMS(
		      "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
		      obj->base.size, rot_info->pitch, rot_info->height,
		      rot_info->pixel_format, rot_info->width_pages,
		      rot_info->height_pages, size_pages + size_pages_uv,
		      size_pages);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS(
		      "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
		      obj->base.size, ret, rot_info->pitch, rot_info->height,
		      rot_info->pixel_format, rot_info->width_pages,
		      rot_info->height_pages, size_pages + size_pages_uv,
		      size_pages);
	return ERR_PTR(ret);
}

static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter obj_sg_iter;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), M_DRM, M_WAITOK);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	sg = st->sgl;
	st->nents = 0;
	for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
		view->params.partial.offset)
	{
		if (st->nents >= view->params.partial.size)
			break;

		sg_set_page(sg, NULL, PAGE_SIZE, 0);
		sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
		sg_dma_len(sg) = PAGE_SIZE;

		sg = sg_next(sg);
		st->nents++;
	}

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret = 0;

	if (vma->ggtt_view.pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->ggtt_view.pages = vma->obj->pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->ggtt_view.pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
		vma->ggtt_view.pages =
			intel_partial_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->ggtt_view.pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->ggtt_view.pages)) {
		ret = PTR_ERR(vma->ggtt_view.pages);
		vma->ggtt_view.pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
3503 * @vma: VMA to map 3504 * @cache_level: mapping cache level 3505 * @flags: flags like global or local mapping 3506 * 3507 * DMA addresses are taken from the scatter-gather table of this object (or of 3508 * this VMA in case of non-default GGTT views) and PTE entries set up. 3509 * Note that DMA addresses are also the only part of the SG table we care about. 3510 */ 3511 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 3512 u32 flags) 3513 { 3514 int ret; 3515 u32 bind_flags; 3516 3517 if (WARN_ON(flags == 0)) 3518 return -EINVAL; 3519 3520 bind_flags = 0; 3521 if (flags & PIN_GLOBAL) 3522 bind_flags |= GLOBAL_BIND; 3523 if (flags & PIN_USER) 3524 bind_flags |= LOCAL_BIND; 3525 3526 if (flags & PIN_UPDATE) 3527 bind_flags |= vma->bound; 3528 else 3529 bind_flags &= ~vma->bound; 3530 3531 if (bind_flags == 0) 3532 return 0; 3533 3534 if (vma->bound == 0 && vma->vm->allocate_va_range) { 3535 trace_i915_va_alloc(vma->vm, 3536 vma->node.start, 3537 vma->node.size, 3538 VM_TO_TRACE_NAME(vma->vm)); 3539 3540 /* XXX: i915_vma_pin() will fix this +- hack */ 3541 vma->pin_count++; 3542 ret = vma->vm->allocate_va_range(vma->vm, 3543 vma->node.start, 3544 vma->node.size); 3545 vma->pin_count--; 3546 if (ret) 3547 return ret; 3548 } 3549 3550 ret = vma->vm->bind_vma(vma, cache_level, bind_flags); 3551 if (ret) 3552 return ret; 3553 3554 vma->bound |= bind_flags; 3555 3556 return 0; 3557 } 3558 3559 /** 3560 * i915_ggtt_view_size - Get the size of a GGTT view. 3561 * @obj: Object the view is of. 3562 * @view: The view in question. 3563 * 3564 * @return The size of the GGTT view in bytes. 3565 */ 3566 size_t 3567 i915_ggtt_view_size(struct drm_i915_gem_object *obj, 3568 const struct i915_ggtt_view *view) 3569 { 3570 if (view->type == I915_GGTT_VIEW_NORMAL) { 3571 return obj->base.size; 3572 } else if (view->type == I915_GGTT_VIEW_ROTATED) { 3573 return view->rotation_info.size; 3574 } else if (view->type == I915_GGTT_VIEW_PARTIAL) { 3575 return view->params.partial.size << PAGE_SHIFT; 3576 } else { 3577 WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type); 3578 return obj->base.size; 3579 } 3580 } 3581
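/*
 * Example (illustrative only): a partial view with params.partial.size == 16
 * covers 16 pages, so i915_ggtt_view_size() above reports
 * 16 << PAGE_SHIFT == 64KB, while a normal view of the same object always
 * reports obj->base.size.
 */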