/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
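/*
 * i915_gem_stolen_insert_node_in_range - allocate a range from stolen memory
 *
 * Carves @size bytes out of the stolen-memory drm_mm, restricted to the
 * [@start, @end) window and aligned to @alignment, storing the result in
 * @node. The allocation is serialised by the stolen_lock mutex. Returns
 * -ENODEV if stolen memory was never initialised, otherwise the drm_mm
 * insertion result.
 */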
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

#ifdef __DragonFly__
static
struct resource * devm_request_mem_region(struct device *dev,
    resource_size_t start, resource_size_t n, const char *name)
{
	static struct rman stolen_rman;
	struct resource *res;

	stolen_rman.rm_start = start;
	stolen_rman.rm_end = start + n;
	stolen_rman.rm_type = RMAN_ARRAY;
	stolen_rman.rm_descr = name;
	if (rman_init(&stolen_rman, -1))
		return NULL;

	if (rman_manage_region(&stolen_rman, stolen_rman.rm_start, stolen_rman.rm_end))
		return NULL;

	res = kmalloc(sizeof(*res), M_DRM, GFP_KERNEL);
	return res;
}
#endif	/* __DragonFly__ */

static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct resource *r;
	dma_addr_t base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
	 */
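	/*
	 * Worked example for the register-less platforms (values assumed
	 * purely for illustration): with DRB3 = 0x10 the top of memory is
	 * 16 * 32 MiB = 512 MiB; with a 1 MiB TSEG and 8 MiB of stolen
	 * memory the base works out to 512M - 1M - 8M = 0x1F700000.
	 */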
	base = 0;
	if (INTEL_GEN(dev_priv) >= 3) {
		u32 bsm;

		pci_read_config_dword(pdev, INTEL_BSM, &bsm);

		base = bsm & INTEL_BSM_MASK;
	} else if (IS_I865G(dev_priv)) {
		u32 tseg_size = 0;
		u16 toud = 0;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
					 I865_TOUD, &toud);

		base = (toud << 16) + tseg_size;
	} else if (IS_I85X(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I85X_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
					 I85X_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I845G(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev_priv)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	}

	if (base == 0 || add_overflows(base, ggtt->stolen_size))
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct {
			dma_addr_t start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev_priv))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

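		/*
		 * The two candidate ranges only differ if one of them was
		 * clipped against the GTT above; report the clipping so the
		 * reduced usable range shows up in the logs.
		 */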
		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			dma_addr_t end = base + ggtt->stolen_size - 1;

			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
				      &base, &end);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
					    ggtt->stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev_priv)) {
			dma_addr_t end = base + ggtt->stolen_size;

			DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
				  &base, &end);
			base = 0;
		}
	}

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}
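/*
 * The *_get_stolen_reserved() helpers below decode the per-generation
 * STOLEN_RESERVED register into the base and size of the portion of stolen
 * memory that the hardware/firmware keeps for its own use; that region must
 * be excluded from the range handed to the stolen allocator.
 */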
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and
	 * there's nothing reserved.
	 */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	dma_addr_t stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and
	 * there's nothing reserved.
	 */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}
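/*
 * i915_gem_init_stolen - set up the stolen memory allocator
 *
 * Locates the stolen memory region, carves out the firmware/hardware
 * reserved portion at its top, and initialises a drm_mm allocator over
 * whatever remains usable. Returns 0 even when stolen memory cannot be
 * used (vGPU active, DMAR enabled, no stolen memory found); in that case
 * the allocator is simply left uninitialised and later stolen allocations
 * fail with -ENODEV.
 */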
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	dma_addr_t reserved_base, stolen_top;
	u32 reserved_total, reserved_size;
	u32 stolen_usable_start;

	lockinit(&dev_priv->mm.stolen_lock, "i915msl", 0, LK_CANRECURSE);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
	reserved_base = 0;
	reserved_size = 0;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev_priv))
			g4x_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	default:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		dma_addr_t reserved_top = reserved_base + reserved_size;
		DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
			      &reserved_base, &reserved_top,
			      &dev_priv->mm.stolen_base, &stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	stolen_usable_start = 0;
	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8)
		stolen_usable_start = 4096;

	ggtt->stolen_usable_size =
		ggtt->stolen_size - reserved_total - stolen_usable_start;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
		    ggtt->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

static struct sg_table *
i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	return i915_pages_create_for_stolen(obj->base.dev,
					    obj->stolen->start,
					    obj->stolen->size);
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};
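/*
 * _i915_gem_object_create_stolen - wrap a stolen drm_mm_node in a GEM object
 *
 * The object has no shmem backing store and no struct pages; its contents
 * live at a fixed physical offset inside stolen memory, described to the
 * rest of GEM by the single-entry scatterlist built above. The pages are
 * pinned immediately since stolen memory cannot be swapped out.
 */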
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}
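/*
 * i915_gem_object_create_stolen_for_preallocated - wrap a fixed stolen range
 *
 * Used when taking over a region that firmware has already populated, most
 * notably the BIOS framebuffer: the caller supplies the exact stolen offset
 * (and, optionally, the GGTT offset the hardware is already scanning out
 * from), and the range is reserved rather than allocated so the existing
 * contents are preserved.
 */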
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);
	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
	obj->bind_count++;

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}