/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define KB(x) ((x) * 1024)
#define MB(x) (KB(x) * 1024)

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* See the comment at the drm_mm_init() call for more about this check.
	 * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
	 */
	if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0,
						    ggtt->stolen_usable_size);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at register BSM (0x5c) in the igfx configuration space. On a few
	 * (desktop) machines this is also mirrored in the bridge device at
	 * different locations, or in the MCHBAR.
	 *
	 * On 865 we just check the TOUD register.
	 *
	 * On 830/845/85x the stolen memory base isn't available in any
	 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
	 *
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		u32 bsm;

		pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);

		base = bsm & INTEL_BSM_MASK;
	} else if (IS_I865G(dev)) {
		u32 tseg_size = 0;
		u16 toud = 0;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I865_TOUD, &toud);

		base = (toud << 16) + tseg_size;
	} else if (IS_I85X(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I85X_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE)
			tseg_size = MB(1);

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
					 I85X_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_845G(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I845_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			switch (tmp & I845_TSEG_SIZE_MASK) {
			case I845_TSEG_SIZE_512K:
				tseg_size = KB(512);
				break;
			case I845_TSEG_SIZE_1M:
				tseg_size = MB(1);
				break;
			}
		}

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	} else if (IS_I830(dev)) {
		u32 tseg_size = 0;
		u32 tom;
		u8 tmp;

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I830_ESMRAMC, &tmp);

		if (tmp & TSEG_ENABLE) {
			if (tmp & I830_TSEG_SIZE_1M)
				tseg_size = MB(1);
			else
				tseg_size = KB(512);
		}

		pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
					 I830_DRB3, &tmp);
		tom = tmp * MB(32);

		base = tom - tseg_size - ggtt->stolen_size;
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32)ggtt->stolen_size - 1);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
#if 0
	r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    ggtt->stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)ggtt->stolen_size);
			base = 0;
		}
	}
#endif

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	unsigned long stolen_top = dev_priv->mm.stolen_base +
				   ggtt->stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	unsigned long stolen_top;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long reserved_total, reserved_base = 0, reserved_size;
	unsigned long stolen_top;

	lockinit(&dev_priv->mm.stolen_lock, "i915msl", 0, LK_CANRECURSE);

#ifdef __DragonFly__
	/* Stolen memory support is still incomplete */
	return 0;
#endif

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev))
			g4x_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	default:
		if (IS_BROADWELL(dev_priv) ||
		    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
			bdw_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		else
			gen8_get_stolen_reserved(dev_priv, &reserved_base,
						 &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
			      reserved_base, reserved_base + reserved_size,
			      dev_priv->mm.stolen_base, stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;

	/*
	 * Basic memrange allocator for stolen space.
	 *
	 * TODO: Notice that some platforms require us to not use the first page
	 * of the stolen memory but their BIOSes may still put the framebuffer
	 * on the first page. So we don't reserve this page for now because of
	 * that. Our current solution is to just prevent new nodes from being
	 * inserted on the first page - see the check we have at
	 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
	 * problem later.
	 */
	drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > ggtt->stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), M_DRM, M_WAITOK);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	if (obj->stolen) {
		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev->struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
		      stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
	    WARN_ON(stolen_offset & 4095))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->base.mm)) {
		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err;
		}

		vma->bound |= GLOBAL_BIND;
		__i915_vma_set_map_and_fenceable(vma);
		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
	}

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err:
	drm_gem_object_unreference(&obj->base);
	return NULL;
}
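
#if 0
/*
 * Illustrative sketch only, kept out of the build like the other #if 0
 * block above: one plausible way a caller could use the helpers in this
 * file to wrap a firmware-initialised framebuffer that already lives in
 * stolen memory. The example_* name and the offset parameters are
 * hypothetical; the real consumers of these helpers live in the
 * modesetting/fbcon code.
 */
static struct drm_i915_gem_object *
example_wrap_bios_framebuffer(struct drm_device *dev, u32 fb_stolen_offset,
			      u32 fb_gtt_offset, u32 fb_size)
{
	/* The offsets and size must be page-aligned; the helper itself
	 * enforces this. I915_GTT_OFFSET_NONE may be passed instead of a
	 * GTT offset when only the physical backing store is needed.
	 */
	return i915_gem_object_create_stolen_for_preallocated(dev,
							      fb_stolen_offset,
							      fb_gtt_offset,
							      fb_size);
}
#endif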