1 /* 2 * Copyright © 2008-2012 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Chris Wilson <chris@chris-wilson.co.uk> 26 * 27 */ 28 29 #include <drm/drmP.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 33 #define KB(x) ((x) * 1024) 34 #define MB(x) (KB(x) * 1024) 35 36 /* 37 * The BIOS typically reserves some of the system's memory for the exclusive 38 * use of the integrated graphics. This memory is no longer available for 39 * use by the OS and so the user finds that his system has less memory 40 * available than he put in. We refer to this memory as stolen. 41 * 42 * The BIOS will allocate its framebuffer from the stolen memory. Our 43 * goal is try to reuse that object for our own fbcon which must always 44 * be available for panics. Anything else we can reuse the stolen memory 45 * for is a boon. 46 */ 47 48 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 49 struct drm_mm_node *node, u64 size, 50 unsigned alignment, u64 start, u64 end) 51 { 52 int ret; 53 54 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 55 return -ENODEV; 56 57 /* See the comment at the drm_mm_init() call for more about this check. 58 * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete) 59 */ 60 if (start < 4096 && (IS_GEN8(dev_priv) || 61 IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0))) 62 start = 4096; 63 64 mutex_lock(&dev_priv->mm.stolen_lock); 65 ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size, 66 alignment, start, end, 67 DRM_MM_SEARCH_DEFAULT); 68 mutex_unlock(&dev_priv->mm.stolen_lock); 69 70 return ret; 71 } 72 73 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 74 struct drm_mm_node *node, u64 size, 75 unsigned alignment) 76 { 77 struct i915_ggtt *ggtt = &dev_priv->ggtt; 78 79 return i915_gem_stolen_insert_node_in_range(dev_priv, node, size, 80 alignment, 0, 81 ggtt->stolen_usable_size); 82 } 83 84 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 85 struct drm_mm_node *node) 86 { 87 mutex_lock(&dev_priv->mm.stolen_lock); 88 drm_mm_remove_node(node); 89 mutex_unlock(&dev_priv->mm.stolen_lock); 90 } 91 92 #ifdef __DragonFly__ 93 static 94 struct resource * devm_request_mem_region(struct device *dev, 95 resource_size_t start, resource_size_t n, const char *name) 96 { 97 static struct rman stolen_rman; 98 struct resource *res; 99 100 stolen_rman.rm_start = start; 101 stolen_rman.rm_end = start + n; 102 stolen_rman.rm_type = RMAN_ARRAY; 103 stolen_rman.rm_descr = name; 104 if (rman_init(&stolen_rman, -1)) 105 return NULL; 106 107 if (rman_manage_region(&stolen_rman, stolen_rman.rm_start, stolen_rman.rm_end)) 108 return NULL; 109 110 res = kmalloc(sizeof(*res), M_DRM, GFP_KERNEL); 111 return res; 112 } 113 #endif /* __DragonFly__ */ 114 115 static unsigned long i915_stolen_to_physical(struct drm_device *dev) 116 { 117 struct drm_i915_private *dev_priv = to_i915(dev); 118 struct pci_dev *pdev = dev_priv->drm.pdev; 119 struct i915_ggtt *ggtt = &dev_priv->ggtt; 120 struct resource *r; 121 u32 base; 122 123 /* Almost universally we can find the Graphics Base of Stolen Memory 124 * at register BSM (0x5c) in the igfx configuration space. On a few 125 * (desktop) machines this is also mirrored in the bridge device at 126 * different locations, or in the MCHBAR. 127 * 128 * On 865 we just check the TOUD register. 129 * 130 * On 830/845/85x the stolen memory base isn't available in any 131 * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size. 132 * 133 */ 134 base = 0; 135 if (INTEL_INFO(dev)->gen >= 3) { 136 u32 bsm; 137 138 pci_read_config_dword(pdev, INTEL_BSM, &bsm); 139 140 base = bsm & INTEL_BSM_MASK; 141 } else if (IS_I865G(dev_priv)) { 142 u32 tseg_size = 0; 143 u16 toud = 0; 144 u8 tmp; 145 146 pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0), 147 I845_ESMRAMC, &tmp); 148 149 if (tmp & TSEG_ENABLE) { 150 switch (tmp & I845_TSEG_SIZE_MASK) { 151 case I845_TSEG_SIZE_512K: 152 tseg_size = KB(512); 153 break; 154 case I845_TSEG_SIZE_1M: 155 tseg_size = MB(1); 156 break; 157 } 158 } 159 160 pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0), 161 I865_TOUD, &toud); 162 163 base = (toud << 16) + tseg_size; 164 } else if (IS_I85X(dev)) { 165 u32 tseg_size = 0; 166 u32 tom; 167 u8 tmp; 168 169 pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0), 170 I85X_ESMRAMC, &tmp); 171 172 if (tmp & TSEG_ENABLE) 173 tseg_size = MB(1); 174 175 pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1), 176 I85X_DRB3, &tmp); 177 tom = tmp * MB(32); 178 179 base = tom - tseg_size - ggtt->stolen_size; 180 } else if (IS_845G(dev_priv)) { 181 u32 tseg_size = 0; 182 u32 tom; 183 u8 tmp; 184 185 pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0), 186 I845_ESMRAMC, &tmp); 187 188 if (tmp & TSEG_ENABLE) { 189 switch (tmp & I845_TSEG_SIZE_MASK) { 190 case I845_TSEG_SIZE_512K: 191 tseg_size = KB(512); 192 break; 193 case I845_TSEG_SIZE_1M: 194 tseg_size = MB(1); 195 break; 196 } 197 } 198 199 pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0), 200 I830_DRB3, &tmp); 201 tom = tmp * MB(32); 202 203 base = tom - tseg_size - ggtt->stolen_size; 204 } else if (IS_I830(dev_priv)) { 205 u32 tseg_size = 0; 206 u32 tom; 207 u8 tmp; 208 209 pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0), 210 I830_ESMRAMC, &tmp); 211 212 if (tmp & TSEG_ENABLE) { 213 if (tmp & I830_TSEG_SIZE_1M) 214 tseg_size = MB(1); 215 else 216 tseg_size = KB(512); 217 } 218 219 pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0), 220 I830_DRB3, &tmp); 221 tom = tmp * MB(32); 222 223 base = tom - tseg_size - ggtt->stolen_size; 224 } 225 226 if (base == 0) 227 return 0; 228 229 /* make sure we don't clobber the GTT if it's within stolen memory */ 230 if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) && 231 !IS_G4X(dev_priv)) { 232 struct { 233 u32 start, end; 234 } stolen[2] = { 235 { .start = base, .end = base + ggtt->stolen_size, }, 236 { .start = base, .end = base + ggtt->stolen_size, }, 237 }; 238 u64 ggtt_start, ggtt_end; 239 240 ggtt_start = I915_READ(PGTBL_CTL); 241 if (IS_GEN4(dev_priv)) 242 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | 243 (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; 244 else 245 ggtt_start &= PGTBL_ADDRESS_LO_MASK; 246 ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4; 247 248 if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end) 249 stolen[0].end = ggtt_start; 250 if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end) 251 stolen[1].start = ggtt_end; 252 253 /* pick the larger of the two chunks */ 254 if (stolen[0].end - stolen[0].start > 255 stolen[1].end - stolen[1].start) { 256 base = stolen[0].start; 257 ggtt->stolen_size = stolen[0].end - stolen[0].start; 258 } else { 259 base = stolen[1].start; 260 ggtt->stolen_size = stolen[1].end - stolen[1].start; 261 } 262 263 if (stolen[0].start != stolen[1].start || 264 stolen[0].end != stolen[1].end) { 265 DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n", 266 (unsigned long long)ggtt_start, 267 (unsigned long long)ggtt_end - 1); 268 DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n", 269 base, base + (u32)ggtt->stolen_size - 1); 270 } 271 } 272 273 274 /* Verify that nothing else uses this physical address. Stolen 275 * memory should be reserved by the BIOS and hidden from the 276 * kernel. So if the region is already marked as busy, something 277 * is seriously wrong. 278 */ 279 r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size, 280 "Graphics Stolen Memory"); 281 if (r == NULL) { 282 /* 283 * One more attempt but this time requesting region from 284 * base + 1, as we have seen that this resolves the region 285 * conflict with the PCI Bus. 286 * This is a BIOS w/a: Some BIOS wrap stolen in the root 287 * PCI bus, but have an off-by-one error. Hence retry the 288 * reservation starting from 1 instead of 0. 289 */ 290 r = devm_request_mem_region(dev->dev, base + 1, 291 ggtt->stolen_size - 1, 292 "Graphics Stolen Memory"); 293 /* 294 * GEN3 firmware likes to smash pci bridges into the stolen 295 * range. Apparently this works. 296 */ 297 if (r == NULL && !IS_GEN3(dev_priv)) { 298 DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", 299 base, base + (uint32_t)ggtt->stolen_size); 300 base = 0; 301 } 302 } 303 304 return base; 305 } 306 307 void i915_gem_cleanup_stolen(struct drm_device *dev) 308 { 309 struct drm_i915_private *dev_priv = to_i915(dev); 310 311 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 312 return; 313 314 drm_mm_takedown(&dev_priv->mm.stolen); 315 } 316 317 static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, 318 unsigned long *base, unsigned long *size) 319 { 320 struct i915_ggtt *ggtt = &dev_priv->ggtt; 321 uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ? 322 CTG_STOLEN_RESERVED : 323 ELK_STOLEN_RESERVED); 324 unsigned long stolen_top = dev_priv->mm.stolen_base + 325 ggtt->stolen_size; 326 327 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; 328 329 WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); 330 331 /* On these platforms, the register doesn't have a size field, so the 332 * size is the distance between the base and the top of the stolen 333 * memory. We also have the genuine case where base is zero and there's 334 * nothing reserved. */ 335 if (*base == 0) 336 *size = 0; 337 else 338 *size = stolen_top - *base; 339 } 340 341 static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, 342 unsigned long *base, unsigned long *size) 343 { 344 uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); 345 346 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 347 348 switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { 349 case GEN6_STOLEN_RESERVED_1M: 350 *size = 1024 * 1024; 351 break; 352 case GEN6_STOLEN_RESERVED_512K: 353 *size = 512 * 1024; 354 break; 355 case GEN6_STOLEN_RESERVED_256K: 356 *size = 256 * 1024; 357 break; 358 case GEN6_STOLEN_RESERVED_128K: 359 *size = 128 * 1024; 360 break; 361 default: 362 *size = 1024 * 1024; 363 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); 364 } 365 } 366 367 static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv, 368 unsigned long *base, unsigned long *size) 369 { 370 uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); 371 372 *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; 373 374 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 375 case GEN7_STOLEN_RESERVED_1M: 376 *size = 1024 * 1024; 377 break; 378 case GEN7_STOLEN_RESERVED_256K: 379 *size = 256 * 1024; 380 break; 381 default: 382 *size = 1024 * 1024; 383 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 384 } 385 } 386 387 static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv, 388 unsigned long *base, unsigned long *size) 389 { 390 uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); 391 392 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 393 394 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 395 case GEN8_STOLEN_RESERVED_1M: 396 *size = 1024 * 1024; 397 break; 398 case GEN8_STOLEN_RESERVED_2M: 399 *size = 2 * 1024 * 1024; 400 break; 401 case GEN8_STOLEN_RESERVED_4M: 402 *size = 4 * 1024 * 1024; 403 break; 404 case GEN8_STOLEN_RESERVED_8M: 405 *size = 8 * 1024 * 1024; 406 break; 407 default: 408 *size = 8 * 1024 * 1024; 409 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 410 } 411 } 412 413 static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, 414 unsigned long *base, unsigned long *size) 415 { 416 struct i915_ggtt *ggtt = &dev_priv->ggtt; 417 uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); 418 unsigned long stolen_top; 419 420 stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size; 421 422 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 423 424 /* On these platforms, the register doesn't have a size field, so the 425 * size is the distance between the base and the top of the stolen 426 * memory. We also have the genuine case where base is zero and there's 427 * nothing reserved. */ 428 if (*base == 0) 429 *size = 0; 430 else 431 *size = stolen_top - *base; 432 } 433 434 int i915_gem_init_stolen(struct drm_device *dev) 435 { 436 struct drm_i915_private *dev_priv = to_i915(dev); 437 struct i915_ggtt *ggtt = &dev_priv->ggtt; 438 unsigned long reserved_total, reserved_base = 0, reserved_size; 439 unsigned long stolen_top; 440 441 lockinit(&dev_priv->mm.stolen_lock, "i915msl", 0, LK_CANRECURSE); 442 443 #ifdef CONFIG_INTEL_IOMMU 444 if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) { 445 DRM_INFO("DMAR active, disabling use of stolen memory\n"); 446 return 0; 447 } 448 #endif 449 450 if (ggtt->stolen_size == 0) 451 return 0; 452 453 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); 454 if (dev_priv->mm.stolen_base == 0) 455 return 0; 456 457 stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size; 458 459 switch (INTEL_INFO(dev_priv)->gen) { 460 case 2: 461 case 3: 462 break; 463 case 4: 464 if (IS_G4X(dev_priv)) 465 g4x_get_stolen_reserved(dev_priv, &reserved_base, 466 &reserved_size); 467 break; 468 case 5: 469 /* Assume the gen6 maximum for the older platforms. */ 470 reserved_size = 1024 * 1024; 471 reserved_base = stolen_top - reserved_size; 472 break; 473 case 6: 474 gen6_get_stolen_reserved(dev_priv, &reserved_base, 475 &reserved_size); 476 break; 477 case 7: 478 gen7_get_stolen_reserved(dev_priv, &reserved_base, 479 &reserved_size); 480 break; 481 default: 482 if (IS_BROADWELL(dev_priv) || 483 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 484 bdw_get_stolen_reserved(dev_priv, &reserved_base, 485 &reserved_size); 486 else 487 gen8_get_stolen_reserved(dev_priv, &reserved_base, 488 &reserved_size); 489 break; 490 } 491 492 /* It is possible for the reserved base to be zero, but the register 493 * field for size doesn't have a zero option. */ 494 if (reserved_base == 0) { 495 reserved_size = 0; 496 reserved_base = stolen_top; 497 } 498 499 if (reserved_base < dev_priv->mm.stolen_base || 500 reserved_base + reserved_size > stolen_top) { 501 DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n", 502 reserved_base, reserved_base + reserved_size, 503 dev_priv->mm.stolen_base, stolen_top); 504 return 0; 505 } 506 507 ggtt->stolen_reserved_base = reserved_base; 508 ggtt->stolen_reserved_size = reserved_size; 509 510 /* It is possible for the reserved area to end before the end of stolen 511 * memory, so just consider the start. */ 512 reserved_total = stolen_top - reserved_base; 513 514 DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n", 515 ggtt->stolen_size >> 10, 516 (ggtt->stolen_size - reserved_total) >> 10); 517 518 ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total; 519 520 /* 521 * Basic memrange allocator for stolen space. 522 * 523 * TODO: Notice that some platforms require us to not use the first page 524 * of the stolen memory but their BIOSes may still put the framebuffer 525 * on the first page. So we don't reserve this page for now because of 526 * that. Our current solution is to just prevent new nodes from being 527 * inserted on the first page - see the check we have at 528 * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon 529 * problem later. 530 */ 531 drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size); 532 533 return 0; 534 } 535 536 static struct sg_table * 537 i915_pages_create_for_stolen(struct drm_device *dev, 538 u32 offset, u32 size) 539 { 540 struct drm_i915_private *dev_priv = to_i915(dev); 541 struct i915_ggtt *ggtt = &dev_priv->ggtt; 542 struct sg_table *st; 543 struct scatterlist *sg; 544 545 DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); 546 BUG_ON(offset > ggtt->stolen_size - size); 547 548 /* We hide that we have no struct page backing our stolen object 549 * by wrapping the contiguous physical allocation with a fake 550 * dma mapping in a single scatterlist. 551 */ 552 553 st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL); 554 if (st == NULL) 555 return NULL; 556 557 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 558 kfree(st); 559 return NULL; 560 } 561 562 sg = st->sgl; 563 sg->offset = 0; 564 sg->length = size; 565 566 sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; 567 sg_dma_len(sg) = size; 568 569 return st; 570 } 571 572 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) 573 { 574 BUG(); 575 return -EINVAL; 576 } 577 578 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) 579 { 580 /* Should only be called during free */ 581 sg_free_table(obj->pages); 582 kfree(obj->pages); 583 } 584 585 586 static void 587 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 588 { 589 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 590 591 if (obj->stolen) { 592 i915_gem_stolen_remove_node(dev_priv, obj->stolen); 593 kfree(obj->stolen); 594 obj->stolen = NULL; 595 } 596 } 597 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { 598 .get_pages = i915_gem_object_get_pages_stolen, 599 .put_pages = i915_gem_object_put_pages_stolen, 600 .release = i915_gem_object_release_stolen, 601 }; 602 603 static struct drm_i915_gem_object * 604 _i915_gem_object_create_stolen(struct drm_device *dev, 605 struct drm_mm_node *stolen) 606 { 607 struct drm_i915_gem_object *obj; 608 609 obj = i915_gem_object_alloc(dev); 610 if (obj == NULL) 611 return NULL; 612 613 drm_gem_private_object_init(dev, &obj->base, stolen->size); 614 i915_gem_object_init(obj, &i915_gem_object_stolen_ops); 615 616 obj->pages = i915_pages_create_for_stolen(dev, 617 stolen->start, stolen->size); 618 if (obj->pages == NULL) 619 goto cleanup; 620 621 obj->get_page.sg = obj->pages->sgl; 622 obj->get_page.last = 0; 623 624 i915_gem_object_pin_pages(obj); 625 obj->stolen = stolen; 626 627 obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 628 obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE; 629 630 return obj; 631 632 cleanup: 633 i915_gem_object_free(obj); 634 return NULL; 635 } 636 637 struct drm_i915_gem_object * 638 i915_gem_object_create_stolen(struct drm_device *dev, u32 size) 639 { 640 struct drm_i915_private *dev_priv = to_i915(dev); 641 struct drm_i915_gem_object *obj; 642 struct drm_mm_node *stolen; 643 int ret; 644 645 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 646 return NULL; 647 648 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size); 649 if (size == 0) 650 return NULL; 651 652 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 653 if (!stolen) 654 return NULL; 655 656 ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096); 657 if (ret) { 658 kfree(stolen); 659 return NULL; 660 } 661 662 obj = _i915_gem_object_create_stolen(dev, stolen); 663 if (obj) 664 return obj; 665 666 i915_gem_stolen_remove_node(dev_priv, stolen); 667 kfree(stolen); 668 return NULL; 669 } 670 671 struct drm_i915_gem_object * 672 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, 673 u32 stolen_offset, 674 u32 gtt_offset, 675 u32 size) 676 { 677 struct drm_i915_private *dev_priv = to_i915(dev); 678 struct i915_ggtt *ggtt = &dev_priv->ggtt; 679 struct drm_i915_gem_object *obj; 680 struct drm_mm_node *stolen; 681 struct i915_vma *vma; 682 int ret; 683 684 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 685 return NULL; 686 687 lockdep_assert_held(&dev->struct_mutex); 688 689 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", 690 stolen_offset, gtt_offset, size); 691 692 /* KISS and expect everything to be page-aligned */ 693 if (WARN_ON(size == 0) || WARN_ON(size & 4095) || 694 WARN_ON(stolen_offset & 4095)) 695 return NULL; 696 697 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 698 if (!stolen) 699 return NULL; 700 701 stolen->start = stolen_offset; 702 stolen->size = size; 703 mutex_lock(&dev_priv->mm.stolen_lock); 704 ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); 705 mutex_unlock(&dev_priv->mm.stolen_lock); 706 if (ret) { 707 DRM_DEBUG_KMS("failed to allocate stolen space\n"); 708 kfree(stolen); 709 return NULL; 710 } 711 712 obj = _i915_gem_object_create_stolen(dev, stolen); 713 if (obj == NULL) { 714 DRM_DEBUG_KMS("failed to allocate stolen object\n"); 715 i915_gem_stolen_remove_node(dev_priv, stolen); 716 kfree(stolen); 717 return NULL; 718 } 719 720 /* Some objects just need physical mem from stolen space */ 721 if (gtt_offset == I915_GTT_OFFSET_NONE) 722 return obj; 723 724 vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL); 725 if (IS_ERR(vma)) { 726 ret = PTR_ERR(vma); 727 goto err; 728 } 729 730 /* To simplify the initialisation sequence between KMS and GTT, 731 * we allow construction of the stolen object prior to 732 * setting up the GTT space. The actual reservation will occur 733 * later. 734 */ 735 vma->node.start = gtt_offset; 736 vma->node.size = size; 737 738 ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node); 739 if (ret) { 740 DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); 741 goto err; 742 } 743 744 vma->pages = obj->pages; 745 vma->flags |= I915_VMA_GLOBAL_BIND; 746 __i915_vma_set_map_and_fenceable(vma); 747 list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); 748 obj->bind_count++; 749 750 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); 751 i915_gem_object_pin_pages(obj); 752 753 return obj; 754 755 err: 756 i915_gem_object_put(obj); 757 return NULL; 758 } 759