/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

/*
 * Allocate a node, falling back to the cache of nodes pre-allocated by
 * drm_mm_pre_get() if the kzalloc fails (e.g. in atomic context).
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/*
 * Fill the cache of unused nodes up to MM_UNUSED_TARGET entries so that later
 * atomic allocations can be served from it. Returns -ENOMEM only if the cache
 * could not be filled to at least two entries.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator.
 * Drivers are free to use the resource allocator from the Linux core if it
 * suits them; the upside of drm_mm is that it is part of the DRM core, which
 * makes it easier to extend for some of the crazier special-purpose needs of
 * GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable data
 * structures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted at any time after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep performance cliff anyway, this is not a real concern.
 * Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
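 *
 * As a minimal usage sketch (the variable names below are made up for
 * illustration and are not part of the API), a driver embedding both
 * structures might do:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node;
 *	int ret;
 *
 *	drm_mm_init(&mm, 0, 1024 * PAGE_SIZE);
 *
 *	memset(&node, 0, sizeof(node));
 *	ret = drm_mm_insert_node_generic(&mm, &node, 4 * PAGE_SIZE, 0, 0,
 *					 DRM_MM_SEARCH_DEFAULT,
 *					 DRM_MM_CREATE_DEFAULT);
 *	if (ret)
 *		return ret;
 *
 *	... use the range [node.start, node.start + node.size) ...
 *
 *	drm_mm_remove_node(&node);
 *	drm_mm_takedown(&mm);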
 */

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set up before the range allocator can be set up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
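 *
 * A short sketch (fw_fb_offset and fw_fb_size stand in for values read from
 * the firmware; they are not part of this API): to take over a firmware
 * framebuffer a driver could do
 *
 *	struct drm_mm_node fb_node;
 *	int ret;
 *
 *	memset(&fb_node, 0, sizeof(fb_node));
 *	fb_node.start = fw_fb_offset;
 *	fb_node.size = fw_fb_size;
 *	ret = drm_mm_reserve_node(&mm, &fb_node);
 *	if (ret)
 *		... the range is already in use, fall back to eviction ...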
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

/*
 * Allocate a struct drm_mm_node (falling back to the pre-allocated cache) and
 * place it into the given hole. Legacy counterpart of
 * drm_mm_insert_node_generic() which returns the node instead of filling in a
 * caller-provided one.
 */
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color,
			     DRM_MM_CREATE_DEFAULT);

	return node;
}

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
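 *
 * For example, an aligned, color-tagged best-fit allocation might look like
 * this (MY_TILING_COLOR is a hypothetical driver-defined tag, not part of the
 * drm_mm API):
 *
 *	ret = drm_mm_insert_node_generic(&mm, &node, size, PAGE_SIZE,
 *					 MY_TILING_COLOR,
 *					 DRM_MM_SEARCH_BEST,
 *					 DRM_MM_CREATE_DEFAULT);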
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
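 *
 * As an illustration (the 256 MiB limit is a made-up hardware constraint),
 * restricting an allocation to an engine's addressable window could look
 * like:
 *
 *	ret = drm_mm_insert_node_in_range_generic(&mm, &node, size, 0, 0,
 *						  0, 256 * 1024 * 1024,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);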
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}

/*
 * Check whether the hole [start, end) can hold an allocation of @size bytes
 * at the given alignment.
 */
static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       u64 size,
					       unsigned alignment,
					       unsigned long color,
					       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
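 *
 * A brief sketch (the vma_node members are hypothetical driver state, not
 * part of this API):
 *
 *	drm_mm_replace_node(&old_obj->vma_node, &new_obj->vma_node);
 *	... old_obj->vma_node is now unallocated and can be reused ...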
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). The driver then adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * The driver must then walk through all objects again, in exactly the
 * reverse order, to restore the allocator state. Note that while the
 * allocator is in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack, which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 * Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in exactly the reverse order in which they were
 * added to the scan list, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always return
 * false when no hole has been found.
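 *
 * A condensed sketch of the full eviction loop (the lru list, the scan_link
 * member and the obj_to_node() accessor are hypothetical driver constructs):
 *
 *	struct my_obj *obj, *tmp;
 *	LIST_HEAD(scan_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(&mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(obj_to_node(obj))) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 * Because list_add() prepends, walking scan_list forward visits the nodes in
 * exactly the reverse order in which they were added:
 *
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *		if (!drm_mm_scan_remove_block(obj_to_node(obj)))
 *			list_del(&obj->scan_link);
 *	}
 *
 *	... if found, evict everything still on scan_list, then allocate ...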
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif