/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core. Which means that it's easier to
 * extend for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so
 * if drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means
 * free space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx
 * thrashing is a fairly steep cliff anyway this is not a real concern.
 * Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can
 * be used to implement sophisticated placement restrictions. The i915 DRM
 * driver uses this to implement guard pages between incompatible caching
 * domains in the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */
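/*
 * Illustrative sketch (not part of this file): minimal driver-side usage of
 * the allocator, assuming a hypothetical driver struct. Error handling is
 * trimmed to the essentials.
 *
 *	struct my_gpu {
 *		struct drm_mm vram_mm;
 *	};
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// embedded, no extra kmalloc
 *	};
 *
 *	// one-time setup, e.g. from the driver load hook
 *	drm_mm_init(&gpu->vram_mm, 0, vram_size);
 *
 *	// allocate: the node must be zeroed before insertion
 *	memset(&buf->node, 0, sizeof(buf->node));
 *	ret = drm_mm_insert_node_generic(&gpu->vram_mm, &buf->node,
 *					 size, PAGE_SIZE, 0,
 *					 DRM_MM_SEARCH_DEFAULT,
 *					 DRM_MM_CREATE_DEFAULT);
 *	if (ret)	// -ENOSPC when no hole fits
 *		return ret;
 *
 *	// free: O(1), the node can be reused immediately
 *	drm_mm_remove_node(&buf->node);
 */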
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       u64 size,
							       unsigned alignment,
							       unsigned long color,
							       u64 start,
							       u64 end,
							       enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}
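/*
 * Worked example of the alignment fixup above (illustrative numbers): for a
 * bottom-up allocation with adj_start == 0x1003 and alignment == 0x1000,
 * do_div() yields rem == 3, so adj_start is rounded up by
 * alignment - rem == 0xffd to the next boundary, 0x2000. For top-down
 * (DRM_MM_CREATE_TOP) the remainder is subtracted instead, rounding the
 * start down so the block still ends within the hole.
 */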
/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set up before the range allocator can be set up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
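/*
 * Illustrative sketch: reserving a firmware-programmed scanout buffer before
 * normal allocations start. The names (gpu, fb_base, fb_size) are
 * placeholders, not part of this file.
 *
 *	struct drm_mm_node *fb_node;
 *
 *	fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
 *	if (!fb_node)
 *		return -ENOMEM;
 *
 *	fb_node->start = fb_base;	// exact range the firmware scans out
 *	fb_node->size = fb_size;
 *	fb_node->color = 0;
 *
 *	ret = drm_mm_reserve_node(&gpu->vram_mm, fb_node);
 *	if (ret)	// -ENOSPC: range already handed out
 *		kfree(fb_node);
 */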
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));


	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);
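/*
 * Illustrative sketch: constraining an allocation to the CPU-mappable part
 * of VRAM with the ranged insert above. mappable_end is a placeholder for
 * whatever limit the hardware imposes.
 *
 *	ret = drm_mm_insert_node_in_range_generic(&gpu->vram_mm, &buf->node,
 *						  size, PAGE_SIZE, 0,
 *						  0, mappable_end,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */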
/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of
 * the drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       u64 size,
							       unsigned alignment,
							       unsigned long color,
							       u64 start,
							       u64 end,
							       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}
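/*
 * Note on the search flags used above: with the default first-fit search the
 * helpers return the first suitable hole off the hole stack, i.e. a recently
 * freed one; DRM_MM_SEARCH_BEST walks all holes and picks the smallest one
 * that still fits, trading a longer search for less fragmentation.
 */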
/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of
 * remove and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not most
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). The driver then adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * The driver must then walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is in
 * scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack, which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly. See the sketch after
 * drm_mm_init_scan() below for the full protocol.
 */

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
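/*
 * Illustrative sketch of the scan protocol described in the DOC section
 * above, assuming a hypothetical driver LRU of objects which embed a
 * drm_mm_node (my_gpu, my_obj and my_evict_obj() are placeholders, not part
 * of this file).
 *
 *	struct my_obj {
 *		struct drm_mm_node node;
 *		struct list_head lru;		// driver-wide LRU link
 *		struct list_head scan_link;	// temporary scan membership
 *	};
 *
 *	static int my_evict_something(struct my_gpu *gpu, u64 size)
 *	{
 *		struct my_obj *obj, *tmp;
 *		LIST_HEAD(scan_list);
 *		bool found = false;
 *
 *		drm_mm_init_scan(&gpu->vram_mm, size, 0, 0);
 *
 *		// roster pass: add LRU objects until a hole emerges
 *		list_for_each_entry(obj, &gpu->lru, lru) {
 *			list_add(&obj->scan_link, &scan_list);
 *			if (drm_mm_scan_add_block(&obj->node)) {
 *				found = true;
 *				break;
 *			}
 *		}
 *
 *		// restore pass: scan_list was built with list_add(), so a
 *		// forward walk visits nodes in reverse order of addition;
 *		// every added block must be removed again
 *		list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *			if (!drm_mm_scan_remove_block(&obj->node))
 *				list_del_init(&obj->scan_link);	// keep it
 *		}
 *
 *		if (!found)
 *			return -ENOSPC;
 *
 *		// eviction pass: everything still on scan_list overlaps the
 *		// hole; evict now that the allocator has left scan mode
 *		list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *			list_del(&obj->scan_link);
 *			my_evict_obj(obj);	// ends in drm_mm_remove_node()
 *		}
 *
 *		// the caller can now retry drm_mm_insert_node_generic()
 *		return 0;
 *	}
 */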
/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
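/*
 * Note on the linkage above: drm_mm_scan_add_block() really unlinks @node
 * from the allocator, but repurposes node_list.prev as a breadcrumb back to
 * the original neighbour and node_list.next to chain all scanned nodes
 * together. drm_mm_scan_remove_block() below relies on these breadcrumbs to
 * splice the node back, which is why restoration must happen in reverse
 * order.
 */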
/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed from the scan list in exactly the reverse order in
 * which they have been added, otherwise the internal state of the memory
 * manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
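/*
 * Worked example of the head_node bootstrap above (illustrative numbers):
 * with start == 0x1000 and size == 0x3000, head_node.start becomes 0x4000
 * and head_node.size becomes 0x1000 - 0x4000, i.e. -0x3000 wrapping around
 * as a u64. The implicit hole behind head_node therefore starts at
 * head_node.start + head_node.size == 0x1000 and ends at the start of the
 * next node in the circular list, which is head_node itself at 0x4000: the
 * whole managed range is one initial hole, with no special case needed.
 */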
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
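/*
 * Illustrative sketch: exposing the allocator state through debugfs by
 * wiring drm_mm_dump_table() into a seq_file show callback. The surrounding
 * driver names are placeholders, not part of this file.
 *
 *	static int my_mm_show(struct seq_file *m, void *data)
 *	{
 *		struct my_gpu *gpu = m->private;
 *
 *		return drm_mm_dump_table(m, &gpu->vram_mm);
 *	}
 */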