1 /************************************************************************** 2 * 3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 * 27 **************************************************************************/ 28 29 /* 30 * Generic simple memory manager implementation. Intended to be used as a base 31 * class implementation for more advanced memory managers. 32 * 33 * Note that the algorithm used is quite simple and there might be substantial 34 * performance gains if a smarter free list is implemented. Currently it is just an 35 * unordered stack of free regions. This could easily be improved if an RB-tree 36 * is used instead. At least if we expect heavy fragmentation. 37 * 38 * Aligned allocations can also see improvement. 
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of drm_mm
 * is that it's in the DRM core. Which means that it's easier to extend for
 * some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given that all the fancy features drm_mm supports
 * something better would be fairly complex and since gfx thrashing is a fairly
 * steep cliff not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
* supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and top-down.
 * The default is bottom-up. Top-down allocation can be used if the memory area
 * has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as are
 * some basic allocator dumpers for debugging.
 */

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       u64 size,
							       unsigned alignment,
							       unsigned long color,
							       u64 start,
							       u64 end,
							       enum drm_mm_search_flags flags);

/*
 * Carve @node (@size bytes, @alignment, @color) out of the hole that follows
 * @hole_node and fix up the hole bookkeeping. @flags selects bottom-up
 * (default) or top-down placement within the hole. The caller must have
 * already verified the hole is big enough.
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	/* Let the driver shrink the usable range, e.g. for guard pages. */
	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			/* Top-down rounds down, bottom-up rounds up. */
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	/* Hole consumed from its start: it no longer follows hole_node. */
	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	/* Any space left after the new node becomes a fresh hole. */
	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set-up
 * before the range allocator can be set-up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	if (WARN_ON(node->size == 0))
		return -EINVAL;

	end = node->start + node->size;

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		/* The requested range must fit entirely inside this hole. */
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		/* Node starts at the hole start: the old hole is consumed. */
		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		/* Any remainder after the node becomes a new hole. */
		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

/*
 * Like drm_mm_insert_helper() but additionally clamps the usable part of the
 * hole to [@start, @end) before applying color adjustment and alignment.
 */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	/* Clamp the hole to the allowed range first. */
	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			/* Top-down rounds down, bottom-up rounds up. */
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	/* Hole consumed from its start: it no longer follows hole_node. */
	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	/* Any space left after the new node becomes a fresh hole. */
	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an un-allocated node.
372 */ 373 void drm_mm_remove_node(struct drm_mm_node *node) 374 { 375 struct drm_mm *mm = node->mm; 376 struct drm_mm_node *prev_node; 377 378 if (WARN_ON(!node->allocated)) 379 return; 380 381 BUG_ON(node->scanned_block || node->scanned_prev_free 382 || node->scanned_next_free); 383 384 prev_node = 385 list_entry(node->node_list.prev, struct drm_mm_node, node_list); 386 387 if (node->hole_follows) { 388 BUG_ON(__drm_mm_hole_node_start(node) == 389 __drm_mm_hole_node_end(node)); 390 list_del(&node->hole_stack); 391 } else 392 BUG_ON(__drm_mm_hole_node_start(node) != 393 __drm_mm_hole_node_end(node)); 394 395 396 if (!prev_node->hole_follows) { 397 prev_node->hole_follows = 1; 398 list_add(&prev_node->hole_stack, &mm->hole_stack); 399 } else 400 list_move(&prev_node->hole_stack, &mm->hole_stack); 401 402 list_del(&node->node_list); 403 node->allocated = 0; 404 } 405 EXPORT_SYMBOL(drm_mm_remove_node); 406 407 static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) 408 { 409 if (end - start < size) 410 return 0; 411 412 if (alignment) { 413 u64 tmp = start; 414 unsigned rem; 415 416 rem = do_div(tmp, alignment); 417 if (rem) 418 start += alignment - rem; 419 } 420 421 return end >= start + size; 422 } 423 424 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, 425 u64 size, 426 unsigned alignment, 427 unsigned long color, 428 enum drm_mm_search_flags flags) 429 { 430 struct drm_mm_node *entry; 431 struct drm_mm_node *best; 432 u64 adj_start; 433 u64 adj_end; 434 u64 best_size; 435 436 BUG_ON(mm->scanned_blocks); 437 438 best = NULL; 439 best_size = ~0UL; 440 441 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, 442 flags & DRM_MM_SEARCH_BELOW) { 443 u64 hole_size = adj_end - adj_start; 444 445 if (mm->color_adjust) { 446 mm->color_adjust(entry, color, &adj_start, &adj_end); 447 if (adj_end <= adj_start) 448 continue; 449 } 450 451 if (!check_free_hole(adj_start, adj_end, size, alignment)) 452 continue; 453 454 if 
(!(flags & DRM_MM_SEARCH_BEST)) 455 return entry; 456 457 if (hole_size < best_size) { 458 best = entry; 459 best_size = hole_size; 460 } 461 } 462 463 return best; 464 } 465 466 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, 467 u64 size, 468 unsigned alignment, 469 unsigned long color, 470 u64 start, 471 u64 end, 472 enum drm_mm_search_flags flags) 473 { 474 struct drm_mm_node *entry; 475 struct drm_mm_node *best; 476 u64 adj_start; 477 u64 adj_end; 478 u64 best_size; 479 480 BUG_ON(mm->scanned_blocks); 481 482 best = NULL; 483 best_size = ~0UL; 484 485 __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, 486 flags & DRM_MM_SEARCH_BELOW) { 487 u64 hole_size = adj_end - adj_start; 488 489 if (adj_start < start) 490 adj_start = start; 491 if (adj_end > end) 492 adj_end = end; 493 494 if (mm->color_adjust) { 495 mm->color_adjust(entry, color, &adj_start, &adj_end); 496 if (adj_end <= adj_start) 497 continue; 498 } 499 500 if (!check_free_hole(adj_start, adj_end, size, alignment)) 501 continue; 502 503 if (!(flags & DRM_MM_SEARCH_BEST)) 504 return entry; 505 506 if (hole_size < best_size) { 507 best = entry; 508 best_size = hole_size; 509 } 510 } 511 512 return best; 513 } 514 515 /** 516 * drm_mm_replace_node - move an allocation from @old to @new 517 * @old: drm_mm_node to remove from the allocator 518 * @new: drm_mm_node which should inherit @old's allocation 519 * 520 * This is useful for when drivers embed the drm_mm_node structure and hence 521 * can't move allocations by reassigning pointers. It's a combination of remove 522 * and insert with the guarantee that the allocation start will match. 
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	/* Splice @new into both lists exactly where @old sat. */
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roaster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not most
 * efficient when we simply start to select all objects from the tail of an LRU
 * until there's a suitable hole: Especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we evict
 * lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the overall
 * complexity is O(scanned_objects). So like the free stack which needs to be
 * walked before a scan operation even begins this is linear in the number of
 * objects. It doesn't seem to hurt badly.
*/

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	/*
	 * Unlink the node so its range counts as free, but keep its prev
	 * pointer and chain it through prev_scanned_node so
	 * drm_mm_scan_remove_block() can splice it back in reverse order.
	 */
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	/* node->node_list.prev was preserved by drm_mm_scan_add_block(). */
	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	/* Evict only if the node overlaps the hole found during the scan. */
	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
*/
bool drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	/* Only the head node is linked when nothing is allocated. */
	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	/*
	 * head_node sits at the range end with size (u64)-size, so the hole
	 * "following" it wraps around to cover exactly [start, start + size).
	 */
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
*/
void drm_mm_takedown(struct drm_mm * mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

/*
 * Print the hole following @entry (if any) to dmesg and return its size,
 * or 0 when no hole follows.
 */
static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	/* The head node only "owns" the initial hole, never any used space. */
	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
/*
 * Print the hole following @entry (if any) to @m and return its size,
 * or 0 when no hole follows.
 */
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
849 * @m: seq_file to dump to 850 * @mm: drm_mm allocator to dump 851 */ 852 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) 853 { 854 struct drm_mm_node *entry; 855 u64 total_used = 0, total_free = 0, total = 0; 856 857 total_free += drm_mm_dump_hole(m, &mm->head_node); 858 859 drm_mm_for_each_node(entry, mm) { 860 seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start, 861 entry->start + entry->size, entry->size); 862 total_used += entry->size; 863 total_free += drm_mm_dump_hole(m, entry); 864 } 865 total = total_free + total_used; 866 867 seq_printf(m, "total: %llu, used %llu free %llu\n", total, 868 total_used, total_free); 869 return 0; 870 } 871 EXPORT_SYMBOL(drm_mm_dump_table); 872 #endif 873