/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

extern int drm_vma_debug;

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
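 *
 * A minimal usage sketch; the mgr/buf structures, the vram_size bound and the
 * 4096 alignment below are illustrative only and not part of this API:
 *
 *	struct my_mgr {
 *		struct drm_mm mm;
 *	};
 *
 *	struct my_buf {
 *		struct drm_mm_node node;
 *	};
 *
 *	drm_mm_init(&mgr->mm, 0, vram_size);
 *
 *	err = drm_mm_insert_node_in_range_generic(&mgr->mm, &buf->node,
 *						  size, 4096, 0,
 *						  0, vram_size,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *
 * The embedded buf->node must be cleared to 0 before the first insertion; on
 * success it describes where the buffer was placed.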
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff it is not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, M_DRM, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

#ifndef __DragonFly__
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)
#else
static struct drm_mm_node *
drm_mm_interval_tree_iter_first(struct rb_root *rb, u64 start, u64 last)
{
	struct drm_mm *mm = container_of(rb, typeof(*mm), interval_tree);
	struct drm_mm_node *node;

	drm_mm_for_each_node(node, mm) {
		if (LAST(node) >= start && START(node) <= last)
			return node;
	}
	return NULL;
}
#endif

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, u64 alignment,
				 unsigned long color,
				 u64 range_start, u64 range_end,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	adj_start = max(adj_start, range_start);
	adj_end = min(adj_end, range_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 rem;

		div64_u64_rem(adj_start, alignment, &rem);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

	DRM_MM_BUG_ON(node->start < range_start);
	DRM_MM_BUG_ON(node->start < adj_start);
	DRM_MM_BUG_ON(node->start + node->size > adj_end);
	DRM_MM_BUG_ON(node->start + node->size > range_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
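 *
 * A minimal sketch of taking over a firmware scanout buffer; fw_start and
 * fw_size are hypothetical values read from the inherited mode setting
 * configuration, and mgr is an illustrative driver structure:
 *
 *	memset(&fw_node, 0, sizeof(fw_node));
 *	fw_node.start = fw_start;
 *	fw_node.size = fw_size;
 *	fw_node.color = 0;
 *	err = drm_mm_reserve_node(&mgr->mm, &fw_node);
 *	if (err)
 *		return err;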
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!drm_mm_hole_follows(hole))
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

#if 0
	drm_mm_interval_tree_add_node(hole, node);
#endif

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, u64 alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node,
			     size, alignment, color,
			     start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
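 *
 * For example (sketch only, the mgr/obj names are illustrative), a node can be
 * taken out and immediately re-inserted elsewhere without clearing it first,
 * since removal leaves start, size and color intact:
 *
 *	drm_mm_remove_node(&obj->node);
 *	err = drm_mm_insert_node_in_range_generic(&mgr->mm, &obj->node,
 *						  obj->node.size, 0,
 *						  obj->node.color,
 *						  0, U64_MAX,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);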
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (drm_vma_debug & 2) {
		drm_vma_debug &= ~2;
	}

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (drm_mm_hole_follows(node)) {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
			      __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
			      __drm_mm_hole_node_end(node));
	}

	if (!drm_mm_hole_follows(prev_node)) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 rem;

		div64_u64_rem(start, alignment, &rem);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scan_active);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		adj_start = max(adj_start, start);
		adj_end = min(adj_end, end);

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not the
 * most efficient to simply start selecting all objects from the tail of an LRU
 * until there's a suitable hole: Especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we evict
 * lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in struct
 * &drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before
 * a scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
 */

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @flags: flags to specify how the allocation will be performed afterwards
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 unsigned int flags)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->flags = flags;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
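 *
 * A condensed eviction-loop sketch, assuming a driver-side LRU list and
 * hypothetical obj_to_node() and evict_object() helpers (none of these names
 * are part of this API):
 *
 *	struct drm_mm_scan scan;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init_with_range(&scan, mm, size, alignment, color,
 *				    start, end, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, obj_to_node(obj)))
 *			break;
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link)
 *		if (drm_mm_scan_remove_block(&scan, obj_to_node(obj)))
 *			evict_object(obj);
 *
 * Because list_add() prepends, walking evict_list afterwards visits the blocks
 * in exactly the reverse order in which they were added, as required; if no
 * hole was found, drm_mm_scan_remove_block() reports false for every block and
 * nothing is evicted.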
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->flags == DRM_MM_CREATE_TOP)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->flags != DRM_MM_CREATE_TOP)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range_generic() or one of the
 * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
 * the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact.
	 * Now that the caller is walking back along the eviction list we can
	 * restore this block into its rightful place on the full node_list.
	 * To confirm that the caller is walking backwards correctly we check
	 * that prev_node->next == node->next, i.e. both believe the same node
	 * should be on the other side of the hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scan_active = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.allocated = 0;
	mm->head_node.hole_follows = 1;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
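 *
 * A teardown sketch, assuming the driver keeps its own list of objects
 * (the mgr/obj names are illustrative only):
 *
 *	list_for_each_entry_safe(obj, next, &mgr->objects, link)
 *		drm_mm_remove_node(&obj->node);
 *	drm_mm_takedown(&mgr->mm);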
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(struct drm_mm *mm, struct drm_printer *p)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
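
/*
 * Usage sketch for drm_mm_print(), e.g. from a debugfs show callback; this
 * assumes the drm_seq_file_printer() helper from drm_print.h is available in
 * this kernel generation, and mgr is an illustrative driver structure:
 *
 *	struct drm_printer p = drm_seq_file_printer(m);
 *
 *	drm_mm_print(&mgr->mm, &p);
 */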