/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/drm_mm.c 247833 2013-03-05 09:07:58Z kib $
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * The manager keeps all blocks on a single circular, address-ordered list
 * whose anchor is the embedded mm->head_node sentinel.  A block whose
 * hole_follows flag is set is followed by free space; such blocks are
 * additionally linked on mm->hole_stack, which is what the search
 * functions walk.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is just an
 * unordered stack of free regions. This could easily be improved if an RB-tree
 * is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>

/* Number of pre-allocated nodes to keep in the per-manager unused cache. */
#define MM_UNUSED_TARGET 4

/*
 * Allocate a zeroed drm_mm_node, sleeping (M_WAITOK) unless @atomic is
 * set (M_NOWAIT).  If the allocation fails, fall back to the manager's
 * cache of unused nodes, refilled by drm_mm_pre_get().  Returns NULL
 * only when both the allocator and the cache come up empty.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	child = kmalloc(sizeof(*child), DRM_MEM_MM, M_ZERO |
	    (atomic ? M_NOWAIT : M_WAITOK));

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_spin);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_spin);
	}
	return child;
}

/*
 * Top the unused-node cache up to MM_UNUSED_TARGET entries using sleeping
 * allocations, so that subsequent atomic allocations in drm_mm_kmalloc()
 * have something to fall back on.
 *
 * The spinlock is dropped around kmalloc() because the allocation may
 * sleep; num_unused is re-checked each iteration under the lock.
 * Returns 0 on success, or -ENOMEM when an allocation failed and fewer
 * than two cached nodes are available.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_spin);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_spin);
		node = kmalloc(sizeof(*node), DRM_MEM_MM, M_WAITOK);
		spin_lock(&mm->unused_spin);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_spin);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_spin);
	return 0;
}

/* Start address of the hole following @hole_node (== end of its block). */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/*
 * End address of the hole following @hole_node: the start of the next
 * node in address order.  The node list is circular with the head node
 * as sentinel, so this also works for the last real node.
 */
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
	    list_entry(hole_node->node_list.next, struct drm_mm_node,
		       node_list);

	return next_node->start;
}

/*
 * Carve a block of @size bytes, aligned to @alignment, out of the hole
 * that follows @hole_node and record it in @node.  Alignment padding
 * ("wasted" bytes) is left as a smaller hole in front of the new block;
 * leftover space behind the block becomes a hole following @node.
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
    struct drm_mm_node *node,
    unsigned long size, unsigned alignment)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));

	if (alignment)
		tmp = hole_start % alignment;

	/*
	 * No padding needed: the new block starts right at the hole, so
	 * @hole_node no longer has free space after it.  Otherwise the
	 * padding remains a (smaller) hole behind @hole_node.
	 */
	if (!tmp) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	} else
		wasted = alignment - tmp;

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	KASSERT(node->start + node->size <= hole_end, ("hole pos"));

	/* Any space left at the end of the hole goes on the hole stack. */
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

/*
 * Allocate a node (see drm_mm_kmalloc()) and place it in the hole
 * following @hole_node.  Returns the new node, or NULL if no node could
 * be allocated.
 */
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
    unsigned long size,
    unsigned alignment,
    int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return node;
}

/*
 * Search for a hole big enough for @size/@alignment and insert the
 * caller-provided @node there.  Returns 0 on success or -ENOSPC when no
 * hole fits.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
    unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment);

	return 0;
}

/*
 * Range-restricted variant of drm_mm_insert_helper(): the block must lie
 * inside [start, end).  Space skipped to reach @start and alignment
 * padding both count as "wasted" and stay part of @hole_node's hole.
 */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
    struct drm_mm_node *node,
    unsigned long size, unsigned alignment,
    unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long tmp = 0, wasted = 0;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);

	KASSERT(hole_node->hole_follows && !node->allocated, ("hole_node"));

	/* Clamp to the requested range first, then align. */
	if (hole_start < start)
		wasted += start - hole_start;
	if (alignment)
		tmp = (hole_start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;

	/* Block starts exactly at the hole: @hole_node's hole is consumed. */
	if (!wasted) {
		hole_node->hole_follows = 0;
		list_del_init(&hole_node->hole_stack);
	}

	node->start = hole_start + wasted;
	node->size = size;
	node->mm = mm;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	KASSERT(node->start + node->size <= hole_end, ("hole_end"));
	KASSERT(node->start + node->size <= end, ("end"));

	/* Any space left at the end of the hole goes on the hole stack. */
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	} else {
		node->hole_follows = 0;
	}
}

/*
 * Allocate a node and place it in the hole following @hole_node,
 * restricted to [start, end).  Returns the new node, or NULL if no node
 * could be allocated.
 */
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
    unsigned long size,
    unsigned alignment,
    unsigned long start,
    unsigned long end,
    int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
	    start, end);

	return node;
}

/*
 * Range-restricted variant of drm_mm_insert_node().  Returns 0 on
 * success or -ENOSPC when no hole inside [start, end) fits.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
    unsigned long size, unsigned alignment,
    unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
	    start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment,
	    start, end);

	return 0;
}

/*
 * Unlink @node from the address-ordered list, merging its space into the
 * hole following the previous node.  The node itself is not freed (see
 * drm_mm_put_block()).  Must not be called on a node that is part of an
 * eviction scan.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	KASSERT(!node->scanned_block && !node->scanned_prev_free
	    && !node->scanned_next_free, ("node"));

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	/* hole_follows must agree with the actual hole geometry. */
	if (node->hole_follows) {
		KASSERT(drm_mm_hole_node_start(node)
		    != drm_mm_hole_node_end(node), ("hole_follows"));
		list_del(&node->hole_stack);
	} else
		KASSERT(drm_mm_hole_node_start(node)
		    == drm_mm_hole_node_end(node), ("!hole_follows"));

	/*
	 * The previous node now has (possibly enlarged) free space behind
	 * it; (re)position it at the top of the hole stack.
	 */
	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}

/*
 * Put a block. Merge with the previous and / or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	/* Recycle the node into the unused cache if there is room. */
	spin_lock(&mm->unused_spin);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		drm_free(node, DRM_MEM_MM);
	spin_unlock(&mm->unused_spin);
}

/*
 * Return 1 if the hole [start, end) can hold @size bytes at @alignment
 * (including the padding needed to align @start), else 0.
 */
static int check_free_hole(unsigned long start, unsigned long end,
    unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted) {
		return 1;
	}

	return 0;
}


/*
 * Walk the hole stack for a hole fitting @size/@alignment.  Without
 * @best_match the first fit wins; with it, the smallest candidate is
 * returned.  Returns the node preceding the hole, or NULL if nothing
 * fits.
 *
 * NOTE(review): best-fit compares entry->size -- the size of the block
 * in front of the hole -- rather than the hole's own size.  This matches
 * the code as written upstream at the time, but looks odd; confirm
 * against a current drm_mm before relying on best-fit quality.
 */
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
    unsigned long size,
    unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		KASSERT(entry->hole_follows, ("hole_follows"));
		if (!check_free_hole(drm_mm_hole_node_start(entry),
		    drm_mm_hole_node_end(entry),
		    size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}

/*
 * Like drm_mm_search_free(), but each hole is clamped to [start, end)
 * before checking whether it fits.  Must not be called while an eviction
 * scan is in progress.
 */
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
    unsigned long size,
    unsigned alignment,
    unsigned long start,
    unsigned long end,
    int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	KASSERT(!mm->scanned_blocks, ("scanned"));

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		/* Clamp the hole to the requested address range. */
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
		    start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
		    end : drm_mm_hole_node_end(entry);

		KASSERT(entry->hole_follows, ("hole_follows"));
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}

/*
 * Swap @new into @old's place on both the node list and the hole stack,
 * copying the range and hole state.  @old is marked unallocated but is
 * not freed.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;

	old->allocated = 0;
	new->allocated = 1;
}

/*
 * Begin an eviction scan: record the size/alignment we are trying to
 * make room for and reset the scan bookkeeping.  Candidate blocks are
 * then fed in with drm_mm_scan_add_block() and unwound with
 * drm_mm_scan_remove_block().
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
    unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}

/*
 * As drm_mm_init_scan(), but candidate holes will be clamped to
 * [start, end) before being checked.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
    unsigned alignment,
    unsigned long start,
    unsigned long end)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}

/*
 * Add @node to the current eviction scan.  The node is unlinked from the
 * address list so that its space merges with the neighbouring holes, but
 * its stale prev/next pointers are deliberately kept: prev records the
 * predecessor so drm_mm_scan_remove_block() can splice the node back,
 * and next threads the scanned nodes together via mm->prev_scanned_node.
 *
 * Returns 1 as soon as the combined hole can satisfy the scan request.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	KASSERT(!node->scanned_block, ("node->scanned_block"));
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
	    node_list);

	/* Remember the predecessor's real hole state for the unwind. */
	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);
	if (mm->scan_check_range) {
		adj_start = hole_start < mm->scan_start ?
		    mm->scan_start : hole_start;
		adj_end = hole_end > mm->scan_end ?
		    mm->scan_end : hole_end;
	} else {
		adj_start = hole_start;
		adj_end = hole_end;
	}

	if (check_free_hole(adj_start , adj_end,
	    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		/*
		 * NOTE(review): scan_hit_size is assigned the hole's END
		 * address rather than its size, yet
		 * drm_mm_scan_remove_block() adds it to scan_hit_start in
		 * its containment test.  This mirrors the historical
		 * upstream code but looks suspicious -- confirm against a
		 * current drm_mm before changing.
		 */
		mm->scan_hit_size = hole_end;

		return 1;
	}

	return 0;
}

/*
 * Remove @node from the scan and splice it back into the address list
 * using the pointers preserved by drm_mm_scan_add_block() (nodes must be
 * removed in reverse order of addition).  Returns 1 if the node lies
 * inside the hole found by the scan and therefore must be evicted.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	KASSERT(node->scanned_block, ("scanned_block"));
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
	    node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start&size for the
	 * complete resulting free block (not just the desired part) is
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
	    <= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}

/*
 * Nonzero when the manager is (effectively) empty: the circular node
 * list cycles back to the head within two hops.
 *
 * NOTE(review): in a circular list this condition also holds with
 * exactly one allocated node (head -> node -> head); confirm the
 * intended semantics against upstream before relying on it.
 */
int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}

/*
 * Initialise @mm to manage the range [start, start + size).  The
 * embedded head node is a sentinel placed at start + size whose size
 * wraps around (start - (start + size)), so the hole "following" it --
 * start + size + size field, modulo the word width -- begins at @start
 * and, with the empty circular list pointing back to the head, ends at
 * start + size: the whole managed range is one initial hole.
 */
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_init(&mm->unused_spin);

	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	return 0;
}

/*
 * Tear down @mm.  Refuses (with an error message) while allocations are
 * still outstanding; otherwise drains and frees the unused-node cache
 * and destroys the spinlock.
 */
void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_spin);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		drm_free(entry, DRM_MEM_MM);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_spin);

	spin_uninit(&mm->unused_spin);

	KASSERT(mm->num_unused == 0, ("num_unused != 0"));
}

/*
 * Dump every block and hole of @mm to the console, one line each with
 * the given @prefix, followed by used/free totals.
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	/* The head node's hole is the free space before the first block. */
	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		kprintf("%s 0x%08lx-0x%08lx: %8lu: free\n",
		    prefix, hole_start, hole_end,
		    hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		kprintf("%s 0x%08lx-0x%08lx: %8lu: used\n",
		    prefix, entry->start, entry->start + entry->size,
		    entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			kprintf("%s 0x%08lx-0x%08lx: %8lu: free\n",
			    prefix, hole_start, hole_end,
			    hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	kprintf("%s total: %lu, used %lu free %lu\n", prefix, total,
	    total_used, total_free);
}