// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>
#ifndef __linux__
#include <sys/pool.h>
#endif

#include <drm/drm_buddy.h>

#ifdef __linux__
static struct kmem_cache *slab_blocks;
#else
static struct pool slab_blocks;
#endif

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

#ifdef __linux__
	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
#else
	block = pool_get(&slab_blocks, PR_WAITOK | PR_ZERO);
#endif
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}
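
/*
 * block->header packs the chunk-aligned offset into the upper bits and the
 * state and order into the low bits (see the DRM_BUDDY_HEADER_* masks in
 * <drm/drm_buddy.h>), which is one reason drm_buddy_init() insists on
 * chunk_size >= PAGE_SIZE: the low offset bits are reused for bookkeeping.
 */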

static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
#ifdef __linux__
	kmem_cache_free(slab_blocks, block);
#else
	pool_put(&slab_blocks, block);
#endif
}

static void list_insert_sorted(struct drm_buddy *mm,
			       struct drm_buddy_block *block)
{
	struct drm_buddy_block *node;
	struct list_head *head;

	head = &mm->free_list[drm_buddy_block_order(block)];
	if (list_empty(head)) {
		list_add(&block->link, head);
		return;
	}

	list_for_each_entry(node, head, link)
		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
			break;

	__list_add(&block->link, node->link.prev, &node->link);
}

static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);

/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleans up memory manager resources and the freelist.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	int i;

	for (i = 0; i < mm->n_roots; ++i) {
		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
		drm_block_free(mm, mm->roots[i]);
	}

	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);
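
/*
 * Illustrative usage sketch (the size and chunk values below are
 * assumptions chosen for the example, not requirements):
 *
 *	struct drm_buddy mm;
 *	int err;
 *
 *	err = drm_buddy_init(&mm, SZ_1G, SZ_4K);
 *	if (err)
 *		return err;
 *
 *	... allocate and free blocks against &mm ...
 *
 *	drm_buddy_fini(&mm);	(all blocks must have been freed by now)
 */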

static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	mark_split(block);

	return 0;
}
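
/*
 * Worked example (illustrative): with a 4 KiB chunk_size, splitting an
 * order-2 (16 KiB) block at offset 0 produces two order-1 buddies at
 * offsets 0 and 0 + (4K << 1) = 8 KiB; each of those can be split once
 * more, down to order 0.
 */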

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

static void __drm_buddy_free(struct drm_buddy *mm,
			     struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		if (!drm_buddy_block_is_free(buddy))
			break;

		list_del(&buddy->link);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	mark_free(mm, block);
}
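
/*
 * Example (illustrative): freeing an order-0 block whose buddy is also
 * free coalesces the pair into their order-1 parent; if the parent's buddy
 * is free as well, the merge continues upward, potentially all the way
 * back to a root block.
 */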

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	mm->avail += drm_buddy_block_size(mm, block);
	__drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
	struct drm_buddy_block *block, *on;

	list_for_each_entry_safe(block, on, objects, link) {
		drm_buddy_free_block(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}
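
/*
 * Both helpers operate on inclusive [start, end] ranges. For example
 * (values illustrative): overlaps(0, 4095, 4096, 8191) is false, since the
 * two 4 KiB chunks merely touch, while contains(0, 8191, 4096, 8191) is
 * true.
 */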

static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
		 u64 start, u64 end,
		 unsigned int order)
{
	u64 req_size = mm->chunk_size << order;
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	DRM_LIST_HEAD(dfs);
	int err;
	int i;

	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (block_start < start || block_end > end) {
			u64 adjusted_start = max(block_start, start);
			u64 adjusted_end = min(block_end, end);

			if (round_down(adjusted_end + 1, req_size) <=
			    round_up(adjusted_start, req_size))
				continue;
		}

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}

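/*
 * For DRM_BUDDY_TOPDOWN_ALLOCATION: scan every order >= @order and return
 * the free block with the highest offset, biasing the allocation toward
 * the end of the managed range.
 */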
static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order)
{
	struct drm_buddy_block *max_block = NULL, *node;
	unsigned int i;

	for (i = order; i <= mm->max_order; ++i) {
		if (!list_empty(&mm->free_list[i])) {
			node = list_last_entry(&mm->free_list[i],
					       struct drm_buddy_block,
					       link);
			if (!max_block) {
				max_block = node;
				continue;
			}

			if (drm_buddy_block_offset(node) >
			    drm_buddy_block_offset(max_block)) {
				max_block = node;
			}
		}
	}

	return max_block;
}

static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	BUG_ON(!drm_buddy_block_is_free(block));

	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	if (tmp != order)
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}

static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	DRM_LIST_HEAD(allocated);
	u64 end;
	int err;

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (!drm_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			mm->avail -= drm_buddy_block_size(mm, block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
				   u64 start,
				   u64 size,
				   struct list_head *blocks)
{
	DRM_LIST_HEAD(dfs);
	int i;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	return __alloc_range(mm, &dfs, start, size, blocks);
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order.
 *
 * For contiguous allocation, we round up the size to the nearest
 * power of two value; drivers consume the *actual* size, so the
 * remaining portion is unused and can optionally be freed with this
 * function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	DRM_LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	new_start = drm_buddy_block_offset(block);
	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
	if (err) {
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);
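
/*
 * Illustrative usage sketch (names and sizes are assumptions for the
 * example): a driver that needs 768 MiB contiguously might allocate a
 * single 1 GiB block and then trim the tail back, assuming mm manages
 * more than 1 GiB:
 *
 *	DRM_LIST_HEAD(blocks);
 *
 *	err = drm_buddy_alloc_blocks(mm, 0, mm->size, SZ_1G, SZ_1G,
 *				     &blocks, 0);
 *	if (!err)
 *		err = drm_buddy_block_trim(mm, 768ULL << 20, &blocks);
 */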

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_page_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when there are range restrictions;
 * it traverses the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions
 * are enforced; it picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_page_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int min_order, order;
	unsigned long pages;
	DRM_LIST_HEAD(allocated);
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_page_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_page_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end)
		return __drm_buddy_alloc_range(mm, start, size, blocks);

	if (!IS_ALIGNED(size, min_page_size))
		return -EINVAL;

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);

	do {
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			if (flags & DRM_BUDDY_RANGE_ALLOCATION)
				/* Allocate traversing within the range */
				block = alloc_range_bias(mm, start, end, order);
			else
				/* Allocate from freelist */
				block = alloc_from_freelist(mm, order, flags);

			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
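
/*
 * Illustrative usage sketch (values are assumptions): allocate 8 MiB with
 * 64 KiB minimum alignment, restricted to the first 256 MiB of the managed
 * range, then release it again:
 *
 *	DRM_LIST_HEAD(blocks);
 *
 *	err = drm_buddy_alloc_blocks(mm, 0, SZ_256M, SZ_8M, SZ_64K,
 *				     &blocks, DRM_BUDDY_RANGE_ALLOCATION);
 *	if (!err)
 *		drm_buddy_free_list(mm, &blocks);
 */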

/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
			   struct drm_buddy_block *block,
			   struct drm_printer *p)
{
	u64 start = drm_buddy_block_offset(block);
	u64 size = drm_buddy_block_size(mm, block);

	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%2d ", order);

		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %8llu KiB", free >> 10);
		else
			drm_printf(p, "free: %8llu MiB", free >> 20);

		drm_printf(p, ", blocks: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);

void drm_buddy_module_exit(void)
{
#ifdef __linux__
	kmem_cache_destroy(slab_blocks);
#else
	pool_destroy(&slab_blocks);
#endif
}

int __init drm_buddy_module_init(void)
{
#ifdef __linux__
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;
#else
	pool_init(&slab_blocks, sizeof(struct drm_buddy_block),
	    CACHELINESIZE, IPL_NONE, 0, "drmbb", NULL);
#endif

	return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");