xref: /linux/drivers/gpu/drm/drm_buddy.c (revision 908fc4c2)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include <linux/kmemleak.h>
7 #include <linux/module.h>
8 #include <linux/sizes.h>
9 
10 #include <drm/drm_buddy.h>
11 
12 static struct kmem_cache *slab_blocks;
13 
/*
 * Allocate and initialise a buddy block of the given @order at @offset.
 * Offset and order are packed together into block->header. Returns NULL
 * on slab allocation failure.
 */
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	/* Low header bits hold the order, the remainder is the offset. */
	block->header = offset;
	block->header |= order;
	block->parent = parent;

	/* The offset must not spill into the header's reserved bits. */
	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}
34 
/* Return @block to the slab cache. @mm is unused but kept for symmetry. */
static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}
40 
/* Flag @block as allocated and unlink it from its free list. */
static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}
48 
/* Flag @block as free and put it on the free list for its order. */
static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_add(&block->link,
		 &mm->free_list[drm_buddy_block_order(block)]);
}
58 
/*
 * Flag @block as split (its two children now carry the state) and
 * unlink it from its free list.
 */
static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}
66 
/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	/* Drop any tail smaller than the minimum chunk. */
	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	/* One free list per order, 0..max_order inclusive. */
	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	/* One power-of-two root per set bit of @size. */
	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		root_size = rounddown_pow_of_two(size);
		order = ilog2(root_size) - ilog2(chunk_size);

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	/* Undo the roots carved out so far, then the arrays. */
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
161 
/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleanup memory manager resources and the freelist
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	int i;

	/* Every root must have merged back to fully-free by now. */
	for (i = 0; i < mm->n_roots; ++i) {
		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
		drm_block_free(mm, mm->roots[i]);
	}

	/* Mismatch here means callers leaked allocated blocks. */
	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);
184 
/*
 * Split a free block into two buddies of half the size (order - 1).
 * On success both children are placed on the free list and @block is
 * marked split; on failure @block is left untouched.
 */
static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	/* Right child starts halfway through the parent block. */
	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	mark_split(block);

	return 0;
}
212 
213 static struct drm_buddy_block *
214 __get_buddy(struct drm_buddy_block *block)
215 {
216 	struct drm_buddy_block *parent;
217 
218 	parent = block->parent;
219 	if (!parent)
220 		return NULL;
221 
222 	if (parent->left == block)
223 		return parent->right;
224 
225 	return parent->left;
226 }
227 
/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	/* Exported thin wrapper around the internal lookup. */
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);
244 
/*
 * Free @block, merging it with its buddy while the buddy is also free,
 * walking up towards the root. The final (possibly merged) block is put
 * back on the appropriate free list.
 */
static void __drm_buddy_free(struct drm_buddy *mm,
			     struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		/* Can only merge when both halves are free. */
		if (!drm_buddy_block_is_free(buddy))
			break;

		list_del(&buddy->link);

		/* Both children collapse into the parent. */
		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	mark_free(mm, block);
}
268 
/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	/* Return the capacity to the available pool before merging. */
	mm->avail += drm_buddy_block_size(mm, block);
	__drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);
283 
/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
	struct drm_buddy_block *block, *on;

	list_for_each_entry_safe(block, on, objects, link) {
		drm_buddy_free_block(mm, block);
		/* Freeing may merge long chains; be kind to the scheduler. */
		cond_resched();
	}
	/* Leave the caller with a reusable empty list. */
	INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);
301 
302 static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
303 {
304 	return s1 <= e2 && e1 >= s2;
305 }
306 
307 static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
308 {
309 	return s1 <= s2 && e1 >= e2;
310 }
311 
/*
 * Depth-first search the trees from the roots for a free block of
 * exactly @order that overlaps [start, end), splitting larger blocks on
 * the way down. Returns the block, or an ERR_PTR on failure.
 */
static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
		 u64 start, u64 end,
		 unsigned int order)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	/* Work with an inclusive end offset from here on. */
	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		/* Too small to ever satisfy the requested order. */
		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		/* Descend: split once and push both halves for inspection. */
		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}
388 
389 static struct drm_buddy_block *
390 get_maxblock(struct list_head *head)
391 {
392 	struct drm_buddy_block *max_block = NULL, *node;
393 
394 	max_block = list_first_entry_or_null(head,
395 					     struct drm_buddy_block,
396 					     link);
397 	if (!max_block)
398 		return NULL;
399 
400 	list_for_each_entry(node, head, link) {
401 		if (drm_buddy_block_offset(node) >
402 		    drm_buddy_block_offset(max_block))
403 			max_block = node;
404 	}
405 
406 	return max_block;
407 }
408 
/*
 * Take a free block of at least @order from the free lists and split it
 * down until it is exactly @order. With DRM_BUDDY_TOPDOWN_ALLOCATION
 * the highest-offset candidate of each list is preferred.
 */
static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int i;
	int err;

	/* Scan upwards for the smallest suitable free block. */
	for (i = order; i <= mm->max_order; ++i) {
		if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
			block = get_maxblock(&mm->free_list[i]);
			if (block)
				break;
		} else {
			block = list_first_entry_or_null(&mm->free_list[i],
							 struct drm_buddy_block,
							 link);
			if (block)
				break;
		}
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	BUG_ON(!drm_buddy_block_is_free(block));

	/* Split down to the requested order, descending right each time. */
	while (i != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		i--;
	}
	return block;

err_undo:
	/* Only merge back if we actually split something. */
	if (i != order)
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}
452 
453 static int __alloc_range(struct drm_buddy *mm,
454 			 struct list_head *dfs,
455 			 u64 start, u64 size,
456 			 struct list_head *blocks)
457 {
458 	struct drm_buddy_block *block;
459 	struct drm_buddy_block *buddy;
460 	LIST_HEAD(allocated);
461 	u64 end;
462 	int err;
463 
464 	end = start + size - 1;
465 
466 	do {
467 		u64 block_start;
468 		u64 block_end;
469 
470 		block = list_first_entry_or_null(dfs,
471 						 struct drm_buddy_block,
472 						 tmp_link);
473 		if (!block)
474 			break;
475 
476 		list_del(&block->tmp_link);
477 
478 		block_start = drm_buddy_block_offset(block);
479 		block_end = block_start + drm_buddy_block_size(mm, block) - 1;
480 
481 		if (!overlaps(start, end, block_start, block_end))
482 			continue;
483 
484 		if (drm_buddy_block_is_allocated(block)) {
485 			err = -ENOSPC;
486 			goto err_free;
487 		}
488 
489 		if (contains(start, end, block_start, block_end)) {
490 			if (!drm_buddy_block_is_free(block)) {
491 				err = -ENOSPC;
492 				goto err_free;
493 			}
494 
495 			mark_allocated(block);
496 			mm->avail -= drm_buddy_block_size(mm, block);
497 			list_add_tail(&block->link, &allocated);
498 			continue;
499 		}
500 
501 		if (!drm_buddy_block_is_split(block)) {
502 			err = split_block(mm, block);
503 			if (unlikely(err))
504 				goto err_undo;
505 		}
506 
507 		list_add(&block->right->tmp_link, dfs);
508 		list_add(&block->left->tmp_link, dfs);
509 	} while (1);
510 
511 	list_splice_tail(&allocated, blocks);
512 	return 0;
513 
514 err_undo:
515 	/*
516 	 * We really don't want to leave around a bunch of split blocks, since
517 	 * bigger is better, so make sure we merge everything back before we
518 	 * free the allocated blocks.
519 	 */
520 	buddy = __get_buddy(block);
521 	if (buddy &&
522 	    (drm_buddy_block_is_free(block) &&
523 	     drm_buddy_block_is_free(buddy)))
524 		__drm_buddy_free(mm, block);
525 
526 err_free:
527 	drm_buddy_free_list(mm, &allocated);
528 	return err;
529 }
530 
531 static int __drm_buddy_alloc_range(struct drm_buddy *mm,
532 				   u64 start,
533 				   u64 size,
534 				   struct list_head *blocks)
535 {
536 	LIST_HEAD(dfs);
537 	int i;
538 
539 	for (i = 0; i < mm->n_roots; ++i)
540 		list_add_tail(&mm->roots[i]->tmp_link, &dfs);
541 
542 	return __alloc_range(mm, &dfs, start, size, blocks);
543 }
544 
/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power of two value, drivers consume *actual* size, so remaining
 * portions are unused and can be optionally freed with this function
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	/* Nothing to trim if the requested size is the full block. */
	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	/* Temporarily hand the whole block back to the allocator... */
	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	/* ...then re-allocate only the leading @new_size portion of it. */
	new_start = drm_buddy_block_offset(block);
	list_add(&block->tmp_link, &dfs);
	err =  __alloc_range(mm, &dfs, new_start, new_size, blocks);
	if (err) {
		/* Re-allocation failed: restore the original allocation. */
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);
613 
/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_page_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() called on range limitations, which traverses
 * the tree and returns the desired block.
 *
 * alloc_from_freelist() called when *no* range restrictions
 * are enforced, which picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_page_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int min_order, order;
	unsigned long pages;
	LIST_HEAD(allocated);
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_page_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_page_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end)
		return __drm_buddy_alloc_range(mm, start, size, blocks);

	if (!IS_ALIGNED(size, min_page_size))
		return -EINVAL;

	/* Largest order that fits the remaining page count. */
	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);

	do {
		/* Re-cap the order as the remaining page count shrinks. */
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			if (flags & DRM_BUDDY_RANGE_ALLOCATION)
				/* Allocate traversing within the range */
				block = alloc_range_bias(mm, start, end, order);
			else
				/* Allocate from freelist */
				block = alloc_from_freelist(mm, order, flags);

			if (!IS_ERR(block))
				break;

			/* Retry with a smaller order down to min_order. */
			if (order-- == min_order) {
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
716 
717 /**
718  * drm_buddy_block_print - print block information
719  *
720  * @mm: DRM buddy manager
721  * @block: DRM buddy block
722  * @p: DRM printer to use
723  */
724 void drm_buddy_block_print(struct drm_buddy *mm,
725 			   struct drm_buddy_block *block,
726 			   struct drm_printer *p)
727 {
728 	u64 start = drm_buddy_block_offset(block);
729 	u64 size = drm_buddy_block_size(mm, block);
730 
731 	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
732 }
733 EXPORT_SYMBOL(drm_buddy_block_print);
734 
735 /**
736  * drm_buddy_print - print allocator state
737  *
738  * @mm: DRM buddy manager
739  * @p: DRM printer to use
740  */
741 void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
742 {
743 	int order;
744 
745 	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
746 		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);
747 
748 	for (order = mm->max_order; order >= 0; order--) {
749 		struct drm_buddy_block *block;
750 		u64 count = 0, free;
751 
752 		list_for_each_entry(block, &mm->free_list[order], link) {
753 			BUG_ON(!drm_buddy_block_is_free(block));
754 			count++;
755 		}
756 
757 		drm_printf(p, "order-%d ", order);
758 
759 		free = count * (mm->chunk_size << order);
760 		if (free < SZ_1M)
761 			drm_printf(p, "free: %lluKiB", free >> 10);
762 		else
763 			drm_printf(p, "free: %lluMiB", free >> 20);
764 
765 		drm_printf(p, ", pages: %llu\n", count);
766 	}
767 }
768 EXPORT_SYMBOL(drm_buddy_print);
769 
/* Release the slab cache backing drm_buddy_block allocations. */
static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}
774 
/* Create the slab cache used for all drm_buddy_block allocations. */
static int __init drm_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}
783 
784 module_init(drm_buddy_module_init);
785 module_exit(drm_buddy_module_exit);
786 
787 MODULE_DESCRIPTION("DRM Buddy Allocator");
788 MODULE_LICENSE("Dual MIT/GPL");
789