xref: /dragonfly/sys/dev/drm/drm_mm.c (revision 9317c2d0)
1 /**************************************************************************
2  *
3  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  *
27  **************************************************************************/
28 
29 /*
30  * Generic simple memory manager implementation. Intended to be used as a base
31  * class implementation for more advanced memory managers.
32  *
33  * Note that the algorithm used is quite simple and there might be substantial
34  * performance gains if a smarter free list is implemented. Currently it is just an
35  * unordered stack of free regions. This could easily be improved if an RB-tree
36  * were used instead, at least if we expect heavy fragmentation.
37  *
38  * Aligned allocations can also see improvement.
39  *
40  * Authors:
41  * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
42  */
43 
44 #include <drm/drmP.h>
45 #include <drm/drm_mm.h>
46 #include <linux/slab.h>
47 #include <linux/seq_file.h>
48 #include <linux/export.h>
49 
50 /**
51  * DOC: Overview
52  *
53  * drm_mm provides a simple range allocator. Drivers are free to use the
54  * resource allocator from the Linux core if it suits them; the upside of drm_mm
55  * is that it lives in the DRM core, which means it is easier to extend for
56  * some of the crazier special-purpose needs of GPUs.
57  *
58  * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node.
59  * Drivers are free to embed either of them into their own suitable
60  * data structures. drm_mm itself will not do any allocations of its own, so if
61  * drivers choose not to embed nodes they still need to allocate them
62  * themselves.
63  *
64  * The range allocator also supports reservation of preallocated blocks. This is
65  * useful for taking over initial mode setting configurations from the firmware,
66  * where an object needs to be created which exactly matches the firmware's
67  * scanout target. As long as the range is still free it can be inserted anytime
68  * after the allocator is initialized, which helps with avoiding looped
69  * dependencies in the driver load sequence.
70  *
71  * drm_mm maintains a stack of most recently freed holes, which of all
72  * simplistic datastructures seems to be a fairly decent approach to clustering
73  * allocations and avoiding too much fragmentation. This means free space
74  * searches are O(num_holes). Given all the fancy features drm_mm supports,
75  * something better would be fairly complex, and since gfx thrashing is a fairly
76  * steep cliff it is not a real concern. Removing a node again is O(1).
77  *
78  * drm_mm supports a few features: Alignment and range restrictions can be
79  * supplied. Furthermore, every &drm_mm_node has a color value (which is just an
80  * opaque unsigned long) which in conjunction with a driver callback can be used
81  * to implement sophisticated placement restrictions. The i915 DRM driver uses
82  * this to implement guard pages between incompatible caching domains in the
83  * graphics TT.
84  *
85  * Two behaviors are supported for searching and allocating: bottom-up and top-down.
86  * The default is bottom-up. Top-down allocation can be used if the memory area
87  * has different restrictions, or just to reduce fragmentation.
88  *
89  * Finally, iteration helpers to walk all nodes and all holes are provided, as are
90  * some basic allocator dumpers for debugging.
91  */
92 
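/*
 * A minimal usage sketch, not part of the original file: the driver structure
 * my_vram_mgr and the sizes below are made up for illustration. It shows the
 * init/insert/remove/takedown flow described above, with the node cleared to
 * 0 before insertion as required.
 *
 *	struct my_vram_mgr {
 *		struct drm_mm mm;
 *	};
 *
 *	static int my_vram_smoke_test(struct my_vram_mgr *mgr)
 *	{
 *		struct drm_mm_node node = {};
 *		int ret;
 *
 *		drm_mm_init(&mgr->mm, 0, 256ULL << 20);
 *
 *		ret = drm_mm_insert_node_generic(&mgr->mm, &node, 4096, 0, 0,
 *						 DRM_MM_SEARCH_DEFAULT,
 *						 DRM_MM_CREATE_DEFAULT);
 *		if (ret)
 *			return ret;
 *
 *		drm_mm_remove_node(&node);
 *		drm_mm_takedown(&mgr->mm);
 *		return 0;
 *	}
 */
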
93 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
94 						u64 size,
95 						unsigned alignment,
96 						unsigned long color,
97 						enum drm_mm_search_flags flags);
98 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
99 						u64 size,
100 						unsigned alignment,
101 						unsigned long color,
102 						u64 start,
103 						u64 end,
104 						enum drm_mm_search_flags flags);
105 
106 #ifdef CONFIG_DRM_DEBUG_MM
107 #include <linux/stackdepot.h>
108 
109 #define STACKDEPTH 32
110 #define BUFSZ 4096
111 
112 static noinline void save_stack(struct drm_mm_node *node)
113 {
114 	unsigned long entries[STACKDEPTH];
115 	struct stack_trace trace = {
116 		.entries = entries,
117 		.max_entries = STACKDEPTH,
118 		.skip = 1
119 	};
120 
121 	save_stack_trace(&trace);
122 	if (trace.nr_entries != 0 &&
123 	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
124 		trace.nr_entries--;
125 
126 	/* May be called under spinlock, so avoid sleeping */
127 	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
128 }
129 
130 static void show_leaks(struct drm_mm *mm)
131 {
132 	struct drm_mm_node *node;
133 	unsigned long entries[STACKDEPTH];
134 	char *buf;
135 
136 	buf = kmalloc(BUFSZ, M_DRM, GFP_KERNEL);
137 	if (!buf)
138 		return;
139 
140 	list_for_each_entry(node, &mm->head_node.node_list, node_list) {
141 		struct stack_trace trace = {
142 			.entries = entries,
143 			.max_entries = STACKDEPTH
144 		};
145 
146 		if (!node->stack) {
147 			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
148 				  node->start, node->size);
149 			continue;
150 		}
151 
152 		depot_fetch_stack(node->stack, &trace);
153 		snprint_stack_trace(buf, BUFSZ, &trace, 0);
154 		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
155 			  node->start, node->size, buf);
156 	}
157 
158 	kfree(buf);
159 }
160 
161 #undef STACKDEPTH
162 #undef BUFSZ
163 #else
164 static void save_stack(struct drm_mm_node *node) { }
165 static void show_leaks(struct drm_mm *mm) { }
166 #endif
167 
168 #define START(node) ((node)->start)
169 #define LAST(node)  ((node)->start + (node)->size - 1)
170 
171 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
172 				 struct drm_mm_node *node,
173 				 u64 size, unsigned alignment,
174 				 unsigned long color,
175 				 enum drm_mm_allocator_flags flags)
176 {
177 	struct drm_mm *mm = hole_node->mm;
178 	u64 hole_start = drm_mm_hole_node_start(hole_node);
179 	u64 hole_end = drm_mm_hole_node_end(hole_node);
180 	u64 adj_start = hole_start;
181 	u64 adj_end = hole_end;
182 
183 	BUG_ON(node->allocated);
184 
185 	if (mm->color_adjust)
186 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
187 
188 	if (flags & DRM_MM_CREATE_TOP)
189 		adj_start = adj_end - size;
190 
191 	if (alignment) {
192 		u64 tmp = adj_start;
193 		unsigned rem;
194 
195 		rem = do_div(tmp, alignment);
196 		if (rem) {
197 			if (flags & DRM_MM_CREATE_TOP)
198 				adj_start -= rem;
199 			else
200 				adj_start += alignment - rem;
201 		}
202 	}
203 
204 	BUG_ON(adj_start < hole_start);
205 	BUG_ON(adj_end > hole_end);
206 
207 	if (adj_start == hole_start) {
208 		hole_node->hole_follows = 0;
209 		list_del(&hole_node->hole_stack);
210 	}
211 
212 	node->start = adj_start;
213 	node->size = size;
214 	node->mm = mm;
215 	node->color = color;
216 	node->allocated = 1;
217 
218 	list_add(&node->node_list, &hole_node->node_list);
219 
220 	BUG_ON(node->start + node->size > adj_end);
221 
222 	node->hole_follows = 0;
223 	if (__drm_mm_hole_node_start(node) < hole_end) {
224 		list_add(&node->hole_stack, &mm->hole_stack);
225 		node->hole_follows = 1;
226 	}
227 
228 	save_stack(node);
229 }
230 
231 /**
232  * drm_mm_reserve_node - insert a pre-initialized node
233  * @mm: drm_mm allocator to insert @node into
234  * @node: drm_mm_node to insert
235  *
236  * This function inserts an already set-up drm_mm_node into the allocator,
237  * meaning that start, size and color must be set by the caller. This is useful
238  * to initialize the allocator with preallocated objects which must be set-up
239  * before the range allocator can be set-up, e.g. when taking over a firmware
240  * framebuffer.
241  *
242  * Returns:
243  * 0 on success, -ENOSPC if there's no hole where @node is.
244  */
245 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
246 {
247 	struct drm_mm_node *hole;
248 	u64 end;
249 	u64 hole_start;
250 	u64 hole_end;
251 
252 	BUG_ON(node == NULL);
253 
254 	if (WARN_ON(node->size == 0))
255 		return -EINVAL;
256 
260 	end = node->start + node->size;
261 
262 	/* Find the relevant hole to add our node to */
263 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
264 		if (hole_start > node->start || hole_end < end)
265 			continue;
266 
267 		node->mm = mm;
268 		node->allocated = 1;
269 
270 		list_add(&node->node_list, &hole->node_list);
271 
272 		if (node->start == hole_start) {
273 			hole->hole_follows = 0;
274 			list_del_init(&hole->hole_stack);
275 		}
276 
277 		node->hole_follows = 0;
278 		if (end != hole_end) {
279 			list_add(&node->hole_stack, &mm->hole_stack);
280 			node->hole_follows = 1;
281 		}
282 
283 		return 0;
284 	}
285 
286 	return -ENOSPC;
287 }
288 EXPORT_SYMBOL(drm_mm_reserve_node);
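
/*
 * Illustrative sketch, not part of the original file: taking over a
 * firmware-programmed scanout buffer by reserving its exact range. fb_base
 * and fb_size are hypothetical values read from the inherited display
 * configuration; the node is zeroed before start/size are filled in.
 *
 *	static int my_reserve_firmware_fb(struct drm_mm *mm,
 *					  struct drm_mm_node *node,
 *					  u64 fb_base, u64 fb_size)
 *	{
 *		memset(node, 0, sizeof(*node));
 *		node->start = fb_base;
 *		node->size = fb_size;
 *		return drm_mm_reserve_node(mm, node);
 *	}
 */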
289 
290 /**
291  * drm_mm_insert_node_generic - search for space and insert @node
292  * @mm: drm_mm to allocate from
293  * @node: preallocated node to insert
294  * @size: size of the allocation
295  * @alignment: alignment of the allocation
296  * @color: opaque tag value to use for this node
297  * @sflags: flags to fine-tune the allocation search
298  * @aflags: flags to fine-tune the allocation behavior
299  *
300  * The preallocated node must be cleared to 0.
301  *
302  * Returns:
303  * 0 on success, -ENOSPC if there's no suitable hole.
304  */
305 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
306 			       u64 size, unsigned alignment,
307 			       unsigned long color,
308 			       enum drm_mm_search_flags sflags,
309 			       enum drm_mm_allocator_flags aflags)
310 {
311 	struct drm_mm_node *hole_node;
312 
313 	if (WARN_ON(size == 0))
314 		return -EINVAL;
315 
319 	hole_node = drm_mm_search_free_generic(mm, size, alignment,
320 					       color, sflags);
321 	if (!hole_node)
322 		return -ENOSPC;
323 
324 	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
325 	return 0;
326 }
327 EXPORT_SYMBOL(drm_mm_insert_node_generic);
328 
329 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
330 				       struct drm_mm_node *node,
331 				       u64 size, unsigned alignment,
332 				       unsigned long color,
333 				       u64 start, u64 end,
334 				       enum drm_mm_allocator_flags flags)
335 {
336 	struct drm_mm *mm = hole_node->mm;
337 	u64 hole_start = drm_mm_hole_node_start(hole_node);
338 	u64 hole_end = drm_mm_hole_node_end(hole_node);
339 	u64 adj_start = hole_start;
340 	u64 adj_end = hole_end;
341 
342 	BUG_ON(!hole_node->hole_follows || node->allocated);
343 
344 	if (adj_start < start)
345 		adj_start = start;
346 	if (adj_end > end)
347 		adj_end = end;
348 
349 	if (mm->color_adjust)
350 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
351 
352 	if (flags & DRM_MM_CREATE_TOP)
353 		adj_start = adj_end - size;
354 
355 	if (alignment) {
356 		u64 tmp = adj_start;
357 		unsigned rem;
358 
359 		rem = do_div(tmp, alignment);
360 		if (rem) {
361 			if (flags & DRM_MM_CREATE_TOP)
362 				adj_start -= rem;
363 			else
364 				adj_start += alignment - rem;
365 		}
366 	}
367 
368 	if (adj_start == hole_start) {
369 		hole_node->hole_follows = 0;
370 		list_del(&hole_node->hole_stack);
371 	}
372 
373 	node->start = adj_start;
374 	node->size = size;
375 	node->mm = mm;
376 	node->color = color;
377 	node->allocated = 1;
378 
379 	list_add(&node->node_list, &hole_node->node_list);
380 
381 	BUG_ON(node->start < start);
382 	BUG_ON(node->start < adj_start);
383 	BUG_ON(node->start + node->size > adj_end);
384 	BUG_ON(node->start + node->size > end);
385 
386 	node->hole_follows = 0;
387 	if (__drm_mm_hole_node_start(node) < hole_end) {
388 		list_add(&node->hole_stack, &mm->hole_stack);
389 		node->hole_follows = 1;
390 	}
391 }
392 
393 /**
394  * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
395  * @mm: drm_mm to allocate from
396  * @node: preallocated node to insert
397  * @size: size of the allocation
398  * @alignment: alignment of the allocation
399  * @color: opaque tag value to use for this node
400  * @start: start of the allowed range for this node
401  * @end: end of the allowed range for this node
402  * @sflags: flags to fine-tune the allocation search
403  * @aflags: flags to fine-tune the allocation behavior
404  *
405  * The preallocated node must be cleared to 0.
406  *
407  * Returns:
408  * 0 on success, -ENOSPC if there's no suitable hole.
409  */
410 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
411 					u64 size, unsigned alignment,
412 					unsigned long color,
413 					u64 start, u64 end,
414 					enum drm_mm_search_flags sflags,
415 					enum drm_mm_allocator_flags aflags)
416 {
417 	struct drm_mm_node *hole_node;
418 
419 	if (WARN_ON(size == 0))
420 		return -EINVAL;
421 
425 	hole_node = drm_mm_search_free_in_range_generic(mm,
426 							size, alignment, color,
427 							start, end, sflags);
428 	if (!hole_node)
429 		return -ENOSPC;
430 
431 	drm_mm_insert_helper_range(hole_node, node,
432 				   size, alignment, color,
433 				   start, end, aflags);
434 	return 0;
435 }
436 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
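
/*
 * Illustrative sketch, not part of the original file: a range-restricted
 * allocation that keeps a page-aligned buffer inside a hypothetical 256 MiB
 * window at the start of the managed range. Passing DRM_MM_SEARCH_BELOW and
 * DRM_MM_CREATE_TOP requests the top-down behavior mentioned in the overview.
 *
 *	int ret = drm_mm_insert_node_in_range_generic(mm, node,
 *						      size, PAGE_SIZE, 0,
 *						      0, 256ULL << 20,
 *						      DRM_MM_SEARCH_BELOW,
 *						      DRM_MM_CREATE_TOP);
 */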
437 
438 /**
439  * drm_mm_remove_node - Remove a memory node from the allocator.
440  * @node: drm_mm_node to remove
441  *
442  * This just removes a node from its drm_mm allocator. The node does not need to
443  * be cleared again before it can be re-inserted into this or any other drm_mm
444  * allocator. It is a bug to call this function on an unallocated node.
445  */
446 void drm_mm_remove_node(struct drm_mm_node *node)
447 {
448 	struct drm_mm *mm = node->mm;
449 	struct drm_mm_node *prev_node;
450 
451 	if (WARN_ON(!node->allocated))
452 		return;
453 
454 	BUG_ON(node->scanned_block || node->scanned_prev_free
455 				   || node->scanned_next_free);
456 
457 	prev_node =
458 	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
459 
460 	if (node->hole_follows) {
461 		BUG_ON(__drm_mm_hole_node_start(node) ==
462 		       __drm_mm_hole_node_end(node));
463 		list_del(&node->hole_stack);
464 	} else
465 		BUG_ON(__drm_mm_hole_node_start(node) !=
466 		       __drm_mm_hole_node_end(node));
467 
468 
469 	if (!prev_node->hole_follows) {
470 		prev_node->hole_follows = 1;
471 		list_add(&prev_node->hole_stack, &mm->hole_stack);
472 	} else
473 		list_move(&prev_node->hole_stack, &mm->hole_stack);
474 
475 	list_del(&node->node_list);
476 	node->allocated = 0;
477 }
478 EXPORT_SYMBOL(drm_mm_remove_node);
479 
480 static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
481 {
482 	if (end - start < size)
483 		return 0;
484 
485 	if (alignment) {
486 		u64 tmp = start;
487 		unsigned rem;
488 
489 		rem = do_div(tmp, alignment);
490 		if (rem)
491 			start += alignment - rem;
492 	}
493 
494 	return end >= start + size;
495 }
496 
497 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
498 						      u64 size,
499 						      unsigned alignment,
500 						      unsigned long color,
501 						      enum drm_mm_search_flags flags)
502 {
503 	struct drm_mm_node *entry;
504 	struct drm_mm_node *best;
505 	u64 adj_start;
506 	u64 adj_end;
507 	u64 best_size;
508 
509 	BUG_ON(mm->scanned_blocks);
510 
511 	best = NULL;
512 	best_size = ~0UL;
513 
514 	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
515 			       flags & DRM_MM_SEARCH_BELOW) {
516 		u64 hole_size = adj_end - adj_start;
517 
518 		if (mm->color_adjust) {
519 			mm->color_adjust(entry, color, &adj_start, &adj_end);
520 			if (adj_end <= adj_start)
521 				continue;
522 		}
523 
524 		if (!check_free_hole(adj_start, adj_end, size, alignment))
525 			continue;
526 
527 		if (!(flags & DRM_MM_SEARCH_BEST))
528 			return entry;
529 
530 		if (hole_size < best_size) {
531 			best = entry;
532 			best_size = hole_size;
533 		}
534 	}
535 
536 	return best;
537 }
538 
539 static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
540 							u64 size,
541 							unsigned alignment,
542 							unsigned long color,
543 							u64 start,
544 							u64 end,
545 							enum drm_mm_search_flags flags)
546 {
547 	struct drm_mm_node *entry;
548 	struct drm_mm_node *best;
549 	u64 adj_start;
550 	u64 adj_end;
551 	u64 best_size;
552 
553 	BUG_ON(mm->scanned_blocks);
554 
555 	best = NULL;
556 	best_size = ~0UL;
557 
558 	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
559 			       flags & DRM_MM_SEARCH_BELOW) {
560 		u64 hole_size = adj_end - adj_start;
561 
562 		if (adj_start < start)
563 			adj_start = start;
564 		if (adj_end > end)
565 			adj_end = end;
566 
567 		if (mm->color_adjust) {
568 			mm->color_adjust(entry, color, &adj_start, &adj_end);
569 			if (adj_end <= adj_start)
570 				continue;
571 		}
572 
573 		if (!check_free_hole(adj_start, adj_end, size, alignment))
574 			continue;
575 
576 		if (!(flags & DRM_MM_SEARCH_BEST))
577 			return entry;
578 
579 		if (hole_size < best_size) {
580 			best = entry;
581 			best_size = hole_size;
582 		}
583 	}
584 
585 	return best;
586 }
587 
588 /**
589  * drm_mm_replace_node - move an allocation from @old to @new
590  * @old: drm_mm_node to remove from the allocator
591  * @new: drm_mm_node which should inherit @old's allocation
592  *
593  * This is useful when drivers embed the drm_mm_node structure and hence
594  * can't move allocations by reassigning pointers. It's a combination of remove
595  * and insert with the guarantee that the allocation start will match.
596  */
597 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
598 {
599 	list_replace(&old->node_list, &new->node_list);
600 	list_replace(&old->hole_stack, &new->hole_stack);
601 	new->hole_follows = old->hole_follows;
602 	new->mm = old->mm;
603 	new->start = old->start;
604 	new->size = old->size;
605 	new->color = old->color;
606 
607 	old->allocated = 0;
608 	new->allocated = 1;
609 }
610 EXPORT_SYMBOL(drm_mm_replace_node);
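
/*
 * Illustrative sketch, not part of the original file: handing an existing
 * allocation over to a node embedded in a new object, e.g. while rebuilding
 * a driver object in place. old_obj and new_obj are hypothetical.
 *
 *	drm_mm_replace_node(&old_obj->node, &new_obj->node);
 *
 * After the call old_obj->node is unallocated and new_obj->node owns the
 * original range, with the start guaranteed to match.
 */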
611 
612 /**
613  * DOC: lru scan roster
614  *
615  * Very often GPUs need to have contiguous allocations for a given object. When
616  * evicting objects to make space for a new one it is therefore not very
617  * efficient to simply select objects from the tail of an LRU
618  * until there's a suitable hole: especially for big objects or nodes that
619  * otherwise have special allocation constraints there's a good chance we evict
620  * lots of (smaller) objects unnecessarily.
621  *
622  * The DRM range allocator supports this use-case through the scanning
623  * interfaces. First a scan operation needs to be initialized with
624  * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
625  * objects to the roster (probably by walking an LRU list, but this can be
626  * freely implemented) until a suitable hole is found or there's no further
627  * evictable object.
628  *
629  * Then the driver must walk through all objects again in exactly the reverse
630  * order to restore the allocator state. Note that while the allocator is used
631  * in the scan mode no other operation is allowed.
632  *
633  * Finally the driver evicts all objects selected in the scan. Adding and
634  * removing an object is O(1), and since freeing a node is also O(1) the overall
635  * complexity is O(scanned_objects). So, like the free stack which needs to be
636  * walked before a scan operation even begins, this is linear in the number of
637  * objects. It doesn't seem to hurt badly.
638  */
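
/*
 * Illustrative sketch, not part of the original file, of an eviction scan
 * built on this interface. struct my_obj with its lru_link/scan_link members
 * is a hypothetical driver construct. Because list_add() prepends, walking
 * @evict forward calls drm_mm_scan_remove_block() on every scanned node in
 * the reverse order of the additions; the objects still left on @evict
 * afterwards are the ones the driver should actually evict.
 *
 *	struct my_obj {
 *		struct drm_mm_node node;
 *		struct list_head lru_link;
 *		struct list_head scan_link;
 *	};
 *
 *	static bool my_scan_for_hole(struct drm_mm *mm, struct list_head *lru,
 *				     struct list_head *evict,
 *				     u64 size, unsigned alignment)
 *	{
 *		struct my_obj *obj, *next;
 *		bool found = false;
 *
 *		INIT_LIST_HEAD(evict);
 *		drm_mm_init_scan(mm, size, alignment, 0);
 *
 *		list_for_each_entry(obj, lru, lru_link) {
 *			list_add(&obj->scan_link, evict);
 *			if (drm_mm_scan_add_block(&obj->node)) {
 *				found = true;
 *				break;
 *			}
 *		}
 *
 *		list_for_each_entry_safe(obj, next, evict, scan_link) {
 *			if (!drm_mm_scan_remove_block(&obj->node))
 *				list_del_init(&obj->scan_link);
 *		}
 *
 *		return found;
 *	}
 */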
639 
640 /**
641  * drm_mm_init_scan - initialize lru scanning
642  * @mm: drm_mm to scan
643  * @size: size of the allocation
644  * @alignment: alignment of the allocation
645  * @color: opaque tag value to use for the allocation
646  *
647  * This simply sets up the scanning routines with the parameters for the desired
648  * hole. Note that there's no need to specify allocation flags, since they only
649  * change the place a node is allocated from within a suitable hole.
650  *
651  * Warning:
652  * As long as the scan list is non-empty, no other operations than
653  * adding/removing nodes to/from the scan list are allowed.
654  */
655 void drm_mm_init_scan(struct drm_mm *mm,
656 		      u64 size,
657 		      unsigned alignment,
658 		      unsigned long color)
659 {
660 	mm->scan_color = color;
661 	mm->scan_alignment = alignment;
662 	mm->scan_size = size;
663 	mm->scanned_blocks = 0;
664 	mm->scan_hit_start = 0;
665 	mm->scan_hit_end = 0;
666 	mm->scan_check_range = 0;
667 	mm->prev_scanned_node = NULL;
668 }
669 EXPORT_SYMBOL(drm_mm_init_scan);
670 
671 /**
672  * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
673  * @mm: drm_mm to scan
674  * @size: size of the allocation
675  * @alignment: alignment of the allocation
676  * @color: opaque tag value to use for the allocation
677  * @start: start of the allowed range for the allocation
678  * @end: end of the allowed range for the allocation
679  *
680  * This simply sets up the scanning routines with the parameters for the desired
681  * hole. Note that there's no need to specify allocation flags, since they only
682  * change the place a node is allocated from within a suitable hole.
683  *
684  * Warning:
685  * As long as the scan list is non-empty, no other operations than
686  * adding/removing nodes to/from the scan list are allowed.
687  */
688 void drm_mm_init_scan_with_range(struct drm_mm *mm,
689 				 u64 size,
690 				 unsigned alignment,
691 				 unsigned long color,
692 				 u64 start,
693 				 u64 end)
694 {
695 	mm->scan_color = color;
696 	mm->scan_alignment = alignment;
697 	mm->scan_size = size;
698 	mm->scanned_blocks = 0;
699 	mm->scan_hit_start = 0;
700 	mm->scan_hit_end = 0;
701 	mm->scan_start = start;
702 	mm->scan_end = end;
703 	mm->scan_check_range = 1;
704 	mm->prev_scanned_node = NULL;
705 }
706 EXPORT_SYMBOL(drm_mm_init_scan_with_range);
707 
708 /**
709  * drm_mm_scan_add_block - add a node to the scan list
710  * @node: drm_mm_node to add
711  *
712  * Add a node to the scan list that might be freed to make space for the desired
713  * hole.
714  *
715  * Returns:
716  * True if a hole has been found, false otherwise.
717  */
718 bool drm_mm_scan_add_block(struct drm_mm_node *node)
719 {
720 	struct drm_mm *mm = node->mm;
721 	struct drm_mm_node *prev_node;
722 	u64 hole_start, hole_end;
723 	u64 adj_start, adj_end;
724 
725 	mm->scanned_blocks++;
726 
727 	BUG_ON(node->scanned_block);
728 	node->scanned_block = 1;
729 
730 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
731 			       node_list);
732 
733 	node->scanned_preceeds_hole = prev_node->hole_follows;
734 	prev_node->hole_follows = 1;
735 	list_del(&node->node_list);
736 	node->node_list.prev = &prev_node->node_list;
737 	node->node_list.next = &mm->prev_scanned_node->node_list;
738 	mm->prev_scanned_node = node;
739 
740 	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
741 	adj_end = hole_end = drm_mm_hole_node_end(prev_node);
742 
743 	if (mm->scan_check_range) {
744 		if (adj_start < mm->scan_start)
745 			adj_start = mm->scan_start;
746 		if (adj_end > mm->scan_end)
747 			adj_end = mm->scan_end;
748 	}
749 
750 	if (mm->color_adjust)
751 		mm->color_adjust(prev_node, mm->scan_color,
752 				 &adj_start, &adj_end);
753 
754 	if (check_free_hole(adj_start, adj_end,
755 			    mm->scan_size, mm->scan_alignment)) {
756 		mm->scan_hit_start = hole_start;
757 		mm->scan_hit_end = hole_end;
758 		return true;
759 	}
760 
761 	return false;
762 }
763 EXPORT_SYMBOL(drm_mm_scan_add_block);
764 
765 /**
766  * drm_mm_scan_remove_block - remove a node from the scan list
767  * @node: drm_mm_node to remove
768  *
769  * Nodes _must_ be removed in the exact same order from the scan list as they
770  * have been added, otherwise the internal state of the memory manager will be
771  * corrupted.
772  *
773  * When the scan list is empty, the selected memory nodes can be freed. An
774  * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
775  * return the just freed block (because it's at the top of the hole_stack list).
776  *
777  * Returns:
778  * True if this block should be evicted, false otherwise. Will always
779  * return false when no hole has been found.
780  */
781 bool drm_mm_scan_remove_block(struct drm_mm_node *node)
782 {
783 	struct drm_mm *mm = node->mm;
784 	struct drm_mm_node *prev_node;
785 
786 	mm->scanned_blocks--;
787 
788 	BUG_ON(!node->scanned_block);
789 	node->scanned_block = 0;
790 
791 	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
792 			       node_list);
793 
794 	prev_node->hole_follows = node->scanned_preceeds_hole;
795 	list_add(&node->node_list, &prev_node->node_list);
796 
797 	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
798 		 node->start < mm->scan_hit_end);
799 }
800 EXPORT_SYMBOL(drm_mm_scan_remove_block);
801 
802 /**
803  * drm_mm_clean - checks whether an allocator is clean
804  * @mm: drm_mm allocator to check
805  *
806  * Returns:
807  * True if the allocator is completely free, false if there's still a node
808  * allocated in it.
809  */
810 bool drm_mm_clean(struct drm_mm *mm)
811 {
812 	struct list_head *head = &mm->head_node.node_list;
813 
814 	return (head->next->next == head);
815 }
816 EXPORT_SYMBOL(drm_mm_clean);
817 
818 /**
819  * drm_mm_init - initialize a drm-mm allocator
820  * @mm: the drm_mm structure to initialize
821  * @start: start of the range managed by @mm
822  * @size: size of the range managed by @mm
823  *
824  * Note that @mm must be cleared to 0 before calling this function.
825  */
826 void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
827 {
828 	INIT_LIST_HEAD(&mm->hole_stack);
829 	mm->scanned_blocks = 0;
830 
831 	/* Clever trick to avoid a special case in the free hole tracking. */
832 	INIT_LIST_HEAD(&mm->head_node.node_list);
833 	mm->head_node.allocated = 0;
834 	mm->head_node.hole_follows = 1;
835 	mm->head_node.scanned_block = 0;
836 	mm->head_node.scanned_prev_free = 0;
837 	mm->head_node.scanned_next_free = 0;
838 	mm->head_node.mm = mm;
839 	mm->head_node.start = start + size;
840 	mm->head_node.size = start - mm->head_node.start;
841 	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
842 
843 	mm->color_adjust = NULL;
844 }
845 EXPORT_SYMBOL(drm_mm_init);
846 
847 /**
848  * drm_mm_takedown - clean up a drm_mm allocator
849  * @mm: drm_mm allocator to clean up
850  *
851  * Note that it is a bug to call this function on an allocator which is not
852  * clean.
853  */
854 void drm_mm_takedown(struct drm_mm *mm)
855 {
856 	if (WARN(!list_empty(&mm->head_node.node_list),
857 		 "Memory manager not clean during takedown.\n"))
858 		show_leaks(mm);
859 
860 }
861 EXPORT_SYMBOL(drm_mm_takedown);
862 
863 static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
864 				     const char *prefix)
865 {
866 	u64 hole_start, hole_end, hole_size;
867 
868 	if (entry->hole_follows) {
869 		hole_start = drm_mm_hole_node_start(entry);
870 		hole_end = drm_mm_hole_node_end(entry);
871 		hole_size = hole_end - hole_start;
872 		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
873 			 hole_end, hole_size);
874 		return hole_size;
875 	}
876 
877 	return 0;
878 }
879 
880 /**
881  * drm_mm_debug_table - dump allocator state to dmesg
882  * @mm: drm_mm allocator to dump
883  * @prefix: prefix to use for dumping to dmesg
884  */
885 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
886 {
887 	struct drm_mm_node *entry;
888 	u64 total_used = 0, total_free = 0, total = 0;
889 
890 	total_free += drm_mm_debug_hole(&mm->head_node, prefix);
891 
892 	drm_mm_for_each_node(entry, mm) {
893 		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
894 			 entry->start + entry->size, entry->size);
895 		total_used += entry->size;
896 		total_free += drm_mm_debug_hole(entry, prefix);
897 	}
898 	total = total_free + total_used;
899 
900 	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
901 		 total_used, total_free);
902 }
903 EXPORT_SYMBOL(drm_mm_debug_table);
904 
905 #if defined(CONFIG_DEBUG_FS)
906 static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
907 {
908 	u64 hole_start, hole_end, hole_size;
909 
910 	if (entry->hole_follows) {
911 		hole_start = drm_mm_hole_node_start(entry);
912 		hole_end = drm_mm_hole_node_end(entry);
913 		hole_size = hole_end - hole_start;
914 		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
915 			   hole_end, hole_size);
916 		return hole_size;
917 	}
918 
919 	return 0;
920 }
921 
922 /**
923  * drm_mm_dump_table - dump allocator state to a seq_file
924  * @m: seq_file to dump to
925  * @mm: drm_mm allocator to dump
926  */
927 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
928 {
929 	struct drm_mm_node *entry;
930 	u64 total_used = 0, total_free = 0, total = 0;
931 
932 	total_free += drm_mm_dump_hole(m, &mm->head_node);
933 
934 	drm_mm_for_each_node(entry, mm) {
935 		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
936 			   entry->start + entry->size, entry->size);
937 		total_used += entry->size;
938 		total_free += drm_mm_dump_hole(m, entry);
939 	}
940 	total = total_free + total_used;
941 
942 	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
943 		   total_used, total_free);
944 	return 0;
945 }
946 EXPORT_SYMBOL(drm_mm_dump_table);
947 #endif
948