/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which makes it easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable data
 * structures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep performance cliff anyway, this is not a real concern.
 * Removing a node is again O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging. A minimal usage sketch
 * follows this comment.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */

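/*
 * Illustrative lifecycle sketch, using only functions from this file. The
 * range and sizes are made-up values, and error handling is elided:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};	// nodes must be cleared to 0
 *
 *	drm_mm_init(&mm, 0, 8 << 20);	// manage [0, 8 MiB)
 *	if (drm_mm_insert_node_in_range_generic(&mm, &node, 4096, 0, 0,
 *						0, 8 << 20,
 *						DRM_MM_SEARCH_DEFAULT,
 *						DRM_MM_CREATE_DEFAULT) == 0) {
 *		// node.start now names a 4 KiB range inside the manager
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);
 */
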
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						u64 alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

#ifdef __linux__
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)
#else
/*
 * OpenBSD lacks the generic interval tree, so fall back to an O(N) walk of
 * the node list to find the first node overlapping [start, last].
 */
struct drm_mm_node *
drm_mm_interval_tree_iter_first(struct rb_root *rb, u64 start, u64 last)
{
	struct drm_mm *mm = container_of(rb, typeof(*mm), interval_tree);
	struct drm_mm_node *node;

	drm_mm_for_each_node(node, mm) {
		if (LAST(node) >= start && START(node) <= last)
			return node;
	}
	return NULL;
}
#endif

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
					       start, last);
}
EXPORT_SYMBOL(__drm_mm_interval_first);

#ifdef __linux__
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start)
			link = &parent->rb.rb_left;
		else
			link = &parent->rb.rb_right;
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented(&node->rb,
			    &mm->interval_tree,
			    &drm_mm_interval_tree_augment);
}
#endif

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, u64 alignment,
				 unsigned long color,
				 u64 range_start, u64 range_end,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	adj_start = max(adj_start, range_start);
	adj_end = min(adj_end, range_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 rem;

		div64_u64_rem(adj_start, alignment, &rem);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	/* The allocation consumes the hole from the front. */
	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	list_add(&node->node_list, &hole_node->node_list);

#ifdef __linux__
	drm_mm_interval_tree_add_node(hole_node, node);
#endif

	DRM_MM_BUG_ON(node->start < range_start);
	DRM_MM_BUG_ON(node->start < adj_start);
	DRM_MM_BUG_ON(node->start + node->size > adj_end);
	DRM_MM_BUG_ON(node->start + node->size > range_end);

	/* Any space left after the node becomes a new hole. */
	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
					       node->start, ~(u64)0);
	if (hole) {
		if (hole->start < end)
			return -ENOSPC;
	} else {
		hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
	}

	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
	if (!drm_mm_hole_follows(hole))
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = __drm_mm_hole_node_end(hole);

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;
	node->allocated = 1;

	list_add(&node->node_list, &hole->node_list);

#ifdef __linux__
	drm_mm_interval_tree_add_node(hole, node);
#endif

	if (node->start == hole_start) {
		hole->hole_follows = 0;
		list_del(&hole->hole_stack);
	}

	node->hole_follows = 0;
	if (end != hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}

	save_stack(node);

	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
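
/*
 * Illustrative sketch of taking over a firmware framebuffer; fb_base and
 * fb_size are hypothetical values read from the inherited configuration:
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *	node->start = fb_base;
 *	node->size = fb_size;
 *	if (drm_mm_reserve_node(&mm, node)) {
 *		// range already handed out, firmware scanout not reusable
 *		kfree(node);
 *	}
 */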

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, u64 alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	if (WARN_ON(size == 0))
		return -EINVAL;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node,
			     size, alignment, color,
			     start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
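
/*
 * Illustrative call with made-up numbers: a 64 KiB, 4 KiB-aligned allocation
 * placed top-down within the first 16 MiB, preferring the smallest suitable
 * hole; DRM_MM_SEARCH_BELOW pairs the search order with top-down creation:
 *
 *	ret = drm_mm_insert_node_in_range_generic(&mm, &node,
 *						  64 << 10, 4096, 0,
 *						  0, 16 << 20,
 *						  DRM_MM_SEARCH_BEST |
 *						  DRM_MM_SEARCH_BELOW,
 *						  DRM_MM_CREATE_TOP);
 */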

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (drm_mm_hole_follows(node)) {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
			      __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else {
		DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
			      __drm_mm_hole_node_end(node));
	}

	/* The freed range merges into the hole following prev_node. */
	if (!drm_mm_hole_follows(prev_node)) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

#ifdef __linux__
	drm_mm_interval_tree_remove(node, &mm->interval_tree);
#endif
	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 rem;

		/* Round start up to the requested alignment. */
		div64_u64_rem(start, alignment, &rem);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}
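
/*
 * Worked example with made-up numbers: for start = 0x1003 and
 * alignment = 0x1000, div64_u64_rem() yields rem = 3, so start is bumped by
 * 0x1000 - 3 = 0xffd to 0x2000 before checking that size still fits below
 * end.
 */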

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							u64 alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	DRM_MM_BUG_ON(mm->scan_active);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		adj_start = max(adj_start, start);
		adj_end = min(adj_end, end);

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	DRM_MM_BUG_ON(!old->allocated);

	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
#ifdef __linux__
	rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
#endif
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;
	new->__subtree_last = old->__subtree_last;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
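
/*
 * Sketch (hypothetical driver code): handing an allocation over from a
 * temporary node to the node embedded in a freshly created object:
 *
 *	drm_mm_replace_node(&tmp, &obj->mm_node);
 *	// tmp is now unallocated and may be reused or freed
 */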

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we would evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of the full loop follows this
 * comment.
 */
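
/*
 * Illustrative eviction loop, assuming a driver-side LRU list plus
 * hypothetical obj_to_node() and evict() helpers. list_add() prepends, so
 * walking evict_list afterwards visits the blocks in exactly the reverse
 * order in which they were added, as required:
 *
 *	struct drm_mm_scan scan;
 *	LIST_HEAD(evict_list);
 *	struct obj *obj, *tmp;
 *
 *	drm_mm_scan_init_with_range(&scan, &mm, size, 0, 0, start, end, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, obj_to_node(obj)))
 *			break;	// found a hole
 *	}
 *
 *	list_for_each_entry_safe(obj, tmp, &evict_list, evict_link) {
 *		if (drm_mm_scan_remove_block(&scan, obj_to_node(obj)))
 *			evict(obj);	// frees the node
 *	}
 */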

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @flags: flags to specify how the allocation will be performed afterwards
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 unsigned int flags)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->flags = flags;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->flags == DRM_MM_CREATE_TOP)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->flags != DRM_MM_CREATE_TOP)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range_generic() or one of the
 * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
 * the just freed block (because it's at the top of the hole stack).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scan_active = 0;

	/* Clever trick to avoid a special case in the free hole tracking:
	 * the head_node gets a negative size so that start + size wraps
	 * around to the managed range's start, making the entire range one
	 * hole that follows the head_node.
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = 0;
	mm->head_node.hole_follows = 1;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->interval_tree = RB_ROOT;

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
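
/*
 * Example with made-up numbers: drm_mm_init(&mm, 0x100000, 0x40000) manages
 * [0x100000, 0x140000). head_node.start becomes 0x140000 and head_node.size
 * becomes -0x40000 (mod 2^64), so head_node.start + head_node.size wraps to
 * 0x100000 and the whole range appears as a single hole.
 */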

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
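
/*
 * With made-up numbers, the resulting dump looks like:
 *
 *	0x0000000000000000-0x0000000000001000: 4096: used
 *	0x0000000000001000-0x0000000000002000: 4096: free
 *	total: 8192, used 4096 free 4096
 */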