xref: /dragonfly/sys/dev/drm/drm_mm.c (revision 52ffb7ff)
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

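/*
 * Allocate a struct drm_mm_node, falling back to the pre-allocated
 * unused_nodes cache (filled by drm_mm_pre_get()) when a fresh allocation
 * fails, e.g. in atomic context.
 */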
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

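/**
 * drm_mm_pre_get - pre-allocate drm memory manager nodes
 * @mm: drm_mm to pre-allocate nodes for
 *
 * Tops up the cache of unused nodes to MM_UNUSED_TARGET entries so that
 * later atomic allocations can be served from it.
 *
 * Returns:
 * 0 on success, -ENOMEM if the cache could not be filled to at least two
 * entries.
 */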
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of drm_mm
 * is that it's in the DRM core. Which means that it's easier to extend for
 * some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff it is not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */

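/*
 * A minimal usage sketch (kept as a comment; the object type and helper
 * here are illustrative assumptions, not part of this file). A driver
 * embeds a node in its buffer object, zeroes it, and asks the allocator
 * for space; DRM_MM_SEARCH_DEFAULT/DRM_MM_CREATE_DEFAULT are the plain
 * bottom-up defaults:
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;
 *	};
 *
 *	static int my_buffer_place(struct drm_mm *mm, struct my_buffer *buf,
 *				   unsigned long size)
 *	{
 *		memset(&buf->node, 0, sizeof(buf->node));
 *		return drm_mm_insert_node_generic(mm, &buf->node, size, 0, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 *
 * On teardown the driver calls drm_mm_remove_node(&buf->node) and can then
 * reuse or free the embedding structure.
 */
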
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= tmp;
			else
				adj_start += alignment - tmp;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set up
 * before the range allocator can be set up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	unsigned long end = node->start + node->size;
	unsigned long hole_start;
	unsigned long hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

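/*
 * Illustrative sketch of such a takeover (comment only; fb_base and
 * fb_size are assumed to come from the inherited firmware configuration
 * and err_range_busy is a hypothetical error path, none of which are
 * defined in this file):
 *
 *	struct drm_mm_node *fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL);
 *
 *	if (!fb_node)
 *		return -ENOMEM;
 *	fb_node->start = fb_base;
 *	fb_node->size = fb_size;
 *	ret = drm_mm_reserve_node(mm, fb_node);
 *	if (ret)
 *		goto err_range_busy;	// -ENOSPC: range already in use
 */
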
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, DRM_MM_CREATE_DEFAULT);

	return node;
}

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= tmp;
			else
				adj_start += alignment - tmp;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end, DRM_MM_CREATE_DEFAULT);

	return node;
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment,
					unsigned long color,
					unsigned long start, unsigned long end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

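/*
 * Illustrative sketch (comment only): constraining an allocation to the
 * low 256 MiB of the managed range, e.g. for buffers that must stay in a
 * CPU-mappable aperture; the concrete numbers are assumptions for the
 * example:
 *
 *	ret = drm_mm_insert_node_in_range_generic(mm, node, size,
 *						  PAGE_SIZE, 0,
 *						  0, 256UL << 20,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */
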
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}

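/*
 * Returns non-zero when a hole [start, end) can fit @size bytes at
 * alignment @alignment. Worked example: for a hole [0x1100, 0x3000), a
 * request of size 0x1000 with alignment 0x1000 rounds start up to 0x2000,
 * and 0x2000 + 0x1000 <= 0x3000, so the hole fits.
 */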
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      unsigned long size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		unsigned long hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		unsigned long hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there is a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack, which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */

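/*
 * Illustrative sketch of the protocol above (comment only; the object
 * type, its lru/scan_link fields, the lru_list and the evict() helper are
 * assumptions for the example, not part of this file):
 *
 *	struct my_obj {
 *		struct drm_mm_node node;
 *		struct list_head lru;
 *		struct list_head scan_link;
 *	};
 *
 *	drm_mm_init_scan(mm, size, alignment, color);
 *
 *	// Phase 1: feed LRU objects to the scan until a hole is found.
 *	list_for_each_entry(obj, &lru_list, lru) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			break;
 *	}
 *
 *	// Phase 2: because list_add() prepends, walking scan_list forward
 *	// undoes the additions in exactly the reverse order; blocks for
 *	// which drm_mm_scan_remove_block() returns true make up the hole
 *	// and must be evicted.
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			evict(obj);
 *		list_del(&obj->scan_link);
 *	}
 */
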
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	/* Only the sentinel head_node heads the list when nothing is
	 * allocated; the old head->next->next check wrongly also passed
	 * with a single node still on the list. */
	return list_empty(&mm->head_node.node_list);
}
EXPORT_SYMBOL(drm_mm_clean);

819 
820 /**
821  * drm_mm_init - initialize a drm-mm allocator
822  * @mm: the drm_mm structure to initialize
823  * @start: start of the range managed by @mm
824  * @size: end of the range managed by @mm
825  *
826  * Note that @mm must be cleared to 0 before calling this function.
827  */
void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
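	/*
	 * The sentinel starts at the end of the managed range with a size
	 * that wraps around to the start, so the single initial hole
	 * [start, start + size) "follows" it just like a hole after any
	 * regular node.
	 */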
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

850 
851 /**
852  * drm_mm_takedown - clean up a drm_mm allocator
853  * @mm: drm_mm allocator to clean up
854  *
855  * Note that it is a bug to call this function on an allocator which is not
856  * clean.
857  */
858 void drm_mm_takedown(struct drm_mm * mm)
859 {
860 	WARN(!list_empty(&mm->head_node.node_list),
861 	     "Memory manager not clean during takedown.\n");
862 }
863 EXPORT_SYMBOL(drm_mm_takedown);
864 
static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
				       const char *prefix)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif