/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
#include <drm/drm_print.h>

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
/**
 * enum drm_mm_insert_mode - control search and allocation behaviour
 *
 * The &struct drm_mm range manager supports finding a suitable hole using
 * a number of search trees. These trees are organised by size, by address
 * and in most recent eviction order. This allows the user to find either
 * the smallest hole to reuse, the lowest or highest address to reuse, or
 * simply reuse the most recent eviction that fits. When allocating the
 * &drm_mm_node from within the hole, the &drm_mm_insert_mode also dictates
 * whether to allocate the lowest matching address or the highest.
 */
enum drm_mm_insert_mode {
	/**
	 * @DRM_MM_INSERT_BEST:
	 *
	 * Search for the smallest hole (within the search range) that fits
	 * the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_BEST = 0,

	/**
	 * @DRM_MM_INSERT_LOW:
	 *
	 * Search for the lowest hole (address closest to 0, within the search
	 * range) that fits the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_LOW,

	/**
	 * @DRM_MM_INSERT_HIGH:
	 *
	 * Search for the highest hole (address closest to U64_MAX, within the
	 * search range) that fits the desired node.
	 *
	 * Allocates the node from the *top* of the found hole. The specified
	 * alignment for the node is applied to the base of the node
	 * (&drm_mm_node.start).
	 */
	DRM_MM_INSERT_HIGH,

	/**
	 * @DRM_MM_INSERT_EVICT:
	 *
	 * Search for the most recently evicted hole (within the search range)
	 * that fits the desired node. This is appropriate for use immediately
	 * after performing an eviction scan (see drm_mm_scan_init()) and
	 * removing the selected nodes to form a hole.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_EVICT,
};
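
/*
 * Illustrative example (added sketch, not original kernel documentation):
 * picking an insert mode. Assumes an initialised @mm, a zeroed @node and a
 * placeholder size; error handling is elided.
 *
 *	Pack small, short-lived allocations towards address zero:
 *
 *		err = drm_mm_insert_node_in_range(mm, node, 4096, 0, 0,
 *						  0, U64_MAX,
 *						  DRM_MM_INSERT_LOW);
 *
 *	Keep large, long-lived allocations out of the contended low range:
 *
 *		err = drm_mm_insert_node_in_range(mm, node, 2ull << 20, 0, 0,
 *						  0, U64_MAX,
 *						  DRM_MM_INSERT_HIGH);
 */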

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT = 0,
	DRM_MM_SEARCH_BEST = 1 << 0,
	DRM_MM_SEARCH_BELOW = 1 << 1,
};

enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT = 0,
	DRM_MM_CREATE_TOP = 1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned allocated : 1;
	bool scanned_block : 1;
	unsigned long color;
	u64 start;
	u64 size;
	u64 __subtree_last;
	struct drm_mm *mm;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};
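
/*
 * Illustrative example (added sketch; "my_buffer" is a hypothetical driver
 * type): drm_mm never allocates nodes itself. Drivers embed a
 * &struct drm_mm_node into their own objects and must hand it over zeroed:
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;
 *		void *vaddr;
 *	};
 *
 *	struct my_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *
 * kzalloc() leaves buf->node cleared, which is exactly what the insert and
 * reserve APIs below require of a fresh node.
 */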

struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;

	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	unsigned long scan_active;
};
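
/*
 * Illustrative example (hypothetical callback added for this document):
 * @color_adjust lets a driver shrink a candidate hole depending on the
 * colors of its neighbouring nodes, e.g. to keep a guard page between
 * nodes of different colors. A minimal sketch might look like:
 *
 *	static void my_color_adjust(const struct drm_mm_node *node,
 *				    unsigned long color,
 *				    u64 *start, u64 *end)
 *	{
 *		if (node->allocated && node->color != color)
 *			*start += PAGE_SIZE;
 *
 *		node = list_next_entry(node, node_list);
 *		if (node->allocated && node->color != color)
 *			*end -= PAGE_SIZE;
 *	}
 *
 * The callback is handed the node preceding the hole; the node following
 * the hole is its successor on the node_list.
 */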

struct drm_mm_scan {
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	unsigned int flags;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function.
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_follows;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole
 * indeed follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
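
/*
 * Illustrative example (added sketch): dumping every allocated node. Only
 * inspection is allowed here, as the iterator is not safe against removal:
 *
 *	struct drm_mm_node *entry;
 *
 *	drm_mm_for_each_node(entry, mm)
 *		pr_info("node [%llx + %llx]\n", entry->start, entry->size);
 */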

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @next: drm_mm_node structure to store the next step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), so it is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: ulong variable to assign the hole start to on each iteration
 * @hole_end: ulong variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @entry
 * is used internally and will not reflect a real drm_mm_node for the very
 * first hole. Hence users of this iterator must not access @entry.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
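
/*
 * Illustrative example (added sketch): totalling the free space for a
 * driver-specific debug dumper. hole_start and hole_end receive the bounds
 * of each hole; @entry itself must not be dereferenced:
 *
 *	struct drm_mm_node *entry;
 *	u64 hole_start, hole_end, free = 0;
 *
 *	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
 *		free += hole_end - hole_start;
 */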

/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					u64 alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @mode: fine-tune the search and allocation behaviour
 *
 * This maps the requested &enum drm_mm_insert_mode onto the legacy search
 * and allocator flags and forwards to drm_mm_insert_node_in_range_generic().
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      u64 alignment,
					      unsigned long color,
					      u64 start,
					      u64 end,
					      enum drm_mm_insert_mode mode)
{
	enum drm_mm_search_flags sflags;
	enum drm_mm_allocator_flags aflags;

	switch (mode) {
	case DRM_MM_INSERT_BEST:
		sflags = DRM_MM_SEARCH_BEST;
		aflags = DRM_MM_CREATE_DEFAULT;
		break;
	case DRM_MM_INSERT_HIGH:
		/* Highest hole, allocated from its top; see DRM_MM_TOPDOWN. */
		sflags = DRM_MM_SEARCH_BELOW;
		aflags = DRM_MM_CREATE_TOP;
		break;
	case DRM_MM_INSERT_LOW:
	case DRM_MM_INSERT_EVICT:
	default:
		sflags = DRM_MM_SEARCH_DEFAULT;
		aflags = DRM_MM_CREATE_DEFAULT;
		break;
	}
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   color, start, end,
						   sflags, aflags);
}
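
/*
 * Illustrative usage (added sketch): confining an allocation to the low
 * 4 GiB of the managed range, e.g. for objects that must stay addressable
 * with 32bit offsets:
 *
 *	err = drm_mm_insert_node_in_range(mm, node, size, 0, 0,
 *					  0, 1ull << 32,
 *					  DRM_MM_INSERT_BEST);
 *
 * A return value of -ENOSPC means no suitable hole below 4 GiB was found,
 * at which point a driver would typically start an eviction scan.
 */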

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_search_flags sflags,
			   enum drm_mm_allocator_flags aflags)
{
	return drm_mm_insert_node_in_range_generic(mm, node,
						   size, alignment, color,
						   0, U64_MAX,
						   sflags, aflags);
}

/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size)
{
	return drm_mm_insert_node_generic(mm, node,
					  size, 0, 0,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}
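
/*
 * Illustrative example (added sketch) of the minimal allocation lifecycle;
 * the range and size values are placeholders:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *
 *	drm_mm_init(&mm, 0, 1ull << 30);
 *	err = drm_mm_insert_node(&mm, &node, 4096);
 *
 * Once the allocation is no longer needed, the node must be removed again
 * before the manager is torn down:
 *
 *	drm_mm_remove_node(&node);
 *	drm_mm_takedown(&mm);
 */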

#if 0
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     u64 alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node,
					  size, alignment, 0,
					  flags, DRM_MM_CREATE_DEFAULT);
}
#endif

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
 * over the special _unallocated_ &drm_mm.head_node, and may even continue
 * indefinitely.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__->start < (end__);					\
	     node__ = list_next_entry(node__, node_list))
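
/*
 * Illustrative example (added sketch): testing whether any allocation
 * overlaps [start, end), with both bounds assumed to lie inside the range
 * handed to drm_mm_init():
 *
 *	struct drm_mm_node *node;
 *	bool busy = false;
 *
 *	drm_mm_for_each_node_in_range(node, mm, start, end)
 *		busy = true;
 */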

void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 unsigned int flags);

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @flags: flags to specify how the allocation will be performed afterwards
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    unsigned int flags)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX,
				    flags);
}

bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);

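/*
 * Illustrative example (added sketch) of the eviction protocol built from
 * the scan helpers above, assuming a driver-private LRU list of objects
 * that embed a &struct drm_mm_node; "lru", "obj", "evict_link" and evict()
 * are hypothetical driver constructs:
 *
 *	LIST_HEAD(eviction_list);
 *
 *	drm_mm_scan_init(&scan, mm, size, 0, 0, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &eviction_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &eviction_list, evict_link) {
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			evict(obj);
 *	}
 *
 * Every block added to the scan must be removed again before the scan state
 * is reused; prepending each scanned object to the eviction list makes the
 * removal pass walk the blocks in reverse order of addition. Only blocks
 * for which drm_mm_scan_remove_block() returns true overlap the selected
 * hole and must actually be evicted to create it.
 */
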
void drm_mm_print(struct drm_mm *mm, struct drm_printer *p);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm);
#endif

#endif