1 /* $NetBSD: drm_mm.c,v 1.20 2022/09/01 11:48:59 riastradh Exp $ */
2
3 /**************************************************************************
4 *
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
6 * Copyright 2016 Intel Corporation
7 * All Rights Reserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the
11 * "Software"), to deal in the Software without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sub license, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the
18 * next paragraph) shall be included in all copies or substantial portions
19 * of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
24 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
25 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
26 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
27 * USE OR OTHER DEALINGS IN THE SOFTWARE.
28 *
29 *
30 **************************************************************************/
31
32 /*
33 * Generic simple memory manager implementation. Intended to be used as a base
34 * class implementation for more advanced memory managers.
35 *
36 * Note that the algorithm used is quite simple and there might be substantial
37 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
40 *
41 * Aligned allocations can also see improvement.
42 *
43 * Authors:
44 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
45 */
46
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: drm_mm.c,v 1.20 2022/09/01 11:48:59 riastradh Exp $");
49
50 #include <linux/export.h>
51 #include <linux/interval_tree_generic.h>
52 #include <linux/seq_file.h>
53 #include <linux/slab.h>
54 #include <linux/stacktrace.h>
55
56 #include <drm/drm_mm.h>
57
58 /**
59 * DOC: Overview
60 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special-purpose needs of GPUs.
65 *
66 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
67 * Drivers are free to embed either of them into their own suitable
68 * datastructures. drm_mm itself will not do any memory allocations of its own,
69 * so if drivers choose not to embed nodes they need to still allocate them
70 * themselves.
71 *
72 * The range allocator also supports reservation of preallocated blocks. This is
73 * useful for taking over initial mode setting configurations from the firmware,
74 * where an object needs to be created which exactly matches the firmware's
75 * scanout target. As long as the range is still free it can be inserted anytime
76 * after the allocator is initialized, which helps with avoiding looped
77 * dependencies in the driver load sequence.
78 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway it is not a real concern. Removing a node again
 * is O(1).
85 *
86 * drm_mm supports a few features: Alignment and range restrictions can be
87 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
88 * opaque unsigned long) which in conjunction with a driver callback can be used
89 * to implement sophisticated placement restrictions. The i915 DRM driver uses
90 * this to implement guard pages between incompatible caching domains in the
91 * graphics TT.
92 *
93 * Two behaviors are supported for searching and allocating: bottom-up and
94 * top-down. The default is bottom-up. Top-down allocation can be used if the
95 * memory area has different restrictions, or just to reduce fragmentation.
96 *
97 * Finally iteration helpers to walk all nodes and all holes are provided as are
98 * some basic allocator dumpers for debugging.
99 *
 * Note that this range allocator is not thread-safe; drivers need to protect
101 * modifications with their own locking. The idea behind this is that for a full
102 * memory manager additional data needs to be protected anyway, hence internal
103 * locking would be fully redundant.
104 */
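
/*
 * A minimal usage sketch (illustrative only, not taken from any driver; the
 * 1 MiB range and 4 KiB allocation size are made-up values):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node;
 *	int err;
 *
 *	memset(&mm, 0, sizeof(mm));	(drm_mm_init requires @mm cleared to 0)
 *	memset(&node, 0, sizeof(node));	(likewise for preallocated nodes)
 *	drm_mm_init(&mm, 0, 1 << 20);
 *	err = drm_mm_insert_node_in_range(&mm, &node, 4096, 0, 0,
 *					  0, 1 << 20, DRM_MM_INSERT_BEST);
 *	if (err == 0)
 *		drm_mm_remove_node(&node);
 *	drm_mm_takedown(&mm);
 */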
105
106 #ifdef CONFIG_DRM_DEBUG_MM
107 #include <linux/stackdepot.h>
108
109 #define STACKDEPTH 32
110 #define BUFSZ 4096
111
static noinline void save_stack(struct drm_mm_node *node)
113 {
114 unsigned long entries[STACKDEPTH];
115 unsigned int n;
116
117 n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
118
119 /* May be called under spinlock, so avoid sleeping */
120 node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
121 }
122
static void show_leaks(struct drm_mm *mm)
124 {
125 struct drm_mm_node *node;
126 unsigned long *entries;
127 unsigned int nr_entries;
128 char *buf;
129
130 buf = kmalloc(BUFSZ, GFP_KERNEL);
131 if (!buf)
132 return;
133
134 list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
135 if (!node->stack) {
136 DRM_ERROR("node [%08"PRIx64" + %08"PRIx64"]: unknown owner\n",
137 node->start, node->size);
138 continue;
139 }
140
141 nr_entries = stack_depot_fetch(node->stack, &entries);
142 stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
143 DRM_ERROR("node [%08"PRIx64" + %08"PRIx64"]: inserted at\n%s",
144 node->start, node->size, buf);
145 }
146
147 kfree(buf);
148 }
149
150 #undef STACKDEPTH
151 #undef BUFSZ
152 #else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
155 #endif
156
157 #define START(node) ((node)->start)
158 #define LAST(node) ((node)->start + (node)->size - 1)
159
160 #ifndef __NetBSD__
INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
162 u64, __subtree_last,
163 START, LAST, static inline, drm_mm_interval_tree)
164 #endif
165
166 struct drm_mm_node *
167 __drm_mm_interval_first(const struct drm_mm *mm_const, u64 start, u64 last)
168 {
169 struct drm_mm *mm = __UNCONST(mm_const);
170 #ifdef __NetBSD__
171 struct drm_mm_node *node;
172 list_for_each_entry(node, &mm->head_node.node_list, node_list) {
173 if (start <= LAST(node) && START(node) <= last)
174 return node;
175 }
176 return &mm->head_node;
177 #else
178 return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
179 start, last) ?: (struct drm_mm_node *)&mm->head_node;
180 #endif
181 }
182 EXPORT_SYMBOL(__drm_mm_interval_first);
183
184 #ifndef __NetBSD__
static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
186 struct drm_mm_node *node)
187 {
188 struct drm_mm *mm = hole_node->mm;
189 struct rb_node **link, *rb;
190 struct drm_mm_node *parent;
191 bool leftmost;
192
193 node->__subtree_last = LAST(node);
194
195 if (drm_mm_node_allocated(hole_node)) {
196 rb = &hole_node->rb;
197 while (rb) {
198 parent = rb_entry(rb, struct drm_mm_node, rb);
199 if (parent->__subtree_last >= node->__subtree_last)
200 break;
201
202 parent->__subtree_last = node->__subtree_last;
203 rb = rb_parent(rb);
204 }
205
206 rb = &hole_node->rb;
207 link = &hole_node->rb.rb_right;
208 leftmost = false;
209 } else {
210 rb = NULL;
211 link = &mm->interval_tree.rb_root.rb_node;
212 leftmost = true;
213 }
214
215 while (*link) {
216 rb = *link;
217 parent = rb_entry(rb, struct drm_mm_node, rb);
218 if (parent->__subtree_last < node->__subtree_last)
219 parent->__subtree_last = node->__subtree_last;
220 if (node->start < parent->start) {
221 link = &parent->rb.rb_left;
222 } else {
223 link = &parent->rb.rb_right;
224 leftmost = false;
225 }
226 }
227
228 rb_link_node(&node->rb, rb, link);
229 rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
230 &drm_mm_interval_tree_augment);
231 }
232 #endif
233
234 #ifdef __NetBSD__
235
236 static int
compare_hole_addrs(void *cookie, const void *va, const void *vb)
238 {
239 const struct drm_mm_node *a = va, *b = vb;
240 const u64 aa = __drm_mm_hole_node_start(a);
241 const u64 ba = __drm_mm_hole_node_start(b);
242
243 KASSERTMSG((aa == ba ||
244 aa + a->hole_size <= ba ||
245 aa >= ba + b->hole_size),
246 "overlapping holes: [0x%"PRIx64", 0x%"PRIx64"),"
247 " [0x%"PRIx64", 0x%"PRIx64")",
248 aa, aa + a->hole_size,
249 ba, ba + b->hole_size);
250 if (aa < ba)
251 return -1;
252 if (aa > ba)
253 return +1;
254 return 0;
255 }
256
257 static int
compare_hole_addr_key(void *cookie, const void *vn, const void *vk)
259 {
260 const struct drm_mm_node *n = vn;
261 const u64 a = __drm_mm_hole_node_start(n);
262 const u64 *k = vk;
263
264 if (a < *k)
265 return -1;
266 if (a + n->hole_size >= *k) /* allows range lookups */
267 return +1;
268 return 0;
269 }
270
271 static const rb_tree_ops_t holes_addr_rb_ops = {
272 .rbto_compare_nodes = compare_hole_addrs,
273 .rbto_compare_key = compare_hole_addr_key,
274 .rbto_node_offset = offsetof(struct drm_mm_node, rb_hole_addr),
275 };
276
277 #else
278
279 #define RB_INSERT(root, member, expr) do { \
280 struct rb_node **link = &root.rb_node, *rb = NULL; \
281 u64 x = expr(node); \
282 while (*link) { \
283 rb = *link; \
284 if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
285 link = &rb->rb_left; \
286 else \
287 link = &rb->rb_right; \
288 } \
289 rb_link_node(&node->member, rb, link); \
290 rb_insert_color(&node->member, &root); \
291 } while (0)
292
293 #endif
294
295 #define HOLE_SIZE(NODE) ((NODE)->hole_size)
296 #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
297
static u64 rb_to_hole_size(struct rb_node *rb)
299 {
300 return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
301 }
302
303 static int
compare_hole_sizes(void *cookie, const void *va, const void *vb)
305 {
306 const struct drm_mm_node *a = va, *b = vb;
307
308 if (a->hole_size > b->hole_size)
309 return -1;
310 if (a->hole_size < b->hole_size)
311 return +1;
312 return (a < b ? -1 : a > b ? +1 : 0);
313 }
314
315 static int
compare_hole_size_key(void *cookie, const void *vn, const void *vk)
317 {
318 const struct drm_mm_node *n = vn;
319 const u64 *k = vk;
320
321 if (n->hole_size > *k)
322 return -1;
323 if (n->hole_size < *k)
324 return +1;
325 return 0;
326 }
327
328 static const rb_tree_ops_t holes_size_rb_ops = {
329 .rbto_compare_nodes = compare_hole_sizes,
330 .rbto_compare_key = compare_hole_size_key,
331 .rbto_node_offset = offsetof(struct drm_mm_node, rb_hole_size),
332 };
333
static void insert_hole_size(struct rb_root_cached *root,
335 struct drm_mm_node *node)
336 {
337 #ifdef __NetBSD__
338 struct drm_mm_node *collision __diagused;
339 collision = rb_tree_insert_node(&root->rb_root.rbr_tree, node);
340 KASSERT(collision == node);
341 #else
342 struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
343 u64 x = node->hole_size;
344 bool first = true;
345
346 while (*link) {
347 rb = *link;
348 if (x > rb_to_hole_size(rb)) {
349 link = &rb->rb_left;
350 } else {
351 link = &rb->rb_right;
352 first = false;
353 }
354 }
355
356 rb_link_node(&node->rb_hole_size, rb, link);
357 rb_insert_color_cached(&node->rb_hole_size, root, first);
358 #endif
359 }
360
static void add_hole(struct drm_mm_node *node)
362 {
363 struct drm_mm *mm = node->mm;
364
365 node->hole_size =
366 __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
367 DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
368
369 insert_hole_size(&mm->holes_size, node);
370 #ifdef __NetBSD__
371 struct drm_mm_node *collision __diagused;
372 collision = rb_tree_insert_node(&mm->holes_addr.rbr_tree, node);
373 KASSERT(collision == node);
374 #else
375 RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
376 #endif
377
378 list_add(&node->hole_stack, &mm->hole_stack);
379 }
380
static void rm_hole(struct drm_mm_node *node)
382 {
383 DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
384
385 list_del(&node->hole_stack);
386 rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
387 rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
388 node->hole_size = 0;
389
390 DRM_MM_BUG_ON(drm_mm_hole_follows(node));
391 }
392
static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
394 {
395 return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
396 }
397
static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
399 {
400 return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
401 }
402
static inline u64 rb_hole_size(struct rb_node *rb)
404 {
405 return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
406 }
407
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
409 {
410 #ifdef __NetBSD__
411 struct drm_mm_node *best;
412
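	/*
	 * The size tree is kept in descending hole-size order (see
	 * compare_hole_sizes), so under that ordering "leq" yields the
	 * smallest hole that is still >= size, i.e. the best fit.
	 */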
413 best = rb_tree_find_node_leq(&mm->holes_size.rb_root.rbr_tree, &size);
414 KASSERT(best == NULL || size <= best->hole_size);
415
416 return best;
417 #else
418 struct rb_node *rb = mm->holes_size.rb_root.rb_node;
419 struct drm_mm_node *best = NULL;
420
421 do {
422 struct drm_mm_node *node =
423 rb_entry(rb, struct drm_mm_node, rb_hole_size);
424
425 if (size <= node->hole_size) {
426 best = node;
427 rb = rb->rb_right;
428 } else {
429 rb = rb->rb_left;
430 }
431 } while (rb);
432
433 return best;
434 #endif
435 }
436
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
438 {
439 #ifdef __NetBSD__
440 struct rb_node *rb = mm->holes_addr.rbr_tree.rbt_root;
441 #else
442 struct rb_node *rb = mm->holes_addr.rb_node;
443 #endif
444 struct drm_mm_node *node = NULL;
445
446 while (rb) {
447 u64 hole_start;
448
449 node = rb_hole_addr_to_node(rb);
450 hole_start = __drm_mm_hole_node_start(node);
451
452 if (addr < hole_start)
453 rb = node->rb_hole_addr.rb_left;
454 else if (addr > hole_start + node->hole_size)
455 rb = node->rb_hole_addr.rb_right;
456 else
457 break;
458 }
459
460 return node;
461 }
462
463 static struct drm_mm_node *
first_hole(struct drm_mm *mm,
465 u64 start, u64 end, u64 size,
466 enum drm_mm_insert_mode mode)
467 {
468 switch (mode) {
469 default:
470 case DRM_MM_INSERT_BEST:
471 return best_hole(mm, size);
472
473 case DRM_MM_INSERT_LOW:
474 return find_hole(mm, start);
475
476 case DRM_MM_INSERT_HIGH:
477 return find_hole(mm, end);
478
479 case DRM_MM_INSERT_EVICT:
480 return list_first_entry_or_null(&mm->hole_stack,
481 struct drm_mm_node,
482 hole_stack);
483 }
484 }
485
486 static struct drm_mm_node *
next_hole(struct drm_mm *mm,
488 struct drm_mm_node *node,
489 enum drm_mm_insert_mode mode)
490 {
491 switch (mode) {
492 default:
493 case DRM_MM_INSERT_BEST:
494 #ifdef __NetBSD__
495 return RB_TREE_PREV(&mm->holes_size.rb_root.rbr_tree, node);
496 #else
497 return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
498 #endif
499
500 case DRM_MM_INSERT_LOW:
501 #ifdef __NetBSD__
502 return RB_TREE_NEXT(&mm->holes_addr.rbr_tree, node);
503 #else
504 return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
505 #endif
506
507 case DRM_MM_INSERT_HIGH:
508 #ifdef __NetBSD__
509 return RB_TREE_PREV(&mm->holes_addr.rbr_tree, node);
510 #else
511 return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
512 #endif
513
514 case DRM_MM_INSERT_EVICT:
515 node = list_next_entry(node, hole_stack);
516 return &node->hole_stack == &mm->hole_stack ? NULL : node;
517 }
518 }
519
520 /**
 * drm_mm_reserve_node - insert a pre-initialized node
522 * @mm: drm_mm allocator to insert @node into
523 * @node: drm_mm_node to insert
524 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
526 * meaning that start, size and color must be set by the caller. All other
527 * fields must be cleared to 0. This is useful to initialize the allocator with
528 * preallocated objects which must be set-up before the range allocator can be
529 * set-up, e.g. when taking over a firmware framebuffer.
530 *
531 * Returns:
532 * 0 on success, -ENOSPC if there's no hole where @node is.
533 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
535 {
536 u64 end = node->start + node->size;
537 struct drm_mm_node *hole;
538 u64 hole_start, hole_end;
539 u64 adj_start, adj_end;
540
541 end = node->start + node->size;
542 if (unlikely(end <= node->start))
543 return -ENOSPC;
544
545 /* Find the relevant hole to add our node to */
546 hole = find_hole(mm, node->start);
547 if (!hole)
548 return -ENOSPC;
549
550 adj_start = hole_start = __drm_mm_hole_node_start(hole);
551 adj_end = hole_end = hole_start + hole->hole_size;
552
553 if (mm->color_adjust)
554 mm->color_adjust(hole, node->color, &adj_start, &adj_end);
555
556 if (adj_start > node->start || adj_end < end)
557 return -ENOSPC;
558
559 node->mm = mm;
560
561 __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
562 list_add(&node->node_list, &hole->node_list);
563 #ifndef __NetBSD__
564 drm_mm_interval_tree_add_node(hole, node);
565 #endif
566 node->hole_size = 0;
567
568 rm_hole(hole);
569 if (node->start > hole_start)
570 add_hole(hole);
571 if (end < hole_end)
572 add_hole(node);
573
574 save_stack(node);
575 return 0;
576 }
577 EXPORT_SYMBOL(drm_mm_reserve_node);
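
/*
 * Sketch of the firmware-takeover use case described above; fb_base and
 * fb_size are hypothetical values read back from the display hardware:
 *
 *	memset(&node, 0, sizeof(node));
 *	node.start = fb_base;
 *	node.size = fb_size;
 *	err = drm_mm_reserve_node(&mm, &node);
 *	if (err == -ENOSPC)
 *		(the [fb_base, fb_base + fb_size) range is already in use)
 */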
578
static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
580 {
581 return rb ? rb_to_hole_size(rb) : 0;
582 }
583
584 /**
585 * drm_mm_insert_node_in_range - ranged search for space and insert @node
586 * @mm: drm_mm to allocate from
587 * @node: preallocate node to insert
588 * @size: size of the allocation
589 * @alignment: alignment of the allocation
590 * @color: opaque tag value to use for this node
591 * @range_start: start of the allowed range for this node
592 * @range_end: end of the allowed range for this node
593 * @mode: fine-tune the allocation search and placement
594 *
595 * The preallocated @node must be cleared to 0.
596 *
597 * Returns:
598 * 0 on success, -ENOSPC if there's no suitable hole.
599 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
601 struct drm_mm_node * const node,
602 u64 size, u64 alignment,
603 unsigned long color,
604 u64 range_start, u64 range_end,
605 enum drm_mm_insert_mode mode)
606 {
607 struct drm_mm_node *hole;
608 u64 remainder_mask;
609 bool once;
610
611 DRM_MM_BUG_ON(range_start > range_end);
612
613 if (unlikely(size == 0 || range_end - range_start < size))
614 return -ENOSPC;
615
616 if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
617 return -ENOSPC;
618
619 if (alignment <= 1)
620 alignment = 0;
621
622 once = mode & DRM_MM_INSERT_ONCE;
623 mode &= ~DRM_MM_INSERT_ONCE;
624
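	/*
	 * For power-of-two alignments the misalignment can be computed with a
	 * cheap mask; otherwise fall back to div64_u64_rem() below.
	 */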
625 remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
626 for (hole = first_hole(mm, range_start, range_end, size, mode);
627 hole;
628 hole = once ? NULL : next_hole(mm, hole, mode)) {
629 u64 hole_start = __drm_mm_hole_node_start(hole);
630 u64 hole_end = hole_start + hole->hole_size;
631 u64 adj_start, adj_end;
632 u64 col_start, col_end;
633
634 if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
635 break;
636
637 if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
638 break;
639
640 col_start = hole_start;
641 col_end = hole_end;
642 if (mm->color_adjust)
643 mm->color_adjust(hole, color, &col_start, &col_end);
644
645 adj_start = max(col_start, range_start);
646 adj_end = min(col_end, range_end);
647
648 if (adj_end <= adj_start || adj_end - adj_start < size)
649 continue;
650
651 if (mode == DRM_MM_INSERT_HIGH)
652 adj_start = adj_end - size;
653
654 if (alignment) {
655 u64 rem;
656
657 if (likely(remainder_mask))
658 rem = adj_start & remainder_mask;
659 else
660 div64_u64_rem(adj_start, alignment, &rem);
661 if (rem) {
662 adj_start -= rem;
663 if (mode != DRM_MM_INSERT_HIGH)
664 adj_start += alignment;
665
666 if (adj_start < max(col_start, range_start) ||
667 min(col_end, range_end) - adj_start < size)
668 continue;
669
670 if (adj_end <= adj_start ||
671 adj_end - adj_start < size)
672 continue;
673 }
674 }
675
676 node->mm = mm;
677 node->size = size;
678 node->start = adj_start;
679 node->color = color;
680 node->hole_size = 0;
681
682 __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
683 list_add(&node->node_list, &hole->node_list);
684 #ifndef __NetBSD__
685 drm_mm_interval_tree_add_node(hole, node);
686 #endif
687
688 rm_hole(hole);
689 if (adj_start > hole_start)
690 add_hole(hole);
691 if (adj_start + size < hole_end)
692 add_hole(node);
693
694 save_stack(node);
695 return 0;
696 }
697
698 return -ENOSPC;
699 }
700 EXPORT_SYMBOL(drm_mm_insert_node_in_range);
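
/*
 * Hypothetical example of a top-down, 64 KiB-aligned allocation restricted to
 * a sub-range (size, range_start and range_end are placeholder names, not
 * part of this API):
 *
 *	err = drm_mm_insert_node_in_range(&mm, &node, size, 0x10000, 0,
 *					  range_start, range_end,
 *					  DRM_MM_INSERT_HIGH);
 */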
701
static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
703 {
704 return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
705 }
706
707 /**
708 * drm_mm_remove_node - Remove a memory node from the allocator.
709 * @node: drm_mm_node to remove
710 *
711 * This just removes a node from its drm_mm allocator. The node does not need to
712 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
714 */
void drm_mm_remove_node(struct drm_mm_node *node)
716 {
717 struct drm_mm *mm = node->mm;
718 struct drm_mm_node *prev_node;
719
720 DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
721 DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
722
723 prev_node = list_prev_entry(node, node_list);
724
725 if (drm_mm_hole_follows(node))
726 rm_hole(node);
727
728 #ifdef __NetBSD__
729 __USE(mm);
730 #else
731 drm_mm_interval_tree_remove(node, &mm->interval_tree);
732 #endif
733 list_del(&node->node_list);
734
735 if (drm_mm_hole_follows(prev_node))
736 rm_hole(prev_node);
737 add_hole(prev_node);
738
739 clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
740 }
741 EXPORT_SYMBOL(drm_mm_remove_node);
742
743 /**
744 * drm_mm_replace_node - move an allocation from @old to @new
745 * @old: drm_mm_node to remove from the allocator
746 * @new: drm_mm_node which should inherit @old's allocation
747 *
748 * This is useful for when drivers embed the drm_mm_node structure and hence
749 * can't move allocations by reassigning pointers. It's a combination of remove
750 * and insert with the guarantee that the allocation start will match.
751 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
753 {
754 struct drm_mm *mm = old->mm;
755
756 DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
757
758 *new = *old;
759
760 __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
761 list_replace(&old->node_list, &new->node_list);
762 #ifndef __NetBSD__
763 rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
764 #endif
765
766 if (drm_mm_hole_follows(old)) {
767 list_replace(&old->hole_stack, &new->hole_stack);
768 rb_replace_node_cached(&old->rb_hole_size,
769 &new->rb_hole_size,
770 &mm->holes_size);
771 rb_replace_node(&old->rb_hole_addr,
772 &new->rb_hole_addr,
773 &mm->holes_addr);
774 }
775
776 clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
777 }
778 EXPORT_SYMBOL(drm_mm_replace_node);
779
780 /**
781 * DOC: lru scan roster
782 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply start selecting all objects from the tail of an LRU
786 * until there's a suitable hole: Especially for big objects or nodes that
787 * otherwise have special allocation constraints there's a good chance we evict
788 * lots of (smaller) objects unnecessarily.
789 *
790 * The DRM range allocator supports this use-case through the scanning
791 * interfaces. First a scan operation needs to be initialized with
792 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
793 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
795 * drm_mm_scan_add_block() until a suitable hole is found or there are no
796 * further evictable objects. Eviction roster metadata is tracked in &struct
797 * drm_mm_scan.
798 *
799 * The driver must walk through all objects again in exactly the reverse
800 * order to restore the allocator state. Note that while the allocator is used
801 * in the scan mode no other operation is allowed.
802 *
803 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
804 * reported true) in the scan, and any overlapping nodes after color adjustment
805 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
806 * since freeing a node is also O(1) the overall complexity is
807 * O(scanned_objects). So like the free stack which needs to be walked before a
808 * scan operation even begins this is linear in the number of objects. It
809 * doesn't seem to hurt too badly.
810 */
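
/*
 * Rough sketch of an eviction scan as described above (obj, lru, evict_link
 * and evict() are hypothetical driver-side names, not part of this API):
 *
 *	struct drm_mm_scan scan;
 *	LIST_HEAD(evict_list);
 *
 *	drm_mm_scan_init_with_range(&scan, &mm, size, alignment, color,
 *				    range_start, range_end, mode);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			goto found;
 *	}
 *	(no hole found: walk evict_list, calling drm_mm_scan_remove_block() on
 *	 each entry to restore the allocator state, then give up)
 *
 * found:
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			evict(obj);	(frees obj->node)
 *	}
 *
 * Because evict_list is built with list_add(), walking it forwards visits the
 * nodes in the reverse order of drm_mm_scan_add_block(), as required.
 */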
811
812 /**
813 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
814 * @scan: scan state
815 * @mm: drm_mm to scan
816 * @size: size of the allocation
817 * @alignment: alignment of the allocation
818 * @color: opaque tag value to use for the allocation
819 * @start: start of the allowed range for the allocation
820 * @end: end of the allowed range for the allocation
821 * @mode: fine-tune the allocation search and placement
822 *
823 * This simply sets up the scanning routines with the parameters for the desired
824 * hole.
825 *
826 * Warning:
827 * As long as the scan list is non-empty, no other operations than
828 * adding/removing nodes to/from the scan list are allowed.
829 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
831 struct drm_mm *mm,
832 u64 size,
833 u64 alignment,
834 unsigned long color,
835 u64 start,
836 u64 end,
837 enum drm_mm_insert_mode mode)
838 {
839 DRM_MM_BUG_ON(start >= end);
840 DRM_MM_BUG_ON(!size || size > end - start);
841 DRM_MM_BUG_ON(mm->scan_active);
842
843 scan->mm = mm;
844
845 if (alignment <= 1)
846 alignment = 0;
847
848 scan->color = color;
849 scan->alignment = alignment;
850 scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
851 scan->size = size;
852 scan->mode = mode;
853
854 DRM_MM_BUG_ON(end <= start);
855 scan->range_start = start;
856 scan->range_end = end;
857
858 scan->hit_start = U64_MAX;
859 scan->hit_end = 0;
860 }
861 EXPORT_SYMBOL(drm_mm_scan_init_with_range);
862
863 /**
864 * drm_mm_scan_add_block - add a node to the scan list
865 * @scan: the active drm_mm scanner
866 * @node: drm_mm_node to add
867 *
868 * Add a node to the scan list that might be freed to make space for the desired
869 * hole.
870 *
871 * Returns:
872 * True if a hole has been found, false otherwise.
873 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
875 struct drm_mm_node *node)
876 {
877 struct drm_mm *mm = scan->mm;
878 struct drm_mm_node *hole;
879 u64 hole_start, hole_end;
880 u64 col_start, col_end;
881 u64 adj_start, adj_end;
882
883 DRM_MM_BUG_ON(node->mm != mm);
884 DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
885 DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
886 __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
887 mm->scan_active++;
888
889 /* Remove this block from the node_list so that we enlarge the hole
890 * (distance between the end of our previous node and the start of
 * our next), without poisoning the link so that we can restore it
892 * later in drm_mm_scan_remove_block().
893 */
894 hole = list_prev_entry(node, node_list);
895 DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
896 __list_del_entry(&node->node_list);
897
898 hole_start = __drm_mm_hole_node_start(hole);
899 hole_end = __drm_mm_hole_node_end(hole);
900
901 col_start = hole_start;
902 col_end = hole_end;
903 if (mm->color_adjust)
904 mm->color_adjust(hole, scan->color, &col_start, &col_end);
905
906 adj_start = max(col_start, scan->range_start);
907 adj_end = min(col_end, scan->range_end);
908 if (adj_end <= adj_start || adj_end - adj_start < scan->size)
909 return false;
910
911 if (scan->mode == DRM_MM_INSERT_HIGH)
912 adj_start = adj_end - scan->size;
913
914 if (scan->alignment) {
915 u64 rem;
916
917 if (likely(scan->remainder_mask))
918 rem = adj_start & scan->remainder_mask;
919 else
920 div64_u64_rem(adj_start, scan->alignment, &rem);
921 if (rem) {
922 adj_start -= rem;
923 if (scan->mode != DRM_MM_INSERT_HIGH)
924 adj_start += scan->alignment;
925 if (adj_start < max(col_start, scan->range_start) ||
926 min(col_end, scan->range_end) - adj_start < scan->size)
927 return false;
928
929 if (adj_end <= adj_start ||
930 adj_end - adj_start < scan->size)
931 return false;
932 }
933 }
934
935 scan->hit_start = adj_start;
936 scan->hit_end = adj_start + scan->size;
937
938 DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
939 DRM_MM_BUG_ON(scan->hit_start < hole_start);
940 DRM_MM_BUG_ON(scan->hit_end > hole_end);
941
942 return true;
943 }
944 EXPORT_SYMBOL(drm_mm_scan_add_block);
945
946 /**
947 * drm_mm_scan_remove_block - remove a node from the scan list
948 * @scan: the active drm_mm scanner
949 * @node: drm_mm_node to remove
950 *
951 * Nodes **must** be removed in exactly the reverse order from the scan list as
952 * they have been added (e.g. using list_add() as they are added and then
953 * list_for_each() over that eviction list to remove), otherwise the internal
954 * state of the memory manager will be corrupted.
955 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
960 *
961 * Returns:
962 * True if this block should be evicted, false otherwise. Will always
963 * return false when no hole has been found.
964 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
966 struct drm_mm_node *node)
967 {
968 struct drm_mm_node *prev_node;
969
970 DRM_MM_BUG_ON(node->mm != scan->mm);
971 DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
972 __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
973
974 DRM_MM_BUG_ON(!node->mm->scan_active);
975 node->mm->scan_active--;
976
977 /* During drm_mm_scan_add_block() we decoupled this node leaving
978 * its pointers intact. Now that the caller is walking back along
979 * the eviction list we can restore this block into its rightful
980 * place on the full node_list. To confirm that the caller is walking
981 * backwards correctly we check that prev_node->next == node->next,
982 * i.e. both believe the same node should be on the other side of the
983 * hole.
984 */
985 prev_node = list_prev_entry(node, node_list);
986 DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
987 list_next_entry(node, node_list));
988 list_add(&node->node_list, &prev_node->node_list);
989
990 return (node->start + node->size > scan->hit_start &&
991 node->start < scan->hit_end);
992 }
993 EXPORT_SYMBOL(drm_mm_scan_remove_block);
994
995 /**
996 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
997 * @scan: drm_mm scan with target hole
998 *
999 * After completing an eviction scan and removing the selected nodes, we may
1000 * need to remove a few more nodes from either side of the target hole if
1001 * mm.color_adjust is being used.
1002 *
1003 * Returns:
1004 * A node to evict, or NULL if there are no overlapping nodes.
1005 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
1007 {
1008 struct drm_mm *mm = scan->mm;
1009 struct drm_mm_node *hole;
1010 u64 hole_start, hole_end;
1011
1012 DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
1013
1014 if (!mm->color_adjust)
1015 return NULL;
1016
1017 /*
1018 * The hole found during scanning should ideally be the first element
1019 * in the hole_stack list, but due to side-effects in the driver it
1020 * may not be.
1021 */
1022 list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
1023 hole_start = __drm_mm_hole_node_start(hole);
1024 hole_end = hole_start + hole->hole_size;
1025
1026 if (hole_start <= scan->hit_start &&
1027 hole_end >= scan->hit_end)
1028 break;
1029 }
1030
1031 /* We should only be called after we found the hole previously */
1032 DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
1033 if (unlikely(&hole->hole_stack == &mm->hole_stack))
1034 return NULL;
1035
1036 DRM_MM_BUG_ON(hole_start > scan->hit_start);
1037 DRM_MM_BUG_ON(hole_end < scan->hit_end);
1038
1039 mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
1040 if (hole_start > scan->hit_start)
1041 return hole;
1042 if (hole_end < scan->hit_end)
1043 return list_next_entry(hole, node_list);
1044
1045 return NULL;
1046 }
1047 EXPORT_SYMBOL(drm_mm_scan_color_evict);
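
/*
 * Hedged follow-up sketch: drivers using color_adjust typically loop after
 * the eviction above until no overlapping neighbour remains:
 *
 *	while ((node = drm_mm_scan_color_evict(&scan)) != NULL)
 *		(evict the object embedding node, which removes the node)
 */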
1048
1049 /**
1050 * drm_mm_init - initialize a drm-mm allocator
1051 * @mm: the drm_mm structure to initialize
1052 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
1054 *
1055 * Note that @mm must be cleared to 0 before calling this function.
1056 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
1058 {
1059 DRM_MM_BUG_ON(start + size <= start);
1060
1061 mm->color_adjust = NULL;
1062
1063 INIT_LIST_HEAD(&mm->hole_stack);
1064 #ifdef __NetBSD__
1065 /* XXX interval tree */
1066 rb_tree_init(&mm->holes_size.rb_root.rbr_tree, &holes_size_rb_ops);
1067 rb_tree_init(&mm->holes_addr.rbr_tree, &holes_addr_rb_ops);
1068 #else
1069 mm->interval_tree = RB_ROOT_CACHED;
1070 mm->holes_size = RB_ROOT_CACHED;
1071 mm->holes_addr = RB_ROOT;
1072 #endif
1073
	/*
	 * Clever trick to avoid a special case in the free hole tracking: the
	 * head node acts as a sentinel whose following "hole" covers the
	 * entire managed range [start, start + size).
	 */
1075 INIT_LIST_HEAD(&mm->head_node.node_list);
1076 mm->head_node.flags = 0;
1077 mm->head_node.mm = mm;
1078 mm->head_node.start = start + size;
1079 mm->head_node.size = -size;
1080 add_hole(&mm->head_node);
1081
1082 mm->scan_active = 0;
1083 }
1084 EXPORT_SYMBOL(drm_mm_init);
1085
1086 /**
1087 * drm_mm_takedown - clean up a drm_mm allocator
1088 * @mm: drm_mm allocator to clean up
1089 *
1090 * Note that it is a bug to call this function on an allocator which is not
1091 * clean.
1092 */
void drm_mm_takedown(struct drm_mm *mm)
1094 {
1095 if (WARN(!drm_mm_clean(mm),
1096 "Memory manager not clean during takedown.\n"))
1097 show_leaks(mm);
1098 }
1099 EXPORT_SYMBOL(drm_mm_takedown);
1100
static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
1102 {
1103 u64 start, size;
1104
1105 size = entry->hole_size;
1106 if (size) {
1107 start = drm_mm_hole_node_start(entry);
1108 drm_printf(p, "%#018"PRIx64"-%#018"PRIx64": %"PRIu64": free\n",
1109 start, start + size, size);
1110 }
1111
1112 return size;
1113 }
1114 /**
1115 * drm_mm_print - print allocator state
1116 * @mm: drm_mm allocator to print
1117 * @p: DRM printer to use
1118 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
1120 {
1121 const struct drm_mm_node *entry;
1122 u64 total_used = 0, total_free = 0, total = 0;
1123
1124 total_free += drm_mm_dump_hole(p, &mm->head_node);
1125
1126 drm_mm_for_each_node(entry, mm) {
1127 drm_printf(p, "%#018"PRIx64"-%#018"PRIx64": %"PRIu64": used\n", entry->start,
1128 entry->start + entry->size, entry->size);
1129 total_used += entry->size;
1130 total_free += drm_mm_dump_hole(p, entry);
1131 }
1132 total = total_free + total_used;
1133
1134 drm_printf(p, "total: %"PRIu64", used %"PRIu64" free %"PRIu64"\n", total,
1135 total_used, total_free);
1136 }
1137 EXPORT_SYMBOL(drm_mm_print);
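
/*
 * Hedged usage sketch, assuming the drm_info_printer() helper from
 * drm_print.h and a hypothetical device pointer:
 *
 *	struct drm_printer p = drm_info_printer(dev->dev);
 *
 *	drm_mm_print(&mm, &p);
 */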
1138