1 /*	$NetBSD: test-drm_mm.c,v 1.2 2021/12/18 23:45:44 riastradh Exp $	*/
2 
3 // SPDX-License-Identifier: GPL-2.0-only
4 /*
5  * Test cases for the drm_mm range manager
6  */
7 
8 #include <sys/cdefs.h>
9 __KERNEL_RCSID(0, "$NetBSD: test-drm_mm.c,v 1.2 2021/12/18 23:45:44 riastradh Exp $");
10 
11 #define pr_fmt(fmt) "drm_mm: " fmt
12 
13 #include <linux/module.h>
14 #include <linux/prime_numbers.h>
15 #include <linux/slab.h>
16 #include <linux/random.h>
17 #include <linux/vmalloc.h>
18 
19 #include <drm/drm_mm.h>
20 
21 #include "../lib/drm_random.h"
22 
23 #define TESTS "drm_mm_selftests.h"
24 #include "drm_selftest.h"
25 
26 static unsigned int random_seed;
27 static unsigned int max_iterations = 8192;
28 static unsigned int max_prime = 128;
29 
/* Symbolic indices into insert_modes[]. */
enum {
	BEST,
	BOTTOMUP,
	TOPDOWN,
	EVICT,
};

/*
 * Insertion strategies exercised by the tests: each allocation test is
 * repeated once per mode.  The trailing {} entry is the NULL-name
 * sentinel that terminates "for (mode = ...; mode->name; mode++)" loops.
 */
static const struct insert_mode {
	const char *name;		/* label used in diagnostics */
	enum drm_mm_insert_mode mode;	/* flag passed to drm_mm insertion */
} insert_modes[] = {
	[BEST] = { "best", DRM_MM_INSERT_BEST },
	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
	{}
}, evict_modes[] = {
	/* Eviction scans are only exercised bottom-up and top-down. */
	{ "bottom-up", DRM_MM_INSERT_LOW },
	{ "top-down", DRM_MM_INSERT_HIGH },
	{}
};
51 
/*
 * Trivial selftest that always succeeds: proves the selftest harness
 * itself can locate, run and report a test.
 */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
57 
assert_no_holes(const struct drm_mm * mm)58 static bool assert_no_holes(const struct drm_mm *mm)
59 {
60 	struct drm_mm_node *hole;
61 	u64 hole_start, hole_end;
62 	unsigned long count;
63 
64 	count = 0;
65 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
66 		count++;
67 	if (count) {
68 		pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
69 		return false;
70 	}
71 
72 	drm_mm_for_each_node(hole, mm) {
73 		if (drm_mm_hole_follows(hole)) {
74 			pr_err("Hole follows node, expected none!\n");
75 			return false;
76 		}
77 	}
78 
79 	return true;
80 }
81 
assert_one_hole(const struct drm_mm * mm,u64 start,u64 end)82 static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
83 {
84 	struct drm_mm_node *hole;
85 	u64 hole_start, hole_end;
86 	unsigned long count;
87 	bool ok = true;
88 
89 	if (end <= start)
90 		return true;
91 
92 	count = 0;
93 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
94 		if (start != hole_start || end != hole_end) {
95 			if (ok)
96 				pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
97 				       hole_start, hole_end,
98 				       start, end);
99 			ok = false;
100 		}
101 		count++;
102 	}
103 	if (count != 1) {
104 		pr_err("Expected to find one hole, found %lu instead\n", count);
105 		ok = false;
106 	}
107 
108 	return ok;
109 }
110 
/*
 * Verify that @mm is packed solid with nodes of uniform @size, starting
 * at address 0 with no holes, in address order, and that interval
 * lookups resolve each slot back to its owning node.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	/* Walk the list in order: node n must sit exactly at n * size. */
	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* A range walk over this slot must yield only this node. */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
161 
/*
 * Return node->start modulo @alignment, i.e. how far the node misses
 * the requested alignment; 0 means perfectly aligned (or no alignment
 * requested).
 */
static u64 misalignment(struct drm_mm_node *node, u64 alignment)
{
	u64 rem = 0;

	if (alignment)
		div64_u64_rem(node->start, alignment, &rem);

	return rem;
}
172 
assert_node(struct drm_mm_node * node,struct drm_mm * mm,u64 size,u64 alignment,unsigned long color)173 static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
174 			u64 size, u64 alignment, unsigned long color)
175 {
176 	bool ok = true;
177 
178 	if (!drm_mm_node_allocated(node) || node->mm != mm) {
179 		pr_err("node not allocated\n");
180 		ok = false;
181 	}
182 
183 	if (node->size != size) {
184 		pr_err("node has wrong size, found %llu, expected %llu\n",
185 		       node->size, size);
186 		ok = false;
187 	}
188 
189 	if (misalignment(node, alignment)) {
190 		pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
191 		       node->start, misalignment(node, alignment), alignment);
192 		ok = false;
193 	}
194 
195 	if (node->color != color) {
196 		pr_err("node has wrong color, found %lu, expected %lu\n",
197 		       node->color, color);
198 		ok = false;
199 	}
200 
201 	return ok;
202 }
203 
/* Dump the state of @mm via the debug printer, tagged with the calling
 * function's name; do { } while (0) makes it a single statement. */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
207 
/*
 * Basic lifecycle test: a zeroed struct must not claim to be
 * initialized, drm_mm_init() must produce a clean mm consisting of one
 * hole covering the whole range, reserving that range must fill it
 * completely, and removing the reservation must restore the hole.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison first to prove drm_mm_init() sets every field it needs. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		show_mm(&mm);	/* dump state to aid debugging the failure */
	drm_mm_takedown(&mm);
	return ret;
}
268 
igt_debug(void * ignored)269 static int igt_debug(void *ignored)
270 {
271 	struct drm_mm mm;
272 	struct drm_mm_node nodes[2];
273 	int ret;
274 
275 	/* Create a small drm_mm with a couple of nodes and a few holes, and
276 	 * check that the debug iterator doesn't explode over a trivial drm_mm.
277 	 */
278 
279 	drm_mm_init(&mm, 0, 4096);
280 
281 	memset(nodes, 0, sizeof(nodes));
282 	nodes[0].start = 512;
283 	nodes[0].size = 1024;
284 	ret = drm_mm_reserve_node(&mm, &nodes[0]);
285 	if (ret) {
286 		pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
287 		       nodes[0].start, nodes[0].size);
288 		return ret;
289 	}
290 
291 	nodes[1].size = 1024;
292 	nodes[1].start = 4096 - 512 - nodes[1].size;
293 	ret = drm_mm_reserve_node(&mm, &nodes[1]);
294 	if (ret) {
295 		pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
296 		       nodes[1].start, nodes[1].size);
297 		return ret;
298 	}
299 
300 	show_mm(&mm);
301 	return 0;
302 }
303 
/*
 * Convenience helper: fill in @node's start/size and hand the node
 * back, so a temporary node can be configured inline in a call.
 */
static struct drm_mm_node *set_node(struct drm_mm_node *node,
				    u64 start, u64 size)
{
	node->size = size;
	node->start = start;

	return node;
}
311 
expect_reserve_fail(struct drm_mm * mm,struct drm_mm_node * node)312 static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
313 {
314 	int err;
315 
316 	err = drm_mm_reserve_node(mm, node);
317 	if (likely(err == -ENOSPC))
318 		return true;
319 
320 	if (!err) {
321 		pr_err("impossible reserve succeeded, node %llu + %llu\n",
322 		       node->start, node->size);
323 		drm_mm_remove_node(node);
324 	} else {
325 		pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
326 		       err, -ENOSPC, node->start, node->size);
327 	}
328 	return false;
329 }
330 
/*
 * Hammer drm_mm_reserve_node() with a table of degenerate and
 * out-of-range [start, size] pairs for an mm covering [0, count*size);
 * every single one must be rejected with -ENOSPC.
 *
 * Note the negated u64 expressions (e.g. -size) wrap to huge values,
 * deliberately probing the extreme top of the address space.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;	/* stringified pair for diagnostics */
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
375 
/*
 * Core reserve test: create an mm of @count slots of @size bytes each,
 * then (1) verify boundary reservations fail, (2) fill every slot in
 * random order, (3) verify double/overlapping reservations fail while
 * remove+re-reserve round-trips, and (4) remove/reinsert prime-sized
 * batches, checking the mm is packed solid after each phase.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __igt_reserve(unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	/* For exercising drm_mm_reserve_node(), we want to check that
	 * reservations outside of the drm_mm range are rejected, and to
	 * overlapping and otherwise already occupied ranges. Afterwards,
	 * the tree and nodes should be intact.
	 */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err_order;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	if (!check_reserve_boundaries(&mm, count, size))
		goto out;

	/* Fill every slot, one reservation per slot, in random order. */
	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			pr_err("reserved node not allocated! step %d, start %llu\n",
			       n, nodes[n].start);
			goto out;
		}

		/* Reserving the same node again must fail with -ENOSPC. */
		if (!expect_reserve_fail(&mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(&mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		/* Even a 1-byte claim inside an occupied slot must fail. */
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			/* NOTE(review): prints nodes[n].start although the
			 * node reserved here is nodes[order[n]] — looks like
			 * a diagnostic-only slip; confirm before relying on
			 * the logged address. */
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(&mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp,
						  size * n,
						  size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve failed, step %d/%d, start %llu\n",
				       m, n, node->start);
				ret = err;
				goto out;
			}
		}

		/* Slide the eviction window so each pass hits new slots. */
		o += n;

		if (!assert_continuous(&mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err_order:
	kfree(order);
err:
	return ret;
}
503 
igt_reserve(void * ignored)504 static int igt_reserve(void *ignored)
505 {
506 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
507 	int n, ret;
508 
509 	for_each_prime_number_from(n, 1, 54) {
510 		u64 size = BIT_ULL(n);
511 
512 		ret = __igt_reserve(count, size - 1);
513 		if (ret)
514 			return ret;
515 
516 		ret = __igt_reserve(count, size);
517 		if (ret)
518 			return ret;
519 
520 		ret = __igt_reserve(count, size + 1);
521 		if (ret)
522 			return ret;
523 
524 		cond_resched();
525 	}
526 
527 	return 0;
528 }
529 
expect_insert(struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,const struct insert_mode * mode)530 static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
531 			  u64 size, u64 alignment, unsigned long color,
532 			  const struct insert_mode *mode)
533 {
534 	int err;
535 
536 	err = drm_mm_insert_node_generic(mm, node,
537 					 size, alignment, color,
538 					 mode->mode);
539 	if (err) {
540 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
541 		       size, alignment, color, mode->name, err);
542 		return false;
543 	}
544 
545 	if (!assert_node(node, mm, size, alignment, color)) {
546 		drm_mm_remove_node(node);
547 		return false;
548 	}
549 
550 	return true;
551 }
552 
expect_insert_fail(struct drm_mm * mm,u64 size)553 static bool expect_insert_fail(struct drm_mm *mm, u64 size)
554 {
555 	struct drm_mm_node tmp = {};
556 	int err;
557 
558 	err = drm_mm_insert_node(mm, &tmp, size);
559 	if (likely(err == -ENOSPC))
560 		return true;
561 
562 	if (!err) {
563 		pr_err("impossible insert succeeded, node %llu + %llu\n",
564 		       tmp.start, tmp.size);
565 		drm_mm_remove_node(&tmp);
566 	} else {
567 		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
568 		       err, -ENOSPC, size);
569 	}
570 	return false;
571 }
572 
/*
 * Core insert test: for each insertion mode, fill an mm of exactly
 * count*size bytes with @count nodes of @size, then check it packed
 * solid, that further inserts fail, that single and prime-batch
 * remove/reinsert cycles refill the same slots, and (optionally, when
 * @replace is set) that drm_mm_replace_node() transfers a placement
 * from a temporary node to its final node intact.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode insert into a scratch node first. */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			/* Slide the window so each pass hits new slots. */
			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start the next mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
706 
igt_insert(void * ignored)707 static int igt_insert(void *ignored)
708 {
709 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
710 	unsigned int n;
711 	int ret;
712 
713 	for_each_prime_number_from(n, 1, 54) {
714 		u64 size = BIT_ULL(n);
715 
716 		ret = __igt_insert(count, size - 1, false);
717 		if (ret)
718 			return ret;
719 
720 		ret = __igt_insert(count, size, false);
721 		if (ret)
722 			return ret;
723 
724 		ret = __igt_insert(count, size + 1, false);
725 		if (ret)
726 			return ret;
727 
728 		cond_resched();
729 	}
730 
731 	return 0;
732 }
733 
igt_replace(void * ignored)734 static int igt_replace(void *ignored)
735 {
736 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
737 	unsigned int n;
738 	int ret;
739 
740 	/* Reuse igt_insert to exercise replacement by inserting a dummy node,
741 	 * then replacing it with the intended node. We want to check that
742 	 * the tree is intact and all the information we need is carried
743 	 * across to the target node.
744 	 */
745 
746 	for_each_prime_number_from(n, 1, 54) {
747 		u64 size = BIT_ULL(n);
748 
749 		ret = __igt_insert(count, size - 1, true);
750 		if (ret)
751 			return ret;
752 
753 		ret = __igt_insert(count, size, true);
754 		if (ret)
755 			return ret;
756 
757 		ret = __igt_insert(count, size + 1, true);
758 		if (ret)
759 			return ret;
760 
761 		cond_resched();
762 	}
763 
764 	return 0;
765 }
766 
expect_insert_in_range(struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,u64 range_start,u64 range_end,const struct insert_mode * mode)767 static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
768 				   u64 size, u64 alignment, unsigned long color,
769 				   u64 range_start, u64 range_end,
770 				   const struct insert_mode *mode)
771 {
772 	int err;
773 
774 	err = drm_mm_insert_node_in_range(mm, node,
775 					  size, alignment, color,
776 					  range_start, range_end,
777 					  mode->mode);
778 	if (err) {
779 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
780 		       size, alignment, color, mode->name,
781 		       range_start, range_end, err);
782 		return false;
783 	}
784 
785 	if (!assert_node(node, mm, size, alignment, color)) {
786 		drm_mm_remove_node(node);
787 		return false;
788 	}
789 
790 	return true;
791 }
792 
expect_insert_in_range_fail(struct drm_mm * mm,u64 size,u64 range_start,u64 range_end)793 static bool expect_insert_in_range_fail(struct drm_mm *mm,
794 					u64 size,
795 					u64 range_start,
796 					u64 range_end)
797 {
798 	struct drm_mm_node tmp = {};
799 	int err;
800 
801 	err = drm_mm_insert_node_in_range(mm, &tmp,
802 					  size, 0, 0,
803 					  range_start, range_end,
804 					  0);
805 	if (likely(err == -ENOSPC))
806 		return true;
807 
808 	if (!err) {
809 		pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
810 		       tmp.start, tmp.size, range_start, range_end);
811 		drm_mm_remove_node(&tmp);
812 	} else {
813 		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
814 		       err, -ENOSPC, size, range_start, range_end);
815 	}
816 
817 	return false;
818 }
819 
/*
 * Verify that the window [start, end) of @mm is packed solid with
 * @size-sized nodes aligned to multiples of @size, with no further
 * room (insertion must fail), no interior holes, and no allocated
 * nodes outside the window.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	/* A full window has no room for even one more node. */
	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first size-aligned slot at/after start. */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* A trailing hole is fine only if it extends past @end. */
		if (drm_mm_hole_follows(node) &&
		    drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	/* Nothing may be allocated below @start ... */
	if (start > 0) {
		node = __drm_mm_interval_first(mm, 0, start - 1);
		if (drm_mm_node_allocated(node)) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	/* ... nor at/above @end. */
	if (end < U64_MAX) {
		node = __drm_mm_interval_first(mm, end, U64_MAX);
		if (drm_mm_node_allocated(node)) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
880 
/*
 * Range-restricted variant of __igt_insert(): the mm covers
 * [0, count*size) but insertions are confined to [start, end).  For
 * each mode, fill the window with size-aligned nodes, verify it is
 * contiguous and full, then check single remove/reinsert refills the
 * same slot.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last size-aligned slot indices inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start the next mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
968 
insert_outside_range(void)969 static int insert_outside_range(void)
970 {
971 	struct drm_mm mm;
972 	const unsigned int start = 1024;
973 	const unsigned int end = 2048;
974 	const unsigned int size = end - start;
975 
976 	drm_mm_init(&mm, start, size);
977 
978 	if (!expect_insert_in_range_fail(&mm, 1, 0, start))
979 		return -EINVAL;
980 
981 	if (!expect_insert_in_range_fail(&mm, size,
982 					 start - size/2, start + (size+1)/2))
983 		return -EINVAL;
984 
985 	if (!expect_insert_in_range_fail(&mm, size,
986 					 end - (size+1)/2, end + size/2))
987 		return -EINVAL;
988 
989 	if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
990 		return -EINVAL;
991 
992 	drm_mm_takedown(&mm);
993 	return 0;
994 }
995 
igt_insert_range(void * ignored)996 static int igt_insert_range(void *ignored)
997 {
998 	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
999 	unsigned int n;
1000 	int ret;
1001 
1002 	/* Check that requests outside the bounds of drm_mm are rejected. */
1003 	ret = insert_outside_range();
1004 	if (ret)
1005 		return ret;
1006 
1007 	for_each_prime_number_from(n, 1, 50) {
1008 		const u64 size = BIT_ULL(n);
1009 		const u64 max = count * size;
1010 
1011 		ret = __igt_insert_range(count, size, 0, max);
1012 		if (ret)
1013 			return ret;
1014 
1015 		ret = __igt_insert_range(count, size, 1, max);
1016 		if (ret)
1017 			return ret;
1018 
1019 		ret = __igt_insert_range(count, size, 0, max - 1);
1020 		if (ret)
1021 			return ret;
1022 
1023 		ret = __igt_insert_range(count, size, 0, max/2);
1024 		if (ret)
1025 			return ret;
1026 
1027 		ret = __igt_insert_range(count, size, max/2, max);
1028 		if (ret)
1029 			return ret;
1030 
1031 		ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1032 		if (ret)
1033 			return ret;
1034 
1035 		cond_resched();
1036 	}
1037 
1038 	return 0;
1039 }
1040 
/*
 * For each insertion mode, insert nodes whose size and alignment are
 * consecutive (distinct) primes and verify via expect_insert() that
 * every placement honours the requested alignment.
 */
static int igt_align(void *ignored)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;
	int ret = -EINVAL;

	/* For each of the possible insertion modes, we pick a few
	 * arbitrary alignments and check that the inserted node
	 * meets our requirements.
	 */

	nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	/* Offset range [1, U64_MAX-1) so edge addresses stay in bounds. */
	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		for_each_prime_number_from(prime, 1, max_count) {
			/* size = next prime after the alignment prime */
			u64 size = next_prime_number(prime);

			if (!expect_insert(&mm, &nodes[i],
					   size, prime, i,
					   mode)) {
				pr_err("%s insert failed with alignment=%d",
				       mode->name, prime);
				goto out;
			}

			i++;
		}

		/* Start the next mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
1094 
/*
 * Insert nodes with power-of-two alignments from BIT(max-1) down to
 * BIT(1), each slightly larger than half its alignment, checking that
 * alignment works across the full u64 address space.  Nodes are heap
 * allocated so the count need not be known up front.
 */
static int igt_align_pot(int max)
{
	struct drm_mm mm;
	struct drm_mm_node *node, *next;
	int bit;
	int ret = -EINVAL;

	/* Check that we can align to the full u64 address space */

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (bit = max - 1; bit; bit--) {
		u64 align, size;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		align = BIT_ULL(bit);
		/* Just over half the alignment: forces distinct slots. */
		size = BIT_ULL(bit-1) + 1;
		if (!expect_insert(&mm, node,
				   size, align, bit,
				   &insert_modes[0])) {
			pr_err("insert failed with alignment=%llx [%d]",
			       align, bit);
			goto out;
		}

		cond_resched();
	}

	ret = 0;
out:
	/* Free both the mm bookkeeping and our heap-allocated nodes. */
	drm_mm_for_each_node_safe(node, next, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
1137 
/* Power-of-two alignment test over a 32-bit range of alignments. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1142 
/* Power-of-two alignment test over the full 64-bit range of alignments. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1147 
/* Log the parameters and hit window of an eviction scan for debugging. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1154 
/*
 * Log up to @count holes of @mm, each bracketed by its allocated
 * neighbours (when present) for context.  The kasprintf() results may
 * be NULL on allocation failure; NOTE(review): they are then passed to
 * pr_info's %s — relies on printk rendering NULL as "(null)", confirm
 * for non-Linux printk implementations.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		/* Describe the allocated node before the hole, if any. */
		if (drm_mm_node_allocated(hole))
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		/* And the allocated node after it, if any. */
		if (drm_mm_node_allocated(next))
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1186 
/*
 * A drm_mm_node bundled with a list link so the eviction tests can
 * collect scan victims on a local list and reinsert them afterwards.
 */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;	/* entry on the caller's evict_list */
};
1191 
/*
 * Drive an initialised drm_mm_scan to completion: feed it @count nodes
 * (in @order if non-NULL), keep only the nodes the scan selected as
 * victims on @evict_list, and remove those victims from the drm_mm.
 * With @use_color, also collect the extra nodes that color adjustment
 * forces out via drm_mm_scan_color_evict(); without it, verify that no
 * such overlap is reported.
 *
 * Returns true with the victims on @evict_list, false on failure.
 *
 * NOTE: the add/remove sequence below follows the drm_mm_scan protocol:
 * every block added must later be removed, in reverse, before the mm
 * may be modified.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	/* Add nodes until the scan reports a suitable hole was found */
	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Drop the nodes the scan did not select as victims */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	/* Physically evict the victims to open the hole */
	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Color adjustment may require evicting the neighbours too */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1238 
/*
 * Run a full scan over every node and then abort it (remove every
 * scanned block without evicting anything), then verify the mm is
 * completely undisturbed: every node is still allocated, still linked
 * into the mm, and the range is still continuous.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	/* Scan for a single unit, adding every node, then back out */
	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Clear the link so the mm walk below can mark it again */
		e->link.next = NULL;
	}

	/* Mark every node still reachable through the mm's node list */
	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* Any node left unmarked fell off the mm during the scan */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1285 
/*
 * Scan for a hole covering the entire mm, which must select every node
 * as a victim. Evict them all, check exactly one full-size hole
 * remains, then reinsert every node at its old position and verify the
 * range is continuous again.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	/* Every node must have been marked for eviction */
	err = 0;
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				pr_err("Node %lld not marked for eviction!\n",
				       e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	if (!assert_one_hole(mm, 0, total_size))
		return false;

	/* Restore the original layout */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1334 
/*
 * Evict a set of nodes (visited in the random @order) to make room for
 * one new node of @size/@alignment within [@range_start, @range_end),
 * insert into the freshly created hole with DRM_MM_INSERT_EVICT, verify
 * the insertion landed in range, filled the hole exactly and honoured
 * the alignment, then remove it and reinsert the evicted nodes so the
 * mm is restored for the next iteration.
 *
 * Returns 0 on success or a negative errno.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* The new node must lie entirely within the requested range */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* ... and must exactly fill the hole opened by the eviction */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back where they were */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1405 
/*
 * Fill a drm_mm completely with unit-sized nodes, sanity-check the
 * scanner with evict_nothing()/evict_everything(), then repeatedly
 * evict-and-reinsert via evict_something() across a spread of sizes
 * (powers of two and primes) and alignments for each eviction mode.
 *
 * Returns 0 on success or a negative errno.
 */
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, unit alignment */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed half-size, power-of-two alignments */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments with sizes shrinking to match */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1515 
igt_evict_range(void * ignored)1516 static int igt_evict_range(void *ignored)
1517 {
1518 	DRM_RND_STATE(prng, random_seed);
1519 	const unsigned int size = 8192;
1520 	const unsigned int range_size = size / 2;
1521 	const unsigned int range_start = size / 4;
1522 	const unsigned int range_end = range_start + range_size;
1523 	const struct insert_mode *mode;
1524 	struct drm_mm mm;
1525 	struct evict_node *nodes;
1526 	struct drm_mm_node *node, *next;
1527 	unsigned int *order, n;
1528 	int ret, err;
1529 
1530 	/* Like igt_evict() but now we are limiting the search to a
1531 	 * small portion of the full drm_mm.
1532 	 */
1533 
1534 	ret = -ENOMEM;
1535 	nodes = vzalloc(array_size(size, sizeof(*nodes)));
1536 	if (!nodes)
1537 		goto err;
1538 
1539 	order = drm_random_order(size, &prng);
1540 	if (!order)
1541 		goto err_nodes;
1542 
1543 	ret = -EINVAL;
1544 	drm_mm_init(&mm, 0, size);
1545 	for (n = 0; n < size; n++) {
1546 		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
1547 		if (err) {
1548 			pr_err("insert failed, step %d\n", n);
1549 			ret = err;
1550 			goto out;
1551 		}
1552 	}
1553 
1554 	for (mode = evict_modes; mode->name; mode++) {
1555 		for (n = 1; n <= range_size; n <<= 1) {
1556 			drm_random_reorder(order, size, &prng);
1557 			err = evict_something(&mm, range_start, range_end,
1558 					      nodes, order, size,
1559 					      n, 1,
1560 					      mode);
1561 			if (err) {
1562 				pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1563 				       mode->name, n, range_start, range_end);
1564 				goto out;
1565 			}
1566 		}
1567 
1568 		for (n = 1; n <= range_size; n <<= 1) {
1569 			drm_random_reorder(order, size, &prng);
1570 			err = evict_something(&mm, range_start, range_end,
1571 					      nodes, order, size,
1572 					      range_size/2, n,
1573 					      mode);
1574 			if (err) {
1575 				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1576 				       mode->name, range_size/2, n, range_start, range_end);
1577 				goto out;
1578 			}
1579 		}
1580 
1581 		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1582 			unsigned int nsize = (range_size - n + 1) / 2;
1583 
1584 			DRM_MM_BUG_ON(!nsize);
1585 
1586 			drm_random_reorder(order, size, &prng);
1587 			err = evict_something(&mm, range_start, range_end,
1588 					      nodes, order, size,
1589 					      nsize, n,
1590 					      mode);
1591 			if (err) {
1592 				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1593 				       mode->name, nsize, n, range_start, range_end);
1594 				goto out;
1595 			}
1596 		}
1597 
1598 		cond_resched();
1599 	}
1600 
1601 	ret = 0;
1602 out:
1603 	drm_mm_for_each_node_safe(node, next, &mm)
1604 		drm_mm_remove_node(node);
1605 	drm_mm_takedown(&mm);
1606 	kfree(order);
1607 err_nodes:
1608 	vfree(nodes);
1609 err:
1610 	return ret;
1611 }
1612 
/*
 * Map a node back to its slot index; valid for the topdown/bottomup
 * tests where every node in the mm has the same size.
 */
static unsigned int node_index(const struct drm_mm_node *node)
{
	return div64_u64(node->start, node->size);
}
1617 
/*
 * Verify DRM_MM_INSERT_HIGH: every top-down insertion must land in the
 * highest available slot (tracked via a bitmap of freed indices) and
 * must never leave a hole above itself.
 *
 * Returns 0 on success or a negative errno.
 */
static int igt_topdown(void *ignored)
{
	const struct insert_mode *topdown = &insert_modes[TOPDOWN];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* When allocating top-down, we expect to be returned a node
	 * from a suitable hole at the top of the drm_mm. We check that
	 * the returned node does match the highest available slot.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	for (size = 1; size <= 64; size <<= 1) {
		drm_mm_init(&mm, 0, size*count);
		/* Fill the mm from the top; the hole shrinks from below */
		for (n = 0; n < count; n++) {
			if (!expect_insert(&mm, &nodes[n],
					   size, 0, n,
					   topdown)) {
				pr_err("insert failed, size %u step %d\n", size, n);
				goto out;
			}

			if (drm_mm_hole_follows(&nodes[n])) {
				pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
				       n, nodes[n].start, size);
				goto out;
			}

			if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
				goto out;
		}

		if (!assert_continuous(&mm, size))
			goto out;

		/* Punch random holes, then refill them top-down: each
		 * reinsertion must take the highest freed slot, which we
		 * track with the bitmap.
		 */
		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			for (m = 0; m < n; m++) {
				unsigned int last;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node,
						   size, 0, 0,
						   topdown)) {
					pr_err("insert failed, step %d/%d\n", m, n);
					goto out;
				}

				if (drm_mm_hole_follows(node)) {
					pr_err("hole after topdown insert %d/%d, start=%llx\n",
					       m, n, node->start);
					goto out;
				}

				last = find_last_bit(bitmap, count);
				if (node_index(node) != last) {
					pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
					       m, n, size, last, node_index(node));
					goto out;
				}

				__clear_bit(last, bitmap);
			}

			/* All freed slots must have been refilled */
			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	bitmap_free(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1731 
/*
 * Verify DRM_MM_INSERT_LOW: mirror of igt_topdown() — every bottom-up
 * insertion must land in the lowest available slot, tracked via a
 * bitmap of freed indices.
 *
 * Returns 0 on success or a negative errno.
 */
static int igt_bottomup(void *ignored)
{
	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Like igt_topdown, but instead of searching for the last hole,
	 * we search for the first.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	for (size = 1; size <= 64; size <<= 1) {
		drm_mm_init(&mm, 0, size*count);
		/* Fill the mm from the bottom; the hole shrinks from above */
		for (n = 0; n < count; n++) {
			if (!expect_insert(&mm, &nodes[n],
					   size, 0, n,
					   bottomup)) {
				pr_err("bottomup insert failed, size %u step %d\n", size, n);
				goto out;
			}

			if (!assert_one_hole(&mm, size*(n + 1), size*count))
				goto out;
		}

		if (!assert_continuous(&mm, size))
			goto out;

		/* Punch random holes, then refill them bottom-up: each
		 * reinsertion must take the lowest freed slot.
		 */
		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			for (m = 0; m < n; m++) {
				unsigned int first;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node,
						   size, 0, 0,
						   bottomup)) {
					pr_err("insert failed, step %d/%d\n", m, n);
					goto out;
				}

				first = find_first_bit(bitmap, count);
				if (node_index(node) != first) {
					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
					       m, n, first, node_index(node));
					goto out;
				}
				__clear_bit(first, bitmap);
			}

			/* All freed slots must have been refilled */
			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	bitmap_free(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1831 
__igt_once(unsigned int mode)1832 static int __igt_once(unsigned int mode)
1833 {
1834 	struct drm_mm mm;
1835 	struct drm_mm_node rsvd_lo, rsvd_hi, node;
1836 	int err;
1837 
1838 	drm_mm_init(&mm, 0, 7);
1839 
1840 	memset(&rsvd_lo, 0, sizeof(rsvd_lo));
1841 	rsvd_lo.start = 1;
1842 	rsvd_lo.size = 1;
1843 	err = drm_mm_reserve_node(&mm, &rsvd_lo);
1844 	if (err) {
1845 		pr_err("Could not reserve low node\n");
1846 		goto err;
1847 	}
1848 
1849 	memset(&rsvd_hi, 0, sizeof(rsvd_hi));
1850 	rsvd_hi.start = 5;
1851 	rsvd_hi.size = 1;
1852 	err = drm_mm_reserve_node(&mm, &rsvd_hi);
1853 	if (err) {
1854 		pr_err("Could not reserve low node\n");
1855 		goto err_lo;
1856 	}
1857 
1858 	if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
1859 		pr_err("Expected a hole after lo and high nodes!\n");
1860 		err = -EINVAL;
1861 		goto err_hi;
1862 	}
1863 
1864 	memset(&node, 0, sizeof(node));
1865 	err = drm_mm_insert_node_generic(&mm, &node,
1866 					 2, 0, 0,
1867 					 mode | DRM_MM_INSERT_ONCE);
1868 	if (!err) {
1869 		pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
1870 		       node.start);
1871 		err = -EINVAL;
1872 		goto err_node;
1873 	}
1874 
1875 	err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
1876 	if (err) {
1877 		pr_err("Could not insert the node into the available hole!\n");
1878 		err = -EINVAL;
1879 		goto err_hi;
1880 	}
1881 
1882 err_node:
1883 	drm_mm_remove_node(&node);
1884 err_hi:
1885 	drm_mm_remove_node(&rsvd_hi);
1886 err_lo:
1887 	drm_mm_remove_node(&rsvd_lo);
1888 err:
1889 	drm_mm_takedown(&mm);
1890 	return err;
1891 }
1892 
/* Exercise DRM_MM_INSERT_ONCE with a bottom-up search. */
static int igt_lowest(void *ignored)
{
	return __igt_once(DRM_MM_INSERT_LOW);
}
1897 
/* Exercise DRM_MM_INSERT_ONCE with a top-down search. */
static int igt_highest(void *ignored)
{
	return __igt_once(DRM_MM_INSERT_HIGH);
}
1902 
/*
 * color_adjust callback: shrink the hole by one unit at either end
 * whenever the neighbouring allocated node carries a different color,
 * so that two differently-colored nodes are never placed adjacent.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color,
				     u64 *start,
				     u64 *end)
{
	const struct drm_mm_node *after = list_next_entry(node, node_list);

	if (drm_mm_node_allocated(node) && node->color != color)
		*start += 1;

	if (drm_mm_node_allocated(after) && after->color != color)
		*end -= 1;
}
1915 
colors_abutt(const struct drm_mm_node * node)1916 static bool colors_abutt(const struct drm_mm_node *node)
1917 {
1918 	if (!drm_mm_hole_follows(node) &&
1919 	    drm_mm_node_allocated(list_next_entry(node, node_list))) {
1920 		pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1921 		       node->color, node->start, node->size,
1922 		       list_next_entry(node, node_list)->color,
1923 		       list_next_entry(node, node_list)->start,
1924 		       list_next_entry(node, node_list)->size);
1925 		return true;
1926 	}
1927 
1928 	return false;
1929 }
1930 
/*
 * Exercise color handling: first check that the color passed to insert
 * is stored on the node, then install separate_adjacent_colors as the
 * color_adjust callback and verify that both reservation and insertion
 * keep a hole between any two differently-colored nodes.
 *
 * Returns 0 on success or a negative errno.
 */
static int igt_color(void *ignored)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;
	int ret = -EINVAL, err;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	/* Insert nodes with color == size (no adjustment installed yet) */
	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		if (!expect_insert(&mm, node,
				   n, 0, n,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	/* The stored color must match what we passed in */
	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			pr_err("invalid color stored: expected %lld, found %ld\n",
			       node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->size = 1 + 2*count;
		node->color = node->size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("initial reserve failed!\n");
			ret = err;
			goto out;
		}

		last = node->start + node->size;

		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			/* Reserving flush against the previous node must be
			 * rejected by the color adjustment.
			 */
			err = drm_mm_reserve_node(&mm, node);
			if (err != -ENOSPC) {
				pr_err("reserve %d did not report color overlap! err=%d\n",
				       n, err);
				goto out;
			}

			/* Leave a gap and align the start, then it must fit */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve %d failed, err=%d\n", n, err);
				ret = err;
				goto out;
			}

			last = node->start + node->size;
		}

		/* Let the allocator place colored nodes itself */
		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			if (!expect_insert(&mm, node,
					   n, n, n,
					   mode)) {
				pr_err("%s insert failed, step %d\n",
				       mode->name, n);
				kfree(node);
				goto out;
			}
		}

		/* Verify colors, separation and alignment of every node */
		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				pr_err("%s invalid color stored: expected %lld, found %ld\n",
				       mode->name, node->size, node->color);

				goto out;
			}

			if (colors_abutt(node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
				       mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
2087 
/*
 * Color-aware variant of evict_something(): evict nodes (in @order) to
 * make room for a node of @size/@alignment/@color within
 * [@range_start, @range_end), insert with DRM_MM_INSERT_EVICT, verify
 * placement, range and color separation, then restore the evicted
 * nodes.
 *
 * Returns 0 on success or a negative errno.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->mode);
	/* use_color=true: also collect nodes evicted by color adjustment */
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* Must land inside the requested range */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* Color adjustment must keep a hole to the next allocated node */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back where they were */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
2156 
igt_color_evict(void * ignored)2157 static int igt_color_evict(void *ignored)
2158 {
2159 	DRM_RND_STATE(prng, random_seed);
2160 	const unsigned int total_size = min(8192u, max_iterations);
2161 	const struct insert_mode *mode;
2162 	unsigned long color = 0;
2163 	struct drm_mm mm;
2164 	struct evict_node *nodes;
2165 	struct drm_mm_node *node, *next;
2166 	unsigned int *order, n;
2167 	int ret, err;
2168 
2169 	/* Check that the drm_mm_scan also honours color adjustment when
2170 	 * choosing its victims to create a hole. Our color_adjust does not
2171 	 * allow two nodes to be placed together without an intervening hole
2172 	 * enlarging the set of victims that must be evicted.
2173 	 */
2174 
2175 	ret = -ENOMEM;
2176 	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2177 	if (!nodes)
2178 		goto err;
2179 
2180 	order = drm_random_order(total_size, &prng);
2181 	if (!order)
2182 		goto err_nodes;
2183 
2184 	ret = -EINVAL;
2185 	drm_mm_init(&mm, 0, 2*total_size - 1);
2186 	mm.color_adjust = separate_adjacent_colors;
2187 	for (n = 0; n < total_size; n++) {
2188 		if (!expect_insert(&mm, &nodes[n].node,
2189 				   1, 0, color++,
2190 				   &insert_modes[0])) {
2191 			pr_err("insert failed, step %d\n", n);
2192 			goto out;
2193 		}
2194 	}
2195 
2196 	for (mode = evict_modes; mode->name; mode++) {
2197 		for (n = 1; n <= total_size; n <<= 1) {
2198 			drm_random_reorder(order, total_size, &prng);
2199 			err = evict_color(&mm, 0, U64_MAX,
2200 					  nodes, order, total_size,
2201 					  n, 1, color++,
2202 					  mode);
2203 			if (err) {
2204 				pr_err("%s evict_color(size=%u) failed\n",
2205 				       mode->name, n);
2206 				goto out;
2207 			}
2208 		}
2209 
2210 		for (n = 1; n < total_size; n <<= 1) {
2211 			drm_random_reorder(order, total_size, &prng);
2212 			err = evict_color(&mm, 0, U64_MAX,
2213 					  nodes, order, total_size,
2214 					  total_size/2, n, color++,
2215 					  mode);
2216 			if (err) {
2217 				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2218 				       mode->name, total_size/2, n);
2219 				goto out;
2220 			}
2221 		}
2222 
2223 		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
2224 			unsigned int nsize = (total_size - n + 1) / 2;
2225 
2226 			DRM_MM_BUG_ON(!nsize);
2227 
2228 			drm_random_reorder(order, total_size, &prng);
2229 			err = evict_color(&mm, 0, U64_MAX,
2230 					  nodes, order, total_size,
2231 					  nsize, n, color++,
2232 					  mode);
2233 			if (err) {
2234 				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2235 				       mode->name, nsize, n);
2236 				goto out;
2237 			}
2238 		}
2239 
2240 		cond_resched();
2241 	}
2242 
2243 	ret = 0;
2244 out:
2245 	if (ret)
2246 		show_mm(&mm);
2247 	drm_mm_for_each_node_safe(node, next, &mm)
2248 		drm_mm_remove_node(node);
2249 	drm_mm_takedown(&mm);
2250 	kfree(order);
2251 err_nodes:
2252 	vfree(nodes);
2253 err:
2254 	return ret;
2255 }
2256 
igt_color_evict_range(void * ignored)2257 static int igt_color_evict_range(void *ignored)
2258 {
2259 	DRM_RND_STATE(prng, random_seed);
2260 	const unsigned int total_size = 8192;
2261 	const unsigned int range_size = total_size / 2;
2262 	const unsigned int range_start = total_size / 4;
2263 	const unsigned int range_end = range_start + range_size;
2264 	const struct insert_mode *mode;
2265 	unsigned long color = 0;
2266 	struct drm_mm mm;
2267 	struct evict_node *nodes;
2268 	struct drm_mm_node *node, *next;
2269 	unsigned int *order, n;
2270 	int ret, err;
2271 
2272 	/* Like igt_color_evict(), but limited to small portion of the full
2273 	 * drm_mm range.
2274 	 */
2275 
2276 	ret = -ENOMEM;
2277 	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2278 	if (!nodes)
2279 		goto err;
2280 
2281 	order = drm_random_order(total_size, &prng);
2282 	if (!order)
2283 		goto err_nodes;
2284 
2285 	ret = -EINVAL;
2286 	drm_mm_init(&mm, 0, 2*total_size - 1);
2287 	mm.color_adjust = separate_adjacent_colors;
2288 	for (n = 0; n < total_size; n++) {
2289 		if (!expect_insert(&mm, &nodes[n].node,
2290 				   1, 0, color++,
2291 				   &insert_modes[0])) {
2292 			pr_err("insert failed, step %d\n", n);
2293 			goto out;
2294 		}
2295 	}
2296 
2297 	for (mode = evict_modes; mode->name; mode++) {
2298 		for (n = 1; n <= range_size; n <<= 1) {
2299 			drm_random_reorder(order, range_size, &prng);
2300 			err = evict_color(&mm, range_start, range_end,
2301 					  nodes, order, total_size,
2302 					  n, 1, color++,
2303 					  mode);
2304 			if (err) {
2305 				pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2306 				       mode->name, n, range_start, range_end);
2307 				goto out;
2308 			}
2309 		}
2310 
2311 		for (n = 1; n < range_size; n <<= 1) {
2312 			drm_random_reorder(order, total_size, &prng);
2313 			err = evict_color(&mm, range_start, range_end,
2314 					  nodes, order, total_size,
2315 					  range_size/2, n, color++,
2316 					  mode);
2317 			if (err) {
2318 				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2319 				       mode->name, total_size/2, n, range_start, range_end);
2320 				goto out;
2321 			}
2322 		}
2323 
2324 		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2325 			unsigned int nsize = (range_size - n + 1) / 2;
2326 
2327 			DRM_MM_BUG_ON(!nsize);
2328 
2329 			drm_random_reorder(order, total_size, &prng);
2330 			err = evict_color(&mm, range_start, range_end,
2331 					  nodes, order, total_size,
2332 					  nsize, n, color++,
2333 					  mode);
2334 			if (err) {
2335 				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2336 				       mode->name, nsize, n, range_start, range_end);
2337 				goto out;
2338 			}
2339 		}
2340 
2341 		cond_resched();
2342 	}
2343 
2344 	ret = 0;
2345 out:
2346 	if (ret)
2347 		show_mm(&mm);
2348 	drm_mm_for_each_node_safe(node, next, &mm)
2349 		drm_mm_remove_node(node);
2350 	drm_mm_takedown(&mm);
2351 	kfree(order);
2352 err_nodes:
2353 	vfree(nodes);
2354 err:
2355 	return ret;
2356 }
2357 
2358 #include "drm_selftest.c"
2359 
test_drm_mm_init(void)2360 static int __init test_drm_mm_init(void)
2361 {
2362 	int err;
2363 
2364 	while (!random_seed)
2365 		random_seed = get_random_int();
2366 
2367 	pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2368 		random_seed, max_iterations, max_prime);
2369 	err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2370 
2371 	return err > 0 ? 0 : err;
2372 }
2373 
/* Module exit hook: nothing to tear down — each selftest cleans up the
 * drm_mm instances it creates before returning.
 */
static void __exit test_drm_mm_exit(void)
{
}
2377 
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* 0400: parameters are set at module load time and readable (root only)
 * via sysfs, but not writable afterwards.
 */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
2387