1 /*
2  * Test cases for the drm_mm range manager
3  */
4 
5 #define pr_fmt(fmt) "drm_mm: " fmt
6 
7 #include <linux/module.h>
8 #include <linux/prime_numbers.h>
9 #include <linux/slab.h>
10 #include <linux/random.h>
11 #include <linux/vmalloc.h>
12 
13 #include <drm/drm_mm.h>
14 
15 #include "../lib/drm_random.h"
16 
17 #define TESTS "drm_mm_selftests.h"
18 #include "drm_selftest.h"
19 
20 static unsigned int random_seed;
21 static unsigned int max_iterations = 8192;
22 static unsigned int max_prime = 128;
23 
24 enum {
25 	BEST,
26 	BOTTOMUP,
27 	TOPDOWN,
28 	EVICT,
29 };
30 
31 static const struct insert_mode {
32 	const char *name;
33 	enum drm_mm_insert_mode mode;
34 } insert_modes[] = {
35 	[BEST] = { "best", DRM_MM_INSERT_BEST },
36 	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
37 	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
38 	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
39 	{}
40 }, evict_modes[] = {
41 	{ "bottom-up", DRM_MM_INSERT_LOW },
42 	{ "top-down", DRM_MM_INSERT_HIGH },
43 	{}
44 };
45 
/* Dummy test to confirm the selftest machinery itself is working. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
51 
assert_no_holes(const struct drm_mm * mm)52 static bool assert_no_holes(const struct drm_mm *mm)
53 {
54 	struct drm_mm_node *hole;
55 	u64 hole_start, hole_end;
56 	unsigned long count;
57 
58 	count = 0;
59 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
60 		count++;
61 	if (count) {
62 		pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
63 		return false;
64 	}
65 
66 	drm_mm_for_each_node(hole, mm) {
67 		if (drm_mm_hole_follows(hole)) {
68 			pr_err("Hole follows node, expected none!\n");
69 			return false;
70 		}
71 	}
72 
73 	return true;
74 }
75 
assert_one_hole(const struct drm_mm * mm,u64 start,u64 end)76 static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
77 {
78 	struct drm_mm_node *hole;
79 	u64 hole_start, hole_end;
80 	unsigned long count;
81 	bool ok = true;
82 
83 	if (end <= start)
84 		return true;
85 
86 	count = 0;
87 	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
88 		if (start != hole_start || end != hole_end) {
89 			if (ok)
90 				pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
91 				       hole_start, hole_end,
92 				       start, end);
93 			ok = false;
94 		}
95 		count++;
96 	}
97 	if (count != 1) {
98 		pr_err("Expected to find one hole, found %lu instead\n", count);
99 		ok = false;
100 	}
101 
102 	return ok;
103 }
104 
/* Verify that @mm is fully populated by equally sized nodes packed
 * back-to-back from address 0: the node list is in address order, every
 * node has size @size, no node is followed by a hole, and each node can
 * be found again by a range lookup over its own interval.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		/* List iteration must yield nodes in increasing address order. */
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The interval lookup over [addr, addr + size) must return
		 * exactly this node and nothing else.
		 */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
155 
/* Return node->start modulo @alignment; an alignment of 0 means
 * "no alignment requirement" and always yields 0.
 */
static u64 misalignment(struct drm_mm_node *node, u64 alignment)
{
	u64 rem = 0;

	if (alignment)
		div64_u64_rem(node->start, alignment, &rem);

	return rem;
}
166 
assert_node(struct drm_mm_node * node,struct drm_mm * mm,u64 size,u64 alignment,unsigned long color)167 static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
168 			u64 size, u64 alignment, unsigned long color)
169 {
170 	bool ok = true;
171 
172 	if (!drm_mm_node_allocated(node) || node->mm != mm) {
173 		pr_err("node not allocated\n");
174 		ok = false;
175 	}
176 
177 	if (node->size != size) {
178 		pr_err("node has wrong size, found %llu, expected %llu\n",
179 		       node->size, size);
180 		ok = false;
181 	}
182 
183 	if (misalignment(node, alignment)) {
184 		pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
185 		       node->start, misalignment(node, alignment), alignment);
186 		ok = false;
187 	}
188 
189 	if (node->color != color) {
190 		pr_err("node has wrong color, found %lu, expected %lu\n",
191 		       node->color, color);
192 		ok = false;
193 	}
194 
195 	return ok;
196 }
197 
/* Dump the contents of @mm through the debug printer, tagged with the
 * name of the calling function.
 */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
201 
/* Basic sanity checks for drm_mm_init(): a zeroed struct must not claim
 * to be initialized; a freshly initialized mm must be clean and consist
 * of a single hole spanning the whole range; and reserving then removing
 * a node covering the entire range must toggle between "no holes" and
 * the original single hole.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison first, to check drm_mm_init() overwrites every field. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		show_mm(&mm);
	drm_mm_takedown(&mm);
	return ret;
}
262 
igt_debug(void * ignored)263 static int igt_debug(void *ignored)
264 {
265 	struct drm_mm mm;
266 	struct drm_mm_node nodes[2];
267 	int ret;
268 
269 	/* Create a small drm_mm with a couple of nodes and a few holes, and
270 	 * check that the debug iterator doesn't explode over a trivial drm_mm.
271 	 */
272 
273 	drm_mm_init(&mm, 0, 4096);
274 
275 	memset(nodes, 0, sizeof(nodes));
276 	nodes[0].start = 512;
277 	nodes[0].size = 1024;
278 	ret = drm_mm_reserve_node(&mm, &nodes[0]);
279 	if (ret) {
280 		pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
281 		       nodes[0].start, nodes[0].size);
282 		return ret;
283 	}
284 
285 	nodes[1].size = 1024;
286 	nodes[1].start = 4096 - 512 - nodes[1].size;
287 	ret = drm_mm_reserve_node(&mm, &nodes[1]);
288 	if (ret) {
289 		pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
290 		       nodes[1].start, nodes[1].size);
291 		return ret;
292 	}
293 
294 	show_mm(&mm);
295 	return 0;
296 }
297 
/* Fill in the interval of @node and hand it back, for call chaining. */
static struct drm_mm_node *set_node(struct drm_mm_node *node,
				    u64 start, u64 size)
{
	node->size = size;
	node->start = start;
	return node;
}
305 
expect_reserve_fail(struct drm_mm * mm,struct drm_mm_node * node)306 static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
307 {
308 	int err;
309 
310 	err = drm_mm_reserve_node(mm, node);
311 	if (likely(err == -ENOSPC))
312 		return true;
313 
314 	if (!err) {
315 		pr_err("impossible reserve succeeded, node %llu + %llu\n",
316 		       node->start, node->size);
317 		drm_mm_remove_node(node);
318 	} else {
319 		pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
320 		       err, -ENOSPC, node->start, node->size);
321 	}
322 	return false;
323 }
324 
/* Try to reserve a series of deliberately invalid intervals around the
 * edges of an mm covering [0, count * size) and check that every single
 * attempt is rejected.
 *
 * NOTE: entries such as B(-size, 2*size) rely on u64 wrap-around of the
 * unsigned arithmetic to construct intervals outside of, or straddling,
 * the ends of the managed range.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
369 
/* For exercising drm_mm_reserve_node(), we want to check that
 * reservations outside of the drm_mm range are rejected, and to
 * overlapping and otherwise already occupied ranges. Afterwards,
 * the tree and nodes should be intact.
 *
 * Fixed: the reinsert-failure diagnostic reported nodes[n].start, but
 * the node actually reinserted in that loop is nodes[order[n]].
 */
static int __igt_reserve(unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err_order;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	if (!check_reserve_boundaries(&mm, count, size))
		goto out;

	/* Fill every slot exactly once, in random order. */
	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			pr_err("reserved node not allocated! step %d, start %llu\n",
			       n, nodes[n].start);
			goto out;
		}

		/* Reserving the same interval twice must be refused. */
		if (!expect_reserve_fail(&mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(&mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[order[n]].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(&mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp,
						  size * n,
						  size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve failed, step %d/%d, start %llu\n",
				       m, n, node->start);
				ret = err;
				goto out;
			}
		}

		/* Advance the window so each prime removes a fresh subset. */
		o += n;

		if (!assert_continuous(&mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err_order:
	kfree(order);
err:
	return ret;
}
497 
igt_reserve(void * ignored)498 static int igt_reserve(void *ignored)
499 {
500 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
501 	int n, ret;
502 
503 	for_each_prime_number_from(n, 1, 54) {
504 		u64 size = BIT_ULL(n);
505 
506 		ret = __igt_reserve(count, size - 1);
507 		if (ret)
508 			return ret;
509 
510 		ret = __igt_reserve(count, size);
511 		if (ret)
512 			return ret;
513 
514 		ret = __igt_reserve(count, size + 1);
515 		if (ret)
516 			return ret;
517 
518 		cond_resched();
519 	}
520 
521 	return 0;
522 }
523 
expect_insert(struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,const struct insert_mode * mode)524 static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
525 			  u64 size, u64 alignment, unsigned long color,
526 			  const struct insert_mode *mode)
527 {
528 	int err;
529 
530 	err = drm_mm_insert_node_generic(mm, node,
531 					 size, alignment, color,
532 					 mode->mode);
533 	if (err) {
534 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
535 		       size, alignment, color, mode->name, err);
536 		return false;
537 	}
538 
539 	if (!assert_node(node, mm, size, alignment, color)) {
540 		drm_mm_remove_node(node);
541 		return false;
542 	}
543 
544 	return true;
545 }
546 
expect_insert_fail(struct drm_mm * mm,u64 size)547 static bool expect_insert_fail(struct drm_mm *mm, u64 size)
548 {
549 	struct drm_mm_node tmp = {};
550 	int err;
551 
552 	err = drm_mm_insert_node(mm, &tmp, size);
553 	if (likely(err == -ENOSPC))
554 		return true;
555 
556 	if (!err) {
557 		pr_err("impossible insert succeeded, node %llu + %llu\n",
558 		       tmp.start, tmp.size);
559 		drm_mm_remove_node(&tmp);
560 	} else {
561 		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
562 		       err, -ENOSPC, size);
563 	}
564 	return false;
565 }
566 
/* Fill the whole mm with @count nodes of @size under every insertion
 * mode, then check ordering, refusal when full, refill after removal,
 * and partial remove/reinsert cycles. With @replace, each insertion
 * goes through a temporary node that is swapped in afterwards via
 * drm_mm_replace_node() to exercise replacement as well.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* With replace, insert into a scratch node first. */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				/* The target must inherit size/align/color. */
				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			/* Slide the window so each prime hits a fresh subset. */
			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start the next insertion mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
700 
igt_insert(void * ignored)701 static int igt_insert(void *ignored)
702 {
703 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
704 	unsigned int n;
705 	int ret;
706 
707 	for_each_prime_number_from(n, 1, 54) {
708 		u64 size = BIT_ULL(n);
709 
710 		ret = __igt_insert(count, size - 1, false);
711 		if (ret)
712 			return ret;
713 
714 		ret = __igt_insert(count, size, false);
715 		if (ret)
716 			return ret;
717 
718 		ret = __igt_insert(count, size + 1, false);
719 		if (ret)
720 			return ret;
721 
722 		cond_resched();
723 	}
724 
725 	return 0;
726 }
727 
igt_replace(void * ignored)728 static int igt_replace(void *ignored)
729 {
730 	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
731 	unsigned int n;
732 	int ret;
733 
734 	/* Reuse igt_insert to exercise replacement by inserting a dummy node,
735 	 * then replacing it with the intended node. We want to check that
736 	 * the tree is intact and all the information we need is carried
737 	 * across to the target node.
738 	 */
739 
740 	for_each_prime_number_from(n, 1, 54) {
741 		u64 size = BIT_ULL(n);
742 
743 		ret = __igt_insert(count, size - 1, true);
744 		if (ret)
745 			return ret;
746 
747 		ret = __igt_insert(count, size, true);
748 		if (ret)
749 			return ret;
750 
751 		ret = __igt_insert(count, size + 1, true);
752 		if (ret)
753 			return ret;
754 
755 		cond_resched();
756 	}
757 
758 	return 0;
759 }
760 
expect_insert_in_range(struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,u64 range_start,u64 range_end,const struct insert_mode * mode)761 static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
762 				   u64 size, u64 alignment, unsigned long color,
763 				   u64 range_start, u64 range_end,
764 				   const struct insert_mode *mode)
765 {
766 	int err;
767 
768 	err = drm_mm_insert_node_in_range(mm, node,
769 					  size, alignment, color,
770 					  range_start, range_end,
771 					  mode->mode);
772 	if (err) {
773 		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
774 		       size, alignment, color, mode->name,
775 		       range_start, range_end, err);
776 		return false;
777 	}
778 
779 	if (!assert_node(node, mm, size, alignment, color)) {
780 		drm_mm_remove_node(node);
781 		return false;
782 	}
783 
784 	return true;
785 }
786 
expect_insert_in_range_fail(struct drm_mm * mm,u64 size,u64 range_start,u64 range_end)787 static bool expect_insert_in_range_fail(struct drm_mm *mm,
788 					u64 size,
789 					u64 range_start,
790 					u64 range_end)
791 {
792 	struct drm_mm_node tmp = {};
793 	int err;
794 
795 	err = drm_mm_insert_node_in_range(mm, &tmp,
796 					  size, 0, 0,
797 					  range_start, range_end,
798 					  0);
799 	if (likely(err == -ENOSPC))
800 		return true;
801 
802 	if (!err) {
803 		pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
804 		       tmp.start, tmp.size, range_start, range_end);
805 		drm_mm_remove_node(&tmp);
806 	} else {
807 		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
808 		       err, -ENOSPC, size, range_start, range_end);
809 	}
810 
811 	return false;
812 }
813 
/* Check that [start, end) of @mm is completely filled with size-aligned
 * nodes of @size, that no further node of @size fits, and that nothing
 * lies outside the range.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first slot at or above @start (start rounded up
	 * to a multiple of @size).
	 */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* Holes ending beyond @end are permitted (region outside
		 * the tested range is deliberately left empty).
		 */
		if (drm_mm_hole_follows(node) &&
		    drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	/* NOTE(review): ->allocated is read directly below; presumably
	 * equivalent to drm_mm_node_allocated() — confirm against drm_mm.h.
	 */
	if (start > 0) {
		node = __drm_mm_interval_first(mm, 0, start - 1);
		if (node->allocated) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	if (end < U64_MAX) {
		node = __drm_mm_interval_first(mm, end, U64_MAX);
		if (node->allocated) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
874 
/* Like __igt_insert(), but populate only the sub-range [start, end) of
 * an mm covering [0, count * size), for every insertion mode, then check
 * the range is full and refills in place after single-node removal.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last slot indices that fit entirely inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			/* The freed slot is the only hole, so the node must
			 * come back at exactly the same address.
			 */
			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start the next insertion mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
962 
insert_outside_range(void)963 static int insert_outside_range(void)
964 {
965 	struct drm_mm mm;
966 	const unsigned int start = 1024;
967 	const unsigned int end = 2048;
968 	const unsigned int size = end - start;
969 
970 	drm_mm_init(&mm, start, size);
971 
972 	if (!expect_insert_in_range_fail(&mm, 1, 0, start))
973 		return -EINVAL;
974 
975 	if (!expect_insert_in_range_fail(&mm, size,
976 					 start - size/2, start + (size+1)/2))
977 		return -EINVAL;
978 
979 	if (!expect_insert_in_range_fail(&mm, size,
980 					 end - (size+1)/2, end + size/2))
981 		return -EINVAL;
982 
983 	if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
984 		return -EINVAL;
985 
986 	drm_mm_takedown(&mm);
987 	return 0;
988 }
989 
igt_insert_range(void * ignored)990 static int igt_insert_range(void *ignored)
991 {
992 	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
993 	unsigned int n;
994 	int ret;
995 
996 	/* Check that requests outside the bounds of drm_mm are rejected. */
997 	ret = insert_outside_range();
998 	if (ret)
999 		return ret;
1000 
1001 	for_each_prime_number_from(n, 1, 50) {
1002 		const u64 size = BIT_ULL(n);
1003 		const u64 max = count * size;
1004 
1005 		ret = __igt_insert_range(count, size, 0, max);
1006 		if (ret)
1007 			return ret;
1008 
1009 		ret = __igt_insert_range(count, size, 1, max);
1010 		if (ret)
1011 			return ret;
1012 
1013 		ret = __igt_insert_range(count, size, 0, max - 1);
1014 		if (ret)
1015 			return ret;
1016 
1017 		ret = __igt_insert_range(count, size, 0, max/2);
1018 		if (ret)
1019 			return ret;
1020 
1021 		ret = __igt_insert_range(count, size, max/2, max);
1022 		if (ret)
1023 			return ret;
1024 
1025 		ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1026 		if (ret)
1027 			return ret;
1028 
1029 		cond_resched();
1030 	}
1031 
1032 	return 0;
1033 }
1034 
/* For each of the possible insertion modes, we pick a few
 * arbitrary alignments and check that the inserted node
 * meets our requirements.
 */
static int igt_align(void *ignored)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;
	int ret = -EINVAL;

	nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	/* Range [1, U64_MAX - 1) so alignment is never trivially satisfied
	 * by a zero start address.
	 */
	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		/* Align each node to a prime, sized to the next prime so the
		 * size never divides evenly into the alignment.
		 */
		for_each_prime_number_from(prime, 1, max_count) {
			u64 size = next_prime_number(prime);

			if (!expect_insert(&mm, &nodes[i],
					   size, prime, i,
					   mode)) {
				pr_err("%s insert failed with alignment=%d",
				       mode->name, prime);
				goto out;
			}

			i++;
		}

		/* Start the next insertion mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
1088 
igt_align_pot(int max)1089 static int igt_align_pot(int max)
1090 {
1091 	struct drm_mm mm;
1092 	struct drm_mm_node *node, *next;
1093 	int bit;
1094 	int ret = -EINVAL;
1095 
1096 	/* Check that we can align to the full u64 address space */
1097 
1098 	drm_mm_init(&mm, 1, U64_MAX - 2);
1099 
1100 	for (bit = max - 1; bit; bit--) {
1101 		u64 align, size;
1102 
1103 		node = kzalloc(sizeof(*node), GFP_KERNEL);
1104 		if (!node) {
1105 			ret = -ENOMEM;
1106 			goto out;
1107 		}
1108 
1109 		align = BIT_ULL(bit);
1110 		size = BIT_ULL(bit-1) + 1;
1111 		if (!expect_insert(&mm, node,
1112 				   size, align, bit,
1113 				   &insert_modes[0])) {
1114 			pr_err("insert failed with alignment=%llx [%d]",
1115 			       align, bit);
1116 			goto out;
1117 		}
1118 
1119 		cond_resched();
1120 	}
1121 
1122 	ret = 0;
1123 out:
1124 	drm_mm_for_each_node_safe(node, next, &mm) {
1125 		drm_mm_remove_node(node);
1126 		kfree(node);
1127 	}
1128 	drm_mm_takedown(&mm);
1129 	return ret;
1130 }
1131 
/* Power-of-two alignment checks over a 32-bit address range. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1136 
/* Power-of-two alignment checks over the full 64-bit address range. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1141 
/* Log the parameters and resulting hit window of an eviction scan. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1148 
/* Log up to @count holes of @mm, each bracketed by its allocated
 * neighbour nodes (if any) for context.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		/* NOTE(review): kasprintf() may fail and leave node1/node2
		 * NULL; they are then passed to pr_info("%s...") below —
		 * presumably relying on printk printing "(null)". Confirm.
		 */
		if (hole->allocated)
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (next->allocated)
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1180 
/* A drm_mm_node plus a list link for collecting eviction candidates. */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;	/* entry on a local evict_list */
};
1185 
/*
 * Feed up to @count nodes (visited in @order, if given) into an eviction
 * @scan, gather the subset the scanner selects onto @evict_list, and
 * remove those nodes from the drm_mm to open the hole.
 *
 * Returns true if a suitable set of victims was found and evicted,
 * false otherwise.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	/* Add candidates until the scanner reports a large enough hole. */
	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* list_add() prepends, so walking the list removes blocks in the
	 * reverse order they were added; candidates the scanner did not
	 * mark for eviction are dropped from the list.
	 */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* With color adjustment active, neighbouring nodes may also
		 * have to be evicted to satisfy the colored hole.
		 */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		/* Without color adjustment there must be nothing extra. */
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1232 
/*
 * Check that running a scan over every node and then backing all of them
 * out leaves the drm_mm untouched: every node stays allocated and remains
 * on the mm's node list.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	/* Add every node to the scan, then remove every one again. */
	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Clear the link as a marker; it is repopulated below for
		 * each node still present on the mm's node list.
		 */
		e->link.next = NULL;
	}

	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* Any node whose marker was not restored fell off the list. */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1279 
/*
 * Check that a scan asking for the whole range selects every node for
 * eviction, that removing them all leaves one hole spanning [0,
 * total_size), and that each node can then be reserved back into its
 * original position.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	/* Every node added must be reported as an eviction victim. */
	err = 0;
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				pr_err("Node %lld not marked for eviction!\n",
				       e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	if (!assert_one_hole(mm, 0, total_size))
		return false;

	/* Restore the original layout (start/size kept in each node). */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1328 
/*
 * Evict just enough nodes (considered in @order) to make room for a new
 * @size/@alignment allocation inside [@range_start, @range_end], insert
 * into the freed hole, verify the placement exactly fills it, then
 * reinsert the evicted nodes in their original slots.
 *
 * Returns 0 on success or a negative error code.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	/* The eviction hole must now accept the new allocation. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The allocation should consume the hole completely. */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back where they came from. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1399 
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	/* Fill the whole mm with unit-sized nodes. */
	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes with unit alignment. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size with power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments with complementary sizes. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1509 
igt_evict_range(void * ignored)1510 static int igt_evict_range(void *ignored)
1511 {
1512 	DRM_RND_STATE(prng, random_seed);
1513 	const unsigned int size = 8192;
1514 	const unsigned int range_size = size / 2;
1515 	const unsigned int range_start = size / 4;
1516 	const unsigned int range_end = range_start + range_size;
1517 	const struct insert_mode *mode;
1518 	struct drm_mm mm;
1519 	struct evict_node *nodes;
1520 	struct drm_mm_node *node, *next;
1521 	unsigned int *order, n;
1522 	int ret, err;
1523 
1524 	/* Like igt_evict() but now we are limiting the search to a
1525 	 * small portion of the full drm_mm.
1526 	 */
1527 
1528 	ret = -ENOMEM;
1529 	nodes = vzalloc(array_size(size, sizeof(*nodes)));
1530 	if (!nodes)
1531 		goto err;
1532 
1533 	order = drm_random_order(size, &prng);
1534 	if (!order)
1535 		goto err_nodes;
1536 
1537 	ret = -EINVAL;
1538 	drm_mm_init(&mm, 0, size);
1539 	for (n = 0; n < size; n++) {
1540 		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
1541 		if (err) {
1542 			pr_err("insert failed, step %d\n", n);
1543 			ret = err;
1544 			goto out;
1545 		}
1546 	}
1547 
1548 	for (mode = evict_modes; mode->name; mode++) {
1549 		for (n = 1; n <= range_size; n <<= 1) {
1550 			drm_random_reorder(order, size, &prng);
1551 			err = evict_something(&mm, range_start, range_end,
1552 					      nodes, order, size,
1553 					      n, 1,
1554 					      mode);
1555 			if (err) {
1556 				pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1557 				       mode->name, n, range_start, range_end);
1558 				goto out;
1559 			}
1560 		}
1561 
1562 		for (n = 1; n <= range_size; n <<= 1) {
1563 			drm_random_reorder(order, size, &prng);
1564 			err = evict_something(&mm, range_start, range_end,
1565 					      nodes, order, size,
1566 					      range_size/2, n,
1567 					      mode);
1568 			if (err) {
1569 				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1570 				       mode->name, range_size/2, n, range_start, range_end);
1571 				goto out;
1572 			}
1573 		}
1574 
1575 		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1576 			unsigned int nsize = (range_size - n + 1) / 2;
1577 
1578 			DRM_MM_BUG_ON(!nsize);
1579 
1580 			drm_random_reorder(order, size, &prng);
1581 			err = evict_something(&mm, range_start, range_end,
1582 					      nodes, order, size,
1583 					      nsize, n,
1584 					      mode);
1585 			if (err) {
1586 				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1587 				       mode->name, nsize, n, range_start, range_end);
1588 				goto out;
1589 			}
1590 		}
1591 
1592 		cond_resched();
1593 	}
1594 
1595 	ret = 0;
1596 out:
1597 	drm_mm_for_each_node_safe(node, next, &mm)
1598 		drm_mm_remove_node(node);
1599 	drm_mm_takedown(&mm);
1600 	kfree(order);
1601 err_nodes:
1602 	vfree(nodes);
1603 err:
1604 	return ret;
1605 }
1606 
node_index(const struct drm_mm_node * node)1607 static unsigned int node_index(const struct drm_mm_node *node)
1608 {
1609 	return div64_u64(node->start, node->size);
1610 }
1611 
static int igt_topdown(void *ignored)
{
	const struct insert_mode *topdown = &insert_modes[TOPDOWN];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap = NULL;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* When allocating top-down, we expect to be returned a node
	 * from a suitable hole at the top of the drm_mm. We check that
	 * the returned node does match the highest available slot.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	/* One bit per slot; count (8192) is a multiple of BITS_PER_LONG. */
	bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
			 GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	for (size = 1; size <= 64; size <<= 1) {
		/* Filling top-down must leave exactly one hole at the
		 * bottom, shrinking with each insertion.
		 */
		drm_mm_init(&mm, 0, size*count);
		for (n = 0; n < count; n++) {
			if (!expect_insert(&mm, &nodes[n],
					   size, 0, n,
					   topdown)) {
				pr_err("insert failed, size %u step %d\n", size, n);
				goto out;
			}

			if (drm_mm_hole_follows(&nodes[n])) {
				pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
				       n, nodes[n].start, size);
				goto out;
			}

			if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
				goto out;
		}

		if (!assert_continuous(&mm, size))
			goto out;

		/* Punch out n random slots (tracked in the bitmap), then
		 * refill: each insertion must land in the highest free slot.
		 */
		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			for (m = 0; m < n; m++) {
				unsigned int last;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node,
						   size, 0, 0,
						   topdown)) {
					pr_err("insert failed, step %d/%d\n", m, n);
					goto out;
				}

				if (drm_mm_hole_follows(node)) {
					pr_err("hole after topdown insert %d/%d, start=%llx\n",
					       m, n, node->start);
					goto out;
				}

				last = find_last_bit(bitmap, count);
				if (node_index(node) != last) {
					pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
					       m, n, size, last, node_index(node));
					goto out;
				}

				__clear_bit(last, bitmap);
			}

			/* All punched slots must have been refilled. */
			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	kfree(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1726 
static int igt_bottomup(void *ignored)
{
	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Like igt_topdown, but instead of searching for the last hole,
	 * we search for the first.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	/* One bit per slot; count (8192) is a multiple of BITS_PER_LONG. */
	bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
			 GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	for (size = 1; size <= 64; size <<= 1) {
		/* Filling bottom-up must leave one hole at the top,
		 * shrinking with each insertion.
		 */
		drm_mm_init(&mm, 0, size*count);
		for (n = 0; n < count; n++) {
			if (!expect_insert(&mm, &nodes[n],
					   size, 0, n,
					   bottomup)) {
				pr_err("bottomup insert failed, size %u step %d\n", size, n);
				goto out;
			}

			if (!assert_one_hole(&mm, size*(n + 1), size*count))
				goto out;
		}

		if (!assert_continuous(&mm, size))
			goto out;

		/* Punch out n random slots (tracked in the bitmap), then
		 * refill: each insertion must land in the lowest free slot.
		 */
		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			for (m = 0; m < n; m++) {
				unsigned int first;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node,
						   size, 0, 0,
						   bottomup)) {
					pr_err("insert failed, step %d/%d\n", m, n);
					goto out;
				}

				first = find_first_bit(bitmap, count);
				if (node_index(node) != first) {
					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
					       m, n, first, node_index(node));
					goto out;
				}
				__clear_bit(first, bitmap);
			}

			/* All punched slots must have been refilled. */
			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	kfree(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1827 
__igt_once(unsigned int mode)1828 static int __igt_once(unsigned int mode)
1829 {
1830 	struct drm_mm mm;
1831 	struct drm_mm_node rsvd_lo, rsvd_hi, node;
1832 	int err;
1833 
1834 	drm_mm_init(&mm, 0, 7);
1835 
1836 	memset(&rsvd_lo, 0, sizeof(rsvd_lo));
1837 	rsvd_lo.start = 1;
1838 	rsvd_lo.size = 1;
1839 	err = drm_mm_reserve_node(&mm, &rsvd_lo);
1840 	if (err) {
1841 		pr_err("Could not reserve low node\n");
1842 		goto err;
1843 	}
1844 
1845 	memset(&rsvd_hi, 0, sizeof(rsvd_hi));
1846 	rsvd_hi.start = 5;
1847 	rsvd_hi.size = 1;
1848 	err = drm_mm_reserve_node(&mm, &rsvd_hi);
1849 	if (err) {
1850 		pr_err("Could not reserve low node\n");
1851 		goto err_lo;
1852 	}
1853 
1854 	if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
1855 		pr_err("Expected a hole after lo and high nodes!\n");
1856 		err = -EINVAL;
1857 		goto err_hi;
1858 	}
1859 
1860 	memset(&node, 0, sizeof(node));
1861 	err = drm_mm_insert_node_generic(&mm, &node,
1862 					 2, 0, 0,
1863 					 mode | DRM_MM_INSERT_ONCE);
1864 	if (!err) {
1865 		pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
1866 		       node.start);
1867 		err = -EINVAL;
1868 		goto err_node;
1869 	}
1870 
1871 	err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
1872 	if (err) {
1873 		pr_err("Could not insert the node into the available hole!\n");
1874 		err = -EINVAL;
1875 		goto err_hi;
1876 	}
1877 
1878 err_node:
1879 	drm_mm_remove_node(&node);
1880 err_hi:
1881 	drm_mm_remove_node(&rsvd_hi);
1882 err_lo:
1883 	drm_mm_remove_node(&rsvd_lo);
1884 err:
1885 	drm_mm_takedown(&mm);
1886 	return err;
1887 }
1888 
/* Check DRM_MM_INSERT_ONCE with bottom-up search order. */
static int igt_lowest(void *ignored)
{
	return __igt_once(DRM_MM_INSERT_LOW);
}
1893 
/* Check DRM_MM_INSERT_ONCE with top-down search order. */
static int igt_highest(void *ignored)
{
	return __igt_once(DRM_MM_INSERT_HIGH);
}
1898 
/*
 * color_adjust callback: shrink the hole by one unit at each end that
 * abuts an allocated node of a different color, forcing a gap between
 * differently colored neighbours.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color,
				     u64 *start,
				     u64 *end)
{
	const struct drm_mm_node *next;

	if (node->allocated && node->color != color)
		*start += 1;

	next = list_next_entry(node, node_list);
	if (next->allocated && next->color != color)
		*end -= 1;
}
1911 
colors_abutt(const struct drm_mm_node * node)1912 static bool colors_abutt(const struct drm_mm_node *node)
1913 {
1914 	if (!drm_mm_hole_follows(node) &&
1915 	    list_next_entry(node, node_list)->allocated) {
1916 		pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1917 		       node->color, node->start, node->size,
1918 		       list_next_entry(node, node_list)->color,
1919 		       list_next_entry(node, node_list)->start,
1920 		       list_next_entry(node, node_list)->size);
1921 		return true;
1922 	}
1923 
1924 	return false;
1925 }
1926 
igt_color(void * ignored)1927 static int igt_color(void *ignored)
1928 {
1929 	const unsigned int count = min(4096u, max_iterations);
1930 	const struct insert_mode *mode;
1931 	struct drm_mm mm;
1932 	struct drm_mm_node *node, *nn;
1933 	unsigned int n;
1934 	int ret = -EINVAL, err;
1935 
1936 	/* Color adjustment complicates everything. First we just check
1937 	 * that when we insert a node we apply any color_adjustment callback.
1938 	 * The callback we use should ensure that there is a gap between
1939 	 * any two nodes, and so after each insertion we check that those
1940 	 * holes are inserted and that they are preserved.
1941 	 */
1942 
1943 	drm_mm_init(&mm, 0, U64_MAX);
1944 
1945 	for (n = 1; n <= count; n++) {
1946 		node = kzalloc(sizeof(*node), GFP_KERNEL);
1947 		if (!node) {
1948 			ret = -ENOMEM;
1949 			goto out;
1950 		}
1951 
1952 		if (!expect_insert(&mm, node,
1953 				   n, 0, n,
1954 				   &insert_modes[0])) {
1955 			pr_err("insert failed, step %d\n", n);
1956 			kfree(node);
1957 			goto out;
1958 		}
1959 	}
1960 
1961 	drm_mm_for_each_node_safe(node, nn, &mm) {
1962 		if (node->color != node->size) {
1963 			pr_err("invalid color stored: expected %lld, found %ld\n",
1964 			       node->size, node->color);
1965 
1966 			goto out;
1967 		}
1968 
1969 		drm_mm_remove_node(node);
1970 		kfree(node);
1971 	}
1972 
1973 	/* Now, let's start experimenting with applying a color callback */
1974 	mm.color_adjust = separate_adjacent_colors;
1975 	for (mode = insert_modes; mode->name; mode++) {
1976 		u64 last;
1977 
1978 		node = kzalloc(sizeof(*node), GFP_KERNEL);
1979 		if (!node) {
1980 			ret = -ENOMEM;
1981 			goto out;
1982 		}
1983 
1984 		node->size = 1 + 2*count;
1985 		node->color = node->size;
1986 
1987 		err = drm_mm_reserve_node(&mm, node);
1988 		if (err) {
1989 			pr_err("initial reserve failed!\n");
1990 			ret = err;
1991 			goto out;
1992 		}
1993 
1994 		last = node->start + node->size;
1995 
1996 		for (n = 1; n <= count; n++) {
1997 			int rem;
1998 
1999 			node = kzalloc(sizeof(*node), GFP_KERNEL);
2000 			if (!node) {
2001 				ret = -ENOMEM;
2002 				goto out;
2003 			}
2004 
2005 			node->start = last;
2006 			node->size = n + count;
2007 			node->color = node->size;
2008 
2009 			err = drm_mm_reserve_node(&mm, node);
2010 			if (err != -ENOSPC) {
2011 				pr_err("reserve %d did not report color overlap! err=%d\n",
2012 				       n, err);
2013 				goto out;
2014 			}
2015 
2016 			node->start += n + 1;
2017 			rem = misalignment(node, n + count);
2018 			node->start += n + count - rem;
2019 
2020 			err = drm_mm_reserve_node(&mm, node);
2021 			if (err) {
2022 				pr_err("reserve %d failed, err=%d\n", n, err);
2023 				ret = err;
2024 				goto out;
2025 			}
2026 
2027 			last = node->start + node->size;
2028 		}
2029 
2030 		for (n = 1; n <= count; n++) {
2031 			node = kzalloc(sizeof(*node), GFP_KERNEL);
2032 			if (!node) {
2033 				ret = -ENOMEM;
2034 				goto out;
2035 			}
2036 
2037 			if (!expect_insert(&mm, node,
2038 					   n, n, n,
2039 					   mode)) {
2040 				pr_err("%s insert failed, step %d\n",
2041 				       mode->name, n);
2042 				kfree(node);
2043 				goto out;
2044 			}
2045 		}
2046 
2047 		drm_mm_for_each_node_safe(node, nn, &mm) {
2048 			u64 rem;
2049 
2050 			if (node->color != node->size) {
2051 				pr_err("%s invalid color stored: expected %lld, found %ld\n",
2052 				       mode->name, node->size, node->color);
2053 
2054 				goto out;
2055 			}
2056 
2057 			if (colors_abutt(node))
2058 				goto out;
2059 
2060 			div64_u64_rem(node->start, node->size, &rem);
2061 			if (rem) {
2062 				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
2063 				       mode->name, node->start, node->size, rem);
2064 				goto out;
2065 			}
2066 
2067 			drm_mm_remove_node(node);
2068 			kfree(node);
2069 		}
2070 
2071 		cond_resched();
2072 	}
2073 
2074 	ret = 0;
2075 out:
2076 	drm_mm_for_each_node_safe(node, nn, &mm) {
2077 		drm_mm_remove_node(node);
2078 		kfree(node);
2079 	}
2080 	drm_mm_takedown(&mm);
2081 	return ret;
2082 }
2083 
/*
 * Like evict_something(), but with a color constraint: evict victims for
 * a @size/@alignment/@color allocation inside [@range_start, @range_end],
 * insert into the freed hole, verify placement and color separation, then
 * reinsert the evicted nodes.
 *
 * Returns 0 on success or a negative error code.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->mode);
	/* use_color=true: the scan may also evict abutting neighbours. */
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The color_adjust callback must have kept a gap around us. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back where they came from. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
2152 
static int igt_color_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	/* Double-sized range: each unit node needs a unit gap after it. */
	ret = -EINVAL;
	drm_mm_init(&mm, 0, 2*total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		if (!expect_insert(&mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes with unit alignment. */
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  n, 1, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u) failed\n",
				       mode->name, n);
				goto out;
			}
		}

		/* Fixed size with power-of-two alignments. */
		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  total_size/2, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, total_size/2, n);
				goto out;
			}
		}

		/* Prime alignments with complementary sizes. */
		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  nsize, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	if (ret)
		show_mm(&mm);
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
2252 
igt_color_evict_range(void * ignored)2253 static int igt_color_evict_range(void *ignored)
2254 {
2255 	DRM_RND_STATE(prng, random_seed);
2256 	const unsigned int total_size = 8192;
2257 	const unsigned int range_size = total_size / 2;
2258 	const unsigned int range_start = total_size / 4;
2259 	const unsigned int range_end = range_start + range_size;
2260 	const struct insert_mode *mode;
2261 	unsigned long color = 0;
2262 	struct drm_mm mm;
2263 	struct evict_node *nodes;
2264 	struct drm_mm_node *node, *next;
2265 	unsigned int *order, n;
2266 	int ret, err;
2267 
2268 	/* Like igt_color_evict(), but limited to small portion of the full
2269 	 * drm_mm range.
2270 	 */
2271 
2272 	ret = -ENOMEM;
2273 	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2274 	if (!nodes)
2275 		goto err;
2276 
2277 	order = drm_random_order(total_size, &prng);
2278 	if (!order)
2279 		goto err_nodes;
2280 
2281 	ret = -EINVAL;
2282 	drm_mm_init(&mm, 0, 2*total_size - 1);
2283 	mm.color_adjust = separate_adjacent_colors;
2284 	for (n = 0; n < total_size; n++) {
2285 		if (!expect_insert(&mm, &nodes[n].node,
2286 				   1, 0, color++,
2287 				   &insert_modes[0])) {
2288 			pr_err("insert failed, step %d\n", n);
2289 			goto out;
2290 		}
2291 	}
2292 
2293 	for (mode = evict_modes; mode->name; mode++) {
2294 		for (n = 1; n <= range_size; n <<= 1) {
2295 			drm_random_reorder(order, range_size, &prng);
2296 			err = evict_color(&mm, range_start, range_end,
2297 					  nodes, order, total_size,
2298 					  n, 1, color++,
2299 					  mode);
2300 			if (err) {
2301 				pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2302 				       mode->name, n, range_start, range_end);
2303 				goto out;
2304 			}
2305 		}
2306 
2307 		for (n = 1; n < range_size; n <<= 1) {
2308 			drm_random_reorder(order, total_size, &prng);
2309 			err = evict_color(&mm, range_start, range_end,
2310 					  nodes, order, total_size,
2311 					  range_size/2, n, color++,
2312 					  mode);
2313 			if (err) {
2314 				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2315 				       mode->name, total_size/2, n, range_start, range_end);
2316 				goto out;
2317 			}
2318 		}
2319 
2320 		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2321 			unsigned int nsize = (range_size - n + 1) / 2;
2322 
2323 			DRM_MM_BUG_ON(!nsize);
2324 
2325 			drm_random_reorder(order, total_size, &prng);
2326 			err = evict_color(&mm, range_start, range_end,
2327 					  nodes, order, total_size,
2328 					  nsize, n, color++,
2329 					  mode);
2330 			if (err) {
2331 				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2332 				       mode->name, nsize, n, range_start, range_end);
2333 				goto out;
2334 			}
2335 		}
2336 
2337 		cond_resched();
2338 	}
2339 
2340 	ret = 0;
2341 out:
2342 	if (ret)
2343 		show_mm(&mm);
2344 	drm_mm_for_each_node_safe(node, next, &mm)
2345 		drm_mm_remove_node(node);
2346 	drm_mm_takedown(&mm);
2347 	kfree(order);
2348 err_nodes:
2349 	vfree(nodes);
2350 err:
2351 	return ret;
2352 }
2353 
2354 #include "drm_selftest.c"
2355 
test_drm_mm_init(void)2356 static int __init test_drm_mm_init(void)
2357 {
2358 	int err;
2359 
2360 	while (!random_seed)
2361 		random_seed = get_random_int();
2362 
2363 	pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2364 		random_seed, max_iterations, max_prime);
2365 	err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2366 
2367 	return err > 0 ? 0 : err;
2368 }
2369 
static void __exit test_drm_mm_exit(void)
{
	/* Nothing to tear down; provided so the test module is unloadable. */
}
2373 
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* Read-only (0400) parameters: set at load time to reproduce a run. */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
2383