1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Test cases for the drm_mm range manager
4 *
5 * Copyright (c) 2022 Arthur Grillo <arthur.grillo@usp.br>
6 */
7
8 #include <kunit/test.h>
9
10 #include <linux/prime_numbers.h>
11 #include <linux/slab.h>
12 #include <linux/random.h>
13 #include <linux/vmalloc.h>
14 #include <linux/ktime.h>
15
16 #include <drm/drm_mm.h>
17
18 #include "../lib/drm_random.h"
19
/* Test knobs (mirroring the legacy drm_mm selftest module parameters):
 * a PRNG seed for reproducible randomised runs, and bounds that keep the
 * randomised/prime-driven tests within a reasonable runtime.
 */
static unsigned int random_seed;
static unsigned int max_iterations = 8192;
static unsigned int max_prime = 128;
23
/* Symbolic indices into insert_modes[] below. */
enum {
	BEST,
	BOTTOMUP,
	TOPDOWN,
	EVICT,
};

/* Tables of insertion strategies to iterate over; each table is terminated
 * by an empty sentinel entry (NULL name) so loops can stop on mode->name.
 * evict_modes[] is the subset used by the eviction tests.
 */
static const struct insert_mode {
	const char *name;
	enum drm_mm_insert_mode mode;
} insert_modes[] = {
	[BEST] = { "best", DRM_MM_INSERT_BEST },
	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
	{}
}, evict_modes[] = {
	{ "bottom-up", DRM_MM_INSERT_LOW },
	{ "top-down", DRM_MM_INSERT_HIGH },
	{}
};
45
assert_no_holes(struct kunit * test,const struct drm_mm * mm)46 static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
47 {
48 struct drm_mm_node *hole;
49 u64 hole_start, __always_unused hole_end;
50 unsigned long count;
51
52 count = 0;
53 drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
54 count++;
55 if (count) {
56 KUNIT_FAIL(test,
57 "Expected to find no holes (after reserve), found %lu instead\n", count);
58 return false;
59 }
60
61 drm_mm_for_each_node(hole, mm) {
62 if (drm_mm_hole_follows(hole)) {
63 KUNIT_FAIL(test, "Hole follows node, expected none!\n");
64 return false;
65 }
66 }
67
68 return true;
69 }
70
assert_one_hole(struct kunit * test,const struct drm_mm * mm,u64 start,u64 end)71 static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)
72 {
73 struct drm_mm_node *hole;
74 u64 hole_start, hole_end;
75 unsigned long count;
76 bool ok = true;
77
78 if (end <= start)
79 return true;
80
81 count = 0;
82 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
83 if (start != hole_start || end != hole_end) {
84 if (ok)
85 KUNIT_FAIL(test,
86 "empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
87 hole_start, hole_end, start, end);
88 ok = false;
89 }
90 count++;
91 }
92 if (count != 1) {
93 KUNIT_FAIL(test, "Expected to find one hole, found %lu instead\n", count);
94 ok = false;
95 }
96
97 return ok;
98 }
99
assert_continuous(struct kunit * test,const struct drm_mm * mm,u64 size)100 static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
101 {
102 struct drm_mm_node *node, *check, *found;
103 unsigned long n;
104 u64 addr;
105
106 if (!assert_no_holes(test, mm))
107 return false;
108
109 n = 0;
110 addr = 0;
111 drm_mm_for_each_node(node, mm) {
112 if (node->start != addr) {
113 KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n",
114 n, addr, node->start);
115 return false;
116 }
117
118 if (node->size != size) {
119 KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n",
120 n, size, node->size);
121 return false;
122 }
123
124 if (drm_mm_hole_follows(node)) {
125 KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n);
126 return false;
127 }
128
129 found = NULL;
130 drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
131 if (node != check) {
132 KUNIT_FAIL(test,
133 "lookup return wrong node, expected start %llx, found %llx\n",
134 node->start, check->start);
135 return false;
136 }
137 found = check;
138 }
139 if (!found) {
140 KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size);
141 return false;
142 }
143
144 addr += size;
145 n++;
146 }
147
148 return true;
149 }
150
/* Return how far @node->start is off the requested @alignment
 * (0 means perfectly aligned; alignment 0 means "don't care").
 */
static u64 misalignment(struct drm_mm_node *node, u64 alignment)
{
	u64 remainder = 0;

	if (alignment)
		div64_u64_rem(node->start, alignment, &remainder);

	return remainder;
}
161
assert_node(struct kunit * test,struct drm_mm_node * node,struct drm_mm * mm,u64 size,u64 alignment,unsigned long color)162 static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,
163 u64 size, u64 alignment, unsigned long color)
164 {
165 bool ok = true;
166
167 if (!drm_mm_node_allocated(node) || node->mm != mm) {
168 KUNIT_FAIL(test, "node not allocated\n");
169 ok = false;
170 }
171
172 if (node->size != size) {
173 KUNIT_FAIL(test, "node has wrong size, found %llu, expected %llu\n",
174 node->size, size);
175 ok = false;
176 }
177
178 if (misalignment(node, alignment)) {
179 KUNIT_FAIL(test,
180 "node is misaligned, start %llx rem %llu, expected alignment %llu\n",
181 node->start, misalignment(node, alignment), alignment);
182 ok = false;
183 }
184
185 if (node->color != color) {
186 KUNIT_FAIL(test, "node has wrong color, found %lu, expected %lu\n",
187 node->color, color);
188 ok = false;
189 }
190
191 return ok;
192 }
193
/* Basic sanity checks on drm_mm initialisation: a zeroed struct must not
 * claim to be initialised, a freshly initialised range manager is clean
 * and exposes one hole covering the whole range, and reserving/removing
 * the entire range toggles between "no holes" and "one full hole".
 */
static void drm_test_mm_init(struct kunit *test)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	KUNIT_ASSERT_FALSE_MSG(test, drm_mm_initialized(&mm),
			       "zeroed mm claims to be initialized\n");

	/* Poison the struct first so drm_mm_init() must set every field. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		KUNIT_FAIL(test, "mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		KUNIT_FAIL(test, "mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(test, &mm, 0, size)) {
		KUNIT_FAIL(test, "");
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	if (drm_mm_reserve_node(&mm, &tmp)) {
		KUNIT_FAIL(test, "failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(test, &mm)) {
		KUNIT_FAIL(test, "");
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(test, &mm, 0, size)) {
		KUNIT_FAIL(test, "");
		goto out;
	}

out:
	drm_mm_takedown(&mm);
}
247
drm_test_mm_debug(struct kunit * test)248 static void drm_test_mm_debug(struct kunit *test)
249 {
250 struct drm_mm mm;
251 struct drm_mm_node nodes[2];
252
253 /* Create a small drm_mm with a couple of nodes and a few holes, and
254 * check that the debug iterator doesn't explode over a trivial drm_mm.
255 */
256
257 drm_mm_init(&mm, 0, 4096);
258
259 memset(nodes, 0, sizeof(nodes));
260 nodes[0].start = 512;
261 nodes[0].size = 1024;
262 KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[0]),
263 "failed to reserve node[0] {start=%lld, size=%lld)\n",
264 nodes[0].start, nodes[0].size);
265
266 nodes[1].size = 1024;
267 nodes[1].start = 4096 - 512 - nodes[1].size;
268 KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
269 "failed to reserve node[0] {start=%lld, size=%lld)\n",
270 nodes[0].start, nodes[0].size);
271 }
272
/* Convenience helper: fill in @node's placement and hand it back so it
 * can be used inline as a call argument.
 */
static struct drm_mm_node *set_node(struct drm_mm_node *node,
				    u64 start, u64 size)
{
	node->size = size;
	node->start = start;

	return node;
}
280
expect_reserve_fail(struct kunit * test,struct drm_mm * mm,struct drm_mm_node * node)281 static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
282 {
283 int err;
284
285 err = drm_mm_reserve_node(mm, node);
286 if (likely(err == -ENOSPC))
287 return true;
288
289 if (!err) {
290 KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n",
291 node->start, node->size);
292 drm_mm_remove_node(node);
293 } else {
294 KUNIT_FAIL(test,
295 "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
296 err, -ENOSPC, node->start, node->size);
297 }
298 return false;
299 }
300
/* Attempt a table of degenerate and out-of-range reservations against @mm
 * (which manages [0, count * size)) and require every one of them to be
 * rejected. Negated sizes/starts (e.g. -size) deliberately wrap around as
 * u64 to probe overflow handling in drm_mm_reserve_node().
 */
static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;	/* stringified placement for diagnostics */
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2 * size),
		B(0, -size),
		B(size, -size),
		B(count * size, size),
		B(count * size, -size),
		B(count * size, count * size),
		B(count * size, -count * size),
		B(count * size, -(count + 1) * size),
		B((count + 1) * size, size),
		B((count + 1) * size, -size),
		B((count + 1) * size, -2 * size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start,
							    boundaries[n].size))) {
			KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n",
				   n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
343
/* Exercise drm_mm_reserve_node(): reservations outside the managed range
 * must be rejected, as must overlapping and already-occupied ranges.
 * Remove/reinsert cycles must leave the tree fully populated and ordered.
 *
 * Fixes: in the reorder loop the failure message printed nodes[n].start,
 * but the node actually being re-reserved is nodes[order[n]] — the
 * diagnostic now reports the correct node.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* Degenerate/out-of-range requests must all fail up front. */
	if (!check_reserve_boundaries(test, &mm, count, size))
		goto out;

	/* Reserve every slot in random order; re-reserving must fail. */
	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
				   n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			KUNIT_FAIL(test, "reserved node not allocated! step %d, start %llu\n",
				   n, nodes[n].start);
			goto out;
		}

		if (!expect_reserve_fail(test, &mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(test, &mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			/* Report the node being reserved, i.e. nodes[order[n]]. */
			KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
				   n, nodes[order[n]].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(test, &mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				KUNIT_FAIL(test, "reserve failed, step %d/%d, start %llu\n",
					   m, n, node->start);
				ret = err;
				goto out;
			}
		}

		o += n;

		if (!assert_continuous(test, &mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
	kfree(order);
err:
	return ret;
}
465
drm_test_mm_reserve(struct kunit * test)466 static void drm_test_mm_reserve(struct kunit *test)
467 {
468 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
469 int n;
470
471 for_each_prime_number_from(n, 1, 54) {
472 u64 size = BIT_ULL(n);
473
474 KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1));
475 KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size));
476 KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1));
477
478 cond_resched();
479 }
480 }
481
expect_insert(struct kunit * test,struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,const struct insert_mode * mode)482 static bool expect_insert(struct kunit *test, struct drm_mm *mm,
483 struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color,
484 const struct insert_mode *mode)
485 {
486 int err;
487
488 err = drm_mm_insert_node_generic(mm, node,
489 size, alignment, color,
490 mode->mode);
491 if (err) {
492 KUNIT_FAIL(test,
493 "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
494 size, alignment, color, mode->name, err);
495 return false;
496 }
497
498 if (!assert_node(test, node, mm, size, alignment, color)) {
499 drm_mm_remove_node(node);
500 return false;
501 }
502
503 return true;
504 }
505
expect_insert_fail(struct kunit * test,struct drm_mm * mm,u64 size)506 static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
507 {
508 struct drm_mm_node tmp = {};
509 int err;
510
511 err = drm_mm_insert_node(mm, &tmp, size);
512 if (likely(err == -ENOSPC))
513 return true;
514
515 if (!err) {
516 KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n",
517 tmp.start, tmp.size);
518 drm_mm_remove_node(&tmp);
519 } else {
520 KUNIT_FAIL(test,
521 "impossible insert failed with wrong error %d [expected %d], size %llu\n",
522 err, -ENOSPC, size);
523 }
524 return false;
525 }
526
/* Fill a drm_mm of count * size with @count nodes of @size for every
 * insertion mode, then exercise remove/reinsert cycles and check the
 * allocator never fails early and keeps the range contiguously packed.
 *
 * When @replace is true, each insertion goes through a stack temporary
 * which is then swapped in via drm_mm_replace_node(), verifying that the
 * replacement fully inherits the allocation.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(array_size(count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode, insert into the temporary first. */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
				KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n",
					   mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					KUNIT_FAIL(test,
						   "replaced old-node still allocated! step %d\n",
						   n);
					goto out;
				}

				if (!assert_node(test, &nodes[n], &mm, size, 0, n)) {
					KUNIT_FAIL(test,
						   "replaced node did not inherit parameters, size %llu step %d\n",
						   size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					KUNIT_FAIL(test,
						   "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
						   tmp.start, size, nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(test, &mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(test, &mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) {
				KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n",
					   mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				KUNIT_FAIL(test,
					   "%s reinsert node moved, step %d, expected %llx, found %llx\n",
					   mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(test, &mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
					KUNIT_FAIL(test,
						   "%s multiple reinsert failed, size %llu step %d\n",
						   mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(test, &mm, size))
				goto out;

			if (!expect_insert_fail(test, &mm, size))
				goto out;
		}

		/* Start the next mode from an empty range manager. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
	return ret;
}
662
drm_test_mm_insert(struct kunit * test)663 static void drm_test_mm_insert(struct kunit *test)
664 {
665 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
666 unsigned int n;
667
668 for_each_prime_number_from(n, 1, 54) {
669 u64 size = BIT_ULL(n);
670
671 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false));
672 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false));
673 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false));
674
675 cond_resched();
676 }
677 }
678
drm_test_mm_replace(struct kunit * test)679 static void drm_test_mm_replace(struct kunit *test)
680 {
681 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
682 unsigned int n;
683
684 /* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node,
685 * then replacing it with the intended node. We want to check that
686 * the tree is intact and all the information we need is carried
687 * across to the target node.
688 */
689
690 for_each_prime_number_from(n, 1, 54) {
691 u64 size = BIT_ULL(n);
692
693 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true));
694 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true));
695 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true));
696
697 cond_resched();
698 }
699 }
700
expect_insert_in_range(struct kunit * test,struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,u64 range_start,u64 range_end,const struct insert_mode * mode)701 static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
702 u64 size, u64 alignment, unsigned long color,
703 u64 range_start, u64 range_end, const struct insert_mode *mode)
704 {
705 int err;
706
707 err = drm_mm_insert_node_in_range(mm, node,
708 size, alignment, color,
709 range_start, range_end,
710 mode->mode);
711 if (err) {
712 KUNIT_FAIL(test,
713 "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
714 size, alignment, color, mode->name,
715 range_start, range_end, err);
716 return false;
717 }
718
719 if (!assert_node(test, node, mm, size, alignment, color)) {
720 drm_mm_remove_node(node);
721 return false;
722 }
723
724 return true;
725 }
726
expect_insert_in_range_fail(struct kunit * test,struct drm_mm * mm,u64 size,u64 range_start,u64 range_end)727 static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
728 u64 size, u64 range_start, u64 range_end)
729 {
730 struct drm_mm_node tmp = {};
731 int err;
732
733 err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end,
734 0);
735 if (likely(err == -ENOSPC))
736 return true;
737
738 if (!err) {
739 KUNIT_FAIL(test,
740 "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
741 tmp.start, tmp.size, range_start, range_end);
742 drm_mm_remove_node(&tmp);
743 } else {
744 KUNIT_FAIL(test,
745 "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
746 err, -ENOSPC, size, range_start, range_end);
747 }
748
749 return false;
750 }
751
/* Check that the restricted range [start, end) of @mm is completely and
 * contiguously filled with @size-sized nodes (so a further ranged insert
 * must fail), and that no allocated node lies outside the range.
 */
static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
				       u64 size, u64 start, u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	/* Full means one more insert must be rejected with -ENOSPC. */
	if (!expect_insert_in_range_fail(test, mm, size, start, end))
		return false;

	/* Index of the first slot at or after @start, in units of @size. */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			KUNIT_FAIL(test,
				   "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
				   n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n",
				   n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n",
				   n, size, node->size);
			return false;
		}

		/* Trailing holes are only allowed to extend past @end. */
		if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) {
			KUNIT_FAIL(test, "node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	/* Nothing may be allocated below @start... */
	if (start > 0) {
		node = __drm_mm_interval_first(mm, 0, start - 1);
		if (drm_mm_node_allocated(node)) {
			KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n",
				   node->start, node->size, start);
			return false;
		}
	}

	/* ...nor at or beyond @end. */
	if (end < U64_MAX) {
		node = __drm_mm_interval_first(mm, end, U64_MAX);
		if (drm_mm_node_allocated(node)) {
			KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n",
				   node->start, node->size, end);
			return false;
		}
	}

	return true;
}
810
/* Very similar to __drm_test_mm_insert(), but instead of populating the
 * full drm_mm we fill only the sub-range [start, end) with @size-sized
 * nodes for every insertion mode, then check removal/reinsertion keeps
 * the sub-range contiguously full and leaves the rest untouched.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size,
				      u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last slot indices that fit entirely inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		/* Fill every slot of the sub-range. */
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
						    start, end, mode)) {
				KUNIT_FAIL(test,
					   "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
					   mode->name, size, n, start_n, end_n, start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
			KUNIT_FAIL(test,
				   "%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
				   mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
						    start, end, mode)) {
				KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				KUNIT_FAIL(test,
					   "%s reinsert node moved, step %d, expected %llx, found %llx\n",
					   mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
			KUNIT_FAIL(test,
				   "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
				   mode->name, start, end, size);
			goto out;
		}

		/* Start the next mode from an empty range manager. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
	return ret;
}
897
insert_outside_range(struct kunit * test)898 static int insert_outside_range(struct kunit *test)
899 {
900 struct drm_mm mm;
901 const unsigned int start = 1024;
902 const unsigned int end = 2048;
903 const unsigned int size = end - start;
904
905 drm_mm_init(&mm, start, size);
906
907 if (!expect_insert_in_range_fail(test, &mm, 1, 0, start))
908 return -EINVAL;
909
910 if (!expect_insert_in_range_fail(test, &mm, size,
911 start - size / 2, start + (size + 1) / 2))
912 return -EINVAL;
913
914 if (!expect_insert_in_range_fail(test, &mm, size,
915 end - (size + 1) / 2, end + size / 2))
916 return -EINVAL;
917
918 if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size))
919 return -EINVAL;
920
921 drm_mm_takedown(&mm);
922 return 0;
923 }
924
drm_test_mm_insert_range(struct kunit * test)925 static void drm_test_mm_insert_range(struct kunit *test)
926 {
927 const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
928 unsigned int n;
929
930 /* Check that requests outside the bounds of drm_mm are rejected. */
931 KUNIT_ASSERT_FALSE(test, insert_outside_range(test));
932
933 for_each_prime_number_from(n, 1, 50) {
934 const u64 size = BIT_ULL(n);
935 const u64 max = count * size;
936
937 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max));
938 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max));
939 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
940 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
941 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
942 max / 2, max / 2));
943 KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
944 max / 4 + 1, 3 * max / 4 - 1));
945
946 cond_resched();
947 }
948 }
949
prepare_frag(struct kunit * test,struct drm_mm * mm,struct drm_mm_node * nodes,unsigned int num_insert,const struct insert_mode * mode)950 static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
951 unsigned int num_insert, const struct insert_mode *mode)
952 {
953 unsigned int size = 4096;
954 unsigned int i;
955
956 for (i = 0; i < num_insert; i++) {
957 if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) {
958 KUNIT_FAIL(test, "%s insert failed\n", mode->name);
959 return -EINVAL;
960 }
961 }
962
963 /* introduce fragmentation by freeing every other node */
964 for (i = 0; i < num_insert; i++) {
965 if (i % 2 == 0)
966 drm_mm_remove_node(&nodes[i]);
967 }
968
969 return 0;
970 }
971
get_insert_time(struct kunit * test,struct drm_mm * mm,unsigned int num_insert,struct drm_mm_node * nodes,const struct insert_mode * mode)972 static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
973 unsigned int num_insert, struct drm_mm_node *nodes,
974 const struct insert_mode *mode)
975 {
976 unsigned int size = 8192;
977 ktime_t start;
978 unsigned int i;
979
980 start = ktime_get();
981 for (i = 0; i < num_insert; i++) {
982 if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode) != 0) {
983 KUNIT_FAIL(test, "%s insert failed\n", mode->name);
984 return 0;
985 }
986 }
987
988 return ktime_to_ns(ktime_sub(ktime_get(), start));
989 }
990
/* Check that insertion into a fragmented address space does not degrade
 * badly: the time for 2 * insert_size insertions must stay within
 * scale_factor times the time for insert_size insertions.
 */
static void drm_test_mm_frag(struct kunit *test)
{
	struct drm_mm mm;
	const struct insert_mode *mode;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int insert_size = 10000;
	unsigned int scale_factor = 4;

	/* We need 4 * insert_size nodes to hold the intermediate allocated
	 * drm_mm nodes:
	 *   - insert_size for prepare_frag(),
	 *   - insert_size for the first get_insert_time() pass,
	 *   - 2 * insert_size for the second get_insert_time() pass.
	 */
	nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	/* For BOTTOMUP and TOPDOWN, we first fragment the
	 * address space using prepare_frag() and then try to verify
	 * that insertions scale quadratically from 10k to 20k insertions
	 */
	drm_mm_init(&mm, 1, U64_MAX - 2);
	for (mode = insert_modes; mode->name; mode++) {
		u64 insert_time1, insert_time2;

		/* Only the LOW/HIGH (bottom-up/top-down) modes are timed. */
		if (mode->mode != DRM_MM_INSERT_LOW &&
		    mode->mode != DRM_MM_INSERT_HIGH)
			continue;

		if (prepare_frag(test, &mm, nodes, insert_size, mode))
			goto err;

		insert_time1 = get_insert_time(test, &mm, insert_size,
					       nodes + insert_size, mode);
		if (insert_time1 == 0)
			goto err;

		insert_time2 = get_insert_time(test, &mm, (insert_size * 2),
					       nodes + insert_size * 2, mode);
		if (insert_time2 == 0)
			goto err;

		kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
			   mode->name, insert_size, insert_size * 2, insert_time1, insert_time2);

		/* Doubling the workload may cost at most scale_factor more time. */
		if (insert_time2 > (scale_factor * insert_time1)) {
			KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n",
				   mode->name, insert_time2 - (scale_factor * insert_time1));
			goto err;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
	}

err:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
}
1052
/* For each of the possible insertion modes, we pick a few
 * arbitrary alignments (successive primes) and check that the inserted
 * node meets our size, alignment and color requirements.
 */
static void drm_test_mm_align(struct kunit *test)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;

	nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		/* Alignment is the current prime, size the next prime up. */
		for_each_prime_number_from(prime, 1, max_count) {
			u64 size = next_prime_number(prime);

			if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) {
				KUNIT_FAIL(test, "%s insert failed with alignment=%d",
					   mode->name, prime);
				goto out;
			}

			i++;
		}

		/* Empty the range manager before trying the next mode. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
}
1099
drm_test_mm_align_pot(struct kunit * test,int max)1100 static void drm_test_mm_align_pot(struct kunit *test, int max)
1101 {
1102 struct drm_mm mm;
1103 struct drm_mm_node *node, *next;
1104 int bit;
1105
1106 /* Check that we can align to the full u64 address space */
1107
1108 drm_mm_init(&mm, 1, U64_MAX - 2);
1109
1110 for (bit = max - 1; bit; bit--) {
1111 u64 align, size;
1112
1113 node = kzalloc(sizeof(*node), GFP_KERNEL);
1114 if (!node) {
1115 KUNIT_FAIL(test, "failed to allocate node");
1116 goto out;
1117 }
1118
1119 align = BIT_ULL(bit);
1120 size = BIT_ULL(bit - 1) + 1;
1121 if (!expect_insert(test, &mm, node, size, align, bit, &insert_modes[0])) {
1122 KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit);
1123 goto out;
1124 }
1125
1126 cond_resched();
1127 }
1128
1129 out:
1130 drm_mm_for_each_node_safe(node, next, &mm) {
1131 drm_mm_remove_node(node);
1132 kfree(node);
1133 }
1134 drm_mm_takedown(&mm);
1135 }
1136
/* Power-of-two alignment coverage up to 2^31. */
static void drm_test_mm_align32(struct kunit *test)
{
	drm_test_mm_align_pot(test, 32);
}
1141
/* Power-of-two alignment coverage up to 2^63. */
static void drm_test_mm_align64(struct kunit *test)
{
	drm_test_mm_align_pot(test, 64);
}
1146
show_scan(struct kunit * test,const struct drm_mm_scan * scan)1147 static void show_scan(struct kunit *test, const struct drm_mm_scan *scan)
1148 {
1149 kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
1150 scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color);
1151 }
1152
/* Print up to @count holes of @mm along with their allocated neighbours
 * (if any) on either side, to help diagnose eviction/insertion failures.
 */
static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		if (drm_mm_node_allocated(hole))
			node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (drm_mm_node_allocated(next))
			node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		/* NOTE(review): node1/node2 may be NULL here (boundary hole,
		 * or kasprintf failure); printk renders a NULL %s as "(null)".
		 */
		kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1,
			   hole_start, hole_end, hole_end - hole_start, node2);

		/* kfree(NULL) is a no-op, so unconditional free is safe */
		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1180
/* A drm_mm_node plus a list link so the eviction tests can collect the
 * nodes selected for eviction on a local list and reinsert them later.
 */
struct evict_node {
	struct drm_mm_node node;	/* embedded range-manager node */
	struct list_head link;		/* membership on an evict_list */
};
1185
/* Run an eviction scan over @count nodes (visited in @order when given),
 * gather the scanner's chosen victims on @evict_list and remove them from
 * the drm_mm. With @use_color, also collect any extra nodes the scanner
 * evicts to satisfy color adjustment. Returns false if no eviction set
 * was found or color eviction misbehaved.
 */
static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan,
			struct evict_node *nodes, unsigned int *order, unsigned int count,
			bool use_color, struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	/* Feed blocks to the scanner until it reports a suitable hole */
	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Wind the scan back, keeping only the blocks it actually selected */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		KUNIT_FAIL(test,
			   "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
			   scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Drain overlapping nodes evicted purely for color spacing */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		/* Without color adjustment there must be nothing left over */
		if (drm_mm_scan_color_evict(scan)) {
			KUNIT_FAIL(test,
				   "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1231
/* Sanity check that a scan which is fully wound back (every block added
 * is subsequently removed) leaves the drm_mm completely untouched: all
 * nodes still allocated, still linked into the mm, still continuous.
 */
static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
			  unsigned int total_size, struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	/* Undo the entire scan; nothing should end up marked for eviction */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Reuse link.next as a "seen in the mm" marker (cleared) */
		e->link.next = NULL;
	}

	/* Walk the mm and flag every node that is still linked in... */
	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* ...then verify every node was reachable from the mm */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			KUNIT_FAIL(test, "node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(test, mm, nodes[0].node.size);
}
1277
/* Check that a scan sized to cover the whole drm_mm selects every node,
 * that removing them all leaves a single hole spanning the range, and
 * that the nodes can then be reserved back at their former positions.
 */
static bool evict_everything(struct kunit *test, struct drm_mm *mm,
			     unsigned int total_size, struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	/* Every block fed to the scan must be marked for eviction */
	err = 0;
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				/* report only the first offender */
				KUNIT_FAIL(test, "Node %lld not marked for eviction!\n",
					   e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	/* Eviction of everything must leave one hole covering the range */
	if (!assert_one_hole(test, mm, 0, total_size))
		return false;

	/* Restore the original layout */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
				   e->node.start);
			return false;
		}
	}

	return assert_continuous(test, mm, nodes[0].node.size);
}
1325
/* Evict a set of nodes to make room for a size/alignment request inside
 * [range_start, range_end), insert into the resulting hole, validate the
 * placement, then restore the evicted nodes. Returns 0 on success or a
 * negative error code.
 */
static int evict_something(struct kunit *test, struct drm_mm *mm,
			   u64 range_start, u64 range_end, struct evict_node *nodes,
			   unsigned int *order, unsigned int count, unsigned int size,
			   unsigned int alignment, const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start,
				    range_end, mode->mode);
	if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n",
			   size, alignment);
		show_scan(test, &scan);
		show_holes(test, mm, 3);
		return err;
	}

	/* The insertion must land inside the requested range... */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		KUNIT_FAIL(test,
			   "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
			   tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* ...and exactly fill the hole opened up by the scan */
	if (!assert_node(test, &tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		KUNIT_FAIL(test,
			   "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
			   tmp.size, size, alignment, misalignment(&tmp, alignment),
			   tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back at their original positions */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
				   e->node.start);
			return err;
		}
	}

	if (!assert_continuous(test, mm, nodes[0].node.size)) {
		KUNIT_FAIL(test, "range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1389
/* Populate a full drm_mm with unit-sized nodes and repeatedly run
 * evict_something() with varying sizes and alignments, feeding the
 * scanner the nodes in random order, across all eviction modes.
 */
static void drm_test_mm_evict(struct kunit *test)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	/* Fill the mm completely with unit-sized nodes */
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
			KUNIT_FAIL(test, "insert failed, step %d\n", n);
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(test, &mm, size, nodes)) {
		KUNIT_FAIL(test, "evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(test, &mm, size, nodes)) {
		KUNIT_FAIL(test, "evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1,
					    mode)) {
				KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n",
					   mode->name, n);
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
					    size / 2, n, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u, alignment=%u) failed\n",
					   mode->name, size / 2, n);
				goto out;
			}
		}

		/* Prime alignments with sizes shrunk to keep the request feasible */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
					    nsize, n, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u, alignment=%u) failed\n",
					   mode->name, nsize, n);
				goto out;
			}
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
}
1480
/* Range-restricted variant of drm_test_mm_evict(): evictions must be
 * satisfied within the middle half of the address space.
 */
static void drm_test_mm_evict_range(struct kunit *test)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const unsigned int range_size = size / 2;
	const unsigned int range_start = size / 4;
	const unsigned int range_end = range_start + range_size;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;

	/* Like drm_test_mm_evict() but now we are limiting the search to a
	 * small portion of the full drm_mm.
	 */

	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	/* Fill the whole mm; only the sub-range is eligible for eviction */
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
			KUNIT_FAIL(test, "insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, range_start, range_end, nodes,
					    order, size, n, 1, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u) failed with range [%u, %u]\n",
					   mode->name, n, range_start, range_end);
				goto out;
			}
		}

		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, range_start, range_end, nodes,
					    order, size, range_size / 2, n, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
					   mode->name, range_size / 2, n, range_start, range_end);
				goto out;
			}
		}

		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
			unsigned int nsize = (range_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			if (evict_something(test, &mm, range_start, range_end, nodes,
					    order, size, nsize, n, mode)) {
				KUNIT_FAIL(test,
					   "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
					   mode->name, nsize, n, range_start, range_end);
				goto out;
			}
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
}
1562
node_index(const struct drm_mm_node * node)1563 static unsigned int node_index(const struct drm_mm_node *node)
1564 {
1565 return div64_u64(node->start, node->size);
1566 }
1567
drm_test_mm_topdown(struct kunit * test)1568 static void drm_test_mm_topdown(struct kunit *test)
1569 {
1570 const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1571
1572 DRM_RND_STATE(prng, random_seed);
1573 const unsigned int count = 8192;
1574 unsigned int size;
1575 unsigned long *bitmap;
1576 struct drm_mm mm;
1577 struct drm_mm_node *nodes, *node, *next;
1578 unsigned int *order, n, m, o = 0;
1579
1580 /* When allocating top-down, we expect to be returned a node
1581 * from a suitable hole at the top of the drm_mm. We check that
1582 * the returned node does match the highest available slot.
1583 */
1584
1585 nodes = vzalloc(array_size(count, sizeof(*nodes)));
1586 KUNIT_ASSERT_TRUE(test, nodes);
1587
1588 bitmap = bitmap_zalloc(count, GFP_KERNEL);
1589 if (!bitmap)
1590 goto err_nodes;
1591
1592 order = drm_random_order(count, &prng);
1593 if (!order)
1594 goto err_bitmap;
1595
1596 for (size = 1; size <= 64; size <<= 1) {
1597 drm_mm_init(&mm, 0, size * count);
1598 for (n = 0; n < count; n++) {
1599 if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) {
1600 KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n);
1601 goto out;
1602 }
1603
1604 if (drm_mm_hole_follows(&nodes[n])) {
1605 KUNIT_FAIL(test,
1606 "hole after topdown insert %d, start=%llx\n, size=%u",
1607 n, nodes[n].start, size);
1608 goto out;
1609 }
1610
1611 if (!assert_one_hole(test, &mm, 0, size * (count - n - 1)))
1612 goto out;
1613 }
1614
1615 if (!assert_continuous(test, &mm, size))
1616 goto out;
1617
1618 drm_random_reorder(order, count, &prng);
1619 for_each_prime_number_from(n, 1, min(count, max_prime)) {
1620 for (m = 0; m < n; m++) {
1621 node = &nodes[order[(o + m) % count]];
1622 drm_mm_remove_node(node);
1623 __set_bit(node_index(node), bitmap);
1624 }
1625
1626 for (m = 0; m < n; m++) {
1627 unsigned int last;
1628
1629 node = &nodes[order[(o + m) % count]];
1630 if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) {
1631 KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
1632 goto out;
1633 }
1634
1635 if (drm_mm_hole_follows(node)) {
1636 KUNIT_FAIL(test,
1637 "hole after topdown insert %d/%d, start=%llx\n",
1638 m, n, node->start);
1639 goto out;
1640 }
1641
1642 last = find_last_bit(bitmap, count);
1643 if (node_index(node) != last) {
1644 KUNIT_FAIL(test,
1645 "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1646 m, n, size, last, node_index(node));
1647 goto out;
1648 }
1649
1650 __clear_bit(last, bitmap);
1651 }
1652
1653 DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1654
1655 o += n;
1656 }
1657
1658 drm_mm_for_each_node_safe(node, next, &mm)
1659 drm_mm_remove_node(node);
1660 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1661 cond_resched();
1662 }
1663
1664 out:
1665 drm_mm_for_each_node_safe(node, next, &mm)
1666 drm_mm_remove_node(node);
1667 drm_mm_takedown(&mm);
1668 kfree(order);
1669 err_bitmap:
1670 bitmap_free(bitmap);
1671 err_nodes:
1672 vfree(nodes);
1673 }
1674
/* Mirror image of drm_test_mm_topdown(): bottom-up insertion must always
 * land in the lowest free slot (tracked via a bitmap).
 */
static void drm_test_mm_bottomup(struct kunit *test)
{
	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];

	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;

	/* Like drm_test_mm_topdown, but instead of searching for the last hole,
	 * we search for the first.
	 */

	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	for (size = 1; size <= 64; size <<= 1) {
		drm_mm_init(&mm, 0, size * count);
		/* Fill from the bottom; the hole must shrink from the low end */
		for (n = 0; n < count; n++) {
			if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) {
				KUNIT_FAIL(test,
					   "bottomup insert failed, size %u step %d\n", size, n);
				goto out;
			}

			if (!assert_one_hole(test, &mm, size * (n + 1), size * count))
				goto out;
		}

		if (!assert_continuous(test, &mm, size))
			goto out;

		/* Punch random holes and check refills take the lowest slot */
		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			for (m = 0; m < n; m++) {
				unsigned int first;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) {
					KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
					goto out;
				}

				first = find_first_bit(bitmap, count);
				if (node_index(node) != first) {
					KUNIT_FAIL(test,
						   "node %d/%d not inserted into bottom hole, expected %d, found %d\n",
						   m, n, first, node_index(node));
					goto out;
				}
				__clear_bit(first, bitmap);
			}

			/* All punched holes must have been refilled */
			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	bitmap_free(bitmap);
err_nodes:
	vfree(nodes);
}
1766
drm_test_mm_once(struct kunit * test,unsigned int mode)1767 static void drm_test_mm_once(struct kunit *test, unsigned int mode)
1768 {
1769 struct drm_mm mm;
1770 struct drm_mm_node rsvd_lo, rsvd_hi, node;
1771
1772 drm_mm_init(&mm, 0, 7);
1773
1774 memset(&rsvd_lo, 0, sizeof(rsvd_lo));
1775 rsvd_lo.start = 1;
1776 rsvd_lo.size = 1;
1777 if (drm_mm_reserve_node(&mm, &rsvd_lo)) {
1778 KUNIT_FAIL(test, "Could not reserve low node\n");
1779 goto err;
1780 }
1781
1782 memset(&rsvd_hi, 0, sizeof(rsvd_hi));
1783 rsvd_hi.start = 5;
1784 rsvd_hi.size = 1;
1785 if (drm_mm_reserve_node(&mm, &rsvd_hi)) {
1786 KUNIT_FAIL(test, "Could not reserve low node\n");
1787 goto err_lo;
1788 }
1789
1790 if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
1791 KUNIT_FAIL(test, "Expected a hole after lo and high nodes!\n");
1792 goto err_hi;
1793 }
1794
1795 memset(&node, 0, sizeof(node));
1796 if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) {
1797 KUNIT_FAIL(test, "Could not insert the node into the available hole!\n");
1798 goto err_hi;
1799 }
1800
1801 drm_mm_remove_node(&node);
1802 err_hi:
1803 drm_mm_remove_node(&rsvd_hi);
1804 err_lo:
1805 drm_mm_remove_node(&rsvd_lo);
1806 err:
1807 drm_mm_takedown(&mm);
1808 }
1809
drm_test_mm_lowest(struct kunit * test)1810 static void drm_test_mm_lowest(struct kunit *test)
1811 {
1812 drm_test_mm_once(test, DRM_MM_INSERT_LOW);
1813 }
1814
drm_test_mm_highest(struct kunit * test)1815 static void drm_test_mm_highest(struct kunit *test)
1816 {
1817 drm_test_mm_once(test, DRM_MM_INSERT_HIGH);
1818 }
1819
/* Color-adjust callback: shrink the usable hole by one unit at either
 * end whenever the adjacent allocated node has a different color, thus
 * forcing a one-unit gap between differently colored neighbours.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color, u64 *start, u64 *end)
{
	const struct drm_mm_node *next = list_next_entry(node, node_list);

	if (drm_mm_node_allocated(node) && node->color != color)
		*start += 1;

	if (drm_mm_node_allocated(next) && next->color != color)
		*end -= 1;
}
1830
colors_abutt(struct kunit * test,const struct drm_mm_node * node)1831 static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node)
1832 {
1833 if (!drm_mm_hole_follows(node) &&
1834 drm_mm_node_allocated(list_next_entry(node, node_list))) {
1835 KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1836 node->color, node->start, node->size,
1837 list_next_entry(node, node_list)->color,
1838 list_next_entry(node, node_list)->start,
1839 list_next_entry(node, node_list)->size);
1840 return true;
1841 }
1842
1843 return false;
1844 }
1845
/* Verify that colors are stored on nodes and that the color_adjust
 * callback (separate_adjacent_colors) is honoured by both reservation
 * and insertion, leaving a gap between differently colored neighbours.
 */
static void drm_test_mm_color(struct kunit *test)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	/* Phase 1: no callback; color must simply be stored (color == size) */
	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			goto out;

		if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) {
			KUNIT_FAIL(test, "insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n",
				   node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			goto out;

		node->size = 1 + 2 * count;
		node->color = node->size;

		if (drm_mm_reserve_node(&mm, node)) {
			KUNIT_FAIL(test, "initial reserve failed!\n");
			goto out;
		}

		last = node->start + node->size;

		/* Reserving flush against the previous node must be rejected
		 * by the color callback; shifted+realigned it must succeed.
		 */
		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node)
				goto out;

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			if (drm_mm_reserve_node(&mm, node) != -ENOSPC) {
				KUNIT_FAIL(test, "reserve %d did not report color overlap!", n);
				goto out;
			}

			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			if (drm_mm_reserve_node(&mm, node)) {
				KUNIT_FAIL(test, "reserve %d failed", n);
				goto out;
			}

			last = node->start + node->size;
		}

		/* Insertions with alignment == color == size */
		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node)
				goto out;

			if (!expect_insert(test, &mm, node, n, n, n, mode)) {
				KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n);
				kfree(node);
				goto out;
			}
		}

		/* Every node: correct color, gap to neighbour, aligned start */
		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				KUNIT_FAIL(test,
					   "%s invalid color stored: expected %lld, found %ld\n",
					   mode->name, node->size, node->color);

				goto out;
			}

			if (colors_abutt(test, node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				KUNIT_FAIL(test,
					   "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
					   mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
}
1982
/* Like evict_something() but with a color: run a color-aware eviction
 * scan, insert into the freed hole, validate range/color/placement, and
 * restore the evicted nodes. Returns 0 on success or a negative errno.
 */
static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
		       u64 range_end, struct evict_node *nodes, unsigned int *order,
		       unsigned int count, unsigned int size, unsigned int alignment,
		       unsigned long color, const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start,
				    range_end, mode->mode);
	if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		KUNIT_FAIL(test,
			   "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
			   size, alignment, color, err);
		show_scan(test, &scan);
		show_holes(test, mm, 3);
		return err;
	}

	/* The insertion must land inside the requested range */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		KUNIT_FAIL(test,
			   "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
			   tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The color gap to neighbours must have been preserved */
	if (colors_abutt(test, &tmp))
		err = -EINVAL;

	if (!assert_node(test, &tmp, mm, size, alignment, color)) {
		KUNIT_FAIL(test,
			   "Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
			   tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back at their original positions */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
				   e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
2044
/* Check that the eviction scanner honours color adjustment when picking
 * victims: with separate_adjacent_colors installed, every scan/insert
 * cycle must keep a gap between differently colored nodes.
 */
static void drm_test_mm_color_evict(struct kunit *test)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
	KUNIT_ASSERT_TRUE(test, nodes);

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	/* Twice the space minus one: room for nodes plus the color gaps */
	drm_mm_init(&mm, 0, 2 * total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		if (!expect_insert(test, &mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			KUNIT_FAIL(test, "insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
					n, 1, color++, mode)) {
				KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n);
				goto out;
			}
		}

		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
					total_size / 2, n, color++, mode)) {
				KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
					   mode->name, total_size / 2, n);
				goto out;
			}
		}

		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
					nsize, n, color++, mode)) {
				KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
					   mode->name, nsize, n);
				goto out;
			}
		}

		cond_resched();
	}

out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
}
2125
drm_test_mm_color_evict_range(struct kunit * test)2126 static void drm_test_mm_color_evict_range(struct kunit *test)
2127 {
2128 DRM_RND_STATE(prng, random_seed);
2129 const unsigned int total_size = 8192;
2130 const unsigned int range_size = total_size / 2;
2131 const unsigned int range_start = total_size / 4;
2132 const unsigned int range_end = range_start + range_size;
2133 const struct insert_mode *mode;
2134 unsigned long color = 0;
2135 struct drm_mm mm;
2136 struct evict_node *nodes;
2137 struct drm_mm_node *node, *next;
2138 unsigned int *order, n;
2139
2140 /* Like drm_test_mm_color_evict(), but limited to small portion of the full
2141 * drm_mm range.
2142 */
2143
2144 nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2145 KUNIT_ASSERT_TRUE(test, nodes);
2146
2147 order = drm_random_order(total_size, &prng);
2148 if (!order)
2149 goto err_nodes;
2150
2151 drm_mm_init(&mm, 0, 2 * total_size - 1);
2152 mm.color_adjust = separate_adjacent_colors;
2153 for (n = 0; n < total_size; n++) {
2154 if (!expect_insert(test, &mm, &nodes[n].node,
2155 1, 0, color++,
2156 &insert_modes[0])) {
2157 KUNIT_FAIL(test, "insert failed, step %d\n", n);
2158 goto out;
2159 }
2160 }
2161
2162 for (mode = evict_modes; mode->name; mode++) {
2163 for (n = 1; n <= range_size; n <<= 1) {
2164 drm_random_reorder(order, range_size, &prng);
2165 if (evict_color(test, &mm, range_start, range_end, nodes, order,
2166 total_size, n, 1, color++, mode)) {
2167 KUNIT_FAIL(test,
2168 "%s evict_color(size=%u) failed for range [%x, %x]\n",
2169 mode->name, n, range_start, range_end);
2170 goto out;
2171 }
2172 }
2173
2174 for (n = 1; n < range_size; n <<= 1) {
2175 drm_random_reorder(order, total_size, &prng);
2176 if (evict_color(test, &mm, range_start, range_end, nodes, order,
2177 total_size, range_size / 2, n, color++, mode)) {
2178 KUNIT_FAIL(test,
2179 "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2180 mode->name, total_size / 2, n, range_start, range_end);
2181 goto out;
2182 }
2183 }
2184
2185 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2186 unsigned int nsize = (range_size - n + 1) / 2;
2187
2188 DRM_MM_BUG_ON(!nsize);
2189
2190 drm_random_reorder(order, total_size, &prng);
2191 if (evict_color(test, &mm, range_start, range_end, nodes, order,
2192 total_size, nsize, n, color++, mode)) {
2193 KUNIT_FAIL(test,
2194 "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2195 mode->name, nsize, n, range_start, range_end);
2196 goto out;
2197 }
2198 }
2199
2200 cond_resched();
2201 }
2202
2203 out:
2204 drm_mm_for_each_node_safe(node, next, &mm)
2205 drm_mm_remove_node(node);
2206 drm_mm_takedown(&mm);
2207 kfree(order);
2208 err_nodes:
2209 vfree(nodes);
2210 }
2211
drm_mm_init_test(struct kunit * test)2212 static int drm_mm_init_test(struct kunit *test)
2213 {
2214 while (!random_seed)
2215 random_seed = get_random_u32();
2216
2217 return 0;
2218 }
2219
/* Read-only module parameters: override the RNG seed and the sweep bounds
 * used by the tests at module load time.
 */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);
2223
/* KUnit case table for the drm_mm suite; terminated by the empty sentinel. */
static struct kunit_case drm_mm_tests[] = {
	KUNIT_CASE(drm_test_mm_init),
	KUNIT_CASE(drm_test_mm_debug),
	KUNIT_CASE(drm_test_mm_reserve),
	KUNIT_CASE(drm_test_mm_insert),
	KUNIT_CASE(drm_test_mm_replace),
	KUNIT_CASE(drm_test_mm_insert_range),
	KUNIT_CASE(drm_test_mm_frag),
	KUNIT_CASE(drm_test_mm_align),
	KUNIT_CASE(drm_test_mm_align32),
	KUNIT_CASE(drm_test_mm_align64),
	KUNIT_CASE(drm_test_mm_evict),
	KUNIT_CASE(drm_test_mm_evict_range),
	KUNIT_CASE(drm_test_mm_topdown),
	KUNIT_CASE(drm_test_mm_bottomup),
	KUNIT_CASE(drm_test_mm_lowest),
	KUNIT_CASE(drm_test_mm_highest),
	KUNIT_CASE(drm_test_mm_color),
	KUNIT_CASE(drm_test_mm_color_evict),
	KUNIT_CASE(drm_test_mm_color_evict_range),
	{}
};
2246
/* Suite definition: drm_mm_init_test runs before each case to seed the RNG. */
static struct kunit_suite drm_mm_test_suite = {
	.name = "drm_mm",
	.init = drm_mm_init_test,
	.test_cases = drm_mm_tests,
};

kunit_test_suite(drm_mm_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
2257