1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2017 Intel Corporation
5 */
6
7 #include <linux/prime_numbers.h>
8
9 #include "i915_selftest.h"
10
11 #include "gem/i915_gem_region.h"
12 #include "gem/i915_gem_lmem.h"
13 #include "gem/i915_gem_pm.h"
14
15 #include "gt/intel_gt.h"
16
17 #include "igt_gem_utils.h"
18 #include "mock_context.h"
19
20 #include "selftests/mock_drm.h"
21 #include "selftests/mock_gem_device.h"
22 #include "selftests/mock_region.h"
23 #include "selftests/i915_random.h"
24
25 static const unsigned int page_sizes[] = {
26 I915_GTT_PAGE_SIZE_2M,
27 I915_GTT_PAGE_SIZE_64K,
28 I915_GTT_PAGE_SIZE_4K,
29 };
30
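/*
 * Pick the largest device-supported page size that still fits within the
 * remaining length @rem, or 0 if none fits.
 */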
31 static unsigned int get_largest_page_size(struct drm_i915_private *i915,
32 u64 rem)
33 {
34 int i;
35
36 for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
37 unsigned int page_size = page_sizes[i];
38
39 if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
40 return page_size;
41 }
42
43 return 0;
44 }
45
46 static void huge_pages_free_pages(struct sg_table *st)
47 {
48 struct scatterlist *sg;
49
50 for (sg = st->sgl; sg; sg = __sg_next(sg)) {
51 if (sg_page(sg))
52 __free_pages(sg_page(sg), get_order(sg->length));
53 }
54
55 sg_free_table(st);
56 kfree(st);
57 }
58
59 static int get_huge_pages(struct drm_i915_gem_object *obj)
60 {
61 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
62 unsigned int page_mask = obj->mm.page_mask;
63 struct sg_table *st;
64 struct scatterlist *sg;
65 unsigned int sg_page_sizes;
66 u64 rem;
67
68 st = kmalloc(sizeof(*st), GFP);
69 if (!st)
70 return -ENOMEM;
71
72 if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
73 kfree(st);
74 return -ENOMEM;
75 }
76
77 rem = obj->base.size;
78 sg = st->sgl;
79 st->nents = 0;
80 sg_page_sizes = 0;
81
82 /*
83 * Our goal here is simple: we want to greedily fill the object from
84 * largest to smallest page-size, while ensuring that we use *every*
85 * page-size as per the given page-mask.
86 */
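/*
 * For example (a sketch, assuming a page_mask of 2M | 64K | 4K and an
 * object of 2M + 64K + 4K), the loops below allocate exactly one 2M, one
 * 64K and one 4K chunk, so each requested page size shows up in the sg
 * list at least once.
 */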
87 do {
88 unsigned int bit = ilog2(page_mask);
89 unsigned int page_size = BIT(bit);
90 int order = get_order(page_size);
91
92 do {
93 struct page *page;
94
95 GEM_BUG_ON(order >= MAX_ORDER);
96 page = alloc_pages(GFP | __GFP_ZERO, order);
97 if (!page)
98 goto err;
99
100 sg_set_page(sg, page, page_size, 0);
101 sg_page_sizes |= page_size;
102 st->nents++;
103
104 rem -= page_size;
105 if (!rem) {
106 sg_mark_end(sg);
107 break;
108 }
109
110 sg = __sg_next(sg);
111 } while ((rem - ((page_size-1) & page_mask)) >= page_size);
112
113 page_mask &= (page_size-1);
114 } while (page_mask);
115
116 if (i915_gem_gtt_prepare_pages(obj, st))
117 goto err;
118
119 GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
120 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
121
122 return 0;
123
124 err:
125 sg_set_page(sg, NULL, 0, 0);
126 sg_mark_end(sg);
127 huge_pages_free_pages(st);
128
129 return -ENOMEM;
130 }
131
132 static void put_huge_pages(struct drm_i915_gem_object *obj,
133 struct sg_table *pages)
134 {
135 i915_gem_gtt_finish_pages(obj, pages);
136 huge_pages_free_pages(pages);
137
138 obj->mm.dirty = false;
139 }
140
141 static const struct drm_i915_gem_object_ops huge_page_ops = {
142 .name = "huge-gem",
143 .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
144 .get_pages = get_huge_pages,
145 .put_pages = put_huge_pages,
146 };
147
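/*
 * Create a GEM object backed by real (struct page) allocations, with the
 * backing store laid out according to @page_mask via huge_page_ops above.
 */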
148 static struct drm_i915_gem_object *
149 huge_pages_object(struct drm_i915_private *i915,
150 u64 size,
151 unsigned int page_mask)
152 {
153 static struct lock_class_key lock_class;
154 struct drm_i915_gem_object *obj;
155
156 GEM_BUG_ON(!size);
157 GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
158
159 if (size >> PAGE_SHIFT > INT_MAX)
160 return ERR_PTR(-E2BIG);
161
162 if (overflows_type(size, obj->base.size))
163 return ERR_PTR(-E2BIG);
164
165 obj = i915_gem_object_alloc();
166 if (!obj)
167 return ERR_PTR(-ENOMEM);
168
169 drm_gem_private_object_init(&i915->drm, &obj->base, size);
170 i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
171 obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
172 i915_gem_object_set_volatile(obj);
173
174 obj->write_domain = I915_GEM_DOMAIN_CPU;
175 obj->read_domains = I915_GEM_DOMAIN_CPU;
176 obj->cache_level = I915_CACHE_NONE;
177
178 obj->mm.page_mask = page_mask;
179
180 return obj;
181 }
182
183 static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
184 {
185 struct drm_i915_private *i915 = to_i915(obj->base.dev);
186 const u64 max_len = rounddown_pow_of_two(UINT_MAX);
187 struct sg_table *st;
188 struct scatterlist *sg;
189 unsigned int sg_page_sizes;
190 u64 rem;
191
192 st = kmalloc(sizeof(*st), GFP);
193 if (!st)
194 return -ENOMEM;
195
196 if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
197 kfree(st);
198 return -ENOMEM;
199 }
200
201 /* Use optimal page sized chunks to fill in the sg table */
202 rem = obj->base.size;
203 sg = st->sgl;
204 st->nents = 0;
205 sg_page_sizes = 0;
206 do {
207 unsigned int page_size = get_largest_page_size(i915, rem);
208 unsigned int len = min(page_size * div_u64(rem, page_size),
209 max_len);
210
211 GEM_BUG_ON(!page_size);
212
213 sg->offset = 0;
214 sg->length = len;
215 sg_dma_len(sg) = len;
216 sg_dma_address(sg) = page_size;
217
218 sg_page_sizes |= len;
219
220 st->nents++;
221
222 rem -= len;
223 if (!rem) {
224 sg_mark_end(sg);
225 break;
226 }
227
228 sg = sg_next(sg);
229 } while (1);
230
231 i915_sg_trim(st);
232
233 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
234
235 return 0;
236 }
237
238 static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
239 {
240 struct drm_i915_private *i915 = to_i915(obj->base.dev);
241 struct sg_table *st;
242 struct scatterlist *sg;
243 unsigned int page_size;
244
245 st = kmalloc(sizeof(*st), GFP);
246 if (!st)
247 return -ENOMEM;
248
249 if (sg_alloc_table(st, 1, GFP)) {
250 kfree(st);
251 return -ENOMEM;
252 }
253
254 sg = st->sgl;
255 st->nents = 1;
256
257 page_size = get_largest_page_size(i915, obj->base.size);
258 GEM_BUG_ON(!page_size);
259
260 sg->offset = 0;
261 sg->length = obj->base.size;
262 sg_dma_len(sg) = obj->base.size;
263 sg_dma_address(sg) = page_size;
264
265 __i915_gem_object_set_pages(obj, st, sg->length);
266
267 return 0;
268 #undef GFP
269 }
270
271 static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
272 struct sg_table *pages)
273 {
274 sg_free_table(pages);
275 kfree(pages);
276 }
277
278 static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
279 struct sg_table *pages)
280 {
281 fake_free_huge_pages(obj, pages);
282 obj->mm.dirty = false;
283 }
284
285 static const struct drm_i915_gem_object_ops fake_ops = {
286 .name = "fake-gem",
287 .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
288 .get_pages = fake_get_huge_pages,
289 .put_pages = fake_put_huge_pages,
290 };
291
292 static const struct drm_i915_gem_object_ops fake_ops_single = {
293 .name = "fake-gem",
294 .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
295 .get_pages = fake_get_huge_pages_single,
296 .put_pages = fake_put_huge_pages,
297 };
298
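/*
 * Create a dummy object with no real backing store; the sg entries only
 * carry lengths and fake dma addresses, which is enough to exercise the
 * GTT page-size selection in the mock selftests.
 */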
299 static struct drm_i915_gem_object *
300 fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
301 {
302 static struct lock_class_key lock_class;
303 struct drm_i915_gem_object *obj;
304
305 GEM_BUG_ON(!size);
306 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
307
308 if (size >> PAGE_SHIFT > UINT_MAX)
309 return ERR_PTR(-E2BIG);
310
311 if (overflows_type(size, obj->base.size))
312 return ERR_PTR(-E2BIG);
313
314 obj = i915_gem_object_alloc();
315 if (!obj)
316 return ERR_PTR(-ENOMEM);
317
318 drm_gem_private_object_init(&i915->drm, &obj->base, size);
319
320 if (single)
321 i915_gem_object_init(obj, &fake_ops_single, &lock_class, 0);
322 else
323 i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
324
325 i915_gem_object_set_volatile(obj);
326
327 obj->write_domain = I915_GEM_DOMAIN_CPU;
328 obj->read_domains = I915_GEM_DOMAIN_CPU;
329 obj->cache_level = I915_CACHE_NONE;
330
331 return obj;
332 }
333
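/*
 * Sanity check the page-size bookkeeping: the vma's sg/gtt/phys page sizes
 * must be supported by the device and consistent with the backing object.
 */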
334 static int igt_check_page_sizes(struct i915_vma *vma)
335 {
336 struct drm_i915_private *i915 = vma->vm->i915;
337 unsigned int supported = INTEL_INFO(i915)->page_sizes;
338 struct drm_i915_gem_object *obj = vma->obj;
339 int err;
340
341 /* We have to wait for the async bind to complete before our asserts */
342 err = i915_vma_sync(vma);
343 if (err)
344 return err;
345
346 if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
347 pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
348 vma->page_sizes.sg & ~supported, supported);
349 err = -EINVAL;
350 }
351
352 if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
353 pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
354 vma->page_sizes.gtt & ~supported, supported);
355 err = -EINVAL;
356 }
357
358 if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
359 pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
360 vma->page_sizes.phys, obj->mm.page_sizes.phys);
361 err = -EINVAL;
362 }
363
364 if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
365 pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
366 vma->page_sizes.sg, obj->mm.page_sizes.sg);
367 err = -EINVAL;
368 }
369
370 /*
371 * The dma-api is like a box of chocolates when it comes to the
372 * alignment of dma addresses. However, for LMEM we have total control
373 * and so can guarantee alignment. Likewise, the blocks we allocate
374 * should appear in descending order, and since we know that we align
375 * to the largest page size for the GTT address, we should be able to
376 * assert that if we see 2M physical pages then we should also get 2M
377 * GTT pages. If we don't, then something might be wrong in our
378 * construction of the backing pages.
379 *
380 * Maintaining alignment is required to utilise huge pages in the ppGTT.
381 */
382 if (i915_gem_object_is_lmem(obj) &&
383 IS_ALIGNED(vma->node.start, SZ_2M) &&
384 vma->page_sizes.sg & SZ_2M &&
385 vma->page_sizes.gtt < SZ_2M) {
386 pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
387 vma->page_sizes.sg, vma->page_sizes.gtt);
388 err = -EINVAL;
389 }
390
391 if (obj->mm.page_sizes.gtt) {
392 pr_err("obj->page_sizes.gtt(%u) should never be set\n",
393 obj->mm.page_sizes.gtt);
394 err = -EINVAL;
395 }
396
397 return err;
398 }
399
400 static int igt_mock_exhaust_device_supported_pages(void *arg)
401 {
402 struct i915_ppgtt *ppgtt = arg;
403 struct drm_i915_private *i915 = ppgtt->vm.i915;
404 unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
405 struct drm_i915_gem_object *obj;
406 struct i915_vma *vma;
407 int i, j, single;
408 int err;
409
410 /*
411 * Sanity check creating objects with every valid page support
412 * combination for our mock device.
413 */
414
415 for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
416 unsigned int combination = SZ_4K; /* Required for ppGTT */
417
418 for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
419 if (i & BIT(j))
420 combination |= page_sizes[j];
421 }
422
423 mkwrite_device_info(i915)->page_sizes = combination;
424
425 for (single = 0; single <= 1; ++single) {
426 obj = fake_huge_pages_object(i915, combination, !!single);
427 if (IS_ERR(obj)) {
428 err = PTR_ERR(obj);
429 goto out_device;
430 }
431
432 if (obj->base.size != combination) {
433 pr_err("obj->base.size=%zu, expected=%u\n",
434 obj->base.size, combination);
435 err = -EINVAL;
436 goto out_put;
437 }
438
439 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
440 if (IS_ERR(vma)) {
441 err = PTR_ERR(vma);
442 goto out_put;
443 }
444
445 err = i915_vma_pin(vma, 0, 0, PIN_USER);
446 if (err)
447 goto out_put;
448
449 err = igt_check_page_sizes(vma);
450
451 if (vma->page_sizes.sg != combination) {
452 pr_err("page_sizes.sg=%u, expected=%u\n",
453 vma->page_sizes.sg, combination);
454 err = -EINVAL;
455 }
456
457 i915_vma_unpin(vma);
458 i915_gem_object_put(obj);
459
460 if (err)
461 goto out_device;
462 }
463 }
464
465 goto out_device;
466
467 out_put:
468 i915_gem_object_put(obj);
469 out_device:
470 mkwrite_device_info(i915)->page_sizes = saved_mask;
471
472 return err;
473 }
474
475 static int igt_mock_memory_region_huge_pages(void *arg)
476 {
477 const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
478 struct i915_ppgtt *ppgtt = arg;
479 struct drm_i915_private *i915 = ppgtt->vm.i915;
480 unsigned long supported = INTEL_INFO(i915)->page_sizes;
481 struct intel_memory_region *mem;
482 struct drm_i915_gem_object *obj;
483 struct i915_vma *vma;
484 int bit;
485 int err = 0;
486
487 mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
488 if (IS_ERR(mem)) {
489 pr_err("%s failed to create memory region\n", __func__);
490 return PTR_ERR(mem);
491 }
492
493 for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
494 unsigned int page_size = BIT(bit);
495 resource_size_t phys;
496 int i;
497
498 for (i = 0; i < ARRAY_SIZE(flags); ++i) {
499 obj = i915_gem_object_create_region(mem,
500 page_size, page_size,
501 flags[i]);
502 if (IS_ERR(obj)) {
503 err = PTR_ERR(obj);
504 goto out_region;
505 }
506
507 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
508 if (IS_ERR(vma)) {
509 err = PTR_ERR(vma);
510 goto out_put;
511 }
512
513 err = i915_vma_pin(vma, 0, 0, PIN_USER);
514 if (err)
515 goto out_put;
516
517 err = igt_check_page_sizes(vma);
518 if (err)
519 goto out_unpin;
520
521 phys = i915_gem_object_get_dma_address(obj, 0);
522 if (!IS_ALIGNED(phys, page_size)) {
523 pr_err("%s addr misaligned(%pa) page_size=%u\n",
524 __func__, &phys, page_size);
525 err = -EINVAL;
526 goto out_unpin;
527 }
528
529 if (vma->page_sizes.gtt != page_size) {
530 pr_err("%s page_sizes.gtt=%u, expected=%u\n",
531 __func__, vma->page_sizes.gtt,
532 page_size);
533 err = -EINVAL;
534 goto out_unpin;
535 }
536
537 i915_vma_unpin(vma);
538 __i915_gem_object_put_pages(obj);
539 i915_gem_object_put(obj);
540 }
541 }
542
543 goto out_region;
544
545 out_unpin:
546 i915_vma_unpin(vma);
547 out_put:
548 i915_gem_object_put(obj);
549 out_region:
550 intel_memory_region_put(mem);
551 return err;
552 }
553
554 static int igt_mock_ppgtt_misaligned_dma(void *arg)
555 {
556 struct i915_ppgtt *ppgtt = arg;
557 struct drm_i915_private *i915 = ppgtt->vm.i915;
558 unsigned long supported = INTEL_INFO(i915)->page_sizes;
559 struct drm_i915_gem_object *obj;
560 int bit;
561 int err;
562
563 /*
564 * Sanity check dma misalignment for huge pages -- the dma addresses we
565 * insert into the paging structures need to always respect the page
566 * size alignment.
567 */
568
569 bit = ilog2(I915_GTT_PAGE_SIZE_64K);
570
571 for_each_set_bit_from(bit, &supported,
572 ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
573 IGT_TIMEOUT(end_time);
574 unsigned int page_size = BIT(bit);
575 unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
576 unsigned int offset;
577 unsigned int size =
578 round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
579 struct i915_vma *vma;
580
581 obj = fake_huge_pages_object(i915, size, true);
582 if (IS_ERR(obj))
583 return PTR_ERR(obj);
584
585 if (obj->base.size != size) {
586 pr_err("obj->base.size=%zu, expected=%u\n",
587 obj->base.size, size);
588 err = -EINVAL;
589 goto out_put;
590 }
591
592 err = i915_gem_object_pin_pages_unlocked(obj);
593 if (err)
594 goto out_put;
595
596 /* Force the page size for this object */
597 obj->mm.page_sizes.sg = page_size;
598
599 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
600 if (IS_ERR(vma)) {
601 err = PTR_ERR(vma);
602 goto out_unpin;
603 }
604
605 err = i915_vma_pin(vma, 0, 0, flags);
606 if (err)
607 goto out_unpin;
608
609
610 err = igt_check_page_sizes(vma);
611
612 if (vma->page_sizes.gtt != page_size) {
613 pr_err("page_sizes.gtt=%u, expected %u\n",
614 vma->page_sizes.gtt, page_size);
615 err = -EINVAL;
616 }
617
618 i915_vma_unpin(vma);
619
620 if (err)
621 goto out_unpin;
622
623 /*
624 * Try all the other valid offsets until the next
625 * boundary -- should always fall back to using 4K
626 * pages.
627 */
628 for (offset = 4096; offset < page_size; offset += 4096) {
629 err = i915_vma_unbind(vma);
630 if (err)
631 goto out_unpin;
632
633 err = i915_vma_pin(vma, 0, 0, flags | offset);
634 if (err)
635 goto out_unpin;
636
637 err = igt_check_page_sizes(vma);
638
639 if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
640 pr_err("page_sizes.gtt=%u, expected %llu\n",
641 vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
642 err = -EINVAL;
643 }
644
645 i915_vma_unpin(vma);
646
647 if (err)
648 goto out_unpin;
649
650 if (igt_timeout(end_time,
651 "%s timed out at offset %x with page-size %x\n",
652 __func__, offset, page_size))
653 break;
654 }
655
656 i915_gem_object_lock(obj, NULL);
657 i915_gem_object_unpin_pages(obj);
658 __i915_gem_object_put_pages(obj);
659 i915_gem_object_unlock(obj);
660 i915_gem_object_put(obj);
661 }
662
663 return 0;
664
665 out_unpin:
666 i915_gem_object_lock(obj, NULL);
667 i915_gem_object_unpin_pages(obj);
668 i915_gem_object_unlock(obj);
669 out_put:
670 i915_gem_object_put(obj);
671
672 return err;
673 }
674
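/*
 * Unpin, drop the backing pages and release every object accumulated on
 * the list by igt_mock_ppgtt_huge_fill().
 */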
675 static void close_object_list(struct list_head *objects,
676 struct i915_ppgtt *ppgtt)
677 {
678 struct drm_i915_gem_object *obj, *on;
679
680 list_for_each_entry_safe(obj, on, objects, st_link) {
681 list_del(&obj->st_link);
682 i915_gem_object_lock(obj, NULL);
683 i915_gem_object_unpin_pages(obj);
684 __i915_gem_object_put_pages(obj);
685 i915_gem_object_unlock(obj);
686 i915_gem_object_put(obj);
687 }
688 }
689
690 static int igt_mock_ppgtt_huge_fill(void *arg)
691 {
692 struct i915_ppgtt *ppgtt = arg;
693 struct drm_i915_private *i915 = ppgtt->vm.i915;
694 unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
695 unsigned long page_num;
696 bool single = false;
697 LIST_HEAD(objects);
698 IGT_TIMEOUT(end_time);
699 int err = -ENODEV;
700
701 for_each_prime_number_from(page_num, 1, max_pages) {
702 struct drm_i915_gem_object *obj;
703 u64 size = page_num << PAGE_SHIFT;
704 struct i915_vma *vma;
705 unsigned int expected_gtt = 0;
706 int i;
707
708 obj = fake_huge_pages_object(i915, size, single);
709 if (IS_ERR(obj)) {
710 err = PTR_ERR(obj);
711 break;
712 }
713
714 if (obj->base.size != size) {
715 pr_err("obj->base.size=%zd, expected=%llu\n",
716 obj->base.size, size);
717 i915_gem_object_put(obj);
718 err = -EINVAL;
719 break;
720 }
721
722 err = i915_gem_object_pin_pages_unlocked(obj);
723 if (err) {
724 i915_gem_object_put(obj);
725 break;
726 }
727
728 list_add(&obj->st_link, &objects);
729
730 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
731 if (IS_ERR(vma)) {
732 err = PTR_ERR(vma);
733 break;
734 }
735
736 err = i915_vma_pin(vma, 0, 0, PIN_USER);
737 if (err)
738 break;
739
740 err = igt_check_page_sizes(vma);
741 if (err) {
742 i915_vma_unpin(vma);
743 break;
744 }
745
746 /*
747 * Figure out the expected gtt page size knowing that we go from
748 * largest to smallest page size sg chunks, and that we align to
749 * the largest page size.
750 */
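/*
 * As a rough example (assuming a device supporting 2M, 64K and 4K), a
 * 2M + 64K + 4K object first yields expected_gtt = 2M | 64K | 4K, and the
 * 4K fallback below then clears the 64K bit, leaving 2M | 4K.
 */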
751 for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
752 unsigned int page_size = page_sizes[i];
753
754 if (HAS_PAGE_SIZES(i915, page_size) &&
755 size >= page_size) {
756 expected_gtt |= page_size;
757 size &= page_size-1;
758 }
759 }
760
761 GEM_BUG_ON(!expected_gtt);
762 GEM_BUG_ON(size);
763
764 if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
765 expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
766
767 i915_vma_unpin(vma);
768
769 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
770 if (!IS_ALIGNED(vma->node.start,
771 I915_GTT_PAGE_SIZE_2M)) {
772 pr_err("node.start(%llx) not aligned to 2M\n",
773 vma->node.start);
774 err = -EINVAL;
775 break;
776 }
777
778 if (!IS_ALIGNED(vma->node.size,
779 I915_GTT_PAGE_SIZE_2M)) {
780 pr_err("node.size(%llx) not aligned to 2M\n",
781 vma->node.size);
782 err = -EINVAL;
783 break;
784 }
785 }
786
787 if (vma->page_sizes.gtt != expected_gtt) {
788 pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
789 vma->page_sizes.gtt, expected_gtt,
790 obj->base.size, yesno(!!single));
791 err = -EINVAL;
792 break;
793 }
794
795 if (igt_timeout(end_time,
796 "%s timed out at size %zd\n",
797 __func__, obj->base.size))
798 break;
799
800 single = !single;
801 }
802
803 close_object_list(&objects, ppgtt);
804
805 if (err == -ENOMEM || err == -ENOSPC)
806 err = 0;
807
808 return err;
809 }
810
811 static int igt_mock_ppgtt_64K(void *arg)
812 {
813 struct i915_ppgtt *ppgtt = arg;
814 struct drm_i915_private *i915 = ppgtt->vm.i915;
815 struct drm_i915_gem_object *obj;
816 const struct object_info {
817 unsigned int size;
818 unsigned int gtt;
819 unsigned int offset;
820 } objects[] = {
821 /* Cases with forced padding/alignment */
822 {
823 .size = SZ_64K,
824 .gtt = I915_GTT_PAGE_SIZE_64K,
825 .offset = 0,
826 },
827 {
828 .size = SZ_64K + SZ_4K,
829 .gtt = I915_GTT_PAGE_SIZE_4K,
830 .offset = 0,
831 },
832 {
833 .size = SZ_64K - SZ_4K,
834 .gtt = I915_GTT_PAGE_SIZE_4K,
835 .offset = 0,
836 },
837 {
838 .size = SZ_2M,
839 .gtt = I915_GTT_PAGE_SIZE_64K,
840 .offset = 0,
841 },
842 {
843 .size = SZ_2M - SZ_4K,
844 .gtt = I915_GTT_PAGE_SIZE_4K,
845 .offset = 0,
846 },
847 {
848 .size = SZ_2M + SZ_4K,
849 .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
850 .offset = 0,
851 },
852 {
853 .size = SZ_2M + SZ_64K,
854 .gtt = I915_GTT_PAGE_SIZE_64K,
855 .offset = 0,
856 },
857 {
858 .size = SZ_2M - SZ_64K,
859 .gtt = I915_GTT_PAGE_SIZE_64K,
860 .offset = 0,
861 },
862 /* Try without any forced padding/alignment */
863 {
864 .size = SZ_64K,
865 .offset = SZ_2M,
866 .gtt = I915_GTT_PAGE_SIZE_4K,
867 },
868 {
869 .size = SZ_128K,
870 .offset = SZ_2M - SZ_64K,
871 .gtt = I915_GTT_PAGE_SIZE_4K,
872 },
873 };
874 struct i915_vma *vma;
875 int i, single;
876 int err;
877
878 /*
879 * Sanity check some of the trickiness with 64K pages -- either we can
880 * safely mark the whole page-table (2M block) as 64K, or we have to
881 * always fall back to 4K.
882 */
883
884 if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
885 return 0;
886
887 for (i = 0; i < ARRAY_SIZE(objects); ++i) {
888 unsigned int size = objects[i].size;
889 unsigned int expected_gtt = objects[i].gtt;
890 unsigned int offset = objects[i].offset;
891 unsigned int flags = PIN_USER;
892
893 for (single = 0; single <= 1; single++) {
894 obj = fake_huge_pages_object(i915, size, !!single);
895 if (IS_ERR(obj))
896 return PTR_ERR(obj);
897
898 err = i915_gem_object_pin_pages_unlocked(obj);
899 if (err)
900 goto out_object_put;
901
902 /*
903 * Disable 2M pages -- We only want to use 64K/4K pages
904 * for this test.
905 */
906 obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
907
908 vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
909 if (IS_ERR(vma)) {
910 err = PTR_ERR(vma);
911 goto out_object_unpin;
912 }
913
914 if (offset)
915 flags |= PIN_OFFSET_FIXED | offset;
916
917 err = i915_vma_pin(vma, 0, 0, flags);
918 if (err)
919 goto out_object_unpin;
920
921 err = igt_check_page_sizes(vma);
922 if (err)
923 goto out_vma_unpin;
924
925 if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
926 if (!IS_ALIGNED(vma->node.start,
927 I915_GTT_PAGE_SIZE_2M)) {
928 pr_err("node.start(%llx) not aligned to 2M\n",
929 vma->node.start);
930 err = -EINVAL;
931 goto out_vma_unpin;
932 }
933
934 if (!IS_ALIGNED(vma->node.size,
935 I915_GTT_PAGE_SIZE_2M)) {
936 pr_err("node.size(%llx) not aligned to 2M\n",
937 vma->node.size);
938 err = -EINVAL;
939 goto out_vma_unpin;
940 }
941 }
942
943 if (vma->page_sizes.gtt != expected_gtt) {
944 pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
945 vma->page_sizes.gtt, expected_gtt, i,
946 yesno(!!single));
947 err = -EINVAL;
948 goto out_vma_unpin;
949 }
950
951 i915_vma_unpin(vma);
952 i915_gem_object_lock(obj, NULL);
953 i915_gem_object_unpin_pages(obj);
954 __i915_gem_object_put_pages(obj);
955 i915_gem_object_unlock(obj);
956 i915_gem_object_put(obj);
957 }
958 }
959
960 return 0;
961
962 out_vma_unpin:
963 i915_vma_unpin(vma);
964 out_object_unpin:
965 i915_gem_object_lock(obj, NULL);
966 i915_gem_object_unpin_pages(obj);
967 i915_gem_object_unlock(obj);
968 out_object_put:
969 i915_gem_object_put(obj);
970
971 return err;
972 }
973
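/*
 * Use the GPU to write @val into dword @dw of each page covered by @vma,
 * after flushing the object to the GTT write domain.
 */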
974 static int gpu_write(struct intel_context *ce,
975 struct i915_vma *vma,
976 u32 dw,
977 u32 val)
978 {
979 int err;
980
981 i915_gem_object_lock(vma->obj, NULL);
982 err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
983 i915_gem_object_unlock(vma->obj);
984 if (err)
985 return err;
986
987 return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
988 vma->size >> PAGE_SHIFT, val);
989 }
990
991 static int
992 __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
993 {
994 unsigned int needs_flush;
995 unsigned long n;
996 int err;
997
998 i915_gem_object_lock(obj, NULL);
999 err = i915_gem_object_prepare_read(obj, &needs_flush);
1000 if (err)
1001 goto err_unlock;
1002
1003 for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
1004 u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
1005
1006 if (needs_flush & CLFLUSH_BEFORE)
1007 drm_clflush_virt_range(ptr, PAGE_SIZE);
1008
1009 if (ptr[dword] != val) {
1010 pr_err("n=%lu ptr[%u]=%u, val=%u\n",
1011 n, dword, ptr[dword], val);
1012 kunmap_atomic(ptr);
1013 err = -EINVAL;
1014 break;
1015 }
1016
1017 kunmap_atomic(ptr);
1018 }
1019
1020 i915_gem_object_finish_access(obj);
1021 err_unlock:
1022 i915_gem_object_unlock(obj);
1023
1024 return err;
1025 }
1026
1027 static int __cpu_check_vmap(struct drm_i915_gem_object *obj, u32 dword, u32 val)
1028 {
1029 unsigned long n = obj->base.size >> PAGE_SHIFT;
1030 u32 *ptr;
1031 int err;
1032
1033 err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
1034 if (err)
1035 return err;
1036
1037 ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
1038 if (IS_ERR(ptr))
1039 return PTR_ERR(ptr);
1040
1041 ptr += dword;
1042 while (n--) {
1043 if (*ptr != val) {
1044 pr_err("base[%u]=%08x, val=%08x\n",
1045 dword, *ptr, val);
1046 err = -EINVAL;
1047 break;
1048 }
1049
1050 ptr += PAGE_SIZE / sizeof(*ptr);
1051 }
1052
1053 i915_gem_object_unpin_map(obj);
1054 return err;
1055 }
1056
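/*
 * Read back dword @dword from each page on the CPU, going through kmap for
 * struct-page backed objects and a WC vmap otherwise.
 */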
1057 static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
1058 {
1059 if (i915_gem_object_has_struct_page(obj))
1060 return __cpu_check_shmem(obj, dword, val);
1061 else
1062 return __cpu_check_vmap(obj, dword, val);
1063 }
1064
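/*
 * Bind @obj at the fixed @offset in the context's address space, write
 * @val to @dword on the GPU and then verify the result from the CPU.
 */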
1065 static int __igt_write_huge(struct intel_context *ce,
1066 struct drm_i915_gem_object *obj,
1067 u64 size, u64 offset,
1068 u32 dword, u32 val)
1069 {
1070 unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
1071 struct i915_vma *vma;
1072 int err;
1073
1074 vma = i915_vma_instance(obj, ce->vm, NULL);
1075 if (IS_ERR(vma))
1076 return PTR_ERR(vma);
1077
1078 err = i915_vma_unbind(vma);
1079 if (err)
1080 return err;
1081
1082 err = i915_vma_pin(vma, size, 0, flags | offset);
1083 if (err) {
1084 /*
1085 * The ggtt may have some pages reserved so
1086 * refrain from erroring out.
1087 */
1088 if (err == -ENOSPC && i915_is_ggtt(ce->vm))
1089 err = 0;
1090
1091 return err;
1092 }
1093
1094 err = igt_check_page_sizes(vma);
1095 if (err)
1096 goto out_vma_unpin;
1097
1098 err = gpu_write(ce, vma, dword, val);
1099 if (err) {
1100 pr_err("gpu-write failed at offset=%llx\n", offset);
1101 goto out_vma_unpin;
1102 }
1103
1104 err = cpu_check(obj, dword, val);
1105 if (err) {
1106 pr_err("cpu-check failed at offset=%llx\n", offset);
1107 goto out_vma_unpin;
1108 }
1109
1110 out_vma_unpin:
1111 i915_vma_unpin(vma);
1112 return err;
1113 }
1114
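/*
 * Smoke test GPU writes into @obj: walk a randomised set of engines and a
 * range of low/high offsets until the timeout, verifying each write on the
 * CPU.
 */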
1115 static int igt_write_huge(struct i915_gem_context *ctx,
1116 struct drm_i915_gem_object *obj)
1117 {
1118 struct i915_gem_engines *engines;
1119 struct i915_gem_engines_iter it;
1120 struct intel_context *ce;
1121 I915_RND_STATE(prng);
1122 IGT_TIMEOUT(end_time);
1123 unsigned int max_page_size;
1124 unsigned int count;
1125 u64 max;
1126 u64 num;
1127 u64 size;
1128 int *order;
1129 int i, n;
1130 int err = 0;
1131
1132 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1133
1134 size = obj->base.size;
1135 if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
1136 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
1137
1138 n = 0;
1139 count = 0;
1140 max = U64_MAX;
1141 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1142 count++;
1143 if (!intel_engine_can_store_dword(ce->engine))
1144 continue;
1145
1146 max = min(max, ce->vm->total);
1147 n++;
1148 }
1149 i915_gem_context_unlock_engines(ctx);
1150 if (!n)
1151 return 0;
1152
1153 /*
1154 * To keep things interesting when alternating between engines in our
1155 * randomized order, let's also make feeding the same engine a few
1156 * times in succession a possibility by enlarging the permutation array.
1157 */
1158 order = i915_random_order(count * count, &prng);
1159 if (!order)
1160 return -ENOMEM;
1161
1162 max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
1163 max = div_u64(max - size, max_page_size);
1164
1165 /*
1166 * Try various offsets in an ascending/descending fashion until we
1167 * time out -- we want to avoid issues hidden by effectively always using
1168 * offset = 0.
1169 */
1170 i = 0;
1171 engines = i915_gem_context_lock_engines(ctx);
1172 for_each_prime_number_from(num, 0, max) {
1173 u64 offset_low = num * max_page_size;
1174 u64 offset_high = (max - num) * max_page_size;
1175 u32 dword = offset_in_page(num) / 4;
1176 struct intel_context *ce;
1177
1178 ce = engines->engines[order[i] % engines->num_engines];
1179 i = (i + 1) % (count * count);
1180 if (!ce || !intel_engine_can_store_dword(ce->engine))
1181 continue;
1182
1183 /*
1184 * In order to utilize 64K pages we need to both pad the vma
1185 * size and ensure the vma offset is at the start of the pt
1186 * boundary; however, to improve coverage we opt for testing both
1187 * aligned and unaligned offsets.
1188 */
1189 if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
1190 offset_low = round_down(offset_low,
1191 I915_GTT_PAGE_SIZE_2M);
1192
1193 err = __igt_write_huge(ce, obj, size, offset_low,
1194 dword, num + 1);
1195 if (err)
1196 break;
1197
1198 err = __igt_write_huge(ce, obj, size, offset_high,
1199 dword, num + 1);
1200 if (err)
1201 break;
1202
1203 if (igt_timeout(end_time,
1204 "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
1205 __func__, ce->engine->name, offset_low, offset_high,
1206 max_page_size))
1207 break;
1208 }
1209 i915_gem_context_unlock_engines(ctx);
1210
1211 kfree(order);
1212
1213 return err;
1214 }
1215
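/*
 * Backend constructors for the live huge-page tests below; each returns a
 * new object of the requested size from a particular memory backend.
 */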
1216 typedef struct drm_i915_gem_object *
1217 (*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
1218
1219 static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
1220 {
1221 return i915->mm.gemfs && has_transparent_hugepage();
1222 }
1223
1224 static struct drm_i915_gem_object *
1225 igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags)
1226 {
1227 if (!igt_can_allocate_thp(i915)) {
1228 pr_info("%s missing THP support, skipping\n", __func__);
1229 return ERR_PTR(-ENODEV);
1230 }
1231
1232 return i915_gem_object_create_shmem(i915, size);
1233 }
1234
1235 static struct drm_i915_gem_object *
1236 igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags)
1237 {
1238 return i915_gem_object_create_internal(i915, size);
1239 }
1240
1241 static struct drm_i915_gem_object *
1242 igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags)
1243 {
1244 return huge_pages_object(i915, size, size);
1245 }
1246
1247 static struct drm_i915_gem_object *
1248 igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags)
1249 {
1250 return i915_gem_object_create_lmem(i915, size, flags);
1251 }
1252
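/*
 * Pick a random page-aligned size in the range [min_page_size,
 * 2 * max_page_size), always keeping at least min_page_size.
 */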
1253 static u32 igt_random_size(struct rnd_state *prng,
1254 u32 min_page_size,
1255 u32 max_page_size)
1256 {
1257 u64 mask;
1258 u32 size;
1259
1260 GEM_BUG_ON(!is_power_of_2(min_page_size));
1261 GEM_BUG_ON(!is_power_of_2(max_page_size));
1262 GEM_BUG_ON(min_page_size < PAGE_SIZE);
1263 GEM_BUG_ON(min_page_size > max_page_size);
1264
1265 mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK;
1266 size = prandom_u32_state(prng) & mask;
1267 if (size < min_page_size)
1268 size |= min_page_size;
1269
1270 return size;
1271 }
1272
1273 static int igt_ppgtt_smoke_huge(void *arg)
1274 {
1275 struct i915_gem_context *ctx = arg;
1276 struct drm_i915_private *i915 = ctx->i915;
1277 struct drm_i915_gem_object *obj;
1278 I915_RND_STATE(prng);
1279 struct {
1280 igt_create_fn fn;
1281 u32 min;
1282 u32 max;
1283 } backends[] = {
1284 { igt_create_internal, SZ_64K, SZ_2M, },
1285 { igt_create_shmem, SZ_64K, SZ_32M, },
1286 { igt_create_local, SZ_64K, SZ_1G, },
1287 };
1288 int err;
1289 int i;
1290
1291 /*
1292 * Sanity check that the HW uses huge pages correctly through our
1293 * various backends -- ensure that our writes land in the right place.
1294 */
1295
1296 for (i = 0; i < ARRAY_SIZE(backends); ++i) {
1297 u32 min = backends[i].min;
1298 u32 max = backends[i].max;
1299 u32 size = max;
1300 try_again:
1301 size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
1302
1303 obj = backends[i].fn(i915, size, 0);
1304 if (IS_ERR(obj)) {
1305 err = PTR_ERR(obj);
1306 if (err == -E2BIG) {
1307 size >>= 1;
1308 goto try_again;
1309 } else if (err == -ENODEV) {
1310 err = 0;
1311 continue;
1312 }
1313
1314 return err;
1315 }
1316
1317 err = i915_gem_object_pin_pages_unlocked(obj);
1318 if (err) {
1319 if (err == -ENXIO || err == -E2BIG) {
1320 i915_gem_object_put(obj);
1321 size >>= 1;
1322 goto try_again;
1323 }
1324 goto out_put;
1325 }
1326
1327 if (obj->mm.page_sizes.phys < min) {
1328 pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
1329 __func__, size, i);
1330 err = -ENOMEM;
1331 goto out_unpin;
1332 }
1333
1334 err = igt_write_huge(ctx, obj);
1335 if (err) {
1336 pr_err("%s write-huge failed with size=%u, i=%d\n",
1337 __func__, size, i);
1338 }
1339 out_unpin:
1340 i915_gem_object_lock(obj, NULL);
1341 i915_gem_object_unpin_pages(obj);
1342 __i915_gem_object_put_pages(obj);
1343 i915_gem_object_unlock(obj);
1344 out_put:
1345 i915_gem_object_put(obj);
1346
1347 if (err == -ENOMEM || err == -ENXIO)
1348 err = 0;
1349
1350 if (err)
1351 break;
1352
1353 cond_resched();
1354 }
1355
1356 return err;
1357 }
1358
1359 static int igt_ppgtt_sanity_check(void *arg)
1360 {
1361 struct i915_gem_context *ctx = arg;
1362 struct drm_i915_private *i915 = ctx->i915;
1363 unsigned int supported = INTEL_INFO(i915)->page_sizes;
1364 struct {
1365 igt_create_fn fn;
1366 unsigned int flags;
1367 } backends[] = {
1368 { igt_create_system, 0, },
1369 { igt_create_local, 0, },
1370 { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
1371 };
1372 struct {
1373 u32 size;
1374 u32 pages;
1375 } combos[] = {
1376 { SZ_64K, SZ_64K },
1377 { SZ_2M, SZ_2M },
1378 { SZ_2M, SZ_64K },
1379 { SZ_2M - SZ_64K, SZ_64K },
1380 { SZ_2M - SZ_4K, SZ_64K | SZ_4K },
1381 { SZ_2M + SZ_4K, SZ_64K | SZ_4K },
1382 { SZ_2M + SZ_4K, SZ_2M | SZ_4K },
1383 { SZ_2M + SZ_64K, SZ_2M | SZ_64K },
1384 };
1385 int i, j;
1386 int err;
1387
1388 if (supported == I915_GTT_PAGE_SIZE_4K)
1389 return 0;
1390
1391 /*
1392 * Sanity check that the HW behaves with a limited set of combinations.
1393 * We already have a bunch of randomised testing, which should give us
1394 * a decent amount of variation between runs. However, we should keep
1395 * this to limit the chances of introducing a temporary regression by
1396 * testing the most obvious cases that might make something blow up.
1397 */
1398
1399 for (i = 0; i < ARRAY_SIZE(backends); ++i) {
1400 for (j = 0; j < ARRAY_SIZE(combos); ++j) {
1401 struct drm_i915_gem_object *obj;
1402 u32 size = combos[j].size;
1403 u32 pages = combos[j].pages;
1404
1405 obj = backends[i].fn(i915, size, backends[i].flags);
1406 if (IS_ERR(obj)) {
1407 err = PTR_ERR(obj);
1408 if (err == -ENODEV) {
1409 pr_info("Device lacks local memory, skipping\n");
1410 err = 0;
1411 break;
1412 }
1413
1414 return err;
1415 }
1416
1417 err = i915_gem_object_pin_pages_unlocked(obj);
1418 if (err) {
1419 i915_gem_object_put(obj);
1420 goto out;
1421 }
1422
1423 GEM_BUG_ON(pages > obj->base.size);
1424 pages = pages & supported;
1425
1426 if (pages)
1427 obj->mm.page_sizes.sg = pages;
1428
1429 err = igt_write_huge(ctx, obj);
1430
1431 i915_gem_object_lock(obj, NULL);
1432 i915_gem_object_unpin_pages(obj);
1433 __i915_gem_object_put_pages(obj);
1434 i915_gem_object_unlock(obj);
1435 i915_gem_object_put(obj);
1436
1437 if (err) {
1438 pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n",
1439 __func__, size, pages, i, j);
1440 goto out;
1441 }
1442 }
1443
1444 cond_resched();
1445 }
1446
1447 out:
1448 if (err == -ENOMEM)
1449 err = 0;
1450
1451 return err;
1452 }
1453
1454 static int igt_tmpfs_fallback(void *arg)
1455 {
1456 struct i915_gem_context *ctx = arg;
1457 struct drm_i915_private *i915 = ctx->i915;
1458 struct vfsmount *gemfs = i915->mm.gemfs;
1459 struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
1460 struct drm_i915_gem_object *obj;
1461 struct i915_vma *vma;
1462 u32 *vaddr;
1463 int err = 0;
1464
1465 /*
1466 * Make sure that we don't burst into a ball of flames upon falling back
1467 * to tmpfs, which we rely on in the off chance that we encounter a failure
1468 * when setting up gemfs.
1469 */
1470
1471 i915->mm.gemfs = NULL;
1472
1473 obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
1474 if (IS_ERR(obj)) {
1475 err = PTR_ERR(obj);
1476 goto out_restore;
1477 }
1478
1479 vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
1480 if (IS_ERR(vaddr)) {
1481 err = PTR_ERR(vaddr);
1482 goto out_put;
1483 }
1484 *vaddr = 0xdeadbeaf;
1485
1486 __i915_gem_object_flush_map(obj, 0, 64);
1487 i915_gem_object_unpin_map(obj);
1488
1489 vma = i915_vma_instance(obj, vm, NULL);
1490 if (IS_ERR(vma)) {
1491 err = PTR_ERR(vma);
1492 goto out_put;
1493 }
1494
1495 err = i915_vma_pin(vma, 0, 0, PIN_USER);
1496 if (err)
1497 goto out_put;
1498
1499 err = igt_check_page_sizes(vma);
1500
1501 i915_vma_unpin(vma);
1502 out_put:
1503 i915_gem_object_put(obj);
1504 out_restore:
1505 i915->mm.gemfs = gemfs;
1506
1507 i915_vm_put(vm);
1508 return err;
1509 }
1510
1511 static int igt_shrink_thp(void *arg)
1512 {
1513 struct i915_gem_context *ctx = arg;
1514 struct drm_i915_private *i915 = ctx->i915;
1515 struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
1516 struct drm_i915_gem_object *obj;
1517 struct i915_gem_engines_iter it;
1518 struct intel_context *ce;
1519 struct i915_vma *vma;
1520 unsigned int flags = PIN_USER;
1521 unsigned int n;
1522 int err = 0;
1523
1524 /*
1525 * Sanity check shrinking huge-paged object -- make sure nothing blows
1526 * up.
1527 */
1528
1529 if (!igt_can_allocate_thp(i915)) {
1530 pr_info("missing THP support, skipping\n");
1531 goto out_vm;
1532 }
1533
1534 obj = i915_gem_object_create_shmem(i915, SZ_2M);
1535 if (IS_ERR(obj)) {
1536 err = PTR_ERR(obj);
1537 goto out_vm;
1538 }
1539
1540 vma = i915_vma_instance(obj, vm, NULL);
1541 if (IS_ERR(vma)) {
1542 err = PTR_ERR(vma);
1543 goto out_put;
1544 }
1545
1546 err = i915_vma_pin(vma, 0, 0, flags);
1547 if (err)
1548 goto out_put;
1549
1550 if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
1551 pr_info("failed to allocate THP, finishing test early\n");
1552 goto out_unpin;
1553 }
1554
1555 err = igt_check_page_sizes(vma);
1556 if (err)
1557 goto out_unpin;
1558
1559 n = 0;
1560
1561 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1562 if (!intel_engine_can_store_dword(ce->engine))
1563 continue;
1564
1565 err = gpu_write(ce, vma, n++, 0xdeadbeaf);
1566 if (err)
1567 break;
1568 }
1569 i915_gem_context_unlock_engines(ctx);
1570 i915_vma_unpin(vma);
1571 if (err)
1572 goto out_put;
1573
1574 /*
1575 * Now that the pages are *unpinned* shrink-all should invoke
1576 * shmem to truncate our pages.
1577 */
1578 i915_gem_shrink_all(i915);
1579 if (i915_gem_object_has_pages(obj)) {
1580 pr_err("shrink-all didn't truncate the pages\n");
1581 err = -EINVAL;
1582 goto out_put;
1583 }
1584
1585 if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
1586 pr_err("residual page-size bits left\n");
1587 err = -EINVAL;
1588 goto out_put;
1589 }
1590
1591 err = i915_vma_pin(vma, 0, 0, flags);
1592 if (err)
1593 goto out_put;
1594
1595 while (n--) {
1596 err = cpu_check(obj, n, 0xdeadbeaf);
1597 if (err)
1598 break;
1599 }
1600
1601 out_unpin:
1602 i915_vma_unpin(vma);
1603 out_put:
1604 i915_gem_object_put(obj);
1605 out_vm:
1606 i915_vm_put(vm);
1607
1608 return err;
1609 }
1610
1611 int i915_gem_huge_page_mock_selftests(void)
1612 {
1613 static const struct i915_subtest tests[] = {
1614 SUBTEST(igt_mock_exhaust_device_supported_pages),
1615 SUBTEST(igt_mock_memory_region_huge_pages),
1616 SUBTEST(igt_mock_ppgtt_misaligned_dma),
1617 SUBTEST(igt_mock_ppgtt_huge_fill),
1618 SUBTEST(igt_mock_ppgtt_64K),
1619 };
1620 struct drm_i915_private *dev_priv;
1621 struct i915_ppgtt *ppgtt;
1622 int err;
1623
1624 dev_priv = mock_gem_device();
1625 if (!dev_priv)
1626 return -ENOMEM;
1627
1628 /* Pretend to be a device which supports the 48b PPGTT */
1629 mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
1630 mkwrite_device_info(dev_priv)->ppgtt_size = 48;
1631
1632 ppgtt = i915_ppgtt_create(&dev_priv->gt);
1633 if (IS_ERR(ppgtt)) {
1634 err = PTR_ERR(ppgtt);
1635 goto out_unlock;
1636 }
1637
1638 if (!i915_vm_is_4lvl(&ppgtt->vm)) {
1639 pr_err("failed to create 48b PPGTT\n");
1640 err = -EINVAL;
1641 goto out_put;
1642 }
1643
1644 /* If we ever hit this then it's time to mock the 64K scratch */
1645 if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
1646 pr_err("PPGTT missing 64K scratch page\n");
1647 err = -EINVAL;
1648 goto out_put;
1649 }
1650
1651 err = i915_subtests(tests, ppgtt);
1652
1653 out_put:
1654 i915_vm_put(&ppgtt->vm);
1655 out_unlock:
1656 mock_destroy_device(dev_priv);
1657 return err;
1658 }
1659
1660 int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
1661 {
1662 static const struct i915_subtest tests[] = {
1663 SUBTEST(igt_shrink_thp),
1664 SUBTEST(igt_tmpfs_fallback),
1665 SUBTEST(igt_ppgtt_smoke_huge),
1666 SUBTEST(igt_ppgtt_sanity_check),
1667 };
1668 struct i915_gem_context *ctx;
1669 struct i915_address_space *vm;
1670 struct file *file;
1671 int err;
1672
1673 if (!HAS_PPGTT(i915)) {
1674 pr_info("PPGTT not supported, skipping live-selftests\n");
1675 return 0;
1676 }
1677
1678 if (intel_gt_is_wedged(&i915->gt))
1679 return 0;
1680
1681 file = mock_file(i915);
1682 if (IS_ERR(file))
1683 return PTR_ERR(file);
1684
1685 ctx = live_context(i915, file);
1686 if (IS_ERR(ctx)) {
1687 err = PTR_ERR(ctx);
1688 goto out_file;
1689 }
1690
1691 mutex_lock(&ctx->mutex);
1692 vm = i915_gem_context_vm(ctx);
1693 if (vm)
1694 WRITE_ONCE(vm->scrub_64K, true);
1695 mutex_unlock(&ctx->mutex);
1696
1697 err = i915_subtests(tests, ctx);
1698
1699 out_file:
1700 fput(file);
1701 return err;
1702 }
1703