1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2016 Intel Corporation
5 */
6
7 #include <linux/prime_numbers.h>
8
9 #include "gt/intel_engine_pm.h"
10 #include "gt/intel_gt.h"
11 #include "gt/intel_gt_pm.h"
12 #include "gem/i915_gem_region.h"
13 #include "huge_gem_object.h"
14 #include "i915_selftest.h"
15 #include "selftests/i915_random.h"
16 #include "selftests/igt_flush_test.h"
17 #include "selftests/igt_mmap.h"
18
19 struct tile {
20 unsigned int width;
21 unsigned int height;
22 unsigned int stride;
23 unsigned int size;
24 unsigned int tiling;
25 unsigned int swizzle;
26 };
27
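/*
 * Bit-6 swizzling XORs selected higher address bits into bit 6 of the
 * tiled offset; swizzle_bit() extracts one such bit and aligns it with
 * bit 6 so it can be XORed in below.
 */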
28 static u64 swizzle_bit(unsigned int bit, u64 offset)
29 {
30 return (offset & BIT_ULL(bit)) >> (bit - 6);
31 }
32
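/*
 * Convert a linear byte offset, as seen through the fenced GTT view, into
 * the corresponding offset within the tiled backing store: decompose the
 * address into tile rows and columns for the tiling mode, then apply the
 * bit-6 swizzle. This is the software reference the GTT writes are
 * checked against.
 */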
33 static u64 tiled_offset(const struct tile *tile, u64 v)
34 {
35 u64 x, y;
36
37 if (tile->tiling == I915_TILING_NONE)
38 return v;
39
40 y = div64_u64_rem(v, tile->stride, &x);
41 v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
42
43 if (tile->tiling == I915_TILING_X) {
44 v += y * tile->width;
45 v += div64_u64_rem(x, tile->width, &x) << tile->size;
46 v += x;
47 } else if (tile->width == 128) {
48 const unsigned int ytile_span = 16;
49 const unsigned int ytile_height = 512;
50
51 v += y * ytile_span;
52 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
53 v += x;
54 } else {
55 const unsigned int ytile_span = 32;
56 const unsigned int ytile_height = 256;
57
58 v += y * ytile_span;
59 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
60 v += x;
61 }
62
63 switch (tile->swizzle) {
64 case I915_BIT_6_SWIZZLE_9:
65 v ^= swizzle_bit(9, v);
66 break;
67 case I915_BIT_6_SWIZZLE_9_10:
68 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
69 break;
70 case I915_BIT_6_SWIZZLE_9_11:
71 v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
72 break;
73 case I915_BIT_6_SWIZZLE_9_10_11:
74 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
75 break;
76 }
77
78 return v;
79 }
80
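/*
 * Pin a single randomly chosen page of the object as a partial GGTT view,
 * write its page index through the GTT mapping, then verify from the CPU
 * that the value landed at the swizzled location predicted by
 * tiled_offset().
 */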
81 static int check_partial_mapping(struct drm_i915_gem_object *obj,
82 const struct tile *tile,
83 struct rnd_state *prng)
84 {
85 const unsigned long npages = obj->base.size / PAGE_SIZE;
86 struct i915_ggtt_view view;
87 struct i915_vma *vma;
88 unsigned long page;
89 u32 __iomem *io;
90 struct page *p;
91 unsigned int n;
92 u64 offset;
93 u32 *cpu;
94 int err;
95
96 err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
97 if (err) {
98 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
99 tile->tiling, tile->stride, err);
100 return err;
101 }
102
103 GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
104 GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
105
106 i915_gem_object_lock(obj, NULL);
107 err = i915_gem_object_set_to_gtt_domain(obj, true);
108 i915_gem_object_unlock(obj);
109 if (err) {
110 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
111 return err;
112 }
113
114 page = i915_prandom_u32_max_state(npages, prng);
115 view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
116
117 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
118 if (IS_ERR(vma)) {
119 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
120 page, (int)PTR_ERR(vma));
121 return PTR_ERR(vma);
122 }
123
124 n = page - view.partial.offset;
125 GEM_BUG_ON(n >= view.partial.size);
126
127 io = i915_vma_pin_iomap(vma);
128 i915_vma_unpin(vma);
129 if (IS_ERR(io)) {
130 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
131 page, (int)PTR_ERR(io));
132 err = PTR_ERR(io);
133 goto out;
134 }
135
136 iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
137 i915_vma_unpin_iomap(vma);
138
139 offset = tiled_offset(tile, page << PAGE_SHIFT);
140 if (offset >= obj->base.size)
141 goto out;
142
143 intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
144
145 p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
146 cpu = kmap(p) + offset_in_page(offset);
147 drm_clflush_virt_range(cpu, sizeof(*cpu));
148 if (*cpu != (u32)page) {
149 pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
150 page, n,
151 view.partial.offset,
152 view.partial.size,
153 vma->size >> PAGE_SHIFT,
154 tile->tiling ? tile_row_pages(obj) : 0,
155 vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
156 offset >> PAGE_SHIFT,
157 (unsigned int)offset_in_page(offset),
158 offset,
159 (u32)page, *cpu);
160 err = -EINVAL;
161 }
162 *cpu = 0;
163 drm_clflush_virt_range(cpu, sizeof(*cpu));
164 kunmap(p);
165
166 out:
167 __i915_vma_put(vma);
168 return err;
169 }
170
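/*
 * As check_partial_mapping(), but walk a sequence of prime-numbered pages
 * within the object until we run out of pages or time, verifying each GTT
 * write against the manual swizzle calculation.
 */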
171 static int check_partial_mappings(struct drm_i915_gem_object *obj,
172 const struct tile *tile,
173 unsigned long end_time)
174 {
175 const unsigned int nreal = obj->scratch / PAGE_SIZE;
176 const unsigned long npages = obj->base.size / PAGE_SIZE;
177 struct i915_vma *vma;
178 unsigned long page;
179 int err;
180
181 err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
182 if (err) {
183 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
184 tile->tiling, tile->stride, err);
185 return err;
186 }
187
188 GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
189 GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
190
191 i915_gem_object_lock(obj, NULL);
192 err = i915_gem_object_set_to_gtt_domain(obj, true);
193 i915_gem_object_unlock(obj);
194 if (err) {
195 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
196 return err;
197 }
198
199 for_each_prime_number_from(page, 1, npages) {
200 struct i915_ggtt_view view =
201 compute_partial_view(obj, page, MIN_CHUNK_PAGES);
202 u32 __iomem *io;
203 struct page *p;
204 unsigned int n;
205 u64 offset;
206 u32 *cpu;
207
208 GEM_BUG_ON(view.partial.size > nreal);
209 cond_resched();
210
211 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
212 if (IS_ERR(vma)) {
213 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
214 page, (int)PTR_ERR(vma));
215 return PTR_ERR(vma);
216 }
217
218 n = page - view.partial.offset;
219 GEM_BUG_ON(n >= view.partial.size);
220
221 io = i915_vma_pin_iomap(vma);
222 i915_vma_unpin(vma);
223 if (IS_ERR(io)) {
224 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
225 page, (int)PTR_ERR(io));
226 return PTR_ERR(io);
227 }
228
229 iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
230 i915_vma_unpin_iomap(vma);
231
232 offset = tiled_offset(tile, page << PAGE_SHIFT);
233 if (offset >= obj->base.size)
234 continue;
235
236 intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
237
238 p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
239 cpu = kmap(p) + offset_in_page(offset);
240 drm_clflush_virt_range(cpu, sizeof(*cpu));
241 if (*cpu != (u32)page) {
242 pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
243 page, n,
244 view.partial.offset,
245 view.partial.size,
246 vma->size >> PAGE_SHIFT,
247 tile->tiling ? tile_row_pages(obj) : 0,
248 vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
249 offset >> PAGE_SHIFT,
250 (unsigned int)offset_in_page(offset),
251 offset,
252 (u32)page, *cpu);
253 err = -EINVAL;
254 }
255 *cpu = 0;
256 drm_clflush_virt_range(cpu, sizeof(*cpu));
257 kunmap(p);
258 if (err)
259 return err;
260
261 __i915_vma_put(vma);
262
263 if (igt_timeout(end_time,
264 "%s: timed out after tiling=%d stride=%d\n",
265 __func__, tile->tiling, tile->stride))
266 return -EINTR;
267 }
268
269 return 0;
270 }
271
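/*
 * Fill in the tile width/height/size for the chosen tiling mode on this
 * platform and return the maximum pitch, in units of the tile width,
 * that the fence registers can cover.
 */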
272 static unsigned int
273 setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
274 {
275 if (INTEL_GEN(i915) <= 2) {
276 tile->height = 16;
277 tile->width = 128;
278 tile->size = 11;
279 } else if (tile->tiling == I915_TILING_Y &&
280 HAS_128_BYTE_Y_TILING(i915)) {
281 tile->height = 32;
282 tile->width = 128;
283 tile->size = 12;
284 } else {
285 tile->height = 8;
286 tile->width = 512;
287 tile->size = 12;
288 }
289
290 if (INTEL_GEN(i915) < 4)
291 return 8192 / tile->width;
292 else if (INTEL_GEN(i915) < 7)
293 return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
294 else
295 return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
296 }
297
298 static int igt_partial_tiling(void *arg)
299 {
300 const unsigned int nreal = 1 << 12; /* largest tile row x2 */
301 struct drm_i915_private *i915 = arg;
302 struct drm_i915_gem_object *obj;
303 intel_wakeref_t wakeref;
304 int tiling;
305 int err;
306
307 if (!i915_ggtt_has_aperture(&i915->ggtt))
308 return 0;
309
310 /* We want to check the page mapping and fencing of a large object
311 * mmapped through the GTT. The object we create is larger than can
312 * possibly be mmapped as a whole, and so we must use partial GGTT vma.
313 * We then check that a write through each partial GGTT vma ends up
314 * in the right set of pages within the object, and with the expected
315 * tiling, which we verify by manual swizzling.
316 */
317
318 obj = huge_gem_object(i915,
319 nreal << PAGE_SHIFT,
320 (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
321 if (IS_ERR(obj))
322 return PTR_ERR(obj);
323
324 err = i915_gem_object_pin_pages(obj);
325 if (err) {
326 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
327 nreal, obj->base.size / PAGE_SIZE, err);
328 goto out;
329 }
330
331 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
332
333 if (1) {
334 IGT_TIMEOUT(end);
335 struct tile tile;
336
337 tile.height = 1;
338 tile.width = 1;
339 tile.size = 0;
340 tile.stride = 0;
341 tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
342 tile.tiling = I915_TILING_NONE;
343
344 err = check_partial_mappings(obj, &tile, end);
345 if (err && err != -EINTR)
346 goto out_unlock;
347 }
348
349 for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
350 IGT_TIMEOUT(end);
351 unsigned int max_pitch;
352 unsigned int pitch;
353 struct tile tile;
354
355 if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
356 /*
357 * The swizzling pattern is actually unknown as it
358 * varies based on physical address of each page.
359 * See i915_gem_detect_bit_6_swizzle().
360 */
361 break;
362
363 tile.tiling = tiling;
364 switch (tiling) {
365 case I915_TILING_X:
366 tile.swizzle = i915->ggtt.bit_6_swizzle_x;
367 break;
368 case I915_TILING_Y:
369 tile.swizzle = i915->ggtt.bit_6_swizzle_y;
370 break;
371 }
372
373 GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
374 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
375 tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
376 continue;
377
378 max_pitch = setup_tile_size(&tile, i915);
379
380 for (pitch = max_pitch; pitch; pitch >>= 1) {
381 tile.stride = tile.width * pitch;
382 err = check_partial_mappings(obj, &tile, end);
383 if (err == -EINTR)
384 goto next_tiling;
385 if (err)
386 goto out_unlock;
387
388 if (pitch > 2 && INTEL_GEN(i915) >= 4) {
389 tile.stride = tile.width * (pitch - 1);
390 err = check_partial_mappings(obj, &tile, end);
391 if (err == -EINTR)
392 goto next_tiling;
393 if (err)
394 goto out_unlock;
395 }
396
397 if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
398 tile.stride = tile.width * (pitch + 1);
399 err = check_partial_mappings(obj, &tile, end);
400 if (err == -EINTR)
401 goto next_tiling;
402 if (err)
403 goto out_unlock;
404 }
405 }
406
407 if (INTEL_GEN(i915) >= 4) {
408 for_each_prime_number(pitch, max_pitch) {
409 tile.stride = tile.width * pitch;
410 err = check_partial_mappings(obj, &tile, end);
411 if (err == -EINTR)
412 goto next_tiling;
413 if (err)
414 goto out_unlock;
415 }
416 }
417
418 next_tiling: ;
419 }
420
421 out_unlock:
422 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
423 i915_gem_object_unpin_pages(obj);
424 out:
425 i915_gem_object_put(obj);
426 return err;
427 }
428
429 static int igt_smoke_tiling(void *arg)
430 {
431 const unsigned int nreal = 1 << 12; /* largest tile row x2 */
432 struct drm_i915_private *i915 = arg;
433 struct drm_i915_gem_object *obj;
434 intel_wakeref_t wakeref;
435 I915_RND_STATE(prng);
436 unsigned long count;
437 IGT_TIMEOUT(end);
438 int err;
439
440 if (!i915_ggtt_has_aperture(&i915->ggtt))
441 return 0;
442
443 /*
444 * igt_partial_tiling() does an exhaustive check of partial tiling
445 * chunking, but will undoubtedly run out of time. Here, we do a
446 * randomised search and hope that over many 1s runs with different
447 * seeds we will do a thorough check.
448 *
449 * Remember to look at the st_seed if we see a flip-flop in BAT!
450 */
451
452 if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
453 return 0;
454
455 obj = huge_gem_object(i915,
456 nreal << PAGE_SHIFT,
457 (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
458 if (IS_ERR(obj))
459 return PTR_ERR(obj);
460
461 err = i915_gem_object_pin_pages(obj);
462 if (err) {
463 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
464 nreal, obj->base.size / PAGE_SIZE, err);
465 goto out;
466 }
467
468 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
469
470 count = 0;
471 do {
472 struct tile tile;
473
474 tile.tiling =
475 i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
476 switch (tile.tiling) {
477 case I915_TILING_NONE:
478 tile.height = 1;
479 tile.width = 1;
480 tile.size = 0;
481 tile.stride = 0;
482 tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
483 break;
484
485 case I915_TILING_X:
486 tile.swizzle = i915->ggtt.bit_6_swizzle_x;
487 break;
488 case I915_TILING_Y:
489 tile.swizzle = i915->ggtt.bit_6_swizzle_y;
490 break;
491 }
492
493 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
494 tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
495 continue;
496
497 if (tile.tiling != I915_TILING_NONE) {
498 unsigned int max_pitch = setup_tile_size(&tile, i915);
499
500 tile.stride =
501 i915_prandom_u32_max_state(max_pitch, &prng);
502 tile.stride = (1 + tile.stride) * tile.width;
503 if (INTEL_GEN(i915) < 4)
504 tile.stride = rounddown_pow_of_two(tile.stride);
505 }
506
507 err = check_partial_mapping(obj, &tile, &prng);
508 if (err)
509 break;
510
511 count++;
512 } while (!__igt_timeout(end, NULL));
513
514 pr_info("%s: Completed %lu trials\n", __func__, count);
515
516 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
517 i915_gem_object_unpin_pages(obj);
518 out:
519 i915_gem_object_put(obj);
520 return err;
521 }
522
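/*
 * Keep the object busy by submitting a write request against it on every
 * uabi engine, then drop our reference so the object stays alive only via
 * those in-flight requests.
 */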
523 static int make_obj_busy(struct drm_i915_gem_object *obj)
524 {
525 struct drm_i915_private *i915 = to_i915(obj->base.dev);
526 struct intel_engine_cs *engine;
527
528 for_each_uabi_engine(engine, i915) {
529 struct i915_request *rq;
530 struct i915_vma *vma;
531 struct i915_gem_ww_ctx ww;
532 int err;
533
534 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
535 if (IS_ERR(vma))
536 return PTR_ERR(vma);
537
538 i915_gem_ww_ctx_init(&ww, false);
539 retry:
540 err = i915_gem_object_lock(obj, &ww);
541 if (!err)
542 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
543 if (err)
544 goto err;
545
546 rq = intel_engine_create_kernel_request(engine);
547 if (IS_ERR(rq)) {
548 err = PTR_ERR(rq);
549 goto err_unpin;
550 }
551
552 err = i915_request_await_object(rq, vma->obj, true);
553 if (err == 0)
554 err = i915_vma_move_to_active(vma, rq,
555 EXEC_OBJECT_WRITE);
556
557 i915_request_add(rq);
558 err_unpin:
559 i915_vma_unpin(vma);
560 err:
561 if (err == -EDEADLK) {
562 err = i915_gem_ww_ctx_backoff(&ww);
563 if (!err)
564 goto retry;
565 }
566 i915_gem_ww_ctx_fini(&ww);
567 if (err)
568 return err;
569 }
570
571 i915_gem_object_put(obj); /* leave it only alive via its active ref */
572 return 0;
573 }
574
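/*
 * Try to attach a GTT mmap offset to a freshly created object of the
 * given size and check that the attempt returns the expected error code.
 */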
575 static bool assert_mmap_offset(struct drm_i915_private *i915,
576 unsigned long size,
577 int expected)
578 {
579 struct drm_i915_gem_object *obj;
580 struct i915_mmap_offset *mmo;
581
582 obj = i915_gem_object_create_internal(i915, size);
583 if (IS_ERR(obj))
584 return false;
585
586 mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
587 i915_gem_object_put(obj);
588
589 return PTR_ERR_OR_ZERO(mmo) == expected;
590 }
591
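/*
 * Park the background retire worker and shrinker so the selftest has
 * exclusive control over when requests are retired and objects reaped.
 */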
592 static void disable_retire_worker(struct drm_i915_private *i915)
593 {
594 i915_gem_driver_unregister__shrinker(i915);
595 intel_gt_pm_get(&i915->gt);
596 cancel_delayed_work_sync(&i915->gt.requests.retire_work);
597 }
598
599 static void restore_retire_worker(struct drm_i915_private *i915)
600 {
601 igt_flush_test(i915);
602 intel_gt_pm_put(&i915->gt);
603 i915_gem_driver_register__shrinker(i915);
604 }
605
606 static void mmap_offset_lock(struct drm_i915_private *i915)
607 __acquires(&i915->drm.vma_offset_manager->vm_lock)
608 {
609 write_lock(&i915->drm.vma_offset_manager->vm_lock);
610 }
611
612 static void mmap_offset_unlock(struct drm_i915_private *i915)
613 __releases(&i915->drm.vma_offset_manager->vm_lock)
614 {
615 write_unlock(&i915->drm.vma_offset_manager->vm_lock);
616 }
617
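/*
 * Trim the mmap offset (VMA manager) address space down to a single page
 * and check that allocation failures, and reclaim of dead but busy
 * objects, behave as expected once the space is exhausted.
 */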
618 static int igt_mmap_offset_exhaustion(void *arg)
619 {
620 struct drm_i915_private *i915 = arg;
621 struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
622 struct drm_i915_gem_object *obj;
623 struct drm_mm_node *hole, *next;
624 struct i915_mmap_offset *mmo;
625 int loop, err = 0;
626
627 /* Disable background reaper */
628 disable_retire_worker(i915);
629 GEM_BUG_ON(!i915->gt.awake);
630 intel_gt_retire_requests(&i915->gt);
631 i915_gem_drain_freed_objects(i915);
632
633 /* Trim the device mmap space to only a page */
634 mmap_offset_lock(i915);
635 loop = 1; /* PAGE_SIZE units */
636 list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
637 struct drm_mm_node *resv;
638
639 resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
640 if (!resv) {
641 err = -ENOMEM;
642 goto out_park;
643 }
644
645 resv->start = drm_mm_hole_node_start(hole) + loop;
646 resv->size = hole->hole_size - loop;
647 resv->color = -1ul;
648 loop = 0;
649
650 if (!resv->size) {
651 kfree(resv);
652 continue;
653 }
654
655 pr_debug("Reserving hole [%llx + %llx]\n",
656 resv->start, resv->size);
657
658 err = drm_mm_reserve_node(mm, resv);
659 if (err) {
660 pr_err("Failed to trim VMA manager, err=%d\n", err);
661 kfree(resv);
662 goto out_park;
663 }
664 }
665 GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
666 mmap_offset_unlock(i915);
667
668 /* Just fits! */
669 if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
670 pr_err("Unable to insert object into single page hole\n");
671 err = -EINVAL;
672 goto out;
673 }
674
675 /* Too large */
676 if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
677 pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
678 err = -EINVAL;
679 goto out;
680 }
681
682 /* Fill the hole, further allocation attempts should then fail */
683 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
684 if (IS_ERR(obj)) {
685 err = PTR_ERR(obj);
686 goto out;
687 }
688
689 mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
690 if (IS_ERR(mmo)) {
691 pr_err("Unable to insert object into reclaimed hole\n");
692 err = PTR_ERR(mmo);
693 goto err_obj;
694 }
695
696 if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
697 pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
698 err = -EINVAL;
699 goto err_obj;
700 }
701
702 i915_gem_object_put(obj);
703
704 /* Now fill with busy dead objects that we expect to reap */
705 for (loop = 0; loop < 3; loop++) {
706 if (intel_gt_is_wedged(&i915->gt))
707 break;
708
709 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
710 if (IS_ERR(obj)) {
711 err = PTR_ERR(obj);
712 goto out;
713 }
714
715 err = make_obj_busy(obj);
716 if (err) {
717 pr_err("[loop %d] Failed to busy the object\n", loop);
718 goto err_obj;
719 }
720 }
721
722 out:
723 mmap_offset_lock(i915);
724 out_park:
725 drm_mm_for_each_node_safe(hole, next, mm) {
726 if (hole->color != -1ul)
727 continue;
728
729 drm_mm_remove_node(hole);
730 kfree(hole);
731 }
732 mmap_offset_unlock(i915);
733 restore_retire_worker(i915);
734 return err;
735 err_obj:
736 i915_gem_object_put(obj);
737 goto out;
738 }
739
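/*
 * gtt_set()/gtt_check() fill the object with POISON_INUSE through a
 * mappable GGTT binding and later verify that every byte was overwritten
 * with POISON_FREE by the mmap under test.
 */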
740 static int gtt_set(struct drm_i915_gem_object *obj)
741 {
742 struct i915_vma *vma;
743 void __iomem *map;
744 int err = 0;
745
746 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
747 if (IS_ERR(vma))
748 return PTR_ERR(vma);
749
750 intel_gt_pm_get(vma->vm->gt);
751 map = i915_vma_pin_iomap(vma);
752 i915_vma_unpin(vma);
753 if (IS_ERR(map)) {
754 err = PTR_ERR(map);
755 goto out;
756 }
757
758 memset_io(map, POISON_INUSE, obj->base.size);
759 i915_vma_unpin_iomap(vma);
760
761 out:
762 intel_gt_pm_put(vma->vm->gt);
763 return err;
764 }
765
766 static int gtt_check(struct drm_i915_gem_object *obj)
767 {
768 struct i915_vma *vma;
769 void __iomem *map;
770 int err = 0;
771
772 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
773 if (IS_ERR(vma))
774 return PTR_ERR(vma);
775
776 intel_gt_pm_get(vma->vm->gt);
777 map = i915_vma_pin_iomap(vma);
778 i915_vma_unpin(vma);
779 if (IS_ERR(map)) {
780 err = PTR_ERR(map);
781 goto out;
782 }
783
784 if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
785 pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
786 obj->mm.region->name);
787 err = -EINVAL;
788 }
789 i915_vma_unpin_iomap(vma);
790
791 out:
792 intel_gt_pm_put(vma->vm->gt);
793 return err;
794 }
795
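/*
 * wc_set()/wc_check() are the CPU write-combining counterparts of
 * gtt_set()/gtt_check(), for objects that can be mapped directly with
 * i915_gem_object_pin_map().
 */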
796 static int wc_set(struct drm_i915_gem_object *obj)
797 {
798 void *vaddr;
799
800 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
801 if (IS_ERR(vaddr))
802 return PTR_ERR(vaddr);
803
804 memset(vaddr, POISON_INUSE, obj->base.size);
805 i915_gem_object_flush_map(obj);
806 i915_gem_object_unpin_map(obj);
807
808 return 0;
809 }
810
811 static int wc_check(struct drm_i915_gem_object *obj)
812 {
813 void *vaddr;
814 int err = 0;
815
816 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
817 if (IS_ERR(vaddr))
818 return PTR_ERR(vaddr);
819
820 if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
821 pr_err("%s: Write via mmap did not land in backing store (WC)\n",
822 obj->mm.region->name);
823 err = -EINVAL;
824 }
825 i915_gem_object_unpin_map(obj);
826
827 return err;
828 }
829
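/*
 * Not every object supports every mmap type: GTT mmaps need a mappable
 * aperture, while the other modes need backing struct pages or IOMEM the
 * CPU can address.
 */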
830 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
831 {
832 if (type == I915_MMAP_TYPE_GTT &&
833 !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
834 return false;
835
836 if (type != I915_MMAP_TYPE_GTT &&
837 !i915_gem_object_type_has(obj,
838 I915_GEM_OBJECT_HAS_STRUCT_PAGE |
839 I915_GEM_OBJECT_HAS_IOMEM))
840 return false;
841
842 return true;
843 }
844
845 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
846 static int __igt_mmap(struct drm_i915_private *i915,
847 struct drm_i915_gem_object *obj,
848 enum i915_mmap_type type)
849 {
850 struct i915_mmap_offset *mmo;
851 struct vm_area_struct *area;
852 unsigned long addr;
853 int err, i;
854
855 if (!can_mmap(obj, type))
856 return 0;
857
858 err = wc_set(obj);
859 if (err == -ENXIO)
860 err = gtt_set(obj);
861 if (err)
862 return err;
863
864 mmo = mmap_offset_attach(obj, type, NULL);
865 if (IS_ERR(mmo))
866 return PTR_ERR(mmo);
867
868 addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
869 if (IS_ERR_VALUE(addr))
870 return addr;
871
872 pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
873
874 area = find_vma(current->mm, addr);
875 if (!area) {
876 pr_err("%s: Did not create a vm_area_struct for the mmap\n",
877 obj->mm.region->name);
878 err = -EINVAL;
879 goto out_unmap;
880 }
881
882 if (area->vm_private_data != mmo) {
883 pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
884 obj->mm.region->name);
885 err = -EINVAL;
886 goto out_unmap;
887 }
888
889 for (i = 0; i < obj->base.size / sizeof(u32); i++) {
890 u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
891 u32 x;
892
893 if (get_user(x, ux)) {
894 pr_err("%s: Unable to read from mmap, offset:%zd\n",
895 obj->mm.region->name, i * sizeof(x));
896 err = -EFAULT;
897 goto out_unmap;
898 }
899
900 if (x != expand32(POISON_INUSE)) {
901 pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
902 obj->mm.region->name,
903 i * sizeof(x), x, expand32(POISON_INUSE));
904 err = -EINVAL;
905 goto out_unmap;
906 }
907
908 x = expand32(POISON_FREE);
909 if (put_user(x, ux)) {
910 pr_err("%s: Unable to write to mmap, offset:%zd\n",
911 obj->mm.region->name, i * sizeof(x));
912 err = -EFAULT;
913 goto out_unmap;
914 }
915 }
916
917 if (type == I915_MMAP_TYPE_GTT)
918 intel_gt_flush_ggtt_writes(&i915->gt);
919
920 err = wc_check(obj);
921 if (err == -ENXIO)
922 err = gtt_check(obj);
923 out_unmap:
924 vm_munmap(addr, obj->base.size);
925 return err;
926 }
927
928 static int igt_mmap(void *arg)
929 {
930 struct drm_i915_private *i915 = arg;
931 struct intel_memory_region *mr;
932 enum intel_region_id id;
933
934 for_each_memory_region(mr, i915, id) {
935 unsigned long sizes[] = {
936 PAGE_SIZE,
937 mr->min_page_size,
938 SZ_4M,
939 };
940 int i;
941
942 for (i = 0; i < ARRAY_SIZE(sizes); i++) {
943 struct drm_i915_gem_object *obj;
944 int err;
945
946 obj = i915_gem_object_create_region(mr, sizes[i], 0);
947 if (obj == ERR_PTR(-ENODEV))
948 continue;
949
950 if (IS_ERR(obj))
951 return PTR_ERR(obj);
952
953 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
954 if (err == 0)
955 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
956
957 i915_gem_object_put(obj);
958 if (err)
959 return err;
960 }
961 }
962
963 return 0;
964 }
965
966 static const char *repr_mmap_type(enum i915_mmap_type type)
967 {
968 switch (type) {
969 case I915_MMAP_TYPE_GTT: return "gtt";
970 case I915_MMAP_TYPE_WB: return "wb";
971 case I915_MMAP_TYPE_WC: return "wc";
972 case I915_MMAP_TYPE_UC: return "uc";
973 default: return "unknown";
974 }
975 }
976
977 static bool can_access(const struct drm_i915_gem_object *obj)
978 {
979 unsigned int flags =
980 I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
981
982 return i915_gem_object_type_has(obj, flags);
983 }
984
985 static int __igt_mmap_access(struct drm_i915_private *i915,
986 struct drm_i915_gem_object *obj,
987 enum i915_mmap_type type)
988 {
989 struct i915_mmap_offset *mmo;
990 unsigned long __user *ptr;
991 unsigned long A, B;
992 unsigned long x, y;
993 unsigned long addr;
994 int err;
995
996 memset(&A, 0xAA, sizeof(A));
997 memset(&B, 0xBB, sizeof(B));
998
999 if (!can_mmap(obj, type) || !can_access(obj))
1000 return 0;
1001
1002 mmo = mmap_offset_attach(obj, type, NULL);
1003 if (IS_ERR(mmo))
1004 return PTR_ERR(mmo);
1005
1006 addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
1007 if (IS_ERR_VALUE(addr))
1008 return addr;
1009 ptr = (unsigned long __user *)addr;
1010
1011 err = __put_user(A, ptr);
1012 if (err) {
1013 pr_err("%s(%s): failed to write into user mmap\n",
1014 obj->mm.region->name, repr_mmap_type(type));
1015 goto out_unmap;
1016 }
1017
1018 intel_gt_flush_ggtt_writes(&i915->gt);
1019
1020 err = access_process_vm(current, addr, &x, sizeof(x), 0);
1021 if (err != sizeof(x)) {
1022 pr_err("%s(%s): access_process_vm() read failed\n",
1023 obj->mm.region->name, repr_mmap_type(type));
1024 goto out_unmap;
1025 }
1026
1027 err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
1028 if (err != sizeof(B)) {
1029 pr_err("%s(%s): access_process_vm() write failed\n",
1030 obj->mm.region->name, repr_mmap_type(type));
1031 goto out_unmap;
1032 }
1033
1034 intel_gt_flush_ggtt_writes(&i915->gt);
1035
1036 err = __get_user(y, ptr);
1037 if (err) {
1038 pr_err("%s(%s): failed to read from user mmap\n",
1039 obj->mm.region->name, repr_mmap_type(type));
1040 goto out_unmap;
1041 }
1042
1043 if (x != A || y != B) {
1044 pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
1045 obj->mm.region->name, repr_mmap_type(type),
1046 x, y);
1047 err = -EINVAL;
1048 goto out_unmap;
1049 }
1050
1051 out_unmap:
1052 vm_munmap(addr, obj->base.size);
1053 return err;
1054 }
1055
1056 static int igt_mmap_access(void *arg)
1057 {
1058 struct drm_i915_private *i915 = arg;
1059 struct intel_memory_region *mr;
1060 enum intel_region_id id;
1061
1062 for_each_memory_region(mr, i915, id) {
1063 struct drm_i915_gem_object *obj;
1064 int err;
1065
1066 obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
1067 if (obj == ERR_PTR(-ENODEV))
1068 continue;
1069
1070 if (IS_ERR(obj))
1071 return PTR_ERR(obj);
1072
1073 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
1074 if (err == 0)
1075 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
1076 if (err == 0)
1077 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
1078 if (err == 0)
1079 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
1080
1081 i915_gem_object_put(obj);
1082 if (err)
1083 return err;
1084 }
1085
1086 return 0;
1087 }
1088
1089 static int __igt_mmap_gpu(struct drm_i915_private *i915,
1090 struct drm_i915_gem_object *obj,
1091 enum i915_mmap_type type)
1092 {
1093 struct intel_engine_cs *engine;
1094 struct i915_mmap_offset *mmo;
1095 unsigned long addr;
1096 u32 __user *ux;
1097 u32 bbe;
1098 int err;
1099
1100 /*
1101 * Verify that the mmap access into the backing store aligns with
1102 * that of the GPU, i.e. that mmap is indeed writing into the same
1103 * page as being read by the GPU.
1104 */
1105
1106 if (!can_mmap(obj, type))
1107 return 0;
1108
1109 err = wc_set(obj);
1110 if (err == -ENXIO)
1111 err = gtt_set(obj);
1112 if (err)
1113 return err;
1114
1115 mmo = mmap_offset_attach(obj, type, NULL);
1116 if (IS_ERR(mmo))
1117 return PTR_ERR(mmo);
1118
1119 addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
1120 if (IS_ERR_VALUE(addr))
1121 return addr;
1122
1123 ux = u64_to_user_ptr((u64)addr);
1124 bbe = MI_BATCH_BUFFER_END;
1125 if (put_user(bbe, ux)) {
1126 pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
1127 err = -EFAULT;
1128 goto out_unmap;
1129 }
1130
1131 if (type == I915_MMAP_TYPE_GTT)
1132 intel_gt_flush_ggtt_writes(&i915->gt);
1133
1134 for_each_uabi_engine(engine, i915) {
1135 struct i915_request *rq;
1136 struct i915_vma *vma;
1137 struct i915_gem_ww_ctx ww;
1138
1139 vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
1140 if (IS_ERR(vma)) {
1141 err = PTR_ERR(vma);
1142 goto out_unmap;
1143 }
1144
1145 i915_gem_ww_ctx_init(&ww, false);
1146 retry:
1147 err = i915_gem_object_lock(obj, &ww);
1148 if (!err)
1149 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
1150 if (err)
1151 goto out_ww;
1152
1153 rq = i915_request_create(engine->kernel_context);
1154 if (IS_ERR(rq)) {
1155 err = PTR_ERR(rq);
1156 goto out_unpin;
1157 }
1158
1159 err = i915_request_await_object(rq, vma->obj, false);
1160 if (err == 0)
1161 err = i915_vma_move_to_active(vma, rq, 0);
1162
1163 err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
1164 i915_request_get(rq);
1165 i915_request_add(rq);
1166
1167 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1168 struct drm_printer p =
1169 drm_info_printer(engine->i915->drm.dev);
1170
1171 pr_err("%s(%s, %s): Failed to execute batch\n",
1172 __func__, engine->name, obj->mm.region->name);
1173 intel_engine_dump(engine, &p,
1174 "%s\n", engine->name);
1175
1176 intel_gt_set_wedged(engine->gt);
1177 err = -EIO;
1178 }
1179 i915_request_put(rq);
1180
1181 out_unpin:
1182 i915_vma_unpin(vma);
1183 out_ww:
1184 if (err == -EDEADLK) {
1185 err = i915_gem_ww_ctx_backoff(&ww);
1186 if (!err)
1187 goto retry;
1188 }
1189 i915_gem_ww_ctx_fini(&ww);
1190 if (err)
1191 goto out_unmap;
1192 }
1193
1194 out_unmap:
1195 vm_munmap(addr, obj->base.size);
1196 return err;
1197 }
1198
1199 static int igt_mmap_gpu(void *arg)
1200 {
1201 struct drm_i915_private *i915 = arg;
1202 struct intel_memory_region *mr;
1203 enum intel_region_id id;
1204
1205 for_each_memory_region(mr, i915, id) {
1206 struct drm_i915_gem_object *obj;
1207 int err;
1208
1209 obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
1210 if (obj == ERR_PTR(-ENODEV))
1211 continue;
1212
1213 if (IS_ERR(obj))
1214 return PTR_ERR(obj);
1215
1216 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
1217 if (err == 0)
1218 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
1219
1220 i915_gem_object_put(obj);
1221 if (err)
1222 return err;
1223 }
1224
1225 return 0;
1226 }
1227
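/*
 * Helpers for walking the test mapping's page tables: every PTE must be
 * populated after prefaulting, and every PTE must be cleared again once
 * the mmap has been revoked.
 */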
1228 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
1229 {
1230 if (!pte_present(*pte) || pte_none(*pte)) {
1231 pr_err("missing PTE:%lx\n",
1232 (addr - (unsigned long)data) >> PAGE_SHIFT);
1233 return -EINVAL;
1234 }
1235
1236 return 0;
1237 }
1238
1239 static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
1240 {
1241 if (pte_present(*pte) && !pte_none(*pte)) {
1242 pr_err("present PTE:%lx; expected to be revoked\n",
1243 (addr - (unsigned long)data) >> PAGE_SHIFT);
1244 return -EINVAL;
1245 }
1246
1247 return 0;
1248 }
1249
1250 static int check_present(unsigned long addr, unsigned long len)
1251 {
1252 return apply_to_page_range(current->mm, addr, len,
1253 check_present_pte, (void *)addr);
1254 }
1255
1256 static int check_absent(unsigned long addr, unsigned long len)
1257 {
1258 return apply_to_page_range(current->mm, addr, len,
1259 check_absent_pte, (void *)addr);
1260 }
1261
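/*
 * Touch every page of the user mapping so that the fault handler
 * populates the PTEs before we inspect them.
 */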
1262 static int prefault_range(u64 start, u64 len)
1263 {
1264 const char __user *addr, *end;
1265 char __maybe_unused c;
1266 int err;
1267
1268 addr = u64_to_user_ptr(start);
1269 end = addr + len;
1270
1271 for (; addr < end; addr += PAGE_SIZE) {
1272 err = __get_user(c, addr);
1273 if (err)
1274 return err;
1275 }
1276
1277 return __get_user(c, end - 1);
1278 }
1279
1280 static int __igt_mmap_revoke(struct drm_i915_private *i915,
1281 struct drm_i915_gem_object *obj,
1282 enum i915_mmap_type type)
1283 {
1284 struct i915_mmap_offset *mmo;
1285 unsigned long addr;
1286 int err;
1287
1288 if (!can_mmap(obj, type))
1289 return 0;
1290
1291 mmo = mmap_offset_attach(obj, type, NULL);
1292 if (IS_ERR(mmo))
1293 return PTR_ERR(mmo);
1294
1295 addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
1296 if (IS_ERR_VALUE(addr))
1297 return addr;
1298
1299 err = prefault_range(addr, obj->base.size);
1300 if (err)
1301 goto out_unmap;
1302
1303 err = check_present(addr, obj->base.size);
1304 if (err) {
1305 pr_err("%s: was not present\n", obj->mm.region->name);
1306 goto out_unmap;
1307 }
1308
1309 /*
1310 * After unbinding the object from the GGTT, its address may be reused
1311 * for other objects. Ergo we have to revoke the previous mmap PTE
1312 * access as it no longer points to the same object.
1313 */
1314 err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
1315 if (err) {
1316 pr_err("Failed to unbind object!\n");
1317 goto out_unmap;
1318 }
1319
1320 if (type != I915_MMAP_TYPE_GTT) {
1321 __i915_gem_object_put_pages(obj);
1322 if (i915_gem_object_has_pages(obj)) {
1323 pr_err("Failed to put-pages object!\n");
1324 err = -EINVAL;
1325 goto out_unmap;
1326 }
1327 }
1328
1329 err = check_absent(addr, obj->base.size);
1330 if (err) {
1331 pr_err("%s: was not absent\n", obj->mm.region->name);
1332 goto out_unmap;
1333 }
1334
1335 out_unmap:
1336 vm_munmap(addr, obj->base.size);
1337 return err;
1338 }
1339
1340 static int igt_mmap_revoke(void *arg)
1341 {
1342 struct drm_i915_private *i915 = arg;
1343 struct intel_memory_region *mr;
1344 enum intel_region_id id;
1345
1346 for_each_memory_region(mr, i915, id) {
1347 struct drm_i915_gem_object *obj;
1348 int err;
1349
1350 obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
1351 if (obj == ERR_PTR(-ENODEV))
1352 continue;
1353
1354 if (IS_ERR(obj))
1355 return PTR_ERR(obj);
1356
1357 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
1358 if (err == 0)
1359 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
1360
1361 i915_gem_object_put(obj);
1362 if (err)
1363 return err;
1364 }
1365
1366 return 0;
1367 }
1368
1369 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1370 {
1371 static const struct i915_subtest tests[] = {
1372 SUBTEST(igt_partial_tiling),
1373 SUBTEST(igt_smoke_tiling),
1374 SUBTEST(igt_mmap_offset_exhaustion),
1375 SUBTEST(igt_mmap),
1376 SUBTEST(igt_mmap_access),
1377 SUBTEST(igt_mmap_revoke),
1378 SUBTEST(igt_mmap_gpu),
1379 };
1380
1381 return i915_subtests(tests, i915);
1382 }
1383