/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "huge_gem_object.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

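/*
 * Build and pin a batch that emits one MI_STORE_DWORD_IMM per page:
 * starting at @offset within @vma, write @value into @count dwords,
 * stepping a page at a time. The command encoding varies with gen
 * (gen8+ takes a 64b address, gen4-7 a 32b address, older gens store
 * via a virtual address with MI_MEM_VIRTUAL).
 */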
static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = value;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = value;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = value;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

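/*
 * huge_gem_object backs a large "fake" GTT footprint (its dma size) with a
 * smaller set of real shmem pages (its phys size) that repeat throughout it;
 * these helpers report the two page counts respectively.
 */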
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

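/*
 * From @engine, running in @ctx, write the value dw into the dw'th dword of
 * every real page of @obj. The batch targets the dw'th group of fake GTT
 * pages (which alias back onto the real pages), so successive calls exercise
 * distinct GTT mappings of the same backing storage.
 */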
static int gpu_fill(struct drm_i915_gem_object *obj,
		    struct i915_gem_context *ctx,
		    struct intel_engine_cs *engine,
		    unsigned int dw)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(obj->base.size > vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(engine));

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/* Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	batch = gpu_fill_dw(vma,
			    (dw * real_page_count(obj)) << PAGE_SHIFT |
			    (dw * sizeof(u32)),
			    real_page_count(obj),
			    dw);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	flags = 0;
	if (INTEL_GEN(vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = engine->emit_bb_start(rq,
				    batch->node.start, batch->node.size,
				    flags);
	if (err)
		goto err_request;

	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	i915_gem_object_set_active_reference(batch->obj);
	i915_vma_unpin(batch);
	i915_vma_close(batch);

	i915_vma_unpin(vma);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}

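/* Set every dword of every real page of @obj to @value using the CPU. */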
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_obj_finish_shmem_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}

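/*
 * Read @obj back with the CPU and verify that the first @max dwords of each
 * real page hold their own index (as written by gpu_fill) and that the
 * remainder still hold the STACK_MAGIC canary written by cpu_fill.
 */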
static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
				       n, m, map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return err;
}

static int file_add_object(struct drm_file *file,
			   struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}

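/*
 * Create a huge_gem_object spanning up to half the context's vm, register it
 * with the drm_file for easy cleanup, prime every dword with STACK_MAGIC via
 * the CPU and add it to @objects for later verification.
 */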
static struct drm_i915_gem_object *
create_test_object(struct i915_gem_context *ctx,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm =
		ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
	u64 size;
	int err;

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	unsigned long ncontexts, ndwords, dw;
	bool first_shared_gtt = true;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU, making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ncontexts = 0;
	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		struct i915_gem_context *ctx;
		unsigned int id;

		if (first_shared_gtt) {
			ctx = __create_hw_context(i915, file->driver_priv);
			first_shared_gtt = false;
		} else {
			ctx = i915_gem_create_context(i915, file->driver_priv);
		}
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out_unlock;
		}

		for_each_engine(engine, i915, id) {
			if (!engine->context_size)
				continue; /* No logical context support in HW */

			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}
			}

			intel_runtime_pm_get(i915);
			err = gpu_fill(obj, ctx, engine, dw);
			intel_runtime_pm_put(i915);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		ncontexts++;
	}
	pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
		ncontexts, INTEL_INFO(i915)->num_rings, ndwords);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));

		err = cpu_check(obj, rem);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_file *file;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	unsigned long ndwords, dw;
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ctx = i915_gem_create_context(i915, file->driver_priv);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
	if (!ppgtt || !ppgtt->vm.has_read_only) {
		err = 0;
		goto out_unlock;
	}

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct intel_engine_cs *engine;
		unsigned int id;

		for_each_engine(engine, i915, id) {
			if (!intel_engine_can_store_dword(engine))
				continue;

			if (!obj) {
				obj = create_test_object(ctx, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					goto out_unlock;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			intel_runtime_pm_get(i915);
			err = gpu_fill(obj, ctx, engine, dw);
			intel_runtime_pm_put(i915);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->ppgtt), err);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
	}
	pr_info("Submitted %lu dwords (across %u engines)\n",
		ndwords, INTEL_INFO(i915)->num_rings);

	dw = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

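/* Pretty-print the engine selection for the GEM_TRACE below. */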
static __maybe_unused const char *
__engine_name(struct drm_i915_private *i915, unsigned int engines)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	if (engines == ALL_ENGINES)
		return "all";

	for_each_engine_masked(engine, i915, engines, tmp)
		return engine->name;

	return "none";
}

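/*
 * Emit a request on each selected engine from @ctx, then check that
 * i915_gem_switch_to_kernel_context() leaves every engine idling in the
 * kernel context, and that a second switch while already idle does not
 * emit any further requests.
 */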
static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
					  struct i915_gem_context *ctx,
					  unsigned int engines)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;
	int err;

	GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
	for_each_engine_masked(engine, i915, engines, tmp) {
		struct i915_request *rq;

		rq = i915_request_alloc(engine, ctx);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		i915_request_add(rq);
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	for_each_engine_masked(engine, i915, engines, tmp) {
		if (!engine_has_kernel_context_barrier(engine)) {
			pr_err("kernel context not last on engine %s!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	GEM_BUG_ON(i915->gt.active_requests);
	for_each_engine_masked(engine, i915, engines, tmp) {
		if (engine->last_retired_context->gem_context != i915->kernel_context) {
			pr_err("engine %s not idling in kernel context!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		return err;

	if (i915->gt.active_requests) {
		pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
		       i915->gt.active_requests);
		return -EINVAL;
	}

	for_each_engine_masked(engine, i915, engines, tmp) {
		if (!intel_engine_has_kernel_context(engine)) {
			pr_err("kernel context not last on engine %s!\n",
			       engine->name);
			return -EINVAL;
		}
	}

	return 0;
}

static int igt_switch_to_kernel_context(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int err;

	/*
	 * A core premise of switching to the kernel context is that
	 * if an engine is already idling in the kernel context, we
	 * do not emit another request and wake it up. The other is
	 * that we do indeed end up idling in the kernel context.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	ctx = kernel_context(i915);
	if (IS_ERR(ctx)) {
		mutex_unlock(&i915->drm.struct_mutex);
		return PTR_ERR(ctx);
	}

	/* First check idling each individual engine */
	for_each_engine(engine, i915, id) {
		err = __igt_switch_to_kernel_context(i915, ctx, BIT(id));
		if (err)
			goto out_unlock;
	}

	/* Now en masse */
	err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES);
	if (err)
		goto out_unlock;

out_unlock:
	GEM_TRACE_DUMP_ON(err);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	kernel_context_close(ctx);
	return err;
}

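/*
 * Install an aliasing ppgtt for the live tests to exercise. Any vma already
 * bound into the GGTT cannot yet be bound into the brand new ppgtt, so clear
 * their stale LOCAL_BIND flag.
 */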
static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	err = i915_gem_init_aliasing_ppgtt(i915);
	if (err)
		return err;

	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
		if (IS_ERR(vma))
			continue;

		vma->flags &= ~I915_VMA_LOCAL_BIND;
	}

	return 0;
}

static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
{
	i915_gem_fini_aliasing_ppgtt(i915);
}

int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_switch_to_kernel_context),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_switch_to_kernel_context),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
	};
	bool fake_alias = false;
	int err;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return 0;

	/* Install a fake aliasing gtt for exercise */
	if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		err = fake_aliasing_ppgtt_enable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
		if (err)
			return err;

		GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
		fake_alias = true;
	}

	err = i915_subtests(tests, dev_priv);

	if (fake_alias) {
		mutex_lock(&dev_priv->drm.struct_mutex);
		fake_aliasing_ppgtt_disable(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}