/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_gem_device.h"
#include "mock_context.h"

static bool assert_vma(struct i915_vma *vma,
		       struct drm_i915_gem_object *obj,
		       struct i915_gem_context *ctx)
{
	bool ok = true;

	if (vma->vm != &ctx->ppgtt->vm) {
		pr_err("VMA created with wrong VM\n");
		ok = false;
	}

	if (vma->size != obj->base.size) {
		pr_err("VMA created with wrong size, found %llu, expected %zu\n",
		       vma->size, obj->base.size);
		ok = false;
	}

	if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
		pr_err("VMA created with wrong type [%d]\n",
		       vma->ggtt_view.type);
		ok = false;
	}

	return ok;
}

static struct i915_vma *
checked_vma_instance(struct drm_i915_gem_object *obj,
		     struct i915_address_space *vm,
		     struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	bool ok = true;

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	/* Manual checks, will be reinforced by i915_vma_compare! */
	if (vma->vm != vm) {
		pr_err("VMA's vm [%p] does not match request [%p]\n",
		       vma->vm, vm);
		ok = false;
	}

	if (i915_is_ggtt(vm) != i915_vma_is_ggtt(vma)) {
		pr_err("VMA ggtt status [%d] does not match parent [%d]\n",
		       i915_vma_is_ggtt(vma), i915_is_ggtt(vm));
		ok = false;
	}

	if (i915_vma_compare(vma, vm, view)) {
		pr_err("i915_vma_compare failed with create parameters!\n");
		return ERR_PTR(-EINVAL);
	}

	if (i915_vma_compare(vma, vma->vm,
			     i915_vma_is_ggtt(vma) ? &vma->ggtt_view : NULL)) {
		pr_err("i915_vma_compare failed with itself\n");
		return ERR_PTR(-EINVAL);
	}

	if (!ok) {
		pr_err("i915_vma_compare failed to detect the difference!\n");
		return ERR_PTR(-EINVAL);
	}

	return vma;
}

static int create_vmas(struct drm_i915_private *i915,
		       struct list_head *objects,
		       struct list_head *contexts)
{
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	int pinned;

	list_for_each_entry(obj, objects, st_link) {
		for (pinned = 0; pinned <= 1; pinned++) {
			list_for_each_entry(ctx, contexts, link) {
				struct i915_address_space *vm = &ctx->ppgtt->vm;
				struct i915_vma *vma;
				int err;

				vma = checked_vma_instance(obj, vm, NULL);
				if (IS_ERR(vma))
					return PTR_ERR(vma);

				if (!assert_vma(vma, obj, ctx)) {
					pr_err("VMA lookup/create failed\n");
					return -EINVAL;
				}

				if (!pinned) {
					err = i915_vma_pin(vma, 0, 0, PIN_USER);
					if (err) {
						pr_err("Failed to pin VMA\n");
						return err;
					}
				} else {
					i915_vma_unpin(vma);
				}
			}
		}
	}

	return 0;
}

static int igt_vma_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct i915_gem_context *ctx, *cn;
	unsigned long num_obj, num_ctx;
	unsigned long no, nc;
	IGT_TIMEOUT(end_time);
	LIST_HEAD(contexts);
	LIST_HEAD(objects);
	int err = -ENOMEM;

	/* Exercise creating many VMA amongst many objects, checking the
	 * VMA creation and lookup routines.
	 */
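	/*
	 * The prime-number steps are assumed to be deliberate: each pass
	 * calls create_vmas() over the full population, so old VMA must be
	 * looked up again while new ones are created, at population sizes
	 * that share no common factors from one pass to the next.
	 */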

	no = 0;
	for_each_prime_number(num_obj, ULONG_MAX - 1) {
		for (; no < num_obj; no++) {
			obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
			if (IS_ERR(obj))
				goto out;

			list_add(&obj->st_link, &objects);
		}

		nc = 0;
		for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
			for (; nc < num_ctx; nc++) {
				ctx = mock_context(i915, "mock");
				if (!ctx)
					goto out;

				list_move(&ctx->link, &contexts);
			}

			err = create_vmas(i915, &objects, &contexts);
			if (err)
				goto out;

			if (igt_timeout(end_time,
					"%s timed out: after %lu objects in %lu contexts\n",
					__func__, no, nc))
				goto end;
		}

		list_for_each_entry_safe(ctx, cn, &contexts, link) {
			list_del_init(&ctx->link);
			mock_context_close(ctx);
		}
	}

end:
	/* Final pass to lookup all created contexts */
	err = create_vmas(i915, &objects, &contexts);
out:
	list_for_each_entry_safe(ctx, cn, &contexts, link) {
		list_del_init(&ctx->link);
		mock_context_close(ctx);
	}

	list_for_each_entry_safe(obj, on, &objects, st_link)
		i915_gem_object_put(obj);
	return err;
}

struct pin_mode {
	u64 size;
	u64 flags;
	bool (*assert)(const struct i915_vma *,
		       const struct pin_mode *mode,
		       int result);
	const char *string;
};

static bool assert_pin_valid(const struct i915_vma *vma,
			     const struct pin_mode *mode,
			     int result)
{
	if (result)
		return false;

	if (i915_vma_misplaced(vma, mode->size, 0, mode->flags))
		return false;

	return true;
}

__maybe_unused
static bool assert_pin_enospc(const struct i915_vma *vma,
			      const struct pin_mode *mode,
			      int result)
{
	return result == -ENOSPC;
}

__maybe_unused
static bool assert_pin_einval(const struct i915_vma *vma,
			      const struct pin_mode *mode,
			      int result)
{
	return result == -EINVAL;
}

static int igt_vma_pin1(void *arg)
{
	struct drm_i915_private *i915 = arg;
	const struct pin_mode modes[] = {
#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
#define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL)
#define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC)
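		/*
		 * Note on the encoding (an assumption about i915_vma_pin(),
		 * not spelled out here): pin offsets are page aligned, so
		 * the bits below the page size in the flags word are free
		 * for PIN_* bits, which is why the entries below can OR an
		 * address directly into fl.
		 */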
		VALID(0, PIN_GLOBAL),
		VALID(0, PIN_GLOBAL | PIN_MAPPABLE),

		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),

		VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
		INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
		VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
		INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),

		VALID(4096, PIN_GLOBAL),
		VALID(8192, PIN_GLOBAL),
		VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
		VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
		NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
		VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
		VALID(i915->ggtt.vm.total, PIN_GLOBAL),
		NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
		NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
		INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
		INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),

		VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),

#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
		/* Misusing BIAS is a programming error (it is not controllable
		 * from userspace) so when debugging is enabled, it explodes.
		 * However, the tests are still quite interesting for checking
		 * variable start, end and size.
		 */
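		/*
		 * (With CONFIG_DRM_I915_DEBUG_GEM enabled, GEM_BUG_ON() is
		 * presumed to promote the misuse into an immediate BUG
		 * instead of a recoverable -ENOSPC, hence compiling these
		 * entries out when debugging.)
		 */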
		NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
		NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
		NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
		NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
#endif
		{ },
#undef NOSPACE
#undef INVALID
#undef __INVALID
#undef VALID
	}, *m;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err = -EINVAL;

	/* Exercise all the weird and wonderful i915_vma_pin requests,
	 * focusing on error handling of boundary conditions.
	 */
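	/*
	 * A size of 0 in the table is taken to mean "use the natural size
	 * of the vma"; i915_vma_pin() is assumed to clamp the request up to
	 * vma->size, so those entries exercise only the offset handling.
	 */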

	GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma))
		goto out;

	for (m = modes; m->assert; m++) {
		err = i915_vma_pin(vma, m->size, 0, m->flags);
		if (!m->assert(vma, m, err)) {
			pr_err("%s to pin single page into GGTT with mode[%d:%s]: size=%llx flags=%llx, err=%d\n",
			       m->assert == assert_pin_valid ? "Failed" : "Unexpectedly succeeded",
			       (int)(m - modes), m->string, m->size, m->flags,
			       err);
			if (!err)
				i915_vma_unpin(vma);
			err = -EINVAL;
			goto out;
		}

		if (!err) {
			i915_vma_unpin(vma);
			err = i915_vma_unbind(vma);
			if (err) {
				pr_err("Failed to unbind single page from GGTT, err=%d\n", err);
				goto out;
			}
		}
	}

	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
}

static unsigned long rotated_index(const struct intel_rotation_info *r,
				   unsigned int n,
				   unsigned int x,
				   unsigned int y)
{
	return (r->plane[n].stride * (r->plane[n].height - y - 1) +
		r->plane[n].offset + x);
}
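
/*
 * A worked example of the arithmetic above (a sketch derived from the
 * formula itself, not from any hardware documentation): for a 2x2 plane
 * with stride = 2 and offset = 0, walking x then y as assert_rotated()
 * does visits the source pages
 *
 *	(x=0,y=0) -> 2*(2-0-1)+0 = 2,	(x=0,y=1) -> 2*(2-1-1)+0 = 0,
 *	(x=1,y=0) -> 2*(2-0-1)+1 = 3,	(x=1,y=1) -> 2*(2-1-1)+1 = 1
 *
 * i.e. each source column is read bottom-up, so the page grid
 *
 *	0 1			2 0
 *	2 3	becomes		3 1
 *
 * which is the source rotated by 90 degrees.
 */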

static struct scatterlist *
assert_rotated(struct drm_i915_gem_object *obj,
	       const struct intel_rotation_info *r, unsigned int n,
	       struct scatterlist *sg)
{
	unsigned int x, y;

	for (x = 0; x < r->plane[n].width; x++) {
		for (y = 0; y < r->plane[n].height; y++) {
			unsigned long src_idx;
			dma_addr_t src;

			if (!sg) {
				pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
				       n, x, y);
				return ERR_PTR(-EINVAL);
			}

			src_idx = rotated_index(r, n, x, y);
			src = i915_gem_object_get_dma_address(obj, src_idx);

			if (sg_dma_len(sg) != PAGE_SIZE) {
				pr_err("Invalid sg.length, found %d, expected %lu for rotated page (%d, %d) [src index %lu]\n",
				       sg_dma_len(sg), PAGE_SIZE,
				       x, y, src_idx);
				return ERR_PTR(-EINVAL);
			}

			if (sg_dma_address(sg) != src) {
				pr_err("Invalid address for rotated page (%d, %d) [src index %lu]\n",
				       x, y, src_idx);
				return ERR_PTR(-EINVAL);
			}

			sg = sg_next(sg);
		}
	}

	return sg;
}

static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
				 const struct intel_rotation_plane_info *b)
{
	return a->width * a->height + b->width * b->height;
}

static int igt_vma_rotate(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm = &i915->ggtt.vm;
	struct drm_i915_gem_object *obj;
	const struct intel_rotation_plane_info planes[] = {
		{ .width = 1, .height = 1, .stride = 1 },
		{ .width = 2, .height = 2, .stride = 2 },
		{ .width = 4, .height = 4, .stride = 4 },
		{ .width = 8, .height = 8, .stride = 8 },

		{ .width = 3, .height = 5, .stride = 3 },
		{ .width = 3, .height = 5, .stride = 4 },
		{ .width = 3, .height = 5, .stride = 5 },

		{ .width = 5, .height = 3, .stride = 5 },
		{ .width = 5, .height = 3, .stride = 7 },
		{ .width = 5, .height = 3, .stride = 9 },

		{ .width = 4, .height = 6, .stride = 6 },
		{ .width = 6, .height = 4, .stride = 6 },
		{ }
	}, *a, *b;
	const unsigned int max_pages = 64;
	int err = -ENOMEM;

	/* Create VMA for many different combinations of planes and check
	 * that the page layout within the rotated VMA matches our
	 * expectations.
	 */

	obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE);
	if (IS_ERR(obj))
		goto out;

	for (a = planes; a->width; a++) {
		for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
			struct i915_ggtt_view view;
			unsigned int n, max_offset;

			max_offset = max(a->stride * a->height,
					 b->stride * b->height);
			GEM_BUG_ON(max_offset > max_pages);
			max_offset = max_pages - max_offset;

			view.type = I915_GGTT_VIEW_ROTATED;
			view.rotated.plane[0] = *a;
			view.rotated.plane[1] = *b;

			for_each_prime_number_from(view.rotated.plane[0].offset, 0, max_offset) {
				for_each_prime_number_from(view.rotated.plane[1].offset, 0, max_offset) {
					struct scatterlist *sg;
					struct i915_vma *vma;

					vma = checked_vma_instance(obj, vm, &view);
					if (IS_ERR(vma)) {
						err = PTR_ERR(vma);
						goto out_object;
					}

					err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
					if (err) {
						pr_err("Failed to pin VMA, err=%d\n", err);
						goto out_object;
					}

					if (vma->size != rotated_size(a, b) * PAGE_SIZE) {
						pr_err("VMA is wrong size, expected %lu, found %llu\n",
						       PAGE_SIZE * rotated_size(a, b), vma->size);
						err = -EINVAL;
						goto out_object;
					}

					if (vma->pages->nents != rotated_size(a, b)) {
						pr_err("sg table is wrong size, expected %u, found %u nents\n",
						       rotated_size(a, b), vma->pages->nents);
						err = -EINVAL;
						goto out_object;
					}

					if (vma->node.size < vma->size) {
						pr_err("VMA binding too small, expected %llu, found %llu\n",
						       vma->size, vma->node.size);
						err = -EINVAL;
						goto out_object;
					}

					if (vma->pages == obj->mm.pages) {
						pr_err("VMA using unrotated object pages!\n");
						err = -EINVAL;
						goto out_object;
					}

					sg = vma->pages->sgl;
					for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
						sg = assert_rotated(obj, &view.rotated, n, sg);
						if (IS_ERR(sg)) {
							pr_err("Inconsistent VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", n,
							       view.rotated.plane[0].width,
							       view.rotated.plane[0].height,
							       view.rotated.plane[0].stride,
							       view.rotated.plane[0].offset,
							       view.rotated.plane[1].width,
							       view.rotated.plane[1].height,
							       view.rotated.plane[1].stride,
							       view.rotated.plane[1].offset);
							err = -EINVAL;
							goto out_object;
						}
					}

					i915_vma_unpin(vma);
				}
			}
		}
	}

out_object:
	i915_gem_object_put(obj);
out:
	return err;
}

static bool assert_partial(struct drm_i915_gem_object *obj,
			   struct i915_vma *vma,
			   unsigned long offset,
			   unsigned long size)
{
	struct sgt_iter sgt;
	dma_addr_t dma;

	for_each_sgt_dma(dma, sgt, vma->pages) {
		dma_addr_t src;

		if (!size) {
			pr_err("Partial scattergather list too long\n");
			return false;
		}

		src = i915_gem_object_get_dma_address(obj, offset);
		if (src != dma) {
			pr_err("DMA mismatch for partial page offset %lu\n",
			       offset);
			return false;
		}

		offset++;
		size--;
	}

	return true;
}

static bool assert_pin(struct i915_vma *vma,
		       struct i915_ggtt_view *view,
		       u64 size,
		       const char *name)
{
	bool ok = true;

	if (vma->size != size) {
		pr_err("(%s) VMA is wrong size, expected %llu, found %llu\n",
		       name, size, vma->size);
		ok = false;
	}

	if (vma->node.size < vma->size) {
		pr_err("(%s) VMA binding too small, expected %llu, found %llu\n",
		       name, vma->size, vma->node.size);
		ok = false;
	}

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		if (memcmp(&vma->ggtt_view, view, sizeof(*view))) {
			pr_err("(%s) VMA mismatch upon creation!\n",
			       name);
			ok = false;
		}

		if (vma->pages == vma->obj->mm.pages) {
			pr_err("(%s) VMA using original object pages!\n",
			       name);
			ok = false;
		}
	} else {
		if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
			pr_err("Not the normal ggtt view! Found %d\n",
			       vma->ggtt_view.type);
			ok = false;
		}

		if (vma->pages != vma->obj->mm.pages) {
			pr_err("VMA not using object pages!\n");
			ok = false;
		}
	}

	return ok;
}

static int igt_vma_partial(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_address_space *vm = &i915->ggtt.vm;
	const unsigned int npages = 1021; /* prime! */
	struct drm_i915_gem_object *obj;
	const struct phase {
		const char *name;
	} phases[] = {
		{ "create" },
		{ "lookup" },
		{ },
	}, *p;
	unsigned int sz, offset;
	struct i915_vma *vma;
	int err = -ENOMEM;

	/* Create lots of different VMA for the object and check that
	 * we are returned the same VMA when we later request the same range.
	 */
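	/*
	 * Both phases run the identical loop: the first ("create") should
	 * allocate a fresh vma for every distinct view, while the second
	 * ("lookup") should find each of those again without growing
	 * obj->vma_list; the count/nvma comparison below is what catches
	 * a stray duplicate.
	 */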

	obj = i915_gem_object_create_internal(i915, npages * PAGE_SIZE);
	if (IS_ERR(obj))
		goto out;

	for (p = phases; p->name; p++) { /* exercise both create/lookup */
		unsigned int count, nvma;

		nvma = 0;
		for_each_prime_number_from(sz, 1, npages) {
			for_each_prime_number_from(offset, 0, npages - sz) {
				struct i915_ggtt_view view;

				view.type = I915_GGTT_VIEW_PARTIAL;
				view.partial.offset = offset;
				view.partial.size = sz;

				if (sz == npages)
					view.type = I915_GGTT_VIEW_NORMAL;

				vma = checked_vma_instance(obj, vm, &view);
				if (IS_ERR(vma)) {
					err = PTR_ERR(vma);
					goto out_object;
				}

				err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
				if (err)
					goto out_object;

				if (!assert_pin(vma, &view, sz * PAGE_SIZE, p->name)) {
					pr_err("(%s) Inconsistent partial pinning for (offset=%d, size=%d)\n",
					       p->name, offset, sz);
					err = -EINVAL;
					goto out_object;
				}

				if (!assert_partial(obj, vma, offset, sz)) {
					pr_err("(%s) Inconsistent partial pages for (offset=%d, size=%d)\n",
					       p->name, offset, sz);
					err = -EINVAL;
					goto out_object;
				}

				i915_vma_unpin(vma);
				nvma++;
			}
		}

		count = 0;
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			count++;
		if (count != nvma) {
			pr_err("(%s) Not all partial VMA were recorded on the obj->vma_list: found %u, expected %u\n",
			       p->name, count, nvma);
			err = -EINVAL;
			goto out_object;
		}

		/* Check that we did create the whole object mapping */
		vma = checked_vma_instance(obj, vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_object;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
		if (err)
			goto out_object;

		if (!assert_pin(vma, NULL, obj->base.size, p->name)) {
			pr_err("(%s) inconsistent full pin\n", p->name);
			err = -EINVAL;
			goto out_object;
		}

		i915_vma_unpin(vma);

		count = 0;
		list_for_each_entry(vma, &obj->vma_list, obj_link)
			count++;
		if (count != nvma) {
			pr_err("(%s) allocated an extra full vma!\n", p->name);
			err = -EINVAL;
			goto out_object;
		}
	}

out_object:
	i915_gem_object_put(obj);
out:
	return err;
}

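/*
 * Entry point for the mock (no hardware) selftests; presumably invoked
 * via the i915.mock_selftests module parameter alongside the other
 * subtests registered in i915_mock_selftests.h.
 */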
int i915_vma_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_vma_create),
		SUBTEST(igt_vma_pin1),
		SUBTEST(igt_vma_rotate),
		SUBTEST(igt_vma_partial),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_put(&i915->drm);
	return err;
}