Lines matching: +full:cs +full:- +full:x
1 // SPDX-License-Identifier: MIT
3 * Copyright © 2017-2018 Intel Corporation
27 struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj; in hwsp_page()
30 return sg_page(obj->mm.pages->sgl); in hwsp_page()
37 return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES; in hwsp_cacheline()
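The hwsp_cacheline() lines above turn a timeline's HWSP byte offset into a slot identifier by masking off the page offset and dividing by the per-seqno stride. A minimal userspace sketch of that arithmetic; the page size and stride values here are assumptions for illustration only, not the driver's constants:

#include <stdio.h>

#define PAGE_SIZE_BYTES 4096u   /* assumed page size */
#define SEQNO_BYTES        8u   /* assumed per-seqno stride */

/* Combine the page's base address with the offset inside the page,
 * then scale down by the stride, mirroring the division above. */
static unsigned long slot_id(unsigned long page_base, unsigned long hwsp_offset)
{
        unsigned long offset_in_page = hwsp_offset & (PAGE_SIZE_BYTES - 1);

        return (page_base + offset_in_page) / SEQNO_BYTES;
}

int main(void)
{
        /* offset 0x38 into the page lands in slot 7 with an 8-byte stride */
        printf("slot %lu\n", slot_id(0, 0x38));
        return 0;
}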
47 err = i915_gem_object_lock(tl->hwsp_ggtt->obj, &ww); in selftest_tl_pin()
51 if (err == -EDEADLK) { in selftest_tl_pin()
79 tl = xchg(&state->history[idx], tl); in __mock_hwsp_record()
81 radix_tree_delete(&state->cachelines, hwsp_cacheline(tl)); in __mock_hwsp_record()
94 while (count--) { in __mock_hwsp_timeline()
98 tl = intel_timeline_create(state->gt); in __mock_hwsp_timeline()
109 err = radix_tree_insert(&state->cachelines, cacheline, tl); in __mock_hwsp_timeline()
111 if (err == -EEXIST) { in __mock_hwsp_timeline()
120 idx = state->count++ % state->max; in __mock_hwsp_timeline()
125 i915_prandom_shuffle(state->history, in __mock_hwsp_timeline()
126 sizeof(*state->history), in __mock_hwsp_timeline()
127 min(state->count, state->max), in __mock_hwsp_timeline()
128 &state->prng); in __mock_hwsp_timeline()
130 count = i915_prandom_u32_max_state(min(state->count, state->max), in __mock_hwsp_timeline()
131 &state->prng); in __mock_hwsp_timeline()
132 while (count--) { in __mock_hwsp_timeline()
133 idx = --state->count % state->max; in __mock_hwsp_timeline()
157 return -ENOMEM; in mock_hwsp_freelist()
162 state.gt = &i915->gt; in mock_hwsp_freelist()
173 err = -ENOMEM; in mock_hwsp_freelist()
177 for (p = phases; p->name; p++) { in mock_hwsp_freelist()
178 pr_debug("%s(%s)\n", __func__, p->name); in mock_hwsp_freelist()
180 err = __mock_hwsp_timeline(&state, na, p->flags); in mock_hwsp_freelist()
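The mock lines above cycle freshly created timelines through a fixed-size history window (state->history), release whatever the window evicts, and use a radix tree keyed by hwsp_cacheline() so that handing out a slot twice while its previous owner is still live shows up as -EEXIST. A compact sketch of the history-window bookkeeping; the window size and types are chosen only for illustration:

#define HISTORY_MAX 64                  /* assumed window size */

struct mock_state {
        void *history[HISTORY_MAX];     /* stand-ins for the timeline pointers */
        unsigned long count;
};

/* Record a new entry and return whichever old entry it evicts, so the
 * caller can drop its reference, mirroring the xchg() above. */
void *mock_record(struct mock_state *s, void *tl)
{
        unsigned int idx = s->count++ % HISTORY_MAX;
        void *old = s->history[idx];

        s->history[idx] = tl;
        return old;
}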
209 if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) { in __igt_sync()
211 name, p->name, ctx, p->seqno, yesno(p->expected)); in __igt_sync()
212 return -EINVAL; in __igt_sync()
215 if (p->set) { in __igt_sync()
216 ret = __intel_timeline_sync_set(tl, ctx, p->seqno); in __igt_sync()
236 { "INT_MAX-1", INT_MAX-1, true, false }, in igt_sync()
246 int ret = -ENODEV; in igt_sync()
249 for (p = pass; p->name; p++) { in igt_sync()
251 for (offset = -1; offset <= (order > 1); offset++) { in igt_sync()
264 for (offset = -1; offset <= (order > 1); offset++) { in igt_sync()
267 for (p = pass; p->name; p++) { in igt_sync()
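The igt_sync() table above probes the timeline sync filter with (context, seqno) pairs, checking that __intel_timeline_sync_is_later() agrees with the expected answer after selective __intel_timeline_sync_set() calls. A userspace sketch of the semantics under test, remembering only the newest seqno per context and comparing wrap-safely; the fixed-size table is an assumption for the example, not the driver's data structure:

#include <stdbool.h>
#include <stdint.h>

#define SYNC_SLOTS 16                    /* illustrative capacity only */

struct sync_filter {
        bool     valid[SYNC_SLOTS];
        uint64_t ctx[SYNC_SLOTS];
        uint32_t seqno[SYNC_SLOTS];
};

/* Wrap-safe "a is not older than b" for 32-bit seqnos. */
static bool seqno_is_later(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) >= 0;
}

bool sync_is_later(const struct sync_filter *f, uint64_t ctx, uint32_t seqno)
{
        unsigned int slot = ctx % SYNC_SLOTS;

        return f->valid[slot] && f->ctx[slot] == ctx &&
               seqno_is_later(f->seqno[slot], seqno);
}

void sync_set(struct sync_filter *f, uint64_t ctx, uint32_t seqno)
{
        unsigned int slot = ctx % SYNC_SLOTS;

        f->valid[slot] = true;
        f->ctx[slot] = ctx;
        f->seqno[slot] = seqno;
}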
297 * and the loop itself becomes a significant factor in the per-iteration in bench_sync()
306 u32 x; in bench_sync() local
309 WRITE_ONCE(x, prandom_u32_state(&prng)); in bench_sync()
338 while (end_time--) { in bench_sync()
344 return -EINVAL; in bench_sync()
365 pr_info("%s: %lu in-order insertions, %lluns/insert\n", in bench_sync()
371 while (end_time--) { in bench_sync()
375 return -EINVAL; in bench_sync()
379 pr_info("%s: %lu in-order lookups, %lluns/lookup\n", in bench_sync()
411 unsigned int mask = BIT(order) - 1; in bench_sync()
420 * implementation, try to identify its phase-changes in bench_sync()
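bench_sync() first times a loop that only generates pseudo-random values (the WRITE_ONCE(x, prandom_u32_state(...)) line above) so that loop and RNG overhead can be discounted from the later insertion and lookup rates. A standalone sketch of that baseline-then-measure style, using clock_gettime() purely for illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
        enum { COUNT = 1 << 20 };
        volatile uint32_t sink = 0;
        uint64_t start = now_ns();
        unsigned int i;

        /* Baseline: only the RNG and the loop, no data structure under test. */
        for (i = 0; i < COUNT; i++)
                sink ^= (uint32_t)rand();

        printf("baseline: %lu ns/iter\n",
               (unsigned long)((now_ns() - start) / COUNT));
        return 0;
}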
454 u32 *cs; in emit_ggtt_store_dw() local
456 cs = intel_ring_begin(rq, 4); in emit_ggtt_store_dw()
457 if (IS_ERR(cs)) in emit_ggtt_store_dw()
458 return PTR_ERR(cs); in emit_ggtt_store_dw()
460 if (GRAPHICS_VER(rq->engine->i915) >= 8) { in emit_ggtt_store_dw()
461 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in emit_ggtt_store_dw()
462 *cs++ = addr; in emit_ggtt_store_dw()
463 *cs++ = 0; in emit_ggtt_store_dw()
464 *cs++ = value; in emit_ggtt_store_dw()
465 } else if (GRAPHICS_VER(rq->engine->i915) >= 4) { in emit_ggtt_store_dw()
466 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in emit_ggtt_store_dw()
467 *cs++ = 0; in emit_ggtt_store_dw()
468 *cs++ = addr; in emit_ggtt_store_dw()
469 *cs++ = value; in emit_ggtt_store_dw()
471 *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; in emit_ggtt_store_dw()
472 *cs++ = addr; in emit_ggtt_store_dw()
473 *cs++ = value; in emit_ggtt_store_dw()
474 *cs++ = MI_NOOP; in emit_ggtt_store_dw()
477 intel_ring_advance(rq, cs); in emit_ggtt_store_dw()
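emit_ggtt_store_dw() reserves exactly four dwords with intel_ring_begin(rq, 4) and must write exactly that many before intel_ring_advance(); that is why the oldest-generation path above pads with MI_NOOP. A userspace sketch of the reserve/emit/advance invariant; the ring structure and opcode values are stand-ins, not the driver's:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct ring {
        uint32_t buf[64];
        unsigned int emit;       /* next free dword */
        unsigned int reserved;   /* dwords handed out by ring_begin() */
};

static uint32_t *ring_begin(struct ring *r, unsigned int ndwords)
{
        assert(r->emit + ndwords <= 64);
        r->reserved = ndwords;
        return &r->buf[r->emit];
}

static void ring_advance(struct ring *r, const uint32_t *cs)
{
        /* Emitting fewer or more dwords than reserved would corrupt the ring. */
        assert(cs - &r->buf[r->emit] == (ptrdiff_t)r->reserved);
        r->emit += r->reserved;
}

void emit_store_dw(struct ring *r, uint32_t addr, uint32_t value)
{
        uint32_t *cs = ring_begin(r, 4);

        *cs++ = 0x1;            /* stand-in for the store-dword opcode */
        *cs++ = addr;
        *cs++ = value;
        *cs++ = 0x0;            /* stand-in for MI_NOOP padding */

        ring_advance(r, cs);
}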
494 if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) { in checked_tl_write()
495 pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n", in checked_tl_write()
496 *tl->hwsp_seqno, tl->seqno); in checked_tl_write()
498 return ERR_PTR(-EINVAL); in checked_tl_write()
507 err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value); in checked_tl_write()
541 return -ENOMEM; in live_hwsp_engine()
576 if (igt_flush_test(gt->i915)) in live_hwsp_engine()
577 err = -EIO; in live_hwsp_engine()
582 if (!err && READ_ONCE(*tl->hwsp_seqno) != n) { in live_hwsp_engine()
583 GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n", in live_hwsp_engine()
584 n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno); in live_hwsp_engine()
586 err = -EINVAL; in live_hwsp_engine()
616 return -ENOMEM; in live_hwsp_alternate()
648 if (igt_flush_test(gt->i915)) in live_hwsp_alternate()
649 err = -EIO; in live_hwsp_alternate()
654 if (!err && READ_ONCE(*tl->hwsp_seqno) != n) { in live_hwsp_alternate()
655 GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x, found 0x%x\n", in live_hwsp_alternate()
656 n, tl->fence_context, tl->hwsp_offset, *tl->hwsp_seqno); in live_hwsp_alternate()
658 err = -EINVAL; in live_hwsp_alternate()
685 if (!tl->has_initial_breadcrumb) in live_hwsp_wrap()
706 tl->seqno = -4u; in live_hwsp_wrap()
708 mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); in live_hwsp_wrap()
710 mutex_unlock(&tl->mutex); in live_hwsp_wrap()
715 pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n", in live_hwsp_wrap()
716 seqno[0], tl->hwsp_offset); in live_hwsp_wrap()
718 err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]); in live_hwsp_wrap()
723 hwsp_seqno[0] = tl->hwsp_seqno; in live_hwsp_wrap()
725 mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); in live_hwsp_wrap()
727 mutex_unlock(&tl->mutex); in live_hwsp_wrap()
732 pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n", in live_hwsp_wrap()
733 seqno[1], tl->hwsp_offset); in live_hwsp_wrap()
735 err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]); in live_hwsp_wrap()
740 hwsp_seqno[1] = tl->hwsp_seqno; in live_hwsp_wrap()
750 err = -EIO; in live_hwsp_wrap()
756 pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n", in live_hwsp_wrap()
759 err = -EINVAL; in live_hwsp_wrap()
767 if (igt_flush_test(gt->i915)) in live_hwsp_wrap()
768 err = -EIO; in live_hwsp_wrap()
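live_hwsp_wrap() seeds tl->seqno with -4u so that the two breadcrumb writes straddle a 32-bit wrap, then checks both stored values. The wrap is harmless because seqnos are ordered with a signed-difference comparison (the i915_seqno_passed() idiom); a small standalone demonstration:

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe ordering of 32-bit seqnos: "a has passed b". */
static int seqno_passed(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) >= 0;
}

int main(void)
{
        uint32_t before = -4u;          /* 0xfffffffc, just before the wrap */
        uint32_t after  = before + 8;   /* 0x00000004, just after the wrap */

        printf("after passed before: %d\n", seqno_passed(after, before)); /* 1 */
        printf("before passed after: %d\n", seqno_passed(before, after)); /* 0 */
        return 0;
}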
780 const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0)); in emit_read_hwsp()
781 u32 *cs; in emit_read_hwsp() local
783 cs = intel_ring_begin(rq, 12); in emit_read_hwsp()
784 if (IS_ERR(cs)) in emit_read_hwsp()
785 return PTR_ERR(cs); in emit_read_hwsp()
787 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in emit_read_hwsp()
788 *cs++ = *addr; in emit_read_hwsp()
789 *cs++ = 0; in emit_read_hwsp()
790 *cs++ = seqno; in emit_read_hwsp()
793 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_USE_GGTT; in emit_read_hwsp()
794 *cs++ = gpr; in emit_read_hwsp()
795 *cs++ = hwsp; in emit_read_hwsp()
796 *cs++ = 0; in emit_read_hwsp()
798 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; in emit_read_hwsp()
799 *cs++ = gpr; in emit_read_hwsp()
800 *cs++ = *addr; in emit_read_hwsp()
801 *cs++ = 0; in emit_read_hwsp()
804 intel_ring_advance(rq, cs); in emit_read_hwsp()
831 obj = i915_gem_object_create_internal(gt->i915, SZ_2M); in setup_watcher()
835 w->map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); in setup_watcher()
836 if (IS_ERR(w->map)) { in setup_watcher()
838 return PTR_ERR(w->map); in setup_watcher()
847 w->vma = vma; in setup_watcher()
848 w->addr = i915_ggtt_offset(vma); in setup_watcher()
854 /* some light mutex juggling required; think co-routines */ in switch_tl_lock()
857 lockdep_unpin_lock(&from->context->timeline->mutex, from->cookie); in switch_tl_lock()
858 mutex_unlock(&from->context->timeline->mutex); in switch_tl_lock()
862 mutex_lock(&to->context->timeline->mutex); in switch_tl_lock()
863 to->cookie = lockdep_pin_lock(&to->context->timeline->mutex); in switch_tl_lock()
877 ce->ring_size = ringsz; in create_watcher()
878 w->rq = intel_context_create_request(ce); in create_watcher()
880 if (IS_ERR(w->rq)) in create_watcher()
881 return PTR_ERR(w->rq); in create_watcher()
883 w->addr = i915_ggtt_offset(w->vma); in create_watcher()
885 switch_tl_lock(w->rq, NULL); in create_watcher()
893 struct i915_request *rq = fetch_and_zero(&w->rq); in check_watcher()
897 GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size); in check_watcher()
904 err = -ETIME; in check_watcher()
910 end = (w->addr - i915_ggtt_offset(w->vma)) / sizeof(*w->map); in check_watcher()
912 if (!op(w->map[offset + 1], w->map[offset])) { in check_watcher()
913 pr_err("Watcher '%s' found HWSP value %x for seqno %x\n", in check_watcher()
914 name, w->map[offset + 1], w->map[offset]); in check_watcher()
915 err = -EINVAL; in check_watcher()
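check_watcher() walks the watcher buffer as (seqno, HWSP value) dword pairs written by emit_read_hwsp() and applies a caller-supplied ordering check to every pair. A small sketch of that pair walk, with illustrative names and predicate rather than the driver's:

#include <stdint.h>
#include <stdio.h>

typedef int (*cmp_fn)(uint32_t hwsp, uint32_t seqno);

/* Example predicate: the recorded HWSP value had not yet reached the seqno. */
int hwsp_before(uint32_t hwsp, uint32_t seqno)
{
        return (int32_t)(hwsp - seqno) < 0;
}

int check_pairs(const uint32_t *map, unsigned int end, cmp_fn op)
{
        unsigned int offset;

        for (offset = 0; offset < end; offset += 2) {
                if (!op(map[offset + 1], map[offset])) {
                        fprintf(stderr, "bad pair: HWSP %x for seqno %x\n",
                                map[offset + 1], map[offset]);
                        return -1;
                }
        }
        return 0;
}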
928 if (w->rq) { in cleanup_watcher()
929 switch_tl_lock(NULL, w->rq); in cleanup_watcher()
931 i915_request_add(w->rq); in cleanup_watcher()
934 i915_vma_unpin_and_release(&w->vma, I915_VMA_RELEASE_MAP); in cleanup_watcher()
941 mutex_lock(&tl->mutex); in retire_requests()
942 list_for_each_entry_safe(rq, rn, &tl->requests, link) in retire_requests()
945 mutex_unlock(&tl->mutex); in retire_requests()
947 return !i915_active_fence_isset(&tl->last_request); in retire_requests()
952 struct intel_context *ce = rq->context; in wrap_timeline()
953 struct intel_timeline *tl = ce->timeline; in wrap_timeline()
954 u32 seqno = rq->fence.seqno; in wrap_timeline()
956 while (tl->seqno >= seqno) { /* Cause a wrap */ in wrap_timeline()
995 if (GRAPHICS_VER(gt->i915) < 8) /* CS convenience [SRM/LRM] */ in live_hwsp_read()
1002 if (!tl->has_initial_breadcrumb) in live_hwsp_read()
1028 err = -ENOMEM; in live_hwsp_read()
1042 ce->timeline = intel_timeline_get(tl); in live_hwsp_read()
1055 tl->seqno = -12u + 2 * (count & 3); in live_hwsp_read()
1066 err = i915_sw_fence_await_dma_fence(&rq->submit, in live_hwsp_read()
1067 &watcher[0].rq->fence, 0, in live_hwsp_read()
1080 rq->fence.seqno, hwsp, in live_hwsp_read()
1094 rq->fence.seqno, hwsp, in live_hwsp_read()
1115 err = i915_sw_fence_await_dma_fence(&watcher[1].rq->submit, in live_hwsp_read()
1116 &rq->fence, 0, in live_hwsp_read()
1136 err = -ETIME; in live_hwsp_read()
1144 if (8 * watcher[1].rq->ring->emit > in live_hwsp_read()
1145 3 * watcher[1].rq->ring->size) in live_hwsp_read()
1149 count < (PAGE_SIZE / TIMELINE_SEQNO_BYTES - 1) / 2); in live_hwsp_read()
1151 pr_info("%s: simulated %lu wraps\n", engine->name, count); in live_hwsp_read()
1161 if (igt_flush_test(gt->i915)) in live_hwsp_read()
1162 err = -EIO; in live_hwsp_read()
1182 struct intel_context *ce = engine->kernel_context; in live_hwsp_rollover_kernel()
1183 struct intel_timeline *tl = ce->timeline; in live_hwsp_rollover_kernel()
1189 err = -EIO; in live_hwsp_rollover_kernel()
1193 GEM_BUG_ON(i915_active_fence_isset(&tl->last_request)); in live_hwsp_rollover_kernel()
1194 tl->seqno = -2u; in live_hwsp_rollover_kernel()
1195 WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); in live_hwsp_rollover_kernel()
1207 engine->name, in live_hwsp_rollover_kernel()
1208 lower_32_bits(this->fence.seqno)); in live_hwsp_rollover_kernel()
1210 GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl); in live_hwsp_rollover_kernel()
1217 GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno); in live_hwsp_rollover_kernel()
1221 err = -EIO; in live_hwsp_rollover_kernel()
1227 pr_err("Pre-wrap request not completed!\n"); in live_hwsp_rollover_kernel()
1228 err = -EINVAL; in live_hwsp_rollover_kernel()
1241 if (igt_flush_test(gt->i915)) in live_hwsp_rollover_kernel()
1242 err = -EIO; in live_hwsp_rollover_kernel()
1273 tl = ce->timeline; in live_hwsp_rollover_user()
1274 if (!tl->has_initial_breadcrumb) in live_hwsp_rollover_user()
1281 tl->seqno = -4u; in live_hwsp_rollover_user()
1282 WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); in live_hwsp_rollover_user()
1294 engine->name, in live_hwsp_rollover_user()
1295 lower_32_bits(this->fence.seqno)); in live_hwsp_rollover_user()
1297 GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl); in live_hwsp_rollover_user()
1304 GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno); in live_hwsp_rollover_user()
1308 err = -EIO; in live_hwsp_rollover_user()
1314 pr_err("Pre-wrap request not completed!\n"); in live_hwsp_rollover_user()
1315 err = -EINVAL; in live_hwsp_rollover_user()
1329 if (igt_flush_test(gt->i915)) in live_hwsp_rollover_user()
1330 err = -EIO; in live_hwsp_rollover_user()
1379 err = -EIO; in live_hwsp_recycle()
1383 if (READ_ONCE(*tl->hwsp_seqno) != count) { in live_hwsp_recycle()
1384 GEM_TRACE_ERR("Invalid seqno:%lu stored in timeline %llu @ %x found 0x%x\n", in live_hwsp_recycle()
1385 count, tl->fence_context, in live_hwsp_recycle()
1386 tl->hwsp_offset, *tl->hwsp_seqno); in live_hwsp_recycle()
1388 err = -EINVAL; in live_hwsp_recycle()
1419 if (intel_gt_is_wedged(&i915->gt)) in intel_timeline_live_selftests()
1422 return intel_gt_live_subtests(tests, &i915->gt); in intel_timeline_live_selftests()