Lines matching refs:rq — references to the struct i915_request pointer rq

55 static bool is_active(struct i915_request *rq)  in is_active()  argument
57 if (i915_request_is_active(rq)) in is_active()
60 if (i915_request_on_hold(rq)) in is_active()
63 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) in is_active()
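The three checks above appear to be the entire body of is_active(); a minimal reconstruction under that assumption (the return statements do not reference rq and so are not listed, but are implied by the fragments):

        static bool is_active(struct i915_request *rq)
        {
                /* Already submitted to the HW execlists? */
                if (i915_request_is_active(rq))
                        return true;

                /* Parked by execlists_hold() still counts as submitted. */
                if (i915_request_on_hold(rq))
                        return true;

                /* Otherwise, has the request actually begun executing? */
                if (i915_request_has_initial_breadcrumb(rq) &&
                    i915_request_started(rq))
                        return true;

                return false;
        }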
70 struct i915_request *rq, in wait_for_submit() argument
77 if (i915_request_completed(rq)) /* that was quick! */ in wait_for_submit()
82 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) in wait_for_submit()
93 struct i915_request *rq, in wait_for_reset() argument
105 if (i915_request_completed(rq)) in wait_for_reset()
108 if (READ_ONCE(rq->fence.error)) in wait_for_reset()
114 if (rq->fence.error != -EIO) { in wait_for_reset()
117 rq->fence.context, in wait_for_reset()
118 rq->fence.seqno); in wait_for_reset()
123 if (i915_request_wait(rq, 0, in wait_for_reset()
127 rq->fence.context, in wait_for_reset()
128 rq->fence.seqno); in wait_for_reset()
151 struct i915_request *rq; in live_sanitycheck() local
159 rq = igt_spinner_create_request(&spin, ce, MI_NOOP); in live_sanitycheck()
160 if (IS_ERR(rq)) { in live_sanitycheck()
161 err = PTR_ERR(rq); in live_sanitycheck()
165 i915_request_add(rq); in live_sanitycheck()
166 if (!igt_wait_for_spinner(&spin, rq)) { in live_sanitycheck()
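Taken together, the live_sanitycheck matches sketch the basic spinner lifecycle used throughout these selftests. A minimal, hedged reconstruction of that pattern (the spinner setup via igt_spinner_init(), the context ce, the error-path labels and the final igt_spinner_end() are assumed from the surrounding infrastructure rather than shown in the matches):

        struct i915_request *rq;
        int err = 0;

        /* Build a request whose batch spins until explicitly ended. */
        rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* Submit it and confirm the spinner really started on the engine. */
        i915_request_add(rq);
        if (!igt_wait_for_spinner(&spin, rq))
                err = -EIO;

        /* Release the batch so the request can retire and the GT can idle. */
        igt_spinner_end(&spin);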
208 struct i915_request *rq[2]; in live_unlite_restore() local
254 rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); in live_unlite_restore()
255 if (IS_ERR(rq[0])) { in live_unlite_restore()
256 err = PTR_ERR(rq[0]); in live_unlite_restore()
260 i915_request_get(rq[0]); in live_unlite_restore()
261 i915_request_add(rq[0]); in live_unlite_restore()
262 GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit); in live_unlite_restore()
264 if (!igt_wait_for_spinner(&spin, rq[0])) { in live_unlite_restore()
265 i915_request_put(rq[0]); in live_unlite_restore()
269 rq[1] = i915_request_create(ce[1]); in live_unlite_restore()
270 if (IS_ERR(rq[1])) { in live_unlite_restore()
271 err = PTR_ERR(rq[1]); in live_unlite_restore()
272 i915_request_put(rq[0]); in live_unlite_restore()
287 i915_request_await_dma_fence(rq[1], &rq[0]->fence); in live_unlite_restore()
290 i915_request_get(rq[1]); in live_unlite_restore()
291 i915_request_add(rq[1]); in live_unlite_restore()
292 GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); in live_unlite_restore()
293 i915_request_put(rq[0]); in live_unlite_restore()
301 engine->schedule(rq[1], &attr); in live_unlite_restore()
305 rq[0] = i915_request_create(ce[0]); in live_unlite_restore()
306 if (IS_ERR(rq[0])) { in live_unlite_restore()
307 err = PTR_ERR(rq[0]); in live_unlite_restore()
308 i915_request_put(rq[1]); in live_unlite_restore()
312 i915_request_await_dma_fence(rq[0], &rq[1]->fence); in live_unlite_restore()
313 i915_request_get(rq[0]); in live_unlite_restore()
314 i915_request_add(rq[0]); in live_unlite_restore()
315 GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); in live_unlite_restore()
316 i915_request_put(rq[1]); in live_unlite_restore()
317 i915_request_put(rq[0]); in live_unlite_restore()
370 struct i915_request *rq; in live_unlite_ring() local
409 rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); in live_unlite_ring()
410 if (IS_ERR(rq)) { in live_unlite_ring()
411 err = PTR_ERR(rq); in live_unlite_ring()
415 i915_request_get(rq); in live_unlite_ring()
416 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in live_unlite_ring()
417 i915_request_add(rq); in live_unlite_ring()
419 if (!igt_wait_for_spinner(&spin, rq)) { in live_unlite_ring()
421 i915_request_put(rq); in live_unlite_ring()
429 rq->wa_tail, in live_unlite_ring()
436 i915_request_put(rq); in live_unlite_ring()
450 rq->tail); in live_unlite_ring()
452 rq->tail, in live_unlite_ring()
454 i915_request_put(rq); in live_unlite_ring()
457 rq = intel_context_create_request(ce[1]); in live_unlite_ring()
458 if (IS_ERR(rq)) { in live_unlite_ring()
459 err = PTR_ERR(rq); in live_unlite_ring()
463 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in live_unlite_ring()
464 i915_request_get(rq); in live_unlite_ring()
465 i915_request_add(rq); in live_unlite_ring()
467 err = wait_for_submit(engine, rq, HZ / 2); in live_unlite_ring()
468 i915_request_put(rq); in live_unlite_ring()
519 struct i915_request *rq; in live_pin_rewind() local
559 rq = intel_context_create_request(ce); in live_pin_rewind()
562 if (IS_ERR(rq)) { in live_pin_rewind()
563 err = PTR_ERR(rq); in live_pin_rewind()
566 GEM_BUG_ON(!rq->head); in live_pin_rewind()
567 i915_request_add(rq); in live_pin_rewind()
601 struct i915_request *rq; in live_hold_reset() local
611 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in live_hold_reset()
612 if (IS_ERR(rq)) { in live_hold_reset()
613 err = PTR_ERR(rq); in live_hold_reset()
616 i915_request_add(rq); in live_hold_reset()
618 if (!igt_wait_for_spinner(&spin, rq)) { in live_hold_reset()
635 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); in live_hold_reset()
637 i915_request_get(rq); in live_hold_reset()
638 execlists_hold(engine, rq); in live_hold_reset()
639 GEM_BUG_ON(!i915_request_on_hold(rq)); in live_hold_reset()
642 GEM_BUG_ON(rq->fence.error != -EIO); in live_hold_reset()
649 if (!i915_request_wait(rq, 0, HZ / 5)) { in live_hold_reset()
652 i915_request_put(rq); in live_hold_reset()
656 GEM_BUG_ON(!i915_request_on_hold(rq)); in live_hold_reset()
659 execlists_unhold(engine, rq); in live_hold_reset()
660 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in live_hold_reset()
666 i915_request_put(rq); in live_hold_reset()
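The live_hold_reset fragments trace the execlists_hold()/execlists_unhold() flow around an engine reset. A rough reconstruction of that sequence, assuming the spinner submission shown above and eliding the reset plumbing (tasklet handling and the reset call itself) and the intermediate "must not complete while held" wait:

        /* Take a reference and pull the running request off the HW. */
        i915_request_get(rq);
        execlists_hold(engine, rq);
        GEM_BUG_ON(!i915_request_on_hold(rq));

        /* The engine reset marks the guilty, still-held request with -EIO. */
        GEM_BUG_ON(rq->fence.error != -EIO);
        GEM_BUG_ON(!i915_request_on_hold(rq));

        /* Only releasing the hold lets it be resubmitted and retire. */
        execlists_unhold(engine, rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -ETIME;

        i915_request_put(rq);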
726 struct i915_request *rq; in live_error_interrupt() local
734 rq = intel_context_create_request(ce); in live_error_interrupt()
736 if (IS_ERR(rq)) { in live_error_interrupt()
737 err = PTR_ERR(rq); in live_error_interrupt()
741 if (rq->engine->emit_init_breadcrumb) { in live_error_interrupt()
742 err = rq->engine->emit_init_breadcrumb(rq); in live_error_interrupt()
744 i915_request_add(rq); in live_error_interrupt()
749 cs = intel_ring_begin(rq, 2); in live_error_interrupt()
751 i915_request_add(rq); in live_error_interrupt()
764 client[i] = i915_request_get(rq); in live_error_interrupt()
765 i915_request_add(rq); in live_error_interrupt()
826 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) in emit_semaphore_chain() argument
830 cs = intel_ring_begin(rq, 10); in emit_semaphore_chain()
858 intel_ring_advance(rq, cs); in emit_semaphore_chain()
866 struct i915_request *rq; in semaphore_queue() local
873 rq = intel_context_create_request(ce); in semaphore_queue()
874 if (IS_ERR(rq)) in semaphore_queue()
878 if (rq->engine->emit_init_breadcrumb) in semaphore_queue()
879 err = rq->engine->emit_init_breadcrumb(rq); in semaphore_queue()
881 err = emit_semaphore_chain(rq, vma, idx); in semaphore_queue()
883 i915_request_get(rq); in semaphore_queue()
884 i915_request_add(rq); in semaphore_queue()
886 rq = ERR_PTR(err); in semaphore_queue()
890 return rq; in semaphore_queue()
901 struct i915_request *rq; in release_queue() local
904 rq = intel_engine_create_kernel_request(engine); in release_queue()
905 if (IS_ERR(rq)) in release_queue()
906 return PTR_ERR(rq); in release_queue()
908 cs = intel_ring_begin(rq, 4); in release_queue()
910 i915_request_add(rq); in release_queue()
919 intel_ring_advance(rq, cs); in release_queue()
921 i915_request_get(rq); in release_queue()
922 i915_request_add(rq); in release_queue()
925 engine->schedule(rq, &attr); in release_queue()
928 i915_request_put(rq); in release_queue()
949 struct i915_request *rq; in slice_semaphore_queue() local
951 rq = semaphore_queue(engine, vma, n++); in slice_semaphore_queue()
952 if (IS_ERR(rq)) { in slice_semaphore_queue()
953 err = PTR_ERR(rq); in slice_semaphore_queue()
957 i915_request_put(rq); in slice_semaphore_queue()
1059 struct i915_request *rq; in create_rewinder() local
1063 rq = intel_context_create_request(ce); in create_rewinder()
1064 if (IS_ERR(rq)) in create_rewinder()
1065 return rq; in create_rewinder()
1068 err = i915_request_await_dma_fence(rq, &wait->fence); in create_rewinder()
1073 cs = intel_ring_begin(rq, 14); in create_rewinder()
1091 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); in create_rewinder()
1100 intel_ring_advance(rq, cs); in create_rewinder()
1102 rq->sched.attr.priority = I915_PRIORITY_MASK; in create_rewinder()
1105 i915_request_get(rq); in create_rewinder()
1106 i915_request_add(rq); in create_rewinder()
1108 i915_request_put(rq); in create_rewinder()
1112 return rq; in create_rewinder()
1134 struct i915_request *rq[3] = {}; in live_timeslice_rewind() local
1165 rq[A1] = create_rewinder(ce, NULL, slot, X); in live_timeslice_rewind()
1166 if (IS_ERR(rq[A1])) { in live_timeslice_rewind()
1171 rq[A2] = create_rewinder(ce, NULL, slot, Y); in live_timeslice_rewind()
1173 if (IS_ERR(rq[A2])) in live_timeslice_rewind()
1176 err = wait_for_submit(engine, rq[A2], HZ / 2); in live_timeslice_rewind()
1189 rq[B1] = create_rewinder(ce, rq[A1], slot, Z); in live_timeslice_rewind()
1191 if (IS_ERR(rq[2])) in live_timeslice_rewind()
1194 err = wait_for_submit(engine, rq[B1], HZ / 2); in live_timeslice_rewind()
1203 if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */ in live_timeslice_rewind()
1210 GEM_BUG_ON(!i915_request_is_active(rq[A1])); in live_timeslice_rewind()
1211 GEM_BUG_ON(!i915_request_is_active(rq[B1])); in live_timeslice_rewind()
1212 GEM_BUG_ON(i915_request_is_active(rq[A2])); in live_timeslice_rewind()
1251 i915_request_put(rq[i]); in live_timeslice_rewind()
1263 struct i915_request *rq; in nop_request() local
1265 rq = intel_engine_create_kernel_request(engine); in nop_request()
1266 if (IS_ERR(rq)) in nop_request()
1267 return rq; in nop_request()
1269 i915_request_get(rq); in nop_request()
1270 i915_request_add(rq); in nop_request()
1272 return rq; in nop_request()
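The nop_request matches cover what looks like the whole helper; reconstructed under that assumption, it simply submits an empty kernel-context request and hands the caller a reference to use as a fence:

        static struct i915_request *nop_request(struct intel_engine_cs *engine)
        {
                struct i915_request *rq;

                /* An empty request on the engine's kernel context. */
                rq = intel_engine_create_kernel_request(engine);
                if (IS_ERR(rq))
                        return rq;

                /* Keep a reference for the caller across submission. */
                i915_request_get(rq);
                i915_request_add(rq);

                return rq;
        }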
1336 struct i915_request *rq, *nop; in live_timeslice_queue() local
1345 rq = semaphore_queue(engine, vma, 0); in live_timeslice_queue()
1346 if (IS_ERR(rq)) { in live_timeslice_queue()
1347 err = PTR_ERR(rq); in live_timeslice_queue()
1350 engine->schedule(rq, &attr); in live_timeslice_queue()
1351 err = wait_for_submit(engine, rq, HZ / 2); in live_timeslice_queue()
1372 GEM_BUG_ON(i915_request_completed(rq)); in live_timeslice_queue()
1373 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); in live_timeslice_queue()
1376 err = release_queue(engine, vma, 1, effective_prio(rq)); in live_timeslice_queue()
1387 if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) { in live_timeslice_queue()
1400 i915_request_put(rq); in live_timeslice_queue()
1436 struct i915_request *rq; in live_timeslice_nopreempt() local
1453 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in live_timeslice_nopreempt()
1455 if (IS_ERR(rq)) { in live_timeslice_nopreempt()
1456 err = PTR_ERR(rq); in live_timeslice_nopreempt()
1460 i915_request_get(rq); in live_timeslice_nopreempt()
1461 i915_request_add(rq); in live_timeslice_nopreempt()
1463 if (!igt_wait_for_spinner(&spin, rq)) { in live_timeslice_nopreempt()
1464 i915_request_put(rq); in live_timeslice_nopreempt()
1469 set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); in live_timeslice_nopreempt()
1470 i915_request_put(rq); in live_timeslice_nopreempt()
1480 rq = intel_context_create_request(ce); in live_timeslice_nopreempt()
1482 if (IS_ERR(rq)) { in live_timeslice_nopreempt()
1483 err = PTR_ERR(rq); in live_timeslice_nopreempt()
1487 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in live_timeslice_nopreempt()
1488 i915_request_get(rq); in live_timeslice_nopreempt()
1489 i915_request_add(rq); in live_timeslice_nopreempt()
1495 if (wait_for_submit(engine, rq, HZ / 2)) { in live_timeslice_nopreempt()
1496 i915_request_put(rq); in live_timeslice_nopreempt()
1506 if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) { in live_timeslice_nopreempt()
1511 i915_request_put(rq); in live_timeslice_nopreempt()
1725 struct i915_request *rq; in spinner_create_request() local
1731 rq = igt_spinner_create_request(spin, ce, arb); in spinner_create_request()
1733 return rq; in spinner_create_request()
1771 struct i915_request *rq; in live_preempt() local
1781 rq = spinner_create_request(&spin_lo, ctx_lo, engine, in live_preempt()
1783 if (IS_ERR(rq)) { in live_preempt()
1784 err = PTR_ERR(rq); in live_preempt()
1788 i915_request_add(rq); in live_preempt()
1789 if (!igt_wait_for_spinner(&spin_lo, rq)) { in live_preempt()
1797 rq = spinner_create_request(&spin_hi, ctx_hi, engine, in live_preempt()
1799 if (IS_ERR(rq)) { in live_preempt()
1801 err = PTR_ERR(rq); in live_preempt()
1805 i915_request_add(rq); in live_preempt()
1806 if (!igt_wait_for_spinner(&spin_hi, rq)) { in live_preempt()
1867 struct i915_request *rq; in live_late_preempt() local
1877 rq = spinner_create_request(&spin_lo, ctx_lo, engine, in live_late_preempt()
1879 if (IS_ERR(rq)) { in live_late_preempt()
1880 err = PTR_ERR(rq); in live_late_preempt()
1884 i915_request_add(rq); in live_late_preempt()
1885 if (!igt_wait_for_spinner(&spin_lo, rq)) { in live_late_preempt()
1890 rq = spinner_create_request(&spin_hi, ctx_hi, engine, in live_late_preempt()
1892 if (IS_ERR(rq)) { in live_late_preempt()
1894 err = PTR_ERR(rq); in live_late_preempt()
1898 i915_request_add(rq); in live_late_preempt()
1899 if (igt_wait_for_spinner(&spin_hi, rq)) { in live_late_preempt()
1905 engine->schedule(rq, &attr); in live_late_preempt()
1907 if (!igt_wait_for_spinner(&spin_hi, rq)) { in live_late_preempt()
2076 struct i915_request *rq; in __cancel_active0() local
2086 rq = spinner_create_request(&arg->a.spin, in __cancel_active0()
2089 if (IS_ERR(rq)) in __cancel_active0()
2090 return PTR_ERR(rq); in __cancel_active0()
2092 clear_bit(CONTEXT_BANNED, &rq->context->flags); in __cancel_active0()
2093 i915_request_get(rq); in __cancel_active0()
2094 i915_request_add(rq); in __cancel_active0()
2095 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { in __cancel_active0()
2100 intel_context_set_banned(rq->context); in __cancel_active0()
2105 err = wait_for_reset(arg->engine, rq, HZ / 2); in __cancel_active0()
2112 i915_request_put(rq); in __cancel_active0()
2120 struct i915_request *rq[2] = {}; in __cancel_active1() local
2130 rq[0] = spinner_create_request(&arg->a.spin, in __cancel_active1()
2133 if (IS_ERR(rq[0])) in __cancel_active1()
2134 return PTR_ERR(rq[0]); in __cancel_active1()
2136 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); in __cancel_active1()
2137 i915_request_get(rq[0]); in __cancel_active1()
2138 i915_request_add(rq[0]); in __cancel_active1()
2139 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { in __cancel_active1()
2144 rq[1] = spinner_create_request(&arg->b.spin, in __cancel_active1()
2147 if (IS_ERR(rq[1])) { in __cancel_active1()
2148 err = PTR_ERR(rq[1]); in __cancel_active1()
2152 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); in __cancel_active1()
2153 i915_request_get(rq[1]); in __cancel_active1()
2154 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); in __cancel_active1()
2155 i915_request_add(rq[1]); in __cancel_active1()
2159 intel_context_set_banned(rq[1]->context); in __cancel_active1()
2165 err = wait_for_reset(arg->engine, rq[1], HZ / 2); in __cancel_active1()
2169 if (rq[0]->fence.error != 0) { in __cancel_active1()
2175 if (rq[1]->fence.error != -EIO) { in __cancel_active1()
2182 i915_request_put(rq[1]); in __cancel_active1()
2183 i915_request_put(rq[0]); in __cancel_active1()
2191 struct i915_request *rq[3] = {}; in __cancel_queued() local
2201 rq[0] = spinner_create_request(&arg->a.spin, in __cancel_queued()
2204 if (IS_ERR(rq[0])) in __cancel_queued()
2205 return PTR_ERR(rq[0]); in __cancel_queued()
2207 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); in __cancel_queued()
2208 i915_request_get(rq[0]); in __cancel_queued()
2209 i915_request_add(rq[0]); in __cancel_queued()
2210 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { in __cancel_queued()
2215 rq[1] = igt_request_alloc(arg->b.ctx, arg->engine); in __cancel_queued()
2216 if (IS_ERR(rq[1])) { in __cancel_queued()
2217 err = PTR_ERR(rq[1]); in __cancel_queued()
2221 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); in __cancel_queued()
2222 i915_request_get(rq[1]); in __cancel_queued()
2223 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); in __cancel_queued()
2224 i915_request_add(rq[1]); in __cancel_queued()
2228 rq[2] = spinner_create_request(&arg->b.spin, in __cancel_queued()
2231 if (IS_ERR(rq[2])) { in __cancel_queued()
2232 err = PTR_ERR(rq[2]); in __cancel_queued()
2236 i915_request_get(rq[2]); in __cancel_queued()
2237 err = i915_request_await_dma_fence(rq[2], &rq[1]->fence); in __cancel_queued()
2238 i915_request_add(rq[2]); in __cancel_queued()
2242 intel_context_set_banned(rq[2]->context); in __cancel_queued()
2247 err = wait_for_reset(arg->engine, rq[2], HZ / 2); in __cancel_queued()
2251 if (rq[0]->fence.error != -EIO) { in __cancel_queued()
2257 if (rq[1]->fence.error != 0) { in __cancel_queued()
2263 if (rq[2]->fence.error != -EIO) { in __cancel_queued()
2270 i915_request_put(rq[2]); in __cancel_queued()
2271 i915_request_put(rq[1]); in __cancel_queued()
2272 i915_request_put(rq[0]); in __cancel_queued()
2280 struct i915_request *rq; in __cancel_hostile() local
2291 rq = spinner_create_request(&arg->a.spin, in __cancel_hostile()
2294 if (IS_ERR(rq)) in __cancel_hostile()
2295 return PTR_ERR(rq); in __cancel_hostile()
2297 clear_bit(CONTEXT_BANNED, &rq->context->flags); in __cancel_hostile()
2298 i915_request_get(rq); in __cancel_hostile()
2299 i915_request_add(rq); in __cancel_hostile()
2300 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { in __cancel_hostile()
2305 intel_context_set_banned(rq->context); in __cancel_hostile()
2310 err = wait_for_reset(arg->engine, rq, HZ / 2); in __cancel_hostile()
2317 i915_request_put(rq); in __cancel_hostile()
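The __cancel_hostile matches repeat the pattern shared by the __cancel_*() tests: start a spinner on a context, clear its banned flag, then ban it and expect wait_for_reset() to observe the cancellation. A hedged sketch of that flow (the arguments elided in the matches, here arg->a.ctx and MI_NOOP for a non-preemptible spinner, are assumptions, as is the exact error handling):

        struct i915_request *rq;
        int err = 0;

        /* A spinner that cannot be preempted, so cancellation needs a reset. */
        rq = spinner_create_request(&arg->a.spin,
                                    arg->a.ctx, arg->engine,
                                    MI_NOOP);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        clear_bit(CONTEXT_BANNED, &rq->context->flags);
        i915_request_get(rq);
        i915_request_add(rq);

        if (!igt_wait_for_spinner(&arg->a.spin, rq))
                err = -EIO;

        if (err == 0) {
                /* Banning the context should now force an engine reset. */
                intel_context_set_banned(rq->context);
                err = wait_for_reset(arg->engine, rq, HZ / 2);
        }

        i915_request_put(rq);
        return err;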
2526 struct i915_request *rq; in live_chain_preempt() local
2532 rq = spinner_create_request(&lo.spin, in live_chain_preempt()
2535 if (IS_ERR(rq)) in live_chain_preempt()
2538 i915_request_get(rq); in live_chain_preempt()
2539 i915_request_add(rq); in live_chain_preempt()
2541 ring_size = rq->wa_tail - rq->head; in live_chain_preempt()
2543 ring_size += rq->ring->size; in live_chain_preempt()
2544 ring_size = rq->ring->size / ring_size; in live_chain_preempt()
2549 if (i915_request_wait(rq, 0, HZ / 2) < 0) { in live_chain_preempt()
2551 i915_request_put(rq); in live_chain_preempt()
2554 i915_request_put(rq); in live_chain_preempt()
2562 rq = spinner_create_request(&hi.spin, in live_chain_preempt()
2565 if (IS_ERR(rq)) in live_chain_preempt()
2567 i915_request_add(rq); in live_chain_preempt()
2568 if (!igt_wait_for_spinner(&hi.spin, rq)) in live_chain_preempt()
2571 rq = spinner_create_request(&lo.spin, in live_chain_preempt()
2574 if (IS_ERR(rq)) in live_chain_preempt()
2576 i915_request_add(rq); in live_chain_preempt()
2579 rq = igt_request_alloc(lo.ctx, engine); in live_chain_preempt()
2580 if (IS_ERR(rq)) in live_chain_preempt()
2582 i915_request_add(rq); in live_chain_preempt()
2585 rq = igt_request_alloc(hi.ctx, engine); in live_chain_preempt()
2586 if (IS_ERR(rq)) in live_chain_preempt()
2589 i915_request_get(rq); in live_chain_preempt()
2590 i915_request_add(rq); in live_chain_preempt()
2591 engine->schedule(rq, &attr); in live_chain_preempt()
2594 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in live_chain_preempt()
2602 i915_request_put(rq); in live_chain_preempt()
2606 i915_request_put(rq); in live_chain_preempt()
2608 rq = igt_request_alloc(lo.ctx, engine); in live_chain_preempt()
2609 if (IS_ERR(rq)) in live_chain_preempt()
2612 i915_request_get(rq); in live_chain_preempt()
2613 i915_request_add(rq); in live_chain_preempt()
2615 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in live_chain_preempt()
2624 i915_request_put(rq); in live_chain_preempt()
2627 i915_request_put(rq); in live_chain_preempt()
2656 struct i915_request *rq; in create_gang() local
2709 rq = intel_context_create_request(ce); in create_gang()
2710 if (IS_ERR(rq)) in create_gang()
2713 rq->batch = i915_vma_get(vma); in create_gang()
2714 i915_request_get(rq); in create_gang()
2717 err = i915_request_await_object(rq, vma->obj, false); in create_gang()
2719 err = i915_vma_move_to_active(vma, rq, 0); in create_gang()
2721 err = rq->engine->emit_bb_start(rq, in create_gang()
2725 i915_request_add(rq); in create_gang()
2732 rq->mock.link.next = &(*prev)->mock.link; in create_gang()
2733 *prev = rq; in create_gang()
2737 i915_vma_put(rq->batch); in create_gang()
2738 i915_request_put(rq); in create_gang()
2751 struct i915_request *rq; in __live_preempt_ring() local
2783 rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK); in __live_preempt_ring()
2784 if (IS_ERR(rq)) { in __live_preempt_ring()
2785 err = PTR_ERR(rq); in __live_preempt_ring()
2789 i915_request_get(rq); in __live_preempt_ring()
2790 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in __live_preempt_ring()
2791 i915_request_add(rq); in __live_preempt_ring()
2793 if (!igt_wait_for_spinner(spin, rq)) { in __live_preempt_ring()
2795 i915_request_put(rq); in __live_preempt_ring()
2802 while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) { in __live_preempt_ring()
2808 i915_request_put(rq); in __live_preempt_ring()
2822 rq->tail); in __live_preempt_ring()
2823 i915_request_put(rq); in __live_preempt_ring()
2826 rq = intel_context_create_request(ce[1]); in __live_preempt_ring()
2827 if (IS_ERR(rq)) { in __live_preempt_ring()
2828 err = PTR_ERR(rq); in __live_preempt_ring()
2832 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in __live_preempt_ring()
2833 i915_request_get(rq); in __live_preempt_ring()
2834 i915_request_add(rq); in __live_preempt_ring()
2836 err = wait_for_submit(engine, rq, HZ / 2); in __live_preempt_ring()
2837 i915_request_put(rq); in __live_preempt_ring()
2928 struct i915_request *rq = NULL; in live_preempt_gang() local
2946 err = create_gang(engine, &rq); in live_preempt_gang()
2951 engine->schedule(rq, &attr); in live_preempt_gang()
2963 cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC); in live_preempt_gang()
2966 i915_gem_object_unpin_map(rq->batch->obj); in live_preempt_gang()
2972 while (rq) { /* wait for each rq from highest to lowest prio */ in live_preempt_gang()
2973 struct i915_request *n = list_next_entry(rq, mock.link); in live_preempt_gang()
2975 if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) { in live_preempt_gang()
2980 prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT); in live_preempt_gang()
2987 i915_vma_put(rq->batch); in live_preempt_gang()
2988 i915_request_put(rq); in live_preempt_gang()
2989 rq = n; in live_preempt_gang()
3108 struct i915_request *rq; in create_gpr_client() local
3131 rq = intel_context_create_request(ce); in create_gpr_client()
3132 if (IS_ERR(rq)) { in create_gpr_client()
3133 err = PTR_ERR(rq); in create_gpr_client()
3138 err = i915_request_await_object(rq, vma->obj, false); in create_gpr_client()
3140 err = i915_vma_move_to_active(vma, rq, 0); in create_gpr_client()
3145 err = i915_request_await_object(rq, batch->obj, false); in create_gpr_client()
3147 err = i915_vma_move_to_active(batch, rq, 0); in create_gpr_client()
3149 err = rq->engine->emit_bb_start(rq, in create_gpr_client()
3156 i915_request_get(rq); in create_gpr_client()
3157 i915_request_add(rq); in create_gpr_client()
3165 return err ? ERR_PTR(err) : rq; in create_gpr_client()
3175 struct i915_request *rq; in preempt_user() local
3179 rq = intel_engine_create_kernel_request(engine); in preempt_user()
3180 if (IS_ERR(rq)) in preempt_user()
3181 return PTR_ERR(rq); in preempt_user()
3183 cs = intel_ring_begin(rq, 4); in preempt_user()
3185 i915_request_add(rq); in preempt_user()
3194 intel_ring_advance(rq, cs); in preempt_user()
3196 i915_request_get(rq); in preempt_user()
3197 i915_request_add(rq); in preempt_user()
3199 engine->schedule(rq, &attr); in preempt_user()
3201 if (i915_request_wait(rq, 0, HZ / 2) < 0) in preempt_user()
3203 i915_request_put(rq); in preempt_user()
3263 struct i915_request *rq; in live_preempt_user() local
3265 rq = create_gpr_client(engine, global, in live_preempt_user()
3267 if (IS_ERR(rq)) in live_preempt_user()
3270 client[i] = rq; in live_preempt_user()
3365 struct i915_request *rq; in live_preempt_timeout() local
3370 rq = spinner_create_request(&spin_lo, ctx_lo, engine, in live_preempt_timeout()
3372 if (IS_ERR(rq)) { in live_preempt_timeout()
3373 err = PTR_ERR(rq); in live_preempt_timeout()
3377 i915_request_add(rq); in live_preempt_timeout()
3378 if (!igt_wait_for_spinner(&spin_lo, rq)) { in live_preempt_timeout()
3384 rq = igt_request_alloc(ctx_hi, engine); in live_preempt_timeout()
3385 if (IS_ERR(rq)) { in live_preempt_timeout()
3387 err = PTR_ERR(rq); in live_preempt_timeout()
3398 i915_request_get(rq); in live_preempt_timeout()
3399 i915_request_add(rq); in live_preempt_timeout()
3404 if (i915_request_wait(rq, 0, HZ / 10) < 0) { in live_preempt_timeout()
3406 i915_request_put(rq); in live_preempt_timeout()
3412 i915_request_put(rq); in live_preempt_timeout()
3455 struct i915_request *rq; in smoke_submit() local
3475 rq = igt_request_alloc(ctx, smoke->engine); in smoke_submit()
3476 if (IS_ERR(rq)) { in smoke_submit()
3477 err = PTR_ERR(rq); in smoke_submit()
3483 err = i915_request_await_object(rq, vma->obj, false); in smoke_submit()
3485 err = i915_vma_move_to_active(vma, rq, 0); in smoke_submit()
3487 err = rq->engine->emit_bb_start(rq, in smoke_submit()
3493 i915_request_add(rq); in smoke_submit()
3724 struct i915_request *rq; in nop_virtual_engine() local
3726 rq = i915_request_create(ve[nc]); in nop_virtual_engine()
3727 if (IS_ERR(rq)) { in nop_virtual_engine()
3728 err = PTR_ERR(rq); in nop_virtual_engine()
3734 request[nc] = i915_request_get(rq); in nop_virtual_engine()
3735 i915_request_add(rq); in nop_virtual_engine()
3741 struct i915_request *rq; in nop_virtual_engine() local
3743 rq = i915_request_create(ve[nc]); in nop_virtual_engine()
3744 if (IS_ERR(rq)) { in nop_virtual_engine()
3745 err = PTR_ERR(rq); in nop_virtual_engine()
3751 request[nc] = i915_request_get(rq); in nop_virtual_engine()
3752 i915_request_add(rq); in nop_virtual_engine()
3997 struct i915_request *rq; in slicein_virtual_engine() local
4016 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in slicein_virtual_engine()
4018 if (IS_ERR(rq)) { in slicein_virtual_engine()
4019 err = PTR_ERR(rq); in slicein_virtual_engine()
4023 i915_request_add(rq); in slicein_virtual_engine()
4032 rq = intel_context_create_request(ce); in slicein_virtual_engine()
4034 if (IS_ERR(rq)) { in slicein_virtual_engine()
4035 err = PTR_ERR(rq); in slicein_virtual_engine()
4039 i915_request_get(rq); in slicein_virtual_engine()
4040 i915_request_add(rq); in slicein_virtual_engine()
4041 if (i915_request_wait(rq, 0, timeout) < 0) { in slicein_virtual_engine()
4043 __func__, rq->engine->name); in slicein_virtual_engine()
4048 i915_request_put(rq); in slicein_virtual_engine()
4064 struct i915_request *rq; in sliceout_virtual_engine() local
4084 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); in sliceout_virtual_engine()
4086 if (IS_ERR(rq)) { in sliceout_virtual_engine()
4087 err = PTR_ERR(rq); in sliceout_virtual_engine()
4091 i915_request_add(rq); in sliceout_virtual_engine()
4101 rq = intel_context_create_request(ce); in sliceout_virtual_engine()
4103 if (IS_ERR(rq)) { in sliceout_virtual_engine()
4104 err = PTR_ERR(rq); in sliceout_virtual_engine()
4108 i915_request_get(rq); in sliceout_virtual_engine()
4109 i915_request_add(rq); in sliceout_virtual_engine()
4110 if (i915_request_wait(rq, 0, timeout) < 0) { in sliceout_virtual_engine()
4117 i915_request_put(rq); in sliceout_virtual_engine()
4194 struct i915_request *rq; in preserved_virtual_engine() local
4196 rq = i915_request_create(ve); in preserved_virtual_engine()
4197 if (IS_ERR(rq)) { in preserved_virtual_engine()
4198 err = PTR_ERR(rq); in preserved_virtual_engine()
4203 last = i915_request_get(rq); in preserved_virtual_engine()
4205 cs = intel_ring_begin(rq, 8); in preserved_virtual_engine()
4207 i915_request_add(rq); in preserved_virtual_engine()
4222 intel_ring_advance(rq, cs); in preserved_virtual_engine()
4225 rq->execution_mask = engine->mask; in preserved_virtual_engine()
4226 i915_request_add(rq); in preserved_virtual_engine()
4306 struct i915_request *rq[16]; in bond_virtual_engine() local
4351 GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1); in bond_virtual_engine()
4357 rq[0] = ERR_PTR(-ENOMEM); in bond_virtual_engine()
4371 memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq)); in bond_virtual_engine()
4373 rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP); in bond_virtual_engine()
4375 if (IS_ERR(rq[0])) { in bond_virtual_engine()
4376 err = PTR_ERR(rq[0]); in bond_virtual_engine()
4379 i915_request_get(rq[0]); in bond_virtual_engine()
4383 err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit, in bond_virtual_engine()
4388 i915_request_add(rq[0]); in bond_virtual_engine()
4393 !igt_wait_for_spinner(&spin, rq[0])) { in bond_virtual_engine()
4424 rq[n + 1] = i915_request_create(ve); in bond_virtual_engine()
4426 if (IS_ERR(rq[n + 1])) { in bond_virtual_engine()
4427 err = PTR_ERR(rq[n + 1]); in bond_virtual_engine()
4431 i915_request_get(rq[n + 1]); in bond_virtual_engine()
4433 err = i915_request_await_execution(rq[n + 1], in bond_virtual_engine()
4434 &rq[0]->fence, in bond_virtual_engine()
4436 i915_request_add(rq[n + 1]); in bond_virtual_engine()
4446 if (i915_request_wait(rq[0], 0, HZ / 10) < 0) { in bond_virtual_engine()
4448 rq[0]->engine->name); in bond_virtual_engine()
4454 if (i915_request_wait(rq[n + 1], 0, in bond_virtual_engine()
4460 if (rq[n + 1]->engine != siblings[n]) { in bond_virtual_engine()
4463 rq[n + 1]->engine->name, in bond_virtual_engine()
4464 rq[0]->engine->name); in bond_virtual_engine()
4470 for (n = 0; !IS_ERR(rq[n]); n++) in bond_virtual_engine()
4471 i915_request_put(rq[n]); in bond_virtual_engine()
4472 rq[0] = ERR_PTR(-ENOMEM); in bond_virtual_engine()
4476 for (n = 0; !IS_ERR(rq[n]); n++) in bond_virtual_engine()
4477 i915_request_put(rq[n]); in bond_virtual_engine()
4533 struct i915_request *rq; in reset_virtual_engine() local
4555 rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK); in reset_virtual_engine()
4556 if (IS_ERR(rq)) { in reset_virtual_engine()
4557 err = PTR_ERR(rq); in reset_virtual_engine()
4560 i915_request_add(rq); in reset_virtual_engine()
4562 if (!igt_wait_for_spinner(&spin, rq)) { in reset_virtual_engine()
4568 engine = rq->engine; in reset_virtual_engine()
4581 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); in reset_virtual_engine()
4587 GEM_BUG_ON(rq->engine != ve->engine); in reset_virtual_engine()
4590 execlists_hold(engine, rq); in reset_virtual_engine()
4591 GEM_BUG_ON(!i915_request_on_hold(rq)); in reset_virtual_engine()
4594 GEM_BUG_ON(rq->fence.error != -EIO); in reset_virtual_engine()
4601 i915_request_get(rq); in reset_virtual_engine()
4602 if (!i915_request_wait(rq, 0, HZ / 5)) { in reset_virtual_engine()
4609 GEM_BUG_ON(!i915_request_on_hold(rq)); in reset_virtual_engine()
4612 execlists_unhold(engine, rq); in reset_virtual_engine()
4613 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in reset_virtual_engine()
4621 i915_request_put(rq); in reset_virtual_engine()
4714 struct i915_request *rq; in emit_semaphore_signal() local
4717 rq = intel_context_create_request(ce); in emit_semaphore_signal()
4718 if (IS_ERR(rq)) in emit_semaphore_signal()
4719 return PTR_ERR(rq); in emit_semaphore_signal()
4721 cs = intel_ring_begin(rq, 4); in emit_semaphore_signal()
4723 i915_request_add(rq); in emit_semaphore_signal()
4732 intel_ring_advance(rq, cs); in emit_semaphore_signal()
4734 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in emit_semaphore_signal()
4735 i915_request_add(rq); in emit_semaphore_signal()
4741 struct i915_request *rq; in context_flush() local
4745 rq = intel_engine_create_kernel_request(ce->engine); in context_flush()
4746 if (IS_ERR(rq)) in context_flush()
4747 return PTR_ERR(rq); in context_flush()
4751 i915_request_await_dma_fence(rq, fence); in context_flush()
4755 rq = i915_request_get(rq); in context_flush()
4756 i915_request_add(rq); in context_flush()
4757 if (i915_request_wait(rq, 0, timeout) < 0) in context_flush()
4759 i915_request_put(rq); in context_flush()
4998 struct i915_request *rq; in __live_lrc_state() local
5022 rq = i915_request_create(ce); in __live_lrc_state()
5023 if (IS_ERR(rq)) { in __live_lrc_state()
5024 err = PTR_ERR(rq); in __live_lrc_state()
5028 cs = intel_ring_begin(rq, 4 * MAX_IDX); in __live_lrc_state()
5031 i915_request_add(rq); in __live_lrc_state()
5047 err = i915_request_await_object(rq, scratch->obj, true); in __live_lrc_state()
5049 err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE); in __live_lrc_state()
5051 i915_request_get(rq); in __live_lrc_state()
5052 i915_request_add(rq); in __live_lrc_state()
5059 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in __live_lrc_state()
5082 i915_request_put(rq); in __live_lrc_state()
5128 struct i915_request *rq; in gpr_make_dirty() local
5132 rq = intel_context_create_request(ce); in gpr_make_dirty()
5133 if (IS_ERR(rq)) in gpr_make_dirty()
5134 return PTR_ERR(rq); in gpr_make_dirty()
5136 cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2); in gpr_make_dirty()
5138 i915_request_add(rq); in gpr_make_dirty()
5149 intel_ring_advance(rq, cs); in gpr_make_dirty()
5151 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in gpr_make_dirty()
5152 i915_request_add(rq); in gpr_make_dirty()
5163 struct i915_request *rq; in __gpr_read() local
5168 rq = intel_context_create_request(ce); in __gpr_read()
5169 if (IS_ERR(rq)) in __gpr_read()
5170 return rq; in __gpr_read()
5172 cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW); in __gpr_read()
5174 i915_request_add(rq); in __gpr_read()
5197 err = i915_request_await_object(rq, scratch->obj, true); in __gpr_read()
5199 err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE); in __gpr_read()
5202 i915_request_get(rq); in __gpr_read()
5203 i915_request_add(rq); in __gpr_read()
5205 i915_request_put(rq); in __gpr_read()
5206 rq = ERR_PTR(err); in __gpr_read()
5209 return rq; in __gpr_read()
5218 struct i915_request *rq; in __live_lrc_gpr() local
5234 rq = __gpr_read(ce, scratch, slot); in __live_lrc_gpr()
5235 if (IS_ERR(rq)) { in __live_lrc_gpr()
5236 err = PTR_ERR(rq); in __live_lrc_gpr()
5240 err = wait_for_submit(engine, rq, HZ / 2); in __live_lrc_gpr()
5257 if (i915_request_wait(rq, 0, HZ / 5) < 0) { in __live_lrc_gpr()
5284 i915_request_put(rq); in __live_lrc_gpr()
5336 struct i915_request *rq; in create_timestamp() local
5340 rq = intel_context_create_request(ce); in create_timestamp()
5341 if (IS_ERR(rq)) in create_timestamp()
5342 return rq; in create_timestamp()
5344 cs = intel_ring_begin(rq, 10); in create_timestamp()
5362 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base)); in create_timestamp()
5366 intel_ring_advance(rq, cs); in create_timestamp()
5368 rq->sched.attr.priority = I915_PRIORITY_MASK; in create_timestamp()
5371 i915_request_get(rq); in create_timestamp()
5372 i915_request_add(rq); in create_timestamp()
5374 i915_request_put(rq); in create_timestamp()
5378 return rq; in create_timestamp()
5395 struct i915_request *rq; in __lrc_timestamp() local
5400 rq = create_timestamp(arg->ce[0], slot, 1); in __lrc_timestamp()
5401 if (IS_ERR(rq)) in __lrc_timestamp()
5402 return PTR_ERR(rq); in __lrc_timestamp()
5404 err = wait_for_submit(rq->engine, rq, HZ / 2); in __lrc_timestamp()
5440 i915_request_put(rq); in __lrc_timestamp()
5610 static int move_to_active(struct i915_request *rq, in move_to_active() argument
5617 err = i915_request_await_object(rq, vma->obj, flags); in move_to_active()
5619 err = i915_vma_move_to_active(vma, rq, flags); in move_to_active()
5632 struct i915_request *rq; in record_registers() local
5642 rq = ERR_CAST(b_after); in record_registers()
5646 rq = intel_context_create_request(ce); in record_registers()
5647 if (IS_ERR(rq)) in record_registers()
5650 err = move_to_active(rq, before, EXEC_OBJECT_WRITE); in record_registers()
5654 err = move_to_active(rq, b_before, 0); in record_registers()
5658 err = move_to_active(rq, after, EXEC_OBJECT_WRITE); in record_registers()
5662 err = move_to_active(rq, b_after, 0); in record_registers()
5666 cs = intel_ring_begin(rq, 14); in record_registers()
5693 intel_ring_advance(rq, cs); in record_registers()
5696 i915_request_get(rq); in record_registers()
5697 i915_request_add(rq); in record_registers()
5702 return rq; in record_registers()
5705 i915_request_add(rq); in record_registers()
5706 rq = ERR_PTR(err); in record_registers()
5772 struct i915_request *rq; in poison_registers() local
5781 rq = intel_context_create_request(ce); in poison_registers()
5782 if (IS_ERR(rq)) { in poison_registers()
5783 err = PTR_ERR(rq); in poison_registers()
5787 err = move_to_active(rq, batch, 0); in poison_registers()
5791 cs = intel_ring_begin(rq, 8); in poison_registers()
5808 intel_ring_advance(rq, cs); in poison_registers()
5810 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in poison_registers()
5812 i915_request_add(rq); in poison_registers()
5931 struct i915_request *rq; in __lrc_isolation() local
5956 rq = record_registers(A, ref[0], ref[1], sema); in __lrc_isolation()
5957 if (IS_ERR(rq)) { in __lrc_isolation()
5958 err = PTR_ERR(rq); in __lrc_isolation()
5965 if (i915_request_wait(rq, 0, HZ / 2) < 0) { in __lrc_isolation()
5966 i915_request_put(rq); in __lrc_isolation()
5970 i915_request_put(rq); in __lrc_isolation()
5984 rq = record_registers(A, result[0], result[1], sema); in __lrc_isolation()
5985 if (IS_ERR(rq)) { in __lrc_isolation()
5986 err = PTR_ERR(rq); in __lrc_isolation()
5993 i915_request_put(rq); in __lrc_isolation()
5997 if (i915_request_wait(rq, 0, HZ / 2) < 0) { in __lrc_isolation()
5998 i915_request_put(rq); in __lrc_isolation()
6002 i915_request_put(rq); in __lrc_isolation()
6086 struct i915_request *rq; in indirect_ctx_submit_req() local
6089 rq = intel_context_create_request(ce); in indirect_ctx_submit_req()
6090 if (IS_ERR(rq)) in indirect_ctx_submit_req()
6091 return PTR_ERR(rq); in indirect_ctx_submit_req()
6093 i915_request_get(rq); in indirect_ctx_submit_req()
6094 i915_request_add(rq); in indirect_ctx_submit_req()
6096 if (i915_request_wait(rq, 0, HZ / 5) < 0) in indirect_ctx_submit_req()
6099 i915_request_put(rq); in indirect_ctx_submit_req()
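The indirect_ctx_submit_req matches again look like a complete helper: run the context once with an empty request and wait briefly for it to retire. Reconstructed on that assumption:

        static int indirect_ctx_submit_req(struct intel_context *ce)
        {
                struct i915_request *rq;
                int err = 0;

                rq = intel_context_create_request(ce);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                i915_request_get(rq);
                i915_request_add(rq);

                /* A short wait is enough for an empty request to retire. */
                if (i915_request_wait(rq, 0, HZ / 5) < 0)
                        err = -ETIME;

                i915_request_put(rq);
                return err;
        }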
6240 struct i915_request *rq) in garbage_reset() argument
6250 if (!rq->fence.error) in garbage_reset()
6260 struct i915_request *rq; in garbage() local
6272 rq = intel_context_create_request(ce); in garbage()
6273 if (IS_ERR(rq)) { in garbage()
6274 err = PTR_ERR(rq); in garbage()
6278 i915_request_get(rq); in garbage()
6279 i915_request_add(rq); in garbage()
6280 return rq; in garbage()
6376 struct i915_request *rq; in __live_pphwsp_runtime() local
6391 rq = intel_context_create_request(ce); in __live_pphwsp_runtime()
6392 if (IS_ERR(rq)) { in __live_pphwsp_runtime()
6393 err = PTR_ERR(rq); in __live_pphwsp_runtime()
6398 i915_request_get(rq); in __live_pphwsp_runtime()
6400 i915_request_add(rq); in __live_pphwsp_runtime()
6406 i915_request_put(rq); in __live_pphwsp_runtime()
6409 err = i915_request_wait(rq, 0, HZ / 5); in __live_pphwsp_runtime()
6433 i915_request_put(rq); in __live_pphwsp_runtime()