Lines Matching refs:rq
74 struct i915_request *rq = to_request(fence); in i915_fence_release() local
83 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
85 kmem_cache_free(rq->i915->requests, rq); in i915_fence_release()
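
The three hits above are the tail of a request's life: i915_fence_release() runs when the last reference on the embedded dma_fence is dropped, finalises the submit fence and returns the request to the i915->requests slab. A minimal sketch of holding such a reference, assuming the i915_request_get()/i915_request_put() fence-refcount wrappers from i915_request.h of this era (hold_request_example() is a made-up name):

static void hold_request_example(struct i915_request *rq)
{
	/* pin the embedded fence so rq stays valid past retirement */
	i915_request_get(rq);

	if (i915_request_completed(rq))
		pr_info("rq %llx:%d done, error=%d\n",
			rq->fence.context, rq->fence.seqno,
			rq->fence.error);

	/* the final put lands in i915_fence_release() above, which does
	 * kmem_cache_free(rq->i915->requests, rq) */
	i915_request_put(rq);
}
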
349 struct i915_request *rq) in __retire_engine_request() argument
353 rq->fence.context, rq->fence.seqno, in __retire_engine_request()
354 rq->global_seqno, in __retire_engine_request()
357 GEM_BUG_ON(!i915_request_completed(rq)); in __retire_engine_request()
362 GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests)); in __retire_engine_request()
363 list_del_init(&rq->link); in __retire_engine_request()
366 spin_lock(&rq->lock); in __retire_engine_request()
367 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) in __retire_engine_request()
368 dma_fence_signal_locked(&rq->fence); in __retire_engine_request()
369 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) in __retire_engine_request()
370 intel_engine_cancel_signaling(rq); in __retire_engine_request()
371 if (rq->waitboost) { in __retire_engine_request()
372 GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); in __retire_engine_request()
373 atomic_dec(&rq->i915->gt_pm.rps.num_waiters); in __retire_engine_request()
375 spin_unlock(&rq->lock); in __retire_engine_request()
389 engine->last_retired_context = rq->hw_context; in __retire_engine_request()
393 struct i915_request *rq) in __retire_engine_upto() argument
397 if (list_empty(&rq->link)) in __retire_engine_upto()
406 } while (tmp != rq); in __retire_engine_upto()
471 void i915_request_retire_upto(struct i915_request *rq) in i915_request_retire_upto() argument
473 struct intel_ring *ring = rq->ring; in i915_request_retire_upto()
477 rq->engine->name, in i915_request_retire_upto()
478 rq->fence.context, rq->fence.seqno, in i915_request_retire_upto()
479 rq->global_seqno, in i915_request_retire_upto()
480 intel_engine_get_seqno(rq->engine)); in i915_request_retire_upto()
482 lockdep_assert_held(&rq->i915->drm.struct_mutex); in i915_request_retire_upto()
483 GEM_BUG_ON(!i915_request_completed(rq)); in i915_request_retire_upto()
485 if (list_empty(&rq->ring_link)) in i915_request_retire_upto()
493 } while (tmp != rq); in i915_request_retire_upto()
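
Retirement is split between the engine-timeline side (__retire_engine_request()/__retire_engine_upto(), which signal the fence, cancel breadcrumb signaling and drop any waitboost) and the ring side (i915_request_retire_upto(), which walks rq->ring->request_list up to and including rq). The asserts at lines 482-483 give the contract; a hedged caller-side sketch honouring it (retire_completed_example() is a made-up name):

static void retire_completed_example(struct drm_i915_private *i915,
				     struct i915_request *rq)
{
	/* contract from the asserts above: struct_mutex held, rq finished */
	lockdep_assert_held(&i915->drm.struct_mutex);

	if (!i915_request_completed(rq))
		return;

	/* retires every older request on rq->ring, then rq itself */
	i915_request_retire_upto(rq);
}
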
661 struct i915_request *rq; in i915_request_alloc() local
699 rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link); in i915_request_alloc()
700 if (!list_is_last(&rq->ring_link, &ce->ring->request_list) && in i915_request_alloc()
701 i915_request_completed(rq)) in i915_request_alloc()
702 i915_request_retire(rq); in i915_request_alloc()
733 rq = kmem_cache_alloc(i915->requests, in i915_request_alloc()
735 if (unlikely(!rq)) { in i915_request_alloc()
755 rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); in i915_request_alloc()
756 if (!rq) { in i915_request_alloc()
762 INIT_LIST_HEAD(&rq->active_list); in i915_request_alloc()
763 rq->i915 = i915; in i915_request_alloc()
764 rq->engine = engine; in i915_request_alloc()
765 rq->gem_context = ctx; in i915_request_alloc()
766 rq->hw_context = ce; in i915_request_alloc()
767 rq->ring = ce->ring; in i915_request_alloc()
768 rq->timeline = ce->ring->timeline; in i915_request_alloc()
769 GEM_BUG_ON(rq->timeline == &engine->timeline); in i915_request_alloc()
771 spin_lock_init(&rq->lock); in i915_request_alloc()
772 dma_fence_init(&rq->fence, in i915_request_alloc()
774 &rq->lock, in i915_request_alloc()
775 rq->timeline->fence_context, in i915_request_alloc()
776 timeline_get_seqno(rq->timeline)); in i915_request_alloc()
779 i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); in i915_request_alloc()
780 init_waitqueue_head(&rq->execute); in i915_request_alloc()
782 i915_sched_node_init(&rq->sched); in i915_request_alloc()
785 rq->global_seqno = 0; in i915_request_alloc()
786 rq->signaling.wait.seqno = 0; in i915_request_alloc()
787 rq->file_priv = NULL; in i915_request_alloc()
788 rq->batch = NULL; in i915_request_alloc()
789 rq->capture_list = NULL; in i915_request_alloc()
790 rq->waitboost = false; in i915_request_alloc()
799 rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; in i915_request_alloc()
800 GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz); in i915_request_alloc()
808 rq->head = rq->ring->emit; in i915_request_alloc()
811 ret = engine->emit_flush(rq, EMIT_INVALIDATE); in i915_request_alloc()
815 ret = engine->request_alloc(rq); in i915_request_alloc()
822 rq->infix = rq->ring->emit; /* end of header; start of user payload */ in i915_request_alloc()
825 GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno); in i915_request_alloc()
826 return rq; in i915_request_alloc()
829 ce->ring->emit = rq->head; in i915_request_alloc()
832 GEM_BUG_ON(!list_empty(&rq->active_list)); in i915_request_alloc()
833 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); in i915_request_alloc()
834 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); in i915_request_alloc()
836 kmem_cache_free(i915->requests, rq); in i915_request_alloc()
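
The block above (lines 661-836) is the allocation path: opportunistically retire the oldest completed request on the target ring, allocate from the slab (retrying with GFP_KERNEL after reclaim), initialise the embedded fence, submit fence and scheduler node, reserve space for the final breadcrumb, then run emit_flush(EMIT_INVALIDATE) and the engine's request_alloc() hook; the error unwind rewinds ring->emit and frees the slab entry. Roughly how a caller of this era drives it, as a hedged sketch (submit_batch_example() is a made-up name, batch_addr/batch_len are placeholders):

static int submit_batch_example(struct intel_engine_cs *engine,
				struct i915_gem_context *ctx,
				u64 batch_addr, u32 batch_len)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_alloc(engine, ctx);	/* ERR_PTR() on failure */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* the user payload is emitted between alloc and add */
	err = engine->emit_bb_start(rq, batch_addr, batch_len, 0);

	/* always close the request, even on error */
	i915_request_add(rq);
	return err;
}
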
899 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) in i915_request_await_dma_fence() argument
931 if (fence->context == rq->fence.context) in i915_request_await_dma_fence()
935 if (fence->context != rq->i915->mm.unordered_timeline && in i915_request_await_dma_fence()
936 i915_timeline_sync_is_later(rq->timeline, fence)) in i915_request_await_dma_fence()
940 ret = i915_request_await_request(rq, to_request(fence)); in i915_request_await_dma_fence()
942 ret = i915_sw_fence_await_dma_fence(&rq->submit, fence, in i915_request_await_dma_fence()
949 if (fence->context != rq->i915->mm.unordered_timeline) in i915_request_await_dma_fence()
950 i915_timeline_sync_set(rq->timeline, fence); in i915_request_await_dma_fence()
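
i915_request_await_dma_fence() is the ordering primitive: same-context fences are ignored, fences already recorded in rq->timeline's sync map are skipped, native i915 fences become request-to-request awaits, and anything else is attached to rq->submit via i915_sw_fence_await_dma_fence(). A hedged sketch of ordering a new request behind an external fence (await_example() is a made-up name):

static int await_example(struct i915_request *rq, struct dma_fence *fence)
{
	int err;

	/* rq will not be submitted until @fence has signaled */
	err = i915_request_await_dma_fence(rq, fence);
	if (err < 0)
		return err;

	return 0;
}
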
1018 void i915_request_skip(struct i915_request *rq, int error) in i915_request_skip() argument
1020 void *vaddr = rq->ring->vaddr; in i915_request_skip()
1024 dma_fence_set_error(&rq->fence, error); in i915_request_skip()
1031 head = rq->infix; in i915_request_skip()
1032 if (rq->postfix < head) { in i915_request_skip()
1033 memset(vaddr + head, 0, rq->ring->size - head); in i915_request_skip()
1036 memset(vaddr + head, 0, rq->postfix - head); in i915_request_skip()
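
i915_request_skip() cancels a request's payload after the fact: it records the error on the fence and zeroes the ring from rq->infix (end of the header emitted during allocation) up to rq->postfix, handling the wrap case where the payload crosses the end of the ring; zeroed dwords execute as MI_NOOP. A plausible error-path use, hedged and not necessarily how this tree calls it:

	/* construction failed after commands were emitted: turn the
	 * payload into no-ops, report the error via the fence, and
	 * still let the breadcrumb retire the request normally */
	if (err)
		i915_request_skip(rq, err);
	i915_request_add(rq);
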
1189 static bool __i915_spin_request(const struct i915_request *rq, in __i915_spin_request() argument
1192 struct intel_engine_cs *engine = rq->engine; in __i915_spin_request()
1226 return seqno == i915_request_global_seqno(rq); in __i915_spin_request()
1280 long i915_request_wait(struct i915_request *rq, in i915_request_wait() argument
1286 wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue; in i915_request_wait()
1294 !!lockdep_is_held(&rq->i915->drm.struct_mutex) != in i915_request_wait()
1299 if (i915_request_completed(rq)) in i915_request_wait()
1305 trace_i915_request_wait_begin(rq, flags); in i915_request_wait()
1307 add_wait_queue(&rq->execute, &exec); in i915_request_wait()
1316 if (intel_wait_update_request(&wait, rq)) in i915_request_wait()
1320 __i915_wait_request_check_and_reset(rq)) in i915_request_wait()
1337 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); in i915_request_wait()
1340 if (__i915_spin_request(rq, wait.seqno, state, 5)) in i915_request_wait()
1344 if (intel_engine_add_wait(rq->engine, &wait)) in i915_request_wait()
1353 __i915_wait_request_check_and_reset(rq); in i915_request_wait()
1369 intel_wait_check_request(&wait, rq)) in i915_request_wait()
1381 if (__i915_request_irq_complete(rq)) in i915_request_wait()
1396 __i915_wait_request_check_and_reset(rq)) in i915_request_wait()
1400 if (__i915_spin_request(rq, wait.seqno, state, 2)) in i915_request_wait()
1403 if (!intel_wait_check_request(&wait, rq)) { in i915_request_wait()
1404 intel_engine_remove_wait(rq->engine, &wait); in i915_request_wait()
1409 intel_engine_remove_wait(rq->engine, &wait); in i915_request_wait()
1414 remove_wait_queue(&rq->execute, &exec); in i915_request_wait()
1415 trace_i915_request_wait_end(rq); in i915_request_wait()
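
i915_request_wait() first busy-spins via __i915_spin_request() for a few microseconds, then installs a breadcrumb waiter with intel_engine_add_wait(), re-checking for resets and seqno reuse as it sleeps; the lockdep check at line 1294 insists that I915_WAIT_LOCKED matches whether struct_mutex is actually held. A hedged caller sketch for an unlocked wait (wait_example() is a made-up name):

static int wait_example(struct i915_request *rq)
{
	long timeout;

	/* unlocked, interruptible wait for the request to complete */
	timeout = i915_request_wait(rq,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;	/* e.g. -EINTR on a pending signal */

	return 0;
}
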