Lines matching refs:rq — references to the request pointer 'rq' in drivers/gpu/drm/i915/i915_request.c (one entry per matching source line: line number, code fragment, enclosing function).
48 void (*hook)(struct i915_request *rq, struct dma_fence *signal);
112 struct i915_request *rq = to_request(fence); in i915_fence_release() local
121 i915_sw_fence_fini(&rq->submit); in i915_fence_release()
122 i915_sw_fence_fini(&rq->semaphore); in i915_fence_release()
155 if (is_power_of_2(rq->execution_mask) && in i915_fence_release()
156 !cmpxchg(&rq->engine->request_pool, NULL, rq)) in i915_fence_release()
159 kmem_cache_free(global.slab_requests, rq); in i915_fence_release()
191 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk)) in __notify_execute_cb() argument
195 if (llist_empty(&rq->execute_cb)) in __notify_execute_cb()
199 llist_del_all(&rq->execute_cb), in __notify_execute_cb()
204 static void __notify_execute_cb_irq(struct i915_request *rq) in __notify_execute_cb_irq() argument
206 __notify_execute_cb(rq, irq_work_queue); in __notify_execute_cb_irq()
215 static void __notify_execute_cb_imm(struct i915_request *rq) in __notify_execute_cb_imm() argument
217 __notify_execute_cb(rq, irq_work_imm); in __notify_execute_cb_imm()
233 static void __i915_request_fill(struct i915_request *rq, u8 val) in __i915_request_fill() argument
235 void *vaddr = rq->ring->vaddr; in __i915_request_fill()
238 head = rq->infix; in __i915_request_fill()
239 if (rq->postfix < head) { in __i915_request_fill()
240 memset(vaddr + head, val, rq->ring->size - head); in __i915_request_fill()
243 memset(vaddr + head, val, rq->postfix - head); in __i915_request_fill()
246 static void remove_from_engine(struct i915_request *rq) in remove_from_engine() argument
256 locked = READ_ONCE(rq->engine); in remove_from_engine()
258 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { in remove_from_engine()
263 list_del_init(&rq->sched.link); in remove_from_engine()
265 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); in remove_from_engine()
266 clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags); in remove_from_engine()
269 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); in remove_from_engine()
273 __notify_execute_cb_imm(rq); in remove_from_engine()
276 bool i915_request_retire(struct i915_request *rq) in i915_request_retire() argument
278 if (!i915_request_completed(rq)) in i915_request_retire()
281 RQ_TRACE(rq, "\n"); in i915_request_retire()
283 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); in i915_request_retire()
284 trace_i915_request_retire(rq); in i915_request_retire()
285 i915_request_mark_complete(rq); in i915_request_retire()
296 GEM_BUG_ON(!list_is_first(&rq->link, in i915_request_retire()
297 &i915_request_timeline(rq)->requests)); in i915_request_retire()
300 __i915_request_fill(rq, POISON_FREE); in i915_request_retire()
301 rq->ring->head = rq->postfix; in i915_request_retire()
303 if (!i915_request_signaled(rq)) { in i915_request_retire()
304 spin_lock_irq(&rq->lock); in i915_request_retire()
305 dma_fence_signal_locked(&rq->fence); in i915_request_retire()
306 spin_unlock_irq(&rq->lock); in i915_request_retire()
309 if (i915_request_has_waitboost(rq)) { in i915_request_retire()
310 GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters)); in i915_request_retire()
311 atomic_dec(&rq->engine->gt->rps.num_waiters); in i915_request_retire()
324 remove_from_engine(rq); in i915_request_retire()
325 GEM_BUG_ON(!llist_empty(&rq->execute_cb)); in i915_request_retire()
327 __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */ in i915_request_retire()
329 intel_context_exit(rq->context); in i915_request_retire()
330 intel_context_unpin(rq->context); in i915_request_retire()
332 free_capture_list(rq); in i915_request_retire()
333 i915_sched_node_fini(&rq->sched); in i915_request_retire()
334 i915_request_put(rq); in i915_request_retire()
339 void i915_request_retire_upto(struct i915_request *rq) in i915_request_retire_upto() argument
341 struct intel_timeline * const tl = i915_request_timeline(rq); in i915_request_retire_upto()
344 RQ_TRACE(rq, "\n"); in i915_request_retire_upto()
346 GEM_BUG_ON(!i915_request_completed(rq)); in i915_request_retire_upto()
350 } while (i915_request_retire(tmp) && tmp != rq); in i915_request_retire_upto()
361 struct i915_request * const *port, *rq; in __request_in_flight() local
408 (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */ in __request_in_flight()
410 if (rq->context == signal->context) { in __request_in_flight()
411 inflight = i915_seqno_passed(rq->fence.seqno, in __request_in_flight()
422 __await_execution(struct i915_request *rq, in __await_execution() argument
424 void (*hook)(struct i915_request *rq, in __await_execution() argument
432 hook(rq, &signal->fence); in __await_execution()
440 cb->fence = &rq->submit; in __await_execution()
484 void __i915_request_skip(struct i915_request *rq) in __i915_request_skip() argument
486 GEM_BUG_ON(!fatal_error(rq->fence.error)); in __i915_request_skip()
488 if (rq->infix == rq->postfix) in __i915_request_skip()
496 __i915_request_fill(rq, 0); in __i915_request_skip()
497 rq->infix = rq->postfix; in __i915_request_skip()
500 void i915_request_set_error_once(struct i915_request *rq, int error) in i915_request_set_error_once() argument
506 if (i915_request_signaled(rq)) in i915_request_set_error_once()
509 old = READ_ONCE(rq->fence.error); in i915_request_set_error_once()
513 } while (!try_cmpxchg(&rq->fence.error, &old, error)); in i915_request_set_error_once()
708 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); in semaphore_notify() local
715 i915_request_put(rq); in semaphore_notify()
724 struct i915_request *rq, *rn; in retire_requests() local
726 list_for_each_entry_safe(rq, rn, &tl->requests, link) in retire_requests()
727 if (!i915_request_retire(rq)) in retire_requests()
736 struct i915_request *rq; in request_alloc_slow() local
740 rq = xchg(rsvd, NULL); in request_alloc_slow()
741 if (!rq) /* Use the normal failure path for one final WARN */ in request_alloc_slow()
744 return rq; in request_alloc_slow()
751 rq = list_first_entry(&tl->requests, typeof(*rq), link); in request_alloc_slow()
752 i915_request_retire(rq); in request_alloc_slow()
754 rq = kmem_cache_alloc(global.slab_requests, in request_alloc_slow()
756 if (rq) in request_alloc_slow()
757 return rq; in request_alloc_slow()
760 rq = list_last_entry(&tl->requests, typeof(*rq), link); in request_alloc_slow()
761 cond_synchronize_rcu(rq->rcustate); in request_alloc_slow()
772 struct i915_request *rq = arg; in __i915_request_ctor() local
774 spin_lock_init(&rq->lock); in __i915_request_ctor()
775 i915_sched_node_init(&rq->sched); in __i915_request_ctor()
776 i915_sw_fence_init(&rq->submit, submit_notify); in __i915_request_ctor()
777 i915_sw_fence_init(&rq->semaphore, semaphore_notify); in __i915_request_ctor()
779 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0); in __i915_request_ctor()
781 rq->capture_list = NULL; in __i915_request_ctor()
783 init_llist_head(&rq->execute_cb); in __i915_request_ctor()
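
(Not part of the listing.) __i915_request_ctor() above is a slab object constructor; the sketch below shows how such a constructor is typically attached with kmem_cache_create(). The cache name, the flag selection and the example_* wrapper are assumptions for illustration, not the driver's exact setup.

static struct kmem_cache *example_slab_requests;	/* hypothetical cache */

static int example_requests_init(void)
{
	/*
	 * The constructor runs when the slab first constructs an object,
	 * not on every kmem_cache_alloc(); with SLAB_TYPESAFE_BY_RCU the
	 * object keeps its state across free/alloc, which is why
	 * __i915_request_create() re-initialises the sw fences above.
	 */
	example_slab_requests =
		kmem_cache_create("example_i915_request",
				  sizeof(struct i915_request),
				  __alignof__(struct i915_request),
				  SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU,
				  __i915_request_ctor);
	return example_slab_requests ? 0 : -ENOMEM;
}
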
790 struct i915_request *rq; in __i915_request_create() local
828 rq = kmem_cache_alloc(global.slab_requests, in __i915_request_create()
830 if (unlikely(!rq)) { in __i915_request_create()
831 rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp); in __i915_request_create()
832 if (!rq) { in __i915_request_create()
838 rq->context = ce; in __i915_request_create()
839 rq->engine = ce->engine; in __i915_request_create()
840 rq->ring = ce->ring; in __i915_request_create()
841 rq->execution_mask = ce->engine->mask; in __i915_request_create()
843 kref_init(&rq->fence.refcount); in __i915_request_create()
844 rq->fence.flags = 0; in __i915_request_create()
845 rq->fence.error = 0; in __i915_request_create()
846 INIT_LIST_HEAD(&rq->fence.cb_list); in __i915_request_create()
848 ret = intel_timeline_get_seqno(tl, rq, &seqno); in __i915_request_create()
852 rq->fence.context = tl->fence_context; in __i915_request_create()
853 rq->fence.seqno = seqno; in __i915_request_create()
855 RCU_INIT_POINTER(rq->timeline, tl); in __i915_request_create()
856 RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline); in __i915_request_create()
857 rq->hwsp_seqno = tl->hwsp_seqno; in __i915_request_create()
858 GEM_BUG_ON(i915_request_completed(rq)); in __i915_request_create()
860 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ in __i915_request_create()
863 i915_sw_fence_reinit(&i915_request_get(rq)->submit); in __i915_request_create()
864 i915_sw_fence_reinit(&i915_request_get(rq)->semaphore); in __i915_request_create()
866 i915_sched_node_reinit(&rq->sched); in __i915_request_create()
869 rq->batch = NULL; in __i915_request_create()
870 GEM_BUG_ON(rq->capture_list); in __i915_request_create()
871 GEM_BUG_ON(!llist_empty(&rq->execute_cb)); in __i915_request_create()
885 rq->reserved_space = in __i915_request_create()
886 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32); in __i915_request_create()
894 rq->head = rq->ring->emit; in __i915_request_create()
896 ret = rq->engine->request_alloc(rq); in __i915_request_create()
900 rq->infix = rq->ring->emit; /* end of header; start of user payload */ in __i915_request_create()
903 list_add_tail_rcu(&rq->link, &tl->requests); in __i915_request_create()
905 return rq; in __i915_request_create()
908 ce->ring->emit = rq->head; in __i915_request_create()
911 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); in __i915_request_create()
912 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); in __i915_request_create()
915 kmem_cache_free(global.slab_requests, rq); in __i915_request_create()
924 struct i915_request *rq; in i915_request_create() local
932 rq = list_first_entry(&tl->requests, typeof(*rq), link); in i915_request_create()
933 if (!list_is_last(&rq->link, &tl->requests)) in i915_request_create()
934 i915_request_retire(rq); in i915_request_create()
937 rq = __i915_request_create(ce, GFP_KERNEL); in i915_request_create()
939 if (IS_ERR(rq)) in i915_request_create()
943 rq->cookie = lockdep_pin_lock(&tl->mutex); in i915_request_create()
945 return rq; in i915_request_create()
949 return rq; in i915_request_create()
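
(Not part of the listing.) A minimal caller sketch assembled from the signatures visible above (i915_request_create(), intel_ring_begin(), i915_request_add()); the function example_emit_nops() and its two-dword payload are hypothetical.

static int example_emit_nops(struct intel_context *ce)
{
	struct i915_request *rq;
	u32 *cs;

	rq = i915_request_create(ce);	/* allocates rq, pins the timeline */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2);	/* reserve 2 dwords in rq->ring */
	if (IS_ERR(cs)) {
		i915_request_add(rq);	/* submit the empty request so it still retires */
		return PTR_ERR(cs);
	}
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	i915_request_add(rq);		/* __i915_request_commit() + __i915_request_queue() */
	return 0;			/* rq must not be touched past this point without a ref */
}
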
953 i915_request_await_start(struct i915_request *rq, struct i915_request *signal) in i915_request_await_start() argument
958 if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline)) in i915_request_await_start()
1003 if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) in i915_request_await_start()
1004 err = i915_sw_fence_await_dma_fence(&rq->submit, in i915_request_await_start()
1013 already_busywaiting(struct i915_request *rq) in already_busywaiting() argument
1027 return rq->sched.semaphores | READ_ONCE(rq->engine->saturated); in already_busywaiting()
1147 void (*hook)(struct i915_request *rq, in __i915_request_await_execution() argument
1217 static void mark_external(struct i915_request *rq) in mark_external() argument
1227 rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN; in mark_external()
1231 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) in __i915_request_await_external() argument
1233 mark_external(rq); in __i915_request_await_external()
1234 return i915_sw_fence_await_dma_fence(&rq->submit, fence, in __i915_request_await_external()
1235 i915_fence_context_timeout(rq->engine->i915, in __i915_request_await_external()
1241 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence) in i915_request_await_external() argument
1247 return __i915_request_await_external(rq, fence); in i915_request_await_external()
1253 err = __i915_request_await_external(rq, iter); in i915_request_await_external()
1257 err = i915_request_await_dma_fence(rq, chain->fence); in i915_request_await_external()
1267 i915_request_await_execution(struct i915_request *rq, in i915_request_await_execution() argument
1269 void (*hook)(struct i915_request *rq, in i915_request_await_execution() argument
1289 i915_sw_fence_set_error_once(&rq->submit, fence->error); in i915_request_await_execution()
1293 if (fence->context == rq->fence.context) in i915_request_await_execution()
1302 ret = __i915_request_await_execution(rq, in i915_request_await_execution()
1306 ret = i915_request_await_external(rq, fence); in i915_request_await_execution()
1365 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) in i915_request_await_dma_fence() argument
1390 i915_sw_fence_set_error_once(&rq->submit, fence->error); in i915_request_await_dma_fence()
1399 if (fence->context == rq->fence.context) in i915_request_await_dma_fence()
1404 intel_timeline_sync_is_later(i915_request_timeline(rq), in i915_request_await_dma_fence()
1409 ret = i915_request_await_request(rq, to_request(fence)); in i915_request_await_dma_fence()
1411 ret = i915_request_await_external(rq, fence); in i915_request_await_dma_fence()
1417 intel_timeline_sync_set(i915_request_timeline(rq), in i915_request_await_dma_fence()
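
(Not part of the listing.) Sketch of ordering a new request behind an external fence with i915_request_await_dma_fence(); example_queue_after() and its error handling are assumptions for illustration.

static int example_queue_after(struct intel_context *ce,
			       struct dma_fence *in_fence)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/*
	 * Dependencies must be declared before i915_request_add(); the
	 * await keeps rq from executing (via a submit-fence wait or a
	 * hardware semaphore, per the paths listed above) until in_fence
	 * has signalled.
	 */
	err = i915_request_await_dma_fence(rq, in_fence);

	i915_request_add(rq);
	return err;
}
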
1487 __i915_request_add_to_timeline(struct i915_request *rq) in __i915_request_add_to_timeline() argument
1489 struct intel_timeline *timeline = i915_request_timeline(rq); in __i915_request_add_to_timeline()
1513 &rq->fence)); in __i915_request_add_to_timeline()
1521 GEM_BUG_ON(prev->context == rq->context && in __i915_request_add_to_timeline()
1523 rq->fence.seqno)); in __i915_request_add_to_timeline()
1525 if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) in __i915_request_add_to_timeline()
1526 i915_sw_fence_await_sw_fence(&rq->submit, in __i915_request_add_to_timeline()
1528 &rq->submitq); in __i915_request_add_to_timeline()
1530 __i915_sw_fence_await_dma_fence(&rq->submit, in __i915_request_add_to_timeline()
1532 &rq->dmaq); in __i915_request_add_to_timeline()
1533 if (rq->engine->schedule) in __i915_request_add_to_timeline()
1534 __i915_sched_node_add_dependency(&rq->sched, in __i915_request_add_to_timeline()
1536 &rq->dep, in __i915_request_add_to_timeline()
1545 GEM_BUG_ON(timeline->seqno != rq->fence.seqno); in __i915_request_add_to_timeline()
1555 struct i915_request *__i915_request_commit(struct i915_request *rq) in __i915_request_commit() argument
1557 struct intel_engine_cs *engine = rq->engine; in __i915_request_commit()
1558 struct intel_ring *ring = rq->ring; in __i915_request_commit()
1561 RQ_TRACE(rq, "\n"); in __i915_request_commit()
1568 GEM_BUG_ON(rq->reserved_space > ring->space); in __i915_request_commit()
1569 rq->reserved_space = 0; in __i915_request_commit()
1570 rq->emitted_jiffies = jiffies; in __i915_request_commit()
1578 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw); in __i915_request_commit()
1580 rq->postfix = intel_ring_offset(rq, cs); in __i915_request_commit()
1582 return __i915_request_add_to_timeline(rq); in __i915_request_commit()
1585 void __i915_request_queue(struct i915_request *rq, in __i915_request_queue() argument
1599 if (attr && rq->engine->schedule) in __i915_request_queue()
1600 rq->engine->schedule(rq, attr); in __i915_request_queue()
1601 i915_sw_fence_commit(&rq->semaphore); in __i915_request_queue()
1602 i915_sw_fence_commit(&rq->submit); in __i915_request_queue()
1605 void i915_request_add(struct i915_request *rq) in i915_request_add() argument
1607 struct intel_timeline * const tl = i915_request_timeline(rq); in i915_request_add()
1612 lockdep_unpin_lock(&tl->mutex, rq->cookie); in i915_request_add()
1614 trace_i915_request_add(rq); in i915_request_add()
1615 __i915_request_commit(rq); in i915_request_add()
1619 ctx = rcu_dereference(rq->context->gem_context); in i915_request_add()
1624 __i915_request_queue(rq, &attr); in i915_request_add()
1662 static bool __i915_spin_request(struct i915_request * const rq, int state) in __i915_spin_request() argument
1678 if (!i915_request_is_running(rq)) in __i915_spin_request()
1692 timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns); in __i915_spin_request()
1695 if (dma_fence_is_signaled(&rq->fence)) in __i915_spin_request()
1737 long i915_request_wait(struct i915_request *rq, in i915_request_wait() argument
1748 if (dma_fence_is_signaled(&rq->fence)) in i915_request_wait()
1754 trace_i915_request_wait_begin(rq, flags); in i915_request_wait()
1762 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_); in i915_request_wait()
1788 __i915_spin_request(rq, state)) in i915_request_wait()
1803 if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq)) in i915_request_wait()
1804 intel_rps_boost(rq); in i915_request_wait()
1807 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake)) in i915_request_wait()
1825 if (i915_request_is_ready(rq)) in i915_request_wait()
1826 intel_engine_flush_submission(rq->engine); in i915_request_wait()
1831 if (dma_fence_is_signaled(&rq->fence)) in i915_request_wait()
1849 dma_fence_remove_callback(&rq->fence, &wait.cb); in i915_request_wait()
1853 mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_); in i915_request_wait()
1854 trace_i915_request_wait_end(rq); in i915_request_wait()
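
(Not part of the listing.) Sketch of a blocking wait built on i915_request_wait(); example_sync() is hypothetical, while the flag and return-value handling follow the signature shown above.

static int example_sync(struct i915_request *rq)
{
	long ret;

	/*
	 * The caller must hold a reference (i915_request_get()) so that a
	 * concurrent retire cannot free rq while we sleep.
	 */
	ret = i915_request_wait(rq,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	return ret < 0 ? ret : 0;	/* >= 0 is the remaining timeout in jiffies */
}

A typical caller pairs this with the submission sketch earlier: i915_request_get(rq); i915_request_add(rq); example_sync(rq); i915_request_put(rq);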