Lines Matching full:request

68 	 * may be freed when the request is no longer in use by the GPU.  in i915_fence_get_timeline_name()
117 * The request is put onto a RCU freelist (i.e. the address in i915_fence_release()
127 * Keep one request on each engine for reserved use under mempressure, in i915_fence_release()
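
The "RCU freelist" and mempressure notes above (lines 117 and 127) refer to the request slab being type-stable: a freed request's memory may be handed out again immediately, so readers that found it under RCU must revalidate it. A minimal sketch of such a cache, assuming SLAB_TYPESAFE_BY_RCU as the driver uses for its request slab (names here are illustrative, not the driver's):

#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/types.h>

/*
 * Illustrative only: a type-stable cache in the spirit of the i915 request
 * slab. With SLAB_TYPESAFE_BY_RCU the memory of a freed object can be reused
 * at once, which is why RCU readers must revalidate what they find (see the
 * lookup sketch further below). A constructor (omitted here) is typically
 * supplied so invariant fields survive reuse.
 */
struct example_request {
	struct kref ref;
	u64 seqno;
};

static struct kmem_cache *example_request_cache;

static int example_cache_init(void)
{
	example_request_cache =
		kmem_cache_create("example_request",
				  sizeof(struct example_request),
				  __alignof__(struct example_request),
				  SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU,
				  NULL);

	return example_request_cache ? 0 : -ENOMEM;
}
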
189 static void free_capture_list(struct i915_request *request) in free_capture_list() argument
193 capture = fetch_and_zero(&request->capture_list); in free_capture_list()
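
free_capture_list() at line 189 detaches the whole capture list from the request with fetch_and_zero() before walking it. A sketch of that detach-then-free pattern with a hypothetical node type (xchg() stands in here for the driver's own fetch_and_zero() helper):

#include <linux/atomic.h>
#include <linux/slab.h>

/* Hypothetical node type standing in for the driver's capture-list entries. */
struct example_capture {
	struct example_capture *next;
};

static void example_free_capture_list(struct example_capture **head)
{
	/* Take ownership of the whole chain by zeroing the head first... */
	struct example_capture *node = xchg(head, NULL);

	/* ...then walk and free the now-private list. */
	while (node) {
		struct example_capture *next = node->next;

		kfree(node);
		node = next;
	}
}
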
217 * @rq: request to inspect
220 * Fills the currently active engine to the @active pointer if the request
223 * Returns true if request was active or false otherwise.
234 * is-banned?, or we know the request is already inflight. in i915_request_active_engine()
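
A possible caller of the helper documented at lines 217-223, assuming the signature implied by the kernel-doc (the request plus an out-pointer that is filled only while the request is in flight); this is a sketch, not taken from the driver, and it presumes the driver's internal headers:

#include <linux/printk.h>

static void example_report_active_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	/* Fills *engine only if the request was still executing. */
	if (i915_request_active_engine(rq, &engine))
		pr_info("request executing on %s\n", engine->name);
	else
		pr_info("request not currently executing\n");
}
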
320 * We know the GPU must have read the request to have in i915_request_retire()
322 * of tail of the request to update the last known position in i915_request_retire()
325 * Note this requires that we are always called in request in i915_request_retire()
347 * request that we have removed from the HW and put back on a run in i915_request_retire()
350 * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be in i915_request_retire()
352 * inadvertently attach the breadcrumb to a completed request. in i915_request_retire()
398 * Even if we have unwound the request, it may still be on in __request_in_flight()
399 * the GPU (preempt-to-busy). If that request is inside an in __request_in_flight()
401 * GPU functions may even be stuck waiting for the paired request in __request_in_flight()
406 * requests, we know that only the currently executing request in __request_in_flight()
409 * which request is currently active and so maybe stuck, as in __request_in_flight()
478 * request (then flush the execute_cb). So by registering the in __await_execution()
480 * the completed/retired request. in __await_execution()
513 * As this request likely depends on state from the lost in __i915_request_skip()
546 /* As soon as the request is completed, it may be retired */ in i915_request_mark_eio()
555 bool __i915_request_submit(struct i915_request *request) in __i915_request_submit() argument
557 struct intel_engine_cs *engine = request->engine; in __i915_request_submit()
560 RQ_TRACE(request, "\n"); in __i915_request_submit()
569 * resubmission of that completed request, we can skip in __i915_request_submit()
571 * the request. in __i915_request_submit()
573 * We must remove the request from the caller's priority queue, in __i915_request_submit()
574 * and the caller must only call us when the request is in their in __i915_request_submit()
576 * request has *not* yet been retired and we can safely move in __i915_request_submit()
577 * the request into the engine->active.list where it will be in __i915_request_submit()
579 * request, this would be a horrible use-after-free.) in __i915_request_submit()
581 if (__i915_request_is_complete(request)) { in __i915_request_submit()
582 list_del_init(&request->sched.link); in __i915_request_submit()
586 if (unlikely(intel_context_is_banned(request->context))) in __i915_request_submit()
587 i915_request_set_error_once(request, -EIO); in __i915_request_submit()
589 if (unlikely(fatal_error(request->fence.error))) in __i915_request_submit()
590 __i915_request_skip(request); in __i915_request_submit()
601 * If we installed a semaphore on this request and we only submit in __i915_request_submit()
602 * the request after the signaler completed, that indicates the in __i915_request_submit()
608 if (request->sched.semaphores && in __i915_request_submit()
609 i915_sw_fence_signaled(&request->semaphore)) in __i915_request_submit()
610 engine->saturated |= request->sched.semaphores; in __i915_request_submit()
612 engine->emit_fini_breadcrumb(request, in __i915_request_submit()
613 request->ring->vaddr + request->postfix); in __i915_request_submit()
615 trace_i915_request_execute(request); in __i915_request_submit()
623 GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); in __i915_request_submit()
624 engine->add_active_request(request); in __i915_request_submit()
626 clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags); in __i915_request_submit()
627 set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); in __i915_request_submit()
639 __notify_execute_cb_irq(request); in __i915_request_submit()
642 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) in __i915_request_submit()
643 i915_request_enable_breadcrumb(request); in __i915_request_submit()
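
The fragments at lines 623-643 show the state transition made on submission: the request leaves the scheduler's priority queue, is marked active on the engine, and the breadcrumb is armed if a waiter already asked for signaling. A condensed restatement of just that transition (not the complete function; assumes the driver's internal headers):

static void example_mark_request_active(struct i915_request *rq)
{
	/* Must not already be marked as executing. */
	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags));

	/* Leave the scheduler's priority queue... */
	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
	/* ...and mark the request as being executed on the engine. */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	/* If a waiter already asked for signaling, arm the breadcrumb now. */
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		i915_request_enable_breadcrumb(rq);
}
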
648 void i915_request_submit(struct i915_request *request) in i915_request_submit() argument
650 struct intel_engine_cs *engine = request->engine; in i915_request_submit()
656 __i915_request_submit(request); in i915_request_submit()
661 void __i915_request_unsubmit(struct i915_request *request) in __i915_request_unsubmit() argument
663 struct intel_engine_cs *engine = request->engine; in __i915_request_unsubmit()
669 RQ_TRACE(request, "\n"); in __i915_request_unsubmit()
677 * attach itself. We first mark the request as no longer active and in __i915_request_unsubmit()
681 GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); in __i915_request_unsubmit()
682 clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); in __i915_request_unsubmit()
683 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) in __i915_request_unsubmit()
684 i915_request_cancel_breadcrumb(request); in __i915_request_unsubmit()
687 if (request->sched.semaphores && __i915_request_has_started(request)) in __i915_request_unsubmit()
688 request->sched.semaphores = 0; in __i915_request_unsubmit()
691 * We don't need to wake_up any waiters on request->execute, they in __i915_request_unsubmit()
692 * will get woken by any other event or us re-adding this request in __i915_request_unsubmit()
694 * should be quite adept at finding that the request now has a new in __i915_request_unsubmit()
699 void i915_request_unsubmit(struct i915_request *request) in i915_request_unsubmit() argument
701 struct intel_engine_cs *engine = request->engine; in i915_request_unsubmit()
707 __i915_request_unsubmit(request); in i915_request_unsubmit()
725 struct i915_request *request = in submit_notify() local
726 container_of(fence, typeof(*request), submit); in submit_notify()
730 trace_i915_request_submit(request); in submit_notify()
733 i915_request_set_error_once(request, fence->error); in submit_notify()
735 __rq_arm_watchdog(request); in submit_notify()
746 request->engine->submit_request(request); in submit_notify()
751 i915_request_put(request); in submit_notify()
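
submit_notify() at lines 725-751 uses the embedded-fence callback pattern: the notifier gets a pointer to the fence embedded in the request and recovers the request with container_of(). The same pattern expressed with the generic dma-fence callback API (struct and function names here are illustrative):

#include <linux/dma-fence.h>
#include <linux/kernel.h>	/* container_of() */

struct example_job {
	struct dma_fence_cb cb;		/* embedded callback node */
	/* ... other per-job state ... */
};

/*
 * The callback only receives the embedded member; the owning object is
 * recovered with container_of(), just as submit_notify() does with
 * container_of(fence, typeof(*request), submit).
 */
static void example_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct example_job *job = container_of(cb, struct example_job, cb);

	/* React to the fence signaling, e.g. queue follow-up work for @job. */
	(void)job;
}

static int example_arm(struct dma_fence *fence, struct example_job *job)
{
	/* Returns -ENOENT if the fence has already signaled. */
	return dma_fence_add_callback(fence, &job->cb, example_fence_cb);
}
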
803 /* Move our oldest request to the slab-cache (if not in use!) */ in request_alloc_slow()
854 * race with the request being allocated from the slab freelist. in __i915_request_create()
855 * That is, the request we are writing to here may be in the process in __i915_request_create()
858 * the RCU lookup, we chase the request->engine pointer, in __i915_request_create()
859 * read the request->global_seqno and increment the reference count. in __i915_request_create()
862 * the lookup knows the request is unallocated and complete. Otherwise, in __i915_request_create()
865 * check that the request we have a reference to matches the active in __i915_request_create()
866 * request. in __i915_request_create()
868 * Before we increment the refcount, we chase the request->engine in __i915_request_create()
871 * we see the request is completed (based on the value of the in __i915_request_create()
873 * If we decide the request is not completed (new engine or seqno), in __i915_request_create()
875 * active request - which it won't be and restart the lookup. in __i915_request_create()
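
The comment block at lines 854-875 describes the lookup protocol for a request slab whose memory is recycled immediately: take a reference only if the refcount is still live, then confirm the pointer still names the active request, otherwise drop it and retry. A generic sketch of that protocol against a hypothetical refcounted object (not the driver's request type):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative object living in a type-stable (SLAB_TYPESAFE_BY_RCU) cache. */
struct example_obj {
	struct kref ref;
};

static void example_obj_release(struct kref *ref)
{
	/* In a real type-stable cache this memory may be reused at once. */
	kfree(container_of(ref, struct example_obj, ref));
}

static struct example_obj *example_active_get(struct example_obj __rcu **slot)
{
	struct example_obj *obj;

	rcu_read_lock();
	do {
		obj = rcu_dereference(*slot);
		if (!obj)
			break;

		/*
		 * Only accept a reference if the refcount is still live;
		 * a real implementation might retry the lookup here too.
		 */
		if (!kref_get_unless_zero(&obj->ref)) {
			obj = NULL;
			break;
		}

		/* Confirm we pinned the object the slot still points at. */
		if (obj == rcu_access_pointer(*slot))
			break;

		/* We pinned a recycled object: drop it and retry. */
		kref_put(&obj->ref, example_obj_release);
	} while (1);
	rcu_read_unlock();

	return obj;
}
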
892 * destroyed (e.g. request retired, context closed, but user space holds in __i915_request_create()
893 * a reference to the request from an out fence). In the case of GuC in __i915_request_create()
894 * submission + virtual engine, the engine that the request references in __i915_request_create()
898 * hold the intel_context reference. In execlist mode the request always in __i915_request_create()
935 * eventually emit this request. This is to guarantee that the in __i915_request_create()
937 * to be redone if the request is not actually submitted straight in __i915_request_create()
941 * we need to double our request to ensure that if we need to wrap in __i915_request_create()
949 * Record the position of the start of the request so that in __i915_request_create()
951 * GPU processing the request, we never over-estimate the in __i915_request_create()
992 /* Move our oldest request to the slab-cache (if not in use!) */ in i915_request_create()
999 intel_context_exit(ce); /* active reference transferred to request */ in i915_request_create()
1003 /* Check that we do not interrupt ourselves with a new request */ in i915_request_create()
1029 * We do not hold a reference to the request before @signal, and in i915_request_await_start()
1043 /* Is signal the earliest request on its timeline? */ in i915_request_await_start()
1048 * Peek at the request before us in the timeline. That in i915_request_await_start()
1049 * request will only be valid before it is retired, so in i915_request_await_start()
1171 /* Just emit the first semaphore we see as request space is limited. */ in emit_semaphore_wait()
1227 * Wait until the start of this request. in __i915_request_await_execution()
1229 * The execution cb fires when we submit the request to HW. But in in __i915_request_await_execution()
1230 * many cases this may be long before the request itself is ready to in __i915_request_await_execution()
1232 * the request of interest is behind an indefinite spinner). So we hook in __i915_request_await_execution()
1285 * fatal errors we want to scrub the request before it is executed, in mark_external()
1286 * which means that we cannot preload the request onto HW and have in mark_external()
1380 * as it may then bypass the virtual request. in await_request_submit()
1481 * i915_request_await_object - set this request to (async) wait upon a bo
1482 * @to: request we are wishing to use
1491 * - If there is an outstanding write request to the object, the new
1492 * request must wait for it to complete (either CPU or in hw, requests
1495 * - If we are a write request (pending_write_domain is set), the new
1496 * request must wait for outstanding read requests to complete.
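
The rules in the kernel-doc at lines 1481-1496 are the usual implicit-sync rules: everyone waits for an outstanding write, and a writer additionally waits for outstanding reads. A restatement using the core dma_resv iterator is below; it assumes a kernel with the dma_resv_usage API, and it waits synchronously only to keep the example short, whereas i915_request_await_object() records asynchronous dependencies on the new request:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static long example_sync_to_object(struct dma_resv *resv, bool write)
{
	/* Readers need only wait for writers; a writer waits for everyone. */
	enum dma_resv_usage usage = write ? DMA_RESV_USAGE_READ
					  : DMA_RESV_USAGE_WRITE;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long ret = 0;

	dma_resv_iter_begin(&cursor, resv, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = dma_fence_wait(fence, true);
		if (ret)
			break;
	}
	dma_resv_iter_end(&cursor);

	return ret;	/* 0 on success or a negative error */
}
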
1549 * Dependency tracking and request ordering along the timeline in __i915_request_add_to_timeline()
1551 * operations while building the request (we know that the timeline in __i915_request_add_to_timeline()
1555 * we embed the hooks into our request struct -- at the cost of in __i915_request_add_to_timeline()
1563 * to prevent scheduling of the second request until the first is in __i915_request_add_to_timeline()
1601 * Make sure that no request gazumped us - if it was allocated after in __i915_request_add_to_timeline()
1612 * request is not being tracked for completion but the work itself is
1635 * GPU processing the request, we never over-estimate the in __i915_request_commit()
1655 * Let the backend know a new request has arrived that may need in __i915_request_queue()
1657 * request - i.e. we may want to preempt the current request in order in __i915_request_queue()
1659 * request. in __i915_request_queue()
1661 * This is called before the request is ready to run so that we can in __i915_request_queue()
1736 * Only wait for the request if we know it is likely to complete. in __i915_spin_request()
1739 * request length, so we do not have a good indicator that this in __i915_spin_request()
1740 * request will complete within the timeout. What we do know is the in __i915_spin_request()
1742 * tell if the request has been started. If the request is not even in __i915_spin_request()
1753 * rate. By busywaiting on the request completion for a short while we in __i915_spin_request()
1755 * if it is a slow request, we want to sleep as quickly as possible. in __i915_spin_request()
1757 * takes to sleep on a request, on the order of a microsecond. in __i915_spin_request()
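
__i915_spin_request() (lines 1736-1757) busy-waits briefly because sleeping and being woken costs on the order of a microsecond, so a short spin wins when the request is already executing and about to complete. A sketch of that optimistic-spin idea, with a generic completion predicate standing in for the driver's seqno check:

#include <linux/ktime.h>
#include <linux/processor.h>	/* cpu_relax() */

static bool example_spin_for_completion(bool (*completed)(const void *data),
					const void *data,
					unsigned long timeout_ns)
{
	const ktime_t end = ktime_add_ns(ktime_get_raw(), timeout_ns);

	do {
		if (completed(data))
			return true;	/* finished inside the spin window */

		cpu_relax();
	} while (ktime_before(ktime_get_raw(), end));

	return false;	/* caller should now fall back to sleeping */
}
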
1791 * i915_request_wait - wait until execution of request has finished
1792 * @rq: the request to wait upon
1796 * i915_request_wait() waits for the request to be completed, for a
1800 * Returns the remaining time (in jiffies) if the request completed, which may
1801 * be zero or -ETIME if the request is unfinished after the timeout expires.
1803 * pending before the request completes.
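
A usage sketch following the kernel-doc at lines 1791-1803, assuming the driver's i915_request.h (flags such as I915_WAIT_INTERRUPTIBLE and a jiffies timeout): wait up to 100ms and map the documented return values onto a plain error code:

#include <linux/jiffies.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT, if waiting forever */

static int example_wait_for_request(struct i915_request *rq)
{
	long ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				     msecs_to_jiffies(100));

	if (ret == -ETIME)
		return -ETIMEDOUT;	/* request still running at timeout */
	if (ret < 0)
		return ret;		/* e.g. interrupted by a signal */

	return 0;	/* completed; ret holds the remaining jiffies */
}
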
1842 * short wait, we first spin to see if the request would have completed in i915_request_wait()
1852 * completion. That requires having a good predictor for the request in i915_request_wait()
1879 * Flush the submission tasklet, but only if it may help this request. in i915_request_wait()
1884 * is a chance it may submit this request. If the request is not ready in i915_request_wait()
1990 * - the request is not ready for execution as it is waiting in i915_request_show()
1994 * - all fences the request was waiting on have been signaled, in i915_request_show()
1995 * and the request is now ready for execution and will be in i915_request_show()
1998 * - a ready request may still need to wait on semaphores in i915_request_show()
2005 * - the request has been transferred from the backend queue and in i915_request_show()
2008 * - a completed request may still be regarded as executing, its in i915_request_show()