Lines Matching full:request
65 * may be freed when the request is no longer in use by the GPU. in i915_fence_get_timeline_name()
102 * The request is put onto an RCU freelist (i.e. the address in i915_fence_release()
168 remove_from_client(struct i915_request *request) in remove_from_client() argument
172 file_priv = READ_ONCE(request->file_priv); in remove_from_client()
177 if (request->file_priv) { in remove_from_client()
178 list_del(&request->client_link); in remove_from_client()
179 request->file_priv = NULL; in remove_from_client()
184 static void free_capture_list(struct i915_request *request) in free_capture_list() argument
188 capture = request->capture_list; in free_capture_list()
235 * We know the GPU must have read the request to have in i915_request_retire()
237 * of tail of the request to update the last known position in i915_request_retire()
240 * Note this requires that we are always called in request in i915_request_retire()
249 * when their *last* active request is completed (updating state in i915_request_retire()
270 RCU_INIT_POINTER(active->request, NULL); in i915_request_retire()
280 * request that we have removed from the HW and put back on a run in i915_request_retire()
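
The i915_request_retire() fragments above rely on retirement happening in submission order: once a request's breadcrumb has landed, the GPU must have read everything up to that request's postfix, so that ring space can be reclaimed. A minimal user-space sketch of the idea, with all names invented (this is not the driver code):

#include <stdbool.h>

struct sketch_request {
        struct sketch_request *next;   /* timeline order, oldest first */
        bool completed;                /* breadcrumb has been written */
        unsigned int postfix;          /* ring offset just past this request */
};

struct sketch_timeline {
        struct sketch_request *oldest;
        unsigned int ring_head;        /* last position known to be read */
};

/* Retire strictly in order, stopping at the first incomplete request. */
static void sketch_retire_completed(struct sketch_timeline *tl)
{
        while (tl->oldest && tl->oldest->completed) {
                /* The GPU must have read past this request to complete it,
                 * so the ring space up to its postfix can be reused. */
                tl->ring_head = tl->oldest->postfix;
                tl->oldest = tl->oldest->next;
        }
}
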
380 bool __i915_request_submit(struct i915_request *request) in __i915_request_submit() argument
382 struct intel_engine_cs *engine = request->engine; in __i915_request_submit()
387 request->fence.context, request->fence.seqno, in __i915_request_submit()
388 hwsp_seqno(request)); in __i915_request_submit()
397 * resubmission of that completed request, we can skip in __i915_request_submit()
399 * the request. in __i915_request_submit()
401 * We must remove the request from the caller's priority queue, in __i915_request_submit()
402 * and the caller must only call us when the request is in their in __i915_request_submit()
404 * request has *not* yet been retired and we can safely move in __i915_request_submit()
405 * the request into the engine->active.list where it will be in __i915_request_submit()
407 * request, this would be a horrible use-after-free.) in __i915_request_submit()
409 if (i915_request_completed(request)) in __i915_request_submit()
412 if (i915_gem_context_is_banned(request->gem_context)) in __i915_request_submit()
413 i915_request_skip(request, -EIO); in __i915_request_submit()
424 * If we installed a semaphore on this request and we only submit in __i915_request_submit()
425 * the request after the signaler completed, that indicates the in __i915_request_submit()
431 if (request->sched.semaphores && in __i915_request_submit()
432 i915_sw_fence_signaled(&request->semaphore)) in __i915_request_submit()
433 engine->saturated |= request->sched.semaphores; in __i915_request_submit()
435 engine->emit_fini_breadcrumb(request, in __i915_request_submit()
436 request->ring->vaddr + request->postfix); in __i915_request_submit()
438 trace_i915_request_execute(request); in __i915_request_submit()
443 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); in __i915_request_submit()
445 if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) in __i915_request_submit()
446 list_move_tail(&request->sched.link, &engine->active.requests); in __i915_request_submit()
448 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && in __i915_request_submit()
449 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) && in __i915_request_submit()
450 !i915_request_enable_breadcrumb(request)) in __i915_request_submit()
453 __notify_execute_cb(request); in __i915_request_submit()
455 spin_unlock(&request->lock); in __i915_request_submit()
460 void i915_request_submit(struct i915_request *request) in i915_request_submit() argument
462 struct intel_engine_cs *engine = request->engine; in i915_request_submit()
468 __i915_request_submit(request); in i915_request_submit()
473 void __i915_request_unsubmit(struct i915_request *request) in __i915_request_unsubmit() argument
475 struct intel_engine_cs *engine = request->engine; in __i915_request_unsubmit()
479 request->fence.context, request->fence.seqno, in __i915_request_unsubmit()
480 hwsp_seqno(request)); in __i915_request_unsubmit()
491 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); in __i915_request_unsubmit()
493 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) in __i915_request_unsubmit()
494 i915_request_cancel_breadcrumb(request); in __i915_request_unsubmit()
496 GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); in __i915_request_unsubmit()
497 clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); in __i915_request_unsubmit()
499 spin_unlock(&request->lock); in __i915_request_unsubmit()
502 if (request->sched.semaphores && i915_request_started(request)) { in __i915_request_unsubmit()
503 request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE; in __i915_request_unsubmit()
504 request->sched.semaphores = 0; in __i915_request_unsubmit()
508 * We don't need to wake_up any waiters on request->execute, they in __i915_request_unsubmit()
509 * will get woken by any other event or us re-adding this request in __i915_request_unsubmit()
511 * should be quite adept at finding that the request now has a new in __i915_request_unsubmit()
516 void i915_request_unsubmit(struct i915_request *request) in i915_request_unsubmit() argument
518 struct intel_engine_cs *engine = request->engine; in i915_request_unsubmit()
524 __i915_request_unsubmit(request); in i915_request_unsubmit()
532 struct i915_request *request = in submit_notify() local
533 container_of(fence, typeof(*request), submit); in submit_notify()
537 trace_i915_request_submit(request); in submit_notify()
540 i915_request_skip(request, fence->error); in submit_notify()
551 request->engine->submit_request(request); in submit_notify()
556 i915_request_put(request); in submit_notify()
566 struct i915_request *request = in semaphore_notify() local
567 container_of(fence, typeof(*request), semaphore); in semaphore_notify()
571 i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE); in semaphore_notify()
575 i915_request_put(request); in semaphore_notify()
602 /* Move our oldest request to the slab-cache (if not in use!) */ in request_alloc_slow()
639 * race with the request being allocated from the slab freelist. in __i915_request_create()
640 * That is, the request we are writing to here may be in the process in __i915_request_create()
643 * the RCU lookup, we chase the request->engine pointer, in __i915_request_create()
644 * read the request->global_seqno and increment the reference count. in __i915_request_create()
647 * the lookup knows the request is unallocated and complete. Otherwise, in __i915_request_create()
650 * check that the request we have a reference to matches the active in __i915_request_create()
651 * request. in __i915_request_create()
653 * Before we increment the refcount, we chase the request->engine in __i915_request_create()
656 * we see the request is completed (based on the value of the in __i915_request_create()
658 * If we decide the request is not completed (new engine or seqno), in __i915_request_create()
660 * active request - which it won't be, and restart the lookup. in __i915_request_create()
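
The comment block above describes the classic lookup pattern for slab objects that may be reused, but not freed, while readers hold the RCU read lock: take a reference only if the refcount is non-zero, then re-check the object's identity to detect reuse. A self-contained user-space sketch of that pattern, using invented names and C11 atomics rather than the driver's helpers:

#include <stdatomic.h>
#include <stdbool.h>

struct sketch_req {
        atomic_uint refcount;   /* 0 means unallocated or already released */
        atomic_uint seqno;      /* reset when the slot is reallocated */
};

/* Try to acquire a reference to the request we looked up under the RCU
 * read lock.  Returns false if the slot was released or recycled, in
 * which case the caller restarts the lookup. */
static bool sketch_get_if_active(struct sketch_req *req, unsigned int seqno)
{
        unsigned int count = atomic_load(&req->refcount);

        /* Only take a reference if somebody still holds one: bumping a
         * zero refcount would resurrect a dead (or recycled) request. */
        do {
                if (count == 0)
                        return false;
        } while (!atomic_compare_exchange_weak(&req->refcount,
                                               &count, count + 1));

        /* The slot may have been reallocated between reading the pointer
         * and bumping the refcount; confirm it is still the same request. */
        if (atomic_load(&req->seqno) != seqno) {
                atomic_fetch_sub(&req->refcount, 1);
                return false;
        }

        return true;
}
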
710 * eventually emit this request. This is to guarantee that the in __i915_request_create()
712 * to be redone if the request is not actually submitted straight in __i915_request_create()
716 * we need to double our request to ensure that if we need to wrap in __i915_request_create()
724 * Record the position of the start of the request so that in __i915_request_create()
726 * GPU processing the request, we never over-estimate the in __i915_request_create()
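
The doubling mentioned above exists because a wrap inside i915_request_add() may have to pad out the tail of the ring before the final breadcrumb is emitted. A tiny sketch of the worst-case arithmetic (the helper name is invented; the driver derives the size from its breadcrumb emission):

/* If the breadcrumb needs N bytes and the tail sits just short of the
 * end of the ring, wrapping burns up to N - 1 bytes of padding before
 * the N-byte breadcrumb itself, so reserving 2 * N always suffices. */
static unsigned int sketch_reserved_space(unsigned int breadcrumb_bytes)
{
        return 2 * breadcrumb_bytes;
}
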
765 /* Move our oldest request to the slab-cache (if not in use!) */ in i915_request_create()
772 intel_context_exit(ce); /* active reference transferred to request */ in i915_request_create()
776 /* Check that we do not interrupt ourselves with a new request */ in i915_request_create()
831 /* Just emit the first semaphore we see as request space is limited. */ in emit_semaphore_wait()
1025 * i915_request_await_object - set this request to (async) wait upon a bo
1026 * @to: request we are wishing to use
1035 * - If there is an outstanding write request to the object, the new
1036 * request must wait for it to complete (either CPU or in hw, requests
1039 * - If we are a write request (pending_write_domain is set), the new
1040 * request must wait for outstanding read requests to complete.
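
The two rules in the kernel-doc above reduce to: every new request orders after the object's outstanding writer, and a new writer additionally orders after all outstanding readers. A simplified sketch of that decision over invented types, not the driver's reservation-object API:

#include <stdbool.h>
#include <stddef.h>

struct sketch_fence { bool signaled; };

struct sketch_object {
        struct sketch_fence *write;     /* last outstanding writer, if any */
        struct sketch_fence *reads[8];  /* outstanding readers */
        size_t nreads;
};

/* Record that the new request must not start before @fence signals
 * (the actual dependency bookkeeping is elided in this sketch). */
static void sketch_await(struct sketch_fence *new_rq, struct sketch_fence *fence)
{
        (void)new_rq;
        (void)fence;
}

static void sketch_await_object(struct sketch_fence *new_rq,
                                const struct sketch_object *obj, bool write)
{
        size_t i;

        /* Rule 1: always wait for an outstanding write to the object. */
        if (obj->write && !obj->write->signaled)
                sketch_await(new_rq, obj->write);

        /* Rule 2: a write request must also wait for outstanding reads. */
        if (write) {
                for (i = 0; i < obj->nreads; i++)
                        if (!obj->reads[i]->signaled)
                                sketch_await(new_rq, obj->reads[i]);
        }
}
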
1098 * As this request likely depends on state from the lost in i915_request_skip()
1118 * Dependency tracking and request ordering along the timeline in __i915_request_add_to_timeline()
1120 * operations while building the request (we know that the timeline in __i915_request_add_to_timeline()
1124 * we embed the hooks into our request struct -- at the cost of in __i915_request_add_to_timeline()
1132 * to prevent scheduling of the second request until the first is in __i915_request_add_to_timeline()
1137 prev = rcu_dereference_protected(timeline->last_request.request, in __i915_request_add_to_timeline()
1158 * Make sure that no request gazumped us - if it was allocated after in __i915_request_add_to_timeline()
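
The fragments above are about keeping a single timeline strictly ordered: the new request depends on whatever was previously last on the timeline, and the seqno it is handed must be newer than its predecessor's (otherwise something "gazumped" us). A minimal sketch with invented types:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct sketch_rq {
        unsigned int seqno;
        bool completed;
        struct sketch_rq *waits_on;     /* submit-time dependency */
};

struct sketch_tl {
        struct sketch_rq *last_request;
        unsigned int next_seqno;
};

static void sketch_add_to_timeline(struct sketch_tl *tl, struct sketch_rq *rq)
{
        struct sketch_rq *prev = tl->last_request;

        /* Execute strictly after the previous request on this timeline;
         * no dependency is needed if it has already completed. */
        rq->waits_on = (prev && !prev->completed) ? prev : NULL;

        rq->seqno = ++tl->next_seqno;

        /* Make sure no request gazumped us: seqnos must stay monotonic
         * and match the order requests were added to the timeline. */
        assert(!prev || rq->seqno > prev->seqno);

        tl->last_request = rq;
}
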
1170 * request is not being tracked for completion but the work itself is
1194 * GPU processing the request, we never over-estimate the in __i915_request_commit()
1208 * Let the backend know a new request has arrived that may need in __i915_request_queue()
1210 * request - i.e. we may want to preempt the current request in order in __i915_request_queue()
1212 * request. in __i915_request_queue()
1214 * This is called before the request is ready to run so that we can in __i915_request_queue()
1253 * Boost priorities to new clients (new request flows). in i915_request_add()
1266 * In typical scenarios, we do not expect the previous request on in i915_request_add()
1268 * has been completed. If the completed request is still here, that in i915_request_add()
1269 * implies that request retirement is a long way behind submission, in i915_request_add()
1272 * retirement worker. So if the last request on this timeline was in i915_request_add()
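
The point of the comment above is a heuristic: finding the previous request on the timeline already completed but not yet retired means retirement is lagging submission, so it is reclaimed inline rather than left entirely to the retirement worker. A compact sketch, again with invented names:

#include <stdbool.h>
#include <stddef.h>

struct lag_rq {
        bool completed;         /* breadcrumb has been written by the GPU */
        bool retired;           /* resources (ring space, contexts) released */
};

static void lag_retire(struct lag_rq *rq)
{
        rq->retired = true;
}

/* Called when a new request is added after @prev on the same timeline. */
static void lag_flush_previous(struct lag_rq *prev)
{
        if (prev && prev->completed && !prev->retired)
                lag_retire(prev);       /* retirement fell behind: catch up */
}
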
1327 * Only wait for the request if we know it is likely to complete. in __i915_spin_request()
1330 * request length, so we do not have a good indicator that this in __i915_spin_request()
1331 * request will complete within the timeout. What we do know is the in __i915_spin_request()
1333 * tell if the request has been started. If the request is not even in __i915_spin_request()
1344 * rate. By busywaiting on the request completion for a short while we in __i915_spin_request()
1346 * if it is a slow request, we want to sleep as quickly as possible. in __i915_spin_request()
1348 * takes to sleep on a request, on the order of a microsecond. in __i915_spin_request()
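
The trade-off described above (going to sleep costs on the order of a microsecond, so very short requests are better polled) is the usual spin-then-sleep pattern. A user-space sketch of the bounded spin, with invented names and clock_gettime() standing in for the driver's timekeeping:

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static unsigned long long sketch_now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Poll for completion for at most @spin_ns nanoseconds.  Returns true if
 * the request completed while spinning; false means the caller should
 * fall back to sleeping and waiting for an interrupt to wake it. */
static bool sketch_spin_for_completion(atomic_bool *done, unsigned int spin_ns)
{
        unsigned long long start = sketch_now_ns();

        do {
                if (atomic_load(done))
                        return true;
        } while (sketch_now_ns() - start < spin_ns);

        return false;
}
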
1381 * i915_request_wait - wait until execution of request has finished
1382 * @rq: the request to wait upon
1386 * i915_request_wait() waits for the request to be completed, for a
1390 * Returns the remaining time (in jiffies) if the request completed, which may
1391 * be zero or -ETIME if the request is unfinished after the timeout expires.
1393 * pending before the request completes.
1432 * short wait, we first spin to see if the request would have completed in i915_request_wait()
1442 * completion. That requires having a good predictor for the request in i915_request_wait()
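
For completeness, a hedged usage sketch of the return convention documented above. The flags and timeout parameters are not visible in the matched lines, so the three-argument form and the I915_WAIT_INTERRUPTIBLE flag used here are assumptions about the surrounding header, not something this listing confirms:

#include "i915_request.h"       /* assumed: declares i915_request_wait() */

static int sketch_wait_for_rq(struct i915_request *rq)
{
        long ret;

        /* Assumed form: i915_request_wait(rq, flags, timeout in jiffies). */
        ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ);
        if (ret < 0)
                return ret;     /* -ETIME on timeout, or interrupted/error */

        return 0;               /* completed; ret is the time left (may be 0) */
}
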