/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/hrtimer.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
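
/*
 * Example (illustrative): RQ_TRACE() prefixes the message with the request's
 * fence context:seqno and the breadcrumb currently visible in the HW status
 * page, so callers only supply the tail of the message, e.g.
 *
 *	RQ_TRACE(rq, "completed? %d\n", i915_request_completed(rq));
 */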

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
	 * breadcrumb that marks the end of semaphore waits and the start of
	 * the user payload.
	 */
	I915_FENCE_FLAG_INITIAL_BREADCRUMB,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the GPU for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking
	 * the GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};
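
/*
 * Example (illustrative): these flags extend the dma_fence user bits and
 * live in rq->fence.flags, so they are queried with the usual atomic bitops,
 * just as the inline helpers later in this header do, e.g.
 *
 *	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
 *		...the request has been handed over to the HW...
 */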

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;

	struct list_head signal_link;
	struct llist_node signal_node;

	/*
	 * The RCU epoch of when this request was allocated. Used to
	 * judiciously apply backpressure on future allocations to ensure
	 * that under memory pressure there are sufficient RCU ticks for us
	 * to reclaim our RCU-protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct llist_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->context->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request, if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	/** Watchdog support fields. */
	struct i915_request_watchdog {
		struct llist_node link;
		struct hrtimer timer;
	} watchdog;

	/*
	 * Requests may need to be stalled when using GuC submission, waiting
	 * for certain GuC operations to complete. If that is the case, stalled
	 * requests are added to a per-context list of stalled requests. The
	 * below list_head is the link in that list.
	 */
	struct list_head guc_fence_link;

	/**
	 * Priority level while the request is in flight. Differs from the
	 * i915 scheduler priority. See the comment above
	 * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details.
	 */
#define GUC_PRIO_INIT	0xff
#define GUC_PRIO_FINI	0xfe
	u8 guc_prio;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct kmem_cache *i915_request_slab_cache(void);

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

void __i915_request_skip(struct i915_request *rq);
bool i915_request_set_error_once(struct i915_request *rq, int error);
struct i915_request *i915_request_mark_eio(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);
void __i915_request_queue_bh(struct i915_request *rq);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}
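
/*
 * Example (illustrative sketch): downcasting a generic dma_fence that may
 * have come from another driver. Only fences backed by i915_fence_ops may
 * be converted to requests.
 *
 *	struct i915_request *rq = NULL;
 *
 *	if (dma_fence_is_i915(fence))
 *		rq = to_request(fence);
 */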

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
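
/*
 * Example (illustrative sketch): because the request slab may be recycled
 * under RCU (see the struct i915_request description above), a lockless
 * lookup must use i915_request_get_rcu(), which returns NULL if the
 * refcount has already dropped to zero. "slot" here is a hypothetical
 * RCU-protected pointer to a request.
 *
 *	rcu_read_lock();
 *	rq = rcu_dereference(slot);
 *	if (rq)
 *		rq = i915_request_get_rcu(rq);
 *	rcu_read_unlock();
 */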

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence);

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

void i915_request_cancel(struct i915_request *rq, int error);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */

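/*
 * Example (illustrative sketch): an interruptible wait bounded to one
 * second. On completion the remaining time (in jiffies) is returned; a
 * negative error code signals failure, e.g. a timeout or an interrupting
 * signal.
 *
 *	long ret;
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ);
 *	if (ret < 0)
 *		...timed out or interrupted...
 */
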
void i915_request_show(struct drm_printer *m,
		       const struct i915_request *rq,
		       const char *prefix,
		       int indent);

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than seq2, taking u32 wraparound into
 * account.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
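
/*
 * Worked example: sequence numbers are compared via their signed
 * difference, so the comparison survives u32 wraparound. For instance,
 * i915_seqno_passed(1, 0xfffffffe) is true: 1 - 0xfffffffe == 3 as u32,
 * a small positive s32, i.e. seq1 is (just) later than seq2.
 */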

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started, as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		/* Remember: started but may have since been preempted! */
		result = __i915_request_has_started(rq);
	rcu_read_unlock();

	return result;
}
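
/*
 * Example (illustrative sketch): classifying a request's progress with the
 * predicates in this header. Each answer is merely a snapshot and may be
 * stale by the time it is acted upon.
 *
 *	if (i915_request_completed(rq))
 *		...breadcrumb written, the GPU is finished with it...
 *	else if (i915_request_is_running(rq))
 *		...on the HW and past its initial breadcrumb...
 *	else if (i915_request_started(rq))
 *		...started, but possibly since preempted...
 */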

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and not busywaiting).
 * Note that it may no longer be running by the time the function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	bool result;

	if (!i915_request_is_active(rq))
		return false;

	rcu_read_lock();
	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		result = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return result;
}

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
		   (u32 *)&rq->fence.seqno);
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->sched_engine->lock));
}

static inline u32
i915_request_active_seqno(const struct i915_request *rq)
{
	u32 hwsp_phys_base =
		page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
	u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);

	/*
	 * Because of wraparound, we cannot simply take tl->hwsp_offset,
	 * but instead use the fact that the relative offset within the
	 * page is the same for the vaddr as for hwsp_offset. Take the top
	 * bits from tl->hwsp_offset and combine them with the relative
	 * offset in rq->hwsp_seqno.
	 *
	 * As rq->hwsp_seqno is rewritten when signaled, this only works
	 * when the request isn't signaled yet, but at that point you
	 * no longer need the offset.
	 */

	return hwsp_phys_base + hwsp_relative_offset;
}

bool
i915_request_active_engine(struct i915_request *rq,
			   struct intel_engine_cs **active);

void i915_request_notify_execute_cb_imm(struct i915_request *rq);

enum i915_request_state {
	I915_REQUEST_UNKNOWN = 0,
	I915_REQUEST_COMPLETE,
	I915_REQUEST_PENDING,
	I915_REQUEST_QUEUED,
	I915_REQUEST_ACTIVE,
};

enum i915_request_state i915_test_request_state(struct i915_request *rq);

void i915_request_module_exit(void);
int i915_request_module_init(void);

#endif /* I915_REQUEST_H */