/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	struct i915_request *request;
	u32 seqno;
};

struct intel_signal_node {
	struct intel_wait wait;
	struct list_head link;
};

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** The device (i915) on which this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *gem_context;
	struct intel_engine_cs *engine;
	struct intel_context *hw_context;
	struct intel_ring *ring;
	struct i915_timeline *timeline;
	struct intel_signal_node signaling;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	wait_queue_entry_t submitq;
	wait_queue_head_t execute;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;

	/**
	 * GEM sequence number associated with this request on the
	 * global execution timeline. It is zero when the request is not
	 * on the HW queue (i.e. not on the engine timeline list).
	 * Its value is guarded by the timeline spinlock.
	 */
	u32 global_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	bool waitboost;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
i915_request_alloc(struct intel_engine_cs *engine,
		   struct i915_gem_context *ctx);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
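
/*
 * Illustrative sketch (not part of this header): pinning the request behind
 * a dma_fence that is known to be an i915 fence (see dma_fence_is_i915()).
 * Because the fence is the first member of struct i915_request, a NULL fence
 * and a NULL request are interchangeable. The surrounding caller is
 * hypothetical.
 *
 *	struct i915_request *rq;
 *
 *	rq = i915_request_get(to_request(fence));
 *	... rq cannot be freed or reused while we hold the reference ...
 *	i915_request_put(rq);
 */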

/**
 * i915_request_global_seqno - report the current global seqno
 * @request - the request
 *
 * A request is assigned a global seqno only when it is on the hardware
 * execution queue. The global seqno can be used to maintain a list of
 * requests on the same engine in retirement order, for example for
 * constructing a priority queue for waiting. Prior to its execution, or
 * if it is subsequently removed in the event of preemption, its global
 * seqno is zero. As both insertion and removal from the execution queue
 * may operate in IRQ context, it is not guarded by the usual struct_mutex
 * BKL. Instead those relying on the global seqno must be prepared for its
 * value to change between reads. Only when the request is complete can
 * the global seqno be stable: due to the memory barriers on submitting
 * the commands to the hardware to write the breadcrumb, if the HWS shows
 * that it has passed the global seqno and the global seqno is unchanged
 * after the read, it is indeed complete.
 */
static inline u32
i915_request_global_seqno(const struct i915_request *request)
{
	return READ_ONCE(request->global_seqno);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);

void i915_request_add(struct i915_request *rq);

void __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
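
/*
 * Illustrative sketch (not part of this header): a bounded, interruptible
 * wait on a request. The 100ms timeout and error handling below are
 * hypothetical; broadly, i915_request_wait() returns the remaining timeout
 * on completion and a negative error code (e.g. -ETIME, -EINTR) otherwise.
 *
 *	long ret;
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 */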

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
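
/*
 * The unsigned subtraction cast to s32 makes the comparison safe across
 * seqno wraparound, provided the two values are within 2^31 of each other.
 * Illustrative values (not part of this header):
 *
 *	i915_seqno_passed(2, 1)			- true
 *	i915_seqno_passed(1, 2)			- false
 *	i915_seqno_passed(1, 0xfffffffe)	- true, 1 is "after" the wrap
 *	i915_seqno_passed(0xfffffffe, 1)	- false
 */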

static inline bool
__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
	GEM_BUG_ON(!seqno);
	return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) &&
	       seqno == i915_request_global_seqno(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	return __i915_request_completed(rq, seqno);
}

static inline bool i915_request_started(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	return i915_seqno_passed(intel_engine_get_seqno(rq->engine),
				 seqno - 1);
}

static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
{
	const struct i915_request *rq =
		container_of(node, const struct i915_request, sched);

	return i915_request_completed(rq);
}

void i915_retire_requests(struct drm_i915_private *i915);

/*
 * We treat requests as fences. This is not to be confused with our
 * "fence registers" but rather pipeline synchronisation objects ala
 * GL_ARB_sync. We use the fences to synchronize access from the CPU with
 * activity on the GPU, for example, we should not rewrite an object's PTE
 * whilst the GPU is reading them. We also track fences at a higher level to
 * provide implicit synchronisation around GEM objects, e.g. set-domain will
 * wait for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request; typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct i915_request *);

struct i915_gem_active {
	struct i915_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};
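
/*
 * Illustrative sketch (not part of this header), using a hypothetical object
 * that wants to track its last GPU user as described above. The embedded
 * tracker is initialised once with init_request_active() (declared below)
 * and then pointed at each new request, typically under struct_mutex, via
 * i915_gem_active_set(); the optional retire callback runs when that request
 * is retired.
 *
 *	struct my_object {
 *		struct i915_gem_active last_active;
 *	};
 *
 *	static void my_object_retire(struct i915_gem_active *active,
 *				     struct i915_request *rq)
 *	{
 *		... the tracked request has been retired, obj is now idle ...
 *	}
 *
 *	init_request_active(&obj->last_active, my_object_retire);
 *	...
 *	i915_gem_active_set(&obj->last_active, rq);
 */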

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct i915_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle);
 * can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle, that is when it is
 * retired after completion, the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	RCU_INIT_POINTER(active->request, NULL);
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct i915_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}

/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
			      i915_gem_retire_fn fn,
			      struct mutex *mutex)
{
	lockdep_assert_held(mutex);
	active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct i915_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/*
	 * Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct i915_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_request_get(i915_gem_active_peek(active, mutex));
}
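
/*
 * Illustrative sketch (not part of this header): taking a reference to the
 * last active request of a hypothetical object while holding struct_mutex,
 * so the request may continue to be used after the lock is dropped. The
 * tracker and lock names are assumptions for the example only.
 *
 *	struct i915_request *rq;
 *
 *	rq = i915_gem_active_get(&obj->last_active,
 *				 &i915->drm.struct_mutex);
 *	if (rq) {
 *		... use rq, even after unlocking struct_mutex ...
 *		i915_request_put(rq);
 *	}
 */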

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request, or
 * NULL if the active tracker is idle. The caller must hold the RCU read lock,
 * but the returned pointer is safe to use outside of RCU.
 */
static inline struct i915_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/*
	 * Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * rq = active.request
	 *				retire(rq) -> free(rq);
	 *				(rq is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				rq = new submission on a new object
	 * ref(rq)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_request_alloc().
	 */
	do {
		struct i915_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_request_completed(request))
			return NULL;

		/*
		 * An especially silly compiler could decide to recompute the
		 * result of i915_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false, meaning that some of the instructions below would be
		 * skipped while later ones are still executed. Due to barriers
		 * within the refcounting, the inconsistency cannot reach past
		 * the call to i915_request_get_rcu(), but skipping that call
		 * while still executing i915_request_put() creates havoc
		 * enough. Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_request_get_rcu(request);

		/*
		 * What stops the following rcu_access_pointer() from occurring
		 * before the above i915_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_request_get_rcu()
		 * returns the request (and so with the reference counted
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_request_put().
 */
static inline struct i915_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct i915_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active tracker to wait upon
 * @flags - how to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_request_wait(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
	struct i915_request *request;
	long ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(request);
	}

	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active tracker to wait upon
 * @mutex - the mutex guarding the tracker (typically struct_mutex)
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct i915_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_request_wait(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
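
/*
 * Illustrative sketch (not part of this header): for_each_active() visits the
 * index of every bit set in @mask, lowest first, clearing each bit as it goes
 * (so @mask is zero on loop exit). The bitmask and callee below are
 * hypothetical.
 *
 *	unsigned int mask = busy_slots, idx;
 *
 *	for_each_active(mask, idx)
 *		retire_slot(idx);
 */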

#endif /* I915_REQUEST_H */