/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/lockdep.h>

#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
struct intel_timeline;
struct intel_timeline_cacheline;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *gem_context;
	struct intel_engine_cs *engine;
	struct intel_context *hw_context;
	struct intel_ring *ring;
	struct intel_timeline *timeline;
	struct list_head signal_link;

	/*
	 * The RCU epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed, and is guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	unsigned long flags;
#define I915_REQUEST_WAITBOOST BIT(0)
#define I915_REQUEST_NOPREEMPT BIT(1)

	/** timeline->request entry for this request */
	struct list_head link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};
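
/*
 * Illustrative sketch only (not part of the API): the lockless RCU lookup
 * described above must tolerate the request being freed and reallocated
 * from the slab underneath it. fetch_active_request() is a hypothetical
 * stand-in for any lockless read of a request pointer.
 *
 *	rcu_read_lock();
 *	rq = fetch_active_request(...);
 *	if (rq && !dma_fence_get_rcu(&rq->fence))
 *		rq = NULL;
 *	rcu_read_unlock();
 *
 * dma_fence_get_rcu() fails if the fence's refcount has already dropped
 * to zero, i.e. the request is mid-free. Even on success, revalidate that
 * the request is still the one expected, as the slab may have recycled it.
 */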

/*
 * GFP flags for request allocation: __GFP_RETRY_MAYFAIL lets the allocator
 * try hard but fail rather than invoke the OOM killer, and __GFP_NOWARN
 * suppresses warnings for those expected failures; callers are prepared
 * to handle a failed allocation gracefully.
 */
#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

void i915_request_retire_upto(struct i915_request *rq);
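
/*
 * Typical construction flow, as a minimal sketch: assumes @ce is an
 * intel_context the caller has already pinned, and emit_user_commands()
 * is a placeholder for the caller's own emission into the ring.
 *
 *	struct i915_request *rq;
 *	int err;
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = emit_user_commands(rq);
 *
 *	i915_request_add(rq);
 *	return err;
 *
 * Note that once created, a request should always be passed to
 * i915_request_add(), even on error, to unwind the construction.
 */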

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
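
/*
 * Example (a minimal sketch): keeping a request alive beyond
 * i915_request_add(). Once added, the request may be retired and freed
 * at any time unless we hold our own reference.
 *
 *	i915_request_get(rq);
 *	i915_request_add(rq);
 *
 *	... rq remains safe to dereference here ...
 *
 *	i915_request_put(rq);
 */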

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));
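
/*
 * Example (sketch): serialising a request under construction against an
 * external fence, for instance one imported from a sync_file. Here @rq
 * was returned by i915_request_create() and @fence is whatever
 * struct dma_fence the caller obtained.
 *
 *	err = i915_request_await_dma_fence(rq, fence);
 *	if (err < 0)
 *		goto err_rq;
 *
 * where the err_rq label still adds the request to unwind construction.
 */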

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_PRIORITY	BIT(2) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(3) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
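
/*
 * Example (sketch): an interruptible wait bounded to 100ms.
 *
 *	long ret;
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				msecs_to_jiffies(100));
 *	if (ret < 0)
 *		return ret;
 *
 * A negative result is -ETIME if the request remained busy past the
 * timeout, or -ERESTARTSYS if the wait was interrupted by a signal;
 * otherwise ret is the remaining jiffies of the timeout.
 */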

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is at or after seq2, taking wraparound into account.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
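
/*
 * Worked example: seqnos wrap, so a plain unsigned comparison is wrong.
 * With seq1 == 0x00000001 and seq2 == 0xffffffff,
 * (s32)(seq1 - seq2) == (s32)0x00000002 == 2 >= 0, so seq1 is correctly
 * treated as having passed seq2 despite being numerically smaller.
 */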

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	/* The preceding request on this timeline carries breadcrumb seqno - 1 */
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and not busywaiting).
 * Note that it may no longer be running by the time the function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}
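
/*
 * Example (sketch): a non-blocking busy query on a request the caller
 * holds a reference to (see i915_request_get()). Any answer is
 * immediately stale, as the GPU continues to run.
 *
 *	if (i915_request_completed(rq))
 *		return 0;
 *	busy = i915_request_is_running(rq);
 */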

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return rq->flags & I915_REQUEST_WAITBOOST;
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
}

bool i915_retire_requests(struct drm_i915_private *i915);

#endif /* I915_REQUEST_H */