1 /*
2 * Copyright © 2008-2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 #include <linux/dma-fence-array.h>
26 #include <linux/irq_work.h>
27 #include <linux/prefetch.h>
28 #include <linux/sched.h>
29 #include <linux/sched/clock.h>
30 #include <linux/sched/signal.h>
31
32 #include "gem/i915_gem_context.h"
33 #include "gt/intel_context.h"
34
35 #include "i915_active.h"
36 #include "i915_drv.h"
37 #include "i915_globals.h"
38 #include "i915_trace.h"
39 #include "intel_pm.h"
40
41 struct execute_cb {
42 struct list_head link;
43 struct irq_work work;
44 struct i915_sw_fence *fence;
45 void (*hook)(struct i915_request *rq, struct dma_fence *signal);
46 struct i915_request *signal;
47 };
48
49 static struct i915_global_request {
50 struct i915_global base;
51 struct kmem_cache *slab_requests;
52 struct kmem_cache *slab_dependencies;
53 struct kmem_cache *slab_execute_cbs;
54 } global;
55
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
57 {
58 return "i915";
59 }
60
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
62 {
63 /*
64 * The timeline struct (as part of the ppgtt underneath a context)
65 * may be freed when the request is no longer in use by the GPU.
66 * We could extend the life of a context to beyond that of all
67 * fences, possibly keeping the hw resource around indefinitely,
68 * or we just give them a false name. Since
69 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
70 * lie seems justifiable.
71 */
72 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
73 return "signaled";
74
75 return to_request(fence)->gem_context->name ?: "[i915]";
76 }
77
static bool i915_fence_signaled(struct dma_fence *fence)
79 {
80 return i915_request_completed(to_request(fence));
81 }
82
static bool i915_fence_enable_signaling(struct dma_fence *fence)
84 {
85 return i915_request_enable_breadcrumb(to_request(fence));
86 }
87
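/*
 * Note the implicit flag conversion below: the boolean @interruptible from
 * the dma-fence interface is relied upon to line up with
 * I915_WAIT_INTERRUPTIBLE, and I915_WAIT_PRIORITY additionally asks
 * i915_request_wait() to bump the request priority on behalf of the
 * external waiter.
 */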
static signed long i915_fence_wait(struct dma_fence *fence,
89 bool interruptible,
90 signed long timeout)
91 {
92 return i915_request_wait(to_request(fence),
93 interruptible | I915_WAIT_PRIORITY,
94 timeout);
95 }
96
static void i915_fence_release(struct dma_fence *fence)
98 {
99 struct i915_request *rq = to_request(fence);
100
101 /*
	 * The request is put onto an RCU freelist (i.e. the address
103 * is immediately reused), mark the fences as being freed now.
104 * Otherwise the debugobjects for the fences are only marked as
105 * freed when the slab cache itself is freed, and so we would get
106 * caught trying to reuse dead objects.
107 */
108 i915_sw_fence_fini(&rq->submit);
109 i915_sw_fence_fini(&rq->semaphore);
110
111 kmem_cache_free(global.slab_requests, rq);
112 }
113
114 const struct dma_fence_ops i915_fence_ops = {
115 .get_driver_name = i915_fence_get_driver_name,
116 .get_timeline_name = i915_fence_get_timeline_name,
117 .enable_signaling = i915_fence_enable_signaling,
118 .signaled = i915_fence_signaled,
119 .wait = i915_fence_wait,
120 .release = i915_fence_release,
121 };
122
static void irq_execute_cb(struct irq_work *wrk)
124 {
125 struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
126
127 i915_sw_fence_complete(cb->fence);
128 kmem_cache_free(global.slab_execute_cbs, cb);
129 }
130
static void irq_execute_cb_hook(struct irq_work *wrk)
132 {
133 struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
134
135 cb->hook(container_of(cb->fence, struct i915_request, submit),
136 &cb->signal->fence);
137 i915_request_put(cb->signal);
138
139 irq_execute_cb(wrk);
140 }
141
static void __notify_execute_cb(struct i915_request *rq)
143 {
144 struct execute_cb *cb;
145
146 lockdep_assert_held(&rq->lock);
147
148 if (list_empty(&rq->execute_cb))
149 return;
150
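	/*
	 * Queue each registered callback as irq_work; the callbacks free
	 * their own execute_cb nodes, so afterwards we only need to reset
	 * the list head.
	 */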
151 list_for_each_entry(cb, &rq->execute_cb, link)
152 irq_work_queue(&cb->work);
153
154 /*
155 * XXX Rollback on __i915_request_unsubmit()
156 *
157 * In the future, perhaps when we have an active time-slicing scheduler,
158 * it will be interesting to unsubmit parallel execution and remove
159 * busywaits from the GPU until their master is restarted. This is
160 * quite hairy, we have to carefully rollback the fence and do a
161 * preempt-to-idle cycle on the target engine, all the while the
162 * master execute_cb may refire.
163 */
164 INIT_LIST_HEAD(&rq->execute_cb);
165 }
166
167 static inline void
remove_from_client(struct i915_request *request)
169 {
170 struct drm_i915_file_private *file_priv;
171
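	/*
	 * The request may be unlinked from its client concurrently (e.g. as
	 * the file is being closed), so sample file_priv once and recheck it
	 * under the client's mm.lock before dropping the link.
	 */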
172 file_priv = READ_ONCE(request->file_priv);
173 if (!file_priv)
174 return;
175
176 spin_lock(&file_priv->mm.lock);
177 if (request->file_priv) {
178 list_del(&request->client_link);
179 request->file_priv = NULL;
180 }
181 spin_unlock(&file_priv->mm.lock);
182 }
183
static void free_capture_list(struct i915_request *request)
185 {
186 struct i915_capture_list *capture;
187
188 capture = request->capture_list;
189 while (capture) {
190 struct i915_capture_list *next = capture->next;
191
192 kfree(capture);
193 capture = next;
194 }
195 }
196
static void remove_from_engine(struct i915_request *rq)
198 {
199 struct intel_engine_cs *engine, *locked;
200
201 /*
202 * Virtual engines complicate acquiring the engine timeline lock,
203 * as their rq->engine pointer is not stable until under that
204 * engine lock. The simple ploy we use is to take the lock then
205 * check that the rq still belongs to the newly locked engine.
206 */
207 locked = READ_ONCE(rq->engine);
208 spin_lock(&locked->active.lock);
209 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
210 spin_unlock(&locked->active.lock);
211 spin_lock(&engine->active.lock);
212 locked = engine;
213 }
214 list_del(&rq->sched.link);
215 spin_unlock(&locked->active.lock);
216 }
217
static bool i915_request_retire(struct i915_request *rq)
219 {
220 struct i915_active_request *active, *next;
221
222 lockdep_assert_held(&rq->timeline->mutex);
223 if (!i915_request_completed(rq))
224 return false;
225
226 GEM_TRACE("%s fence %llx:%lld, current %d\n",
227 rq->engine->name,
228 rq->fence.context, rq->fence.seqno,
229 hwsp_seqno(rq));
230
231 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
232 trace_i915_request_retire(rq);
233
234 /*
235 * We know the GPU must have read the request to have
236 * sent us the seqno + interrupt, so use the position
237 * of tail of the request to update the last known position
238 * of the GPU head.
239 *
240 * Note this requires that we are always called in request
241 * completion order.
242 */
243 GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
244 rq->ring->head = rq->postfix;
245
246 /*
247 * Walk through the active list, calling retire on each. This allows
248 * objects to track their GPU activity and mark themselves as idle
249 * when their *last* active request is completed (updating state
250 * tracking lists for eviction, active references for GEM, etc).
251 *
252 * As the ->retire() may free the node, we decouple it first and
253 * pass along the auxiliary information (to avoid dereferencing
254 * the node after the callback).
255 */
256 list_for_each_entry_safe(active, next, &rq->active_list, link) {
257 /*
258 * In microbenchmarks or focusing upon time inside the kernel,
259 * we may spend an inordinate amount of time simply handling
260 * the retirement of requests and processing their callbacks.
261 * Of which, this loop itself is particularly hot due to the
262 * cache misses when jumping around the list of
263 * i915_active_request. So we try to keep this loop as
264 * streamlined as possible and also prefetch the next
265 * i915_active_request to try and hide the likely cache miss.
266 */
267 prefetchw(next);
268
269 INIT_LIST_HEAD(&active->link);
270 RCU_INIT_POINTER(active->request, NULL);
271
272 active->retire(active, rq);
273 }
274
275 local_irq_disable();
276
277 /*
278 * We only loosely track inflight requests across preemption,
279 * and so we may find ourselves attempting to retire a _completed_
280 * request that we have removed from the HW and put back on a run
281 * queue.
282 */
283 remove_from_engine(rq);
284
285 spin_lock(&rq->lock);
286 i915_request_mark_complete(rq);
287 if (!i915_request_signaled(rq))
288 dma_fence_signal_locked(&rq->fence);
289 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
290 i915_request_cancel_breadcrumb(rq);
291 if (i915_request_has_waitboost(rq)) {
292 GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
293 atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
294 }
295 if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
296 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
297 __notify_execute_cb(rq);
298 }
299 GEM_BUG_ON(!list_empty(&rq->execute_cb));
300 spin_unlock(&rq->lock);
301
302 local_irq_enable();
303
304 remove_from_client(rq);
305 list_del(&rq->link);
306
307 intel_context_exit(rq->hw_context);
308 intel_context_unpin(rq->hw_context);
309
310 free_capture_list(rq);
311 i915_sched_node_fini(&rq->sched);
312 i915_request_put(rq);
313
314 return true;
315 }
316
void i915_request_retire_upto(struct i915_request *rq)
318 {
319 struct intel_timeline * const tl = rq->timeline;
320 struct i915_request *tmp;
321
322 GEM_TRACE("%s fence %llx:%lld, current %d\n",
323 rq->engine->name,
324 rq->fence.context, rq->fence.seqno,
325 hwsp_seqno(rq));
326
327 lockdep_assert_held(&tl->mutex);
328 GEM_BUG_ON(!i915_request_completed(rq));
329
330 do {
331 tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
332 } while (i915_request_retire(tmp) && tmp != rq);
333 }
334
335 static int
__i915_request_await_execution(struct i915_request *rq,
337 struct i915_request *signal,
338 void (*hook)(struct i915_request *rq,
339 struct dma_fence *signal),
340 gfp_t gfp)
341 {
342 struct execute_cb *cb;
343
344 if (i915_request_is_active(signal)) {
345 if (hook)
346 hook(rq, &signal->fence);
347 return 0;
348 }
349
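	/*
	 * Not yet running: allocate a callback node, then recheck under the
	 * signaler's lock in case it was submitted in the meantime.
	 */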
350 cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
351 if (!cb)
352 return -ENOMEM;
353
354 cb->fence = &rq->submit;
355 i915_sw_fence_await(cb->fence);
356 init_irq_work(&cb->work, irq_execute_cb);
357
358 if (hook) {
359 cb->hook = hook;
360 cb->signal = i915_request_get(signal);
361 cb->work.func = irq_execute_cb_hook;
362 }
363
364 spin_lock_irq(&signal->lock);
365 if (i915_request_is_active(signal)) {
366 if (hook) {
367 hook(rq, &signal->fence);
368 i915_request_put(signal);
369 }
370 i915_sw_fence_complete(cb->fence);
371 kmem_cache_free(global.slab_execute_cbs, cb);
372 } else {
373 list_add_tail(&cb->link, &signal->execute_cb);
374 }
375 spin_unlock_irq(&signal->lock);
376
377 return 0;
378 }
379
bool __i915_request_submit(struct i915_request *request)
381 {
382 struct intel_engine_cs *engine = request->engine;
383 bool result = false;
384
385 GEM_TRACE("%s fence %llx:%lld, current %d\n",
386 engine->name,
387 request->fence.context, request->fence.seqno,
388 hwsp_seqno(request));
389
390 GEM_BUG_ON(!irqs_disabled());
391 lockdep_assert_held(&engine->active.lock);
392
393 /*
394 * With the advent of preempt-to-busy, we frequently encounter
395 * requests that we have unsubmitted from HW, but left running
396 * until the next ack and so have completed in the meantime. On
397 * resubmission of that completed request, we can skip
398 * updating the payload, and execlists can even skip submitting
399 * the request.
400 *
401 * We must remove the request from the caller's priority queue,
402 * and the caller must only call us when the request is in their
403 * priority queue, under the active.lock. This ensures that the
404 * request has *not* yet been retired and we can safely move
405 * the request into the engine->active.list where it will be
	 * dropped upon retiring. (Otherwise if we resubmit a *retired*
407 * request, this would be a horrible use-after-free.)
408 */
409 if (i915_request_completed(request))
410 goto xfer;
411
412 if (i915_gem_context_is_banned(request->gem_context))
413 i915_request_skip(request, -EIO);
414
415 /*
416 * Are we using semaphores when the gpu is already saturated?
417 *
418 * Using semaphores incurs a cost in having the GPU poll a
419 * memory location, busywaiting for it to change. The continual
420 * memory reads can have a noticeable impact on the rest of the
421 * system with the extra bus traffic, stalling the cpu as it too
422 * tries to access memory across the bus (perf stat -e bus-cycles).
423 *
424 * If we installed a semaphore on this request and we only submit
425 * the request after the signaler completed, that indicates the
426 * system is overloaded and using semaphores at this time only
427 * increases the amount of work we are doing. If so, we disable
428 * further use of semaphores until we are idle again, whence we
429 * optimistically try again.
430 */
431 if (request->sched.semaphores &&
432 i915_sw_fence_signaled(&request->semaphore))
433 engine->saturated |= request->sched.semaphores;
434
435 engine->emit_fini_breadcrumb(request,
436 request->ring->vaddr + request->postfix);
437
438 trace_i915_request_execute(request);
439 engine->serial++;
440 result = true;
441
442 xfer: /* We may be recursing from the signal callback of another i915 fence */
443 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
444
445 if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
446 list_move_tail(&request->sched.link, &engine->active.requests);
447
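	/*
	 * If signaling was requested before the request reached the HW,
	 * (re)arm the breadcrumb now; should that fail (e.g. because the
	 * request has already completed), kick the breadcrumb worker so the
	 * signal is still delivered.
	 */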
448 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
449 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
450 !i915_request_enable_breadcrumb(request))
451 intel_engine_queue_breadcrumbs(engine);
452
453 __notify_execute_cb(request);
454
455 spin_unlock(&request->lock);
456
457 return result;
458 }
459
void i915_request_submit(struct i915_request *request)
461 {
462 struct intel_engine_cs *engine = request->engine;
463 unsigned long flags;
464
465 /* Will be called from irq-context when using foreign fences. */
466 spin_lock_irqsave(&engine->active.lock, flags);
467
468 __i915_request_submit(request);
469
470 spin_unlock_irqrestore(&engine->active.lock, flags);
471 }
472
void __i915_request_unsubmit(struct i915_request *request)
474 {
475 struct intel_engine_cs *engine = request->engine;
476
477 GEM_TRACE("%s fence %llx:%lld, current %d\n",
478 engine->name,
479 request->fence.context, request->fence.seqno,
480 hwsp_seqno(request));
481
482 GEM_BUG_ON(!irqs_disabled());
483 lockdep_assert_held(&engine->active.lock);
484
485 /*
486 * Only unwind in reverse order, required so that the per-context list
487 * is kept in seqno/ring order.
488 */
489
490 /* We may be recursing from the signal callback of another i915 fence */
491 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
492
493 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
494 i915_request_cancel_breadcrumb(request);
495
496 GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
497 clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
498
499 spin_unlock(&request->lock);
500
501 /* We've already spun, don't charge on resubmitting. */
502 if (request->sched.semaphores && i915_request_started(request)) {
503 request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
504 request->sched.semaphores = 0;
505 }
506
507 /*
508 * We don't need to wake_up any waiters on request->execute, they
509 * will get woken by any other event or us re-adding this request
510 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno compared to the one they went to sleep on.
513 */
514 }
515
void i915_request_unsubmit(struct i915_request *request)
517 {
518 struct intel_engine_cs *engine = request->engine;
519 unsigned long flags;
520
521 /* Will be called from irq-context when using foreign fences. */
522 spin_lock_irqsave(&engine->active.lock, flags);
523
524 __i915_request_unsubmit(request);
525
526 spin_unlock_irqrestore(&engine->active.lock, flags);
527 }
528
529 static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
531 {
532 struct i915_request *request =
533 container_of(fence, typeof(*request), submit);
534
535 switch (state) {
536 case FENCE_COMPLETE:
537 trace_i915_request_submit(request);
538
539 if (unlikely(fence->error))
540 i915_request_skip(request, fence->error);
541
542 /*
543 * We need to serialize use of the submit_request() callback
544 * with its hotplugging performed during an emergency
545 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
546 * critical section in order to force i915_gem_set_wedged() to
547 * wait until the submit_request() is completed before
548 * proceeding.
549 */
550 rcu_read_lock();
551 request->engine->submit_request(request);
552 rcu_read_unlock();
553 break;
554
555 case FENCE_FREE:
556 i915_request_put(request);
557 break;
558 }
559
560 return NOTIFY_DONE;
561 }
562
563 static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
565 {
566 struct i915_request *request =
567 container_of(fence, typeof(*request), semaphore);
568
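	/*
	 * The semaphore fence completes once every fence this request emitted
	 * a semaphore busywait for has signaled; bump in
	 * I915_PRIORITY_NOSEMAPHORE to mark the request as no longer
	 * depending on semaphores.
	 */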
569 switch (state) {
570 case FENCE_COMPLETE:
571 i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
572 break;
573
574 case FENCE_FREE:
575 i915_request_put(request);
576 break;
577 }
578
579 return NOTIFY_DONE;
580 }
581
static void retire_requests(struct intel_timeline *tl)
583 {
584 struct i915_request *rq, *rn;
585
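	/* Retire in submission order, stopping at the first incomplete request. */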
586 list_for_each_entry_safe(rq, rn, &tl->requests, link)
587 if (!i915_request_retire(rq))
588 break;
589 }
590
591 static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
593 {
594 struct i915_request *rq;
595
596 if (list_empty(&tl->requests))
597 goto out;
598
599 if (!gfpflags_allow_blocking(gfp))
600 goto out;
601
602 /* Move our oldest request to the slab-cache (if not in use!) */
603 rq = list_first_entry(&tl->requests, typeof(*rq), link);
604 i915_request_retire(rq);
605
606 rq = kmem_cache_alloc(global.slab_requests,
607 gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
608 if (rq)
609 return rq;
610
611 /* Ratelimit ourselves to prevent oom from malicious clients */
612 rq = list_last_entry(&tl->requests, typeof(*rq), link);
613 cond_synchronize_rcu(rq->rcustate);
614
615 /* Retire our old requests in the hope that we free some */
616 retire_requests(tl);
617
618 out:
619 return kmem_cache_alloc(global.slab_requests, gfp);
620 }
621
622 struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
624 {
625 struct intel_timeline *tl = ce->timeline;
626 struct i915_request *rq;
627 u32 seqno;
628 int ret;
629
630 might_sleep_if(gfpflags_allow_blocking(gfp));
631
632 /* Check that the caller provided an already pinned context */
633 __intel_context_pin(ce);
634
635 /*
636 * Beware: Dragons be flying overhead.
637 *
638 * We use RCU to look up requests in flight. The lookups may
639 * race with the request being allocated from the slab freelist.
640 * That is the request we are writing to here, may be in the process
641 * of being read by __i915_active_request_get_rcu(). As such,
642 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
644 * read the request->global_seqno and increment the reference count.
645 *
646 * The reference count is incremented atomically. If it is zero,
647 * the lookup knows the request is unallocated and complete. Otherwise,
648 * it is either still in use, or has been reallocated and reset
649 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
651 * request.
652 *
653 * Before we increment the refcount, we chase the request->engine
654 * pointer. We must not call kmem_cache_zalloc() or else we set
655 * that pointer to NULL and cause a crash during the lookup. If
656 * we see the request is completed (based on the value of the
657 * old engine and seqno), the lookup is complete and reports NULL.
658 * If we decide the request is not completed (new engine or seqno),
659 * then we grab a reference and double check that it is still the
660 * active request - which it won't be and restart the lookup.
661 *
662 * Do not use kmem_cache_zalloc() here!
663 */
664 rq = kmem_cache_alloc(global.slab_requests,
665 gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
666 if (unlikely(!rq)) {
667 rq = request_alloc_slow(tl, gfp);
668 if (!rq) {
669 ret = -ENOMEM;
670 goto err_unreserve;
671 }
672 }
673
674 ret = intel_timeline_get_seqno(tl, rq, &seqno);
675 if (ret)
676 goto err_free;
677
678 rq->i915 = ce->engine->i915;
679 rq->hw_context = ce;
680 rq->gem_context = ce->gem_context;
681 rq->engine = ce->engine;
682 rq->ring = ce->ring;
683 rq->timeline = tl;
684 rq->hwsp_seqno = tl->hwsp_seqno;
685 rq->hwsp_cacheline = tl->hwsp_cacheline;
686 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
687
688 spin_lock_init(&rq->lock);
689 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
690 tl->fence_context, seqno);
691
692 /* We bump the ref for the fence chain */
693 i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
694 i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
695
696 i915_sched_node_init(&rq->sched);
697
698 /* No zalloc, must clear what we need by hand */
699 rq->file_priv = NULL;
700 rq->batch = NULL;
701 rq->capture_list = NULL;
702 rq->flags = 0;
703 rq->execution_mask = ALL_ENGINES;
704
705 INIT_LIST_HEAD(&rq->active_list);
706 INIT_LIST_HEAD(&rq->execute_cb);
707
708 /*
709 * Reserve space in the ring buffer for all the commands required to
710 * eventually emit this request. This is to guarantee that the
711 * i915_request_add() call can't fail. Note that the reserve may need
712 * to be redone if the request is not actually submitted straight
713 * away, e.g. because a GPU scheduler has deferred it.
714 *
715 * Note that due to how we add reserved_space to intel_ring_begin()
716 * we need to double our request to ensure that if we need to wrap
717 * around inside i915_request_add() there is sufficient space at
718 * the beginning of the ring as well.
719 */
720 rq->reserved_space =
721 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
722
723 /*
724 * Record the position of the start of the request so that
725 * should we detect the updated seqno part-way through the
726 * GPU processing the request, we never over-estimate the
727 * position of the head.
728 */
729 rq->head = rq->ring->emit;
730
731 ret = rq->engine->request_alloc(rq);
732 if (ret)
733 goto err_unwind;
734
735 rq->infix = rq->ring->emit; /* end of header; start of user payload */
736
737 intel_context_mark_active(ce);
738 return rq;
739
740 err_unwind:
741 ce->ring->emit = rq->head;
742
743 /* Make sure we didn't add ourselves to external state before freeing */
744 GEM_BUG_ON(!list_empty(&rq->active_list));
745 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
746 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
747
748 err_free:
749 kmem_cache_free(global.slab_requests, rq);
750 err_unreserve:
751 intel_context_unpin(ce);
752 return ERR_PTR(ret);
753 }
754
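/*
 * A rough sketch of the expected calling pattern (simplified; see the
 * actual call sites elsewhere in the driver):
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... emit commands and await any prerequisite fences on rq ...
 *
 *	i915_request_add(rq);
 *
 * i915_request_create() returns with the timeline mutex held, and
 * i915_request_add() releases it.
 */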
755 struct i915_request *
i915_request_create(struct intel_context *ce)
757 {
758 struct i915_request *rq;
759 struct intel_timeline *tl;
760
761 tl = intel_context_timeline_lock(ce);
762 if (IS_ERR(tl))
763 return ERR_CAST(tl);
764
765 /* Move our oldest request to the slab-cache (if not in use!) */
766 rq = list_first_entry(&tl->requests, typeof(*rq), link);
767 if (!list_is_last(&rq->link, &tl->requests))
768 i915_request_retire(rq);
769
770 intel_context_enter(ce);
771 rq = __i915_request_create(ce, GFP_KERNEL);
772 intel_context_exit(ce); /* active reference transferred to request */
773 if (IS_ERR(rq))
774 goto err_unlock;
775
776 /* Check that we do not interrupt ourselves with a new request */
777 rq->cookie = lockdep_pin_lock(&tl->mutex);
778
779 return rq;
780
781 err_unlock:
782 intel_context_timeline_unlock(tl);
783 return rq;
784 }
785
786 static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
788 {
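	/*
	 * Wait until @signal has started executing, i.e. until the request
	 * before it on its timeline has completed, so that the semaphore
	 * busywait we are about to emit is not submitted ahead of the
	 * signaler itself.
	 */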
789 if (list_is_first(&signal->link, &signal->timeline->requests))
790 return 0;
791
792 signal = list_prev_entry(signal, link);
793 if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
794 return 0;
795
796 return i915_sw_fence_await_dma_fence(&rq->submit,
797 &signal->fence, 0,
798 I915_FENCE_GFP);
799 }
800
801 static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
803 {
804 /*
805 * Polling a semaphore causes bus traffic, delaying other users of
806 * both the GPU and CPU. We want to limit the impact on others,
807 * while taking advantage of early submission to reduce GPU
808 * latency. Therefore we restrict ourselves to not using more
809 * than one semaphore from each source, and not using a semaphore
810 * if we have detected the engine is saturated (i.e. would not be
811 * submitted early and cause bus traffic reading an already passed
812 * semaphore).
813 *
814 * See the are-we-too-late? check in __i915_request_submit().
815 */
816 return rq->sched.semaphores | rq->engine->saturated;
817 }
818
819 static int
emit_semaphore_wait(struct i915_request *to,
821 struct i915_request *from,
822 gfp_t gfp)
823 {
824 u32 hwsp_offset;
825 u32 *cs;
826 int err;
827
828 GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
829 GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
830
831 /* Just emit the first semaphore we see as request space is limited. */
832 if (already_busywaiting(to) & from->engine->mask)
833 return i915_sw_fence_await_dma_fence(&to->submit,
834 &from->fence, 0,
835 I915_FENCE_GFP);
836
837 err = i915_request_await_start(to, from);
838 if (err < 0)
839 return err;
840
841 /* Only submit our spinner after the signaler is running! */
842 err = __i915_request_await_execution(to, from, NULL, gfp);
843 if (err)
844 return err;
845
846 /* We need to pin the signaler's HWSP until we are finished reading. */
847 err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
848 if (err)
849 return err;
850
851 cs = intel_ring_begin(to, 4);
852 if (IS_ERR(cs))
853 return PTR_ERR(cs);
854
855 /*
856 * Using greater-than-or-equal here means we have to worry
857 * about seqno wraparound. To side step that issue, we swap
	 * the timeline HWSP upon wrapping, so that anyone listening
	 * for the old (pre-wrap) values does not see much smaller
	 * (post-wrap) values than expected (and so wait forever).
862 */
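	/* Poll the signaler's HWSP (via the GGTT) until it reports >= seqno. */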
863 *cs++ = MI_SEMAPHORE_WAIT |
864 MI_SEMAPHORE_GLOBAL_GTT |
865 MI_SEMAPHORE_POLL |
866 MI_SEMAPHORE_SAD_GTE_SDD;
867 *cs++ = from->fence.seqno;
868 *cs++ = hwsp_offset;
869 *cs++ = 0;
870
871 intel_ring_advance(to, cs);
872 to->sched.semaphores |= from->engine->mask;
873 to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
874 return 0;
875 }
876
877 static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
879 {
880 int ret;
881
882 GEM_BUG_ON(to == from);
883 GEM_BUG_ON(to->timeline == from->timeline);
884
885 if (i915_request_completed(from))
886 return 0;
887
888 if (to->engine->schedule) {
889 ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
890 if (ret < 0)
891 return ret;
892 }
893
894 if (to->engine == from->engine) {
895 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
896 &from->submit,
897 I915_FENCE_GFP);
898 } else if (intel_engine_has_semaphores(to->engine) &&
899 to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
900 ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
901 } else {
902 ret = i915_sw_fence_await_dma_fence(&to->submit,
903 &from->fence, 0,
904 I915_FENCE_GFP);
905 }
906 if (ret < 0)
907 return ret;
908
909 if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
910 ret = i915_sw_fence_await_dma_fence(&to->semaphore,
911 &from->fence, 0,
912 I915_FENCE_GFP);
913 if (ret < 0)
914 return ret;
915 }
916
917 return 0;
918 }
919
920 int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
922 {
923 struct dma_fence **child = &fence;
924 unsigned int nchild = 1;
925 int ret;
926
927 /*
928 * Note that if the fence-array was created in signal-on-any mode,
929 * we should *not* decompose it into its individual fences. However,
930 * we don't currently store which mode the fence-array is operating
931 * in. Fortunately, the only user of signal-on-any is private to
932 * amdgpu and we should not see any incoming fence-array from
933 * sync-file being in signal-on-any mode.
934 */
935 if (dma_fence_is_array(fence)) {
936 struct dma_fence_array *array = to_dma_fence_array(fence);
937
938 child = array->fences;
939 nchild = array->num_fences;
940 GEM_BUG_ON(!nchild);
941 }
942
943 do {
944 fence = *child++;
945 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
946 continue;
947
948 /*
949 * Requests on the same timeline are explicitly ordered, along
950 * with their dependencies, by i915_request_add() which ensures
951 * that requests are submitted in-order through each ring.
952 */
953 if (fence->context == rq->fence.context)
954 continue;
955
956 /* Squash repeated waits to the same timelines */
957 if (fence->context &&
958 intel_timeline_sync_is_later(rq->timeline, fence))
959 continue;
960
961 if (dma_fence_is_i915(fence))
962 ret = i915_request_await_request(rq, to_request(fence));
963 else
964 ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
965 I915_FENCE_TIMEOUT,
966 I915_FENCE_GFP);
967 if (ret < 0)
968 return ret;
969
970 /* Record the latest fence used against each timeline */
971 if (fence->context)
972 intel_timeline_sync_set(rq->timeline, fence);
973 } while (--nchild);
974
975 return 0;
976 }
977
978 int
i915_request_await_execution(struct i915_request *rq,
980 struct dma_fence *fence,
981 void (*hook)(struct i915_request *rq,
982 struct dma_fence *signal))
983 {
984 struct dma_fence **child = &fence;
985 unsigned int nchild = 1;
986 int ret;
987
988 if (dma_fence_is_array(fence)) {
989 struct dma_fence_array *array = to_dma_fence_array(fence);
990
991 /* XXX Error for signal-on-any fence arrays */
992
993 child = array->fences;
994 nchild = array->num_fences;
995 GEM_BUG_ON(!nchild);
996 }
997
998 do {
999 fence = *child++;
1000 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
1001 continue;
1002
1003 /*
1004 * We don't squash repeated fence dependencies here as we
1005 * want to run our callback in all cases.
1006 */
1007
1008 if (dma_fence_is_i915(fence))
1009 ret = __i915_request_await_execution(rq,
1010 to_request(fence),
1011 hook,
1012 I915_FENCE_GFP);
1013 else
1014 ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
1015 I915_FENCE_TIMEOUT,
1016 GFP_KERNEL);
1017 if (ret < 0)
1018 return ret;
1019 } while (--nchild);
1020
1021 return 0;
1022 }
1023
1024 /**
1025 * i915_request_await_object - set this request to (async) wait upon a bo
1026 * @to: request we are wishing to use
1027 * @obj: object which may be in use on another ring.
1028 * @write: whether the wait is on behalf of a writer
1029 *
1030 * This code is meant to abstract object synchronization with the GPU.
1031 * Conceptually we serialise writes between engines inside the GPU.
1032 * We only allow one engine to write into a buffer at any time, but
1033 * multiple readers. To ensure each has a coherent view of memory, we must:
1034 *
1035 * - If there is an outstanding write request to the object, the new
1036 * request must wait for it to complete (either CPU or in hw, requests
1037 * on the same ring will be naturally ordered).
1038 *
1039 * - If we are a write request (pending_write_domain is set), the new
1040 * request must wait for outstanding read requests to complete.
1041 *
1042 * Returns 0 if successful, else propagates up the lower layer error.
1043 */
1044 int
i915_request_await_object(struct i915_request *to,
1046 struct drm_i915_gem_object *obj,
1047 bool write)
1048 {
1049 struct dma_fence *excl;
1050 int ret = 0;
1051
1052 if (write) {
1053 struct dma_fence **shared;
1054 unsigned int count, i;
1055
1056 ret = dma_resv_get_fences_rcu(obj->base.resv,
1057 &excl, &count, &shared);
1058 if (ret)
1059 return ret;
1060
1061 for (i = 0; i < count; i++) {
1062 ret = i915_request_await_dma_fence(to, shared[i]);
1063 if (ret)
1064 break;
1065
1066 dma_fence_put(shared[i]);
1067 }
1068
1069 for (; i < count; i++)
1070 dma_fence_put(shared[i]);
1071 kfree(shared);
1072 } else {
1073 excl = dma_resv_get_excl_rcu(obj->base.resv);
1074 }
1075
1076 if (excl) {
1077 if (ret == 0)
1078 ret = i915_request_await_dma_fence(to, excl);
1079
1080 dma_fence_put(excl);
1081 }
1082
1083 return ret;
1084 }
1085
void i915_request_skip(struct i915_request *rq, int error)
1087 {
1088 void *vaddr = rq->ring->vaddr;
1089 u32 head;
1090
1091 GEM_BUG_ON(!IS_ERR_VALUE((long)error));
1092 dma_fence_set_error(&rq->fence, error);
1093
1094 if (rq->infix == rq->postfix)
1095 return;
1096
1097 /*
1098 * As this request likely depends on state from the lost
1099 * context, clear out all the user operations leaving the
1100 * breadcrumb at the end (so we get the fence notifications).
1101 */
1102 head = rq->infix;
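	/* The user payload may wrap the ring: clear to the end, then from 0. */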
1103 if (rq->postfix < head) {
1104 memset(vaddr + head, 0, rq->ring->size - head);
1105 head = 0;
1106 }
1107 memset(vaddr + head, 0, rq->postfix - head);
1108 rq->infix = rq->postfix;
1109 }
1110
1111 static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
1113 {
1114 struct intel_timeline *timeline = rq->timeline;
1115 struct i915_request *prev;
1116
1117 /*
1118 * Dependency tracking and request ordering along the timeline
1119 * is special cased so that we can eliminate redundant ordering
1120 * operations while building the request (we know that the timeline
1121 * itself is ordered, and here we guarantee it).
1122 *
1123 * As we know we will need to emit tracking along the timeline,
1124 * we embed the hooks into our request struct -- at the cost of
1125 * having to have specialised no-allocation interfaces (which will
1126 * be beneficial elsewhere).
1127 *
1128 * A second benefit to open-coding i915_request_await_request is
1129 * that we can apply a slight variant of the rules specialised
1130 * for timelines that jump between engines (such as virtual engines).
1131 * If we consider the case of virtual engine, we must emit a dma-fence
1132 * to prevent scheduling of the second request until the first is
1133 * complete (to maximise our greedy late load balancing) and this
1134 * precludes optimising to use semaphores serialisation of a single
1135 * timeline across engines.
1136 */
1137 prev = rcu_dereference_protected(timeline->last_request.request,
1138 lockdep_is_held(&timeline->mutex));
1139 if (prev && !i915_request_completed(prev)) {
1140 if (is_power_of_2(prev->engine->mask | rq->engine->mask))
1141 i915_sw_fence_await_sw_fence(&rq->submit,
1142 &prev->submit,
1143 &rq->submitq);
1144 else
1145 __i915_sw_fence_await_dma_fence(&rq->submit,
1146 &prev->fence,
1147 &rq->dmaq);
1148 if (rq->engine->schedule)
1149 __i915_sched_node_add_dependency(&rq->sched,
1150 &prev->sched,
1151 &rq->dep,
1152 0);
1153 }
1154
1155 list_add_tail(&rq->link, &timeline->requests);
1156
1157 /*
1158 * Make sure that no request gazumped us - if it was allocated after
1159 * our i915_request_alloc() and called __i915_request_add() before
1160 * us, the timeline will hold its seqno which is later than ours.
1161 */
1162 GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1163 __i915_active_request_set(&timeline->last_request, rq);
1164
1165 return prev;
1166 }
1167
1168 /*
 * NB: This function is not allowed to fail. Doing so would mean the
1170 * request is not being tracked for completion but the work itself is
1171 * going to happen on the hardware. This would be a Bad Thing(tm).
1172 */
struct i915_request *__i915_request_commit(struct i915_request *rq)
1174 {
1175 struct intel_engine_cs *engine = rq->engine;
1176 struct intel_ring *ring = rq->ring;
1177 u32 *cs;
1178
1179 GEM_TRACE("%s fence %llx:%lld\n",
1180 engine->name, rq->fence.context, rq->fence.seqno);
1181
1182 /*
1183 * To ensure that this call will not fail, space for its emissions
1184 * should already have been reserved in the ring buffer. Let the ring
1185 * know that it is time to use that space up.
1186 */
1187 GEM_BUG_ON(rq->reserved_space > ring->space);
1188 rq->reserved_space = 0;
1189 rq->emitted_jiffies = jiffies;
1190
1191 /*
1192 * Record the position of the start of the breadcrumb so that
1193 * should we detect the updated seqno part-way through the
1194 * GPU processing the request, we never over-estimate the
1195 * position of the ring's HEAD.
1196 */
1197 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1198 GEM_BUG_ON(IS_ERR(cs));
1199 rq->postfix = intel_ring_offset(rq, cs);
1200
1201 return __i915_request_add_to_timeline(rq);
1202 }
1203
void __i915_request_queue(struct i915_request *rq,
1205 const struct i915_sched_attr *attr)
1206 {
1207 /*
1208 * Let the backend know a new request has arrived that may need
1209 * to adjust the existing execution schedule due to a high priority
1210 * request - i.e. we may want to preempt the current request in order
1211 * to run a high priority dependency chain *before* we can execute this
1212 * request.
1213 *
1214 * This is called before the request is ready to run so that we can
1215 * decide whether to preempt the entire chain so that it is ready to
1216 * run at the earliest possible convenience.
1217 */
1218 i915_sw_fence_commit(&rq->semaphore);
1219 if (attr && rq->engine->schedule)
1220 rq->engine->schedule(rq, attr);
1221 i915_sw_fence_commit(&rq->submit);
1222 }
1223
void i915_request_add(struct i915_request *rq)
1225 {
1226 struct i915_sched_attr attr = rq->gem_context->sched;
1227 struct intel_timeline * const tl = rq->timeline;
1228 struct i915_request *prev;
1229
1230 lockdep_assert_held(&tl->mutex);
1231 lockdep_unpin_lock(&tl->mutex, rq->cookie);
1232
1233 trace_i915_request_add(rq);
1234
1235 prev = __i915_request_commit(rq);
1236
1237 /*
1238 * Boost actual workloads past semaphores!
1239 *
1240 * With semaphores we spin on one engine waiting for another,
1241 * simply to reduce the latency of starting our work when
1242 * the signaler completes. However, if there is any other
1243 * work that we could be doing on this engine instead, that
1244 * is better utilisation and will reduce the overall duration
1245 * of the current work. To avoid PI boosting a semaphore
1246 * far in the distance past over useful work, we keep a history
1247 * of any semaphore use along our dependency chain.
1248 */
1249 if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
1250 attr.priority |= I915_PRIORITY_NOSEMAPHORE;
1251
1252 /*
1253 * Boost priorities to new clients (new request flows).
1254 *
1255 * Allow interactive/synchronous clients to jump ahead of
1256 * the bulk clients. (FQ_CODEL)
1257 */
1258 if (list_empty(&rq->sched.signalers_list))
1259 attr.priority |= I915_PRIORITY_WAIT;
1260
1261 local_bh_disable();
1262 __i915_request_queue(rq, &attr);
1263 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
1264
1265 /*
1266 * In typical scenarios, we do not expect the previous request on
1267 * the timeline to be still tracked by timeline->last_request if it
1268 * has been completed. If the completed request is still here, that
1269 * implies that request retirement is a long way behind submission,
1270 * suggesting that we haven't been retiring frequently enough from
1271 * the combination of retire-before-alloc, waiters and the background
1272 * retirement worker. So if the last request on this timeline was
1273 * already completed, do a catch up pass, flushing the retirement queue
1274 * up to this client. Since we have now moved the heaviest operations
1275 * during retirement onto secondary workers, such as freeing objects
1276 * or contexts, retiring a bunch of requests is mostly list management
1277 * (and cache misses), and so we should not be overly penalizing this
	 * client by performing excess work, though we may still be performing
1279 * work on behalf of others -- but instead we should benefit from
1280 * improved resource management. (Well, that's the theory at least.)
1281 */
1282 if (prev && i915_request_completed(prev) && prev->timeline == tl)
1283 i915_request_retire_upto(prev);
1284
1285 mutex_unlock(&tl->mutex);
1286 }
1287
static unsigned long local_clock_us(unsigned int *cpu)
1289 {
1290 unsigned long t;
1291
1292 /*
1293 * Cheaply and approximately convert from nanoseconds to microseconds.
1294 * The result and subsequent calculations are also defined in the same
1295 * approximate microseconds units. The principal source of timing
1296 * error here is from the simple truncation.
1297 *
1298 * Note that local_clock() is only defined wrt to the current CPU;
1299 * the comparisons are no longer valid if we switch CPUs. Instead of
1300 * blocking preemption for the entire busywait, we can detect the CPU
1301 * switch and use that as indicator of system load and a reason to
1302 * stop busywaiting, see busywait_stop().
1303 */
1304 *cpu = get_cpu();
1305 t = local_clock() >> 10;
1306 put_cpu();
1307
1308 return t;
1309 }
1310
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1312 {
1313 unsigned int this_cpu;
1314
1315 if (time_after(local_clock_us(&this_cpu), timeout))
1316 return true;
1317
1318 return this_cpu != cpu;
1319 }
1320
static bool __i915_spin_request(const struct i915_request * const rq,
1322 int state, unsigned long timeout_us)
1323 {
1324 unsigned int cpu;
1325
1326 /*
1327 * Only wait for the request if we know it is likely to complete.
1328 *
1329 * We don't track the timestamps around requests, nor the average
1330 * request length, so we do not have a good indicator that this
1331 * request will complete within the timeout. What we do know is the
1332 * order in which requests are executed by the context and so we can
1333 * tell if the request has been started. If the request is not even
1334 * running yet, it is a fair assumption that it will not complete
1335 * within our relatively short timeout.
1336 */
1337 if (!i915_request_is_running(rq))
1338 return false;
1339
1340 /*
1341 * When waiting for high frequency requests, e.g. during synchronous
1342 * rendering split between the CPU and GPU, the finite amount of time
1343 * required to set up the irq and wait upon it limits the response
1344 * rate. By busywaiting on the request completion for a short while we
1345 * can service the high frequency waits as quick as possible. However,
1346 * if it is a slow request, we want to sleep as quickly as possible.
1347 * The tradeoff between waiting and sleeping is roughly the time it
1348 * takes to sleep on a request, on the order of a microsecond.
1349 */
1350
1351 timeout_us += local_clock_us(&cpu);
1352 do {
1353 if (i915_request_completed(rq))
1354 return true;
1355
1356 if (signal_pending_state(state, current))
1357 break;
1358
1359 if (busywait_stop(timeout_us, cpu))
1360 break;
1361
1362 cpu_relax();
1363 } while (!need_resched());
1364
1365 return false;
1366 }
1367
1368 struct request_wait {
1369 struct dma_fence_cb cb;
1370 struct task_struct *tsk;
1371 };
1372
static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1374 {
1375 struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1376
1377 wake_up_process(wait->tsk);
1378 }
1379
1380 /**
1381 * i915_request_wait - wait until execution of request has finished
1382 * @rq: the request to wait upon
1383 * @flags: how to wait
1384 * @timeout: how long to wait in jiffies
1385 *
1386 * i915_request_wait() waits for the request to be completed, for a
1387 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1388 * unbounded wait).
1389 *
1390 * Returns the remaining time (in jiffies) if the request completed, which may
1391 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1393 * pending before the request completes.
1394 */
long i915_request_wait(struct i915_request *rq,
1396 unsigned int flags,
1397 long timeout)
1398 {
1399 const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1400 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1401 struct request_wait wait;
1402
1403 might_sleep();
1404 GEM_BUG_ON(timeout < 0);
1405
1406 if (dma_fence_is_signaled(&rq->fence))
1407 return timeout;
1408
1409 if (!timeout)
1410 return -ETIME;
1411
1412 trace_i915_request_wait_begin(rq, flags);
1413
1414 /*
1415 * We must never wait on the GPU while holding a lock as we
1416 * may need to perform a GPU reset. So while we don't need to
1417 * serialise wait/reset with an explicit lock, we do want
1418 * lockdep to detect potential dependency cycles.
1419 */
1420 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1421
1422 /*
1423 * Optimistic spin before touching IRQs.
1424 *
1425 * We may use a rather large value here to offset the penalty of
1426 * switching away from the active task. Frequently, the client will
1427 * wait upon an old swapbuffer to throttle itself to remain within a
1428 * frame of the gpu. If the client is running in lockstep with the gpu,
1429 * then it should not be waiting long at all, and a sleep now will incur
1430 * extra scheduler latency in producing the next frame. To try to
1431 * avoid adding the cost of enabling/disabling the interrupt to the
1432 * short wait, we first spin to see if the request would have completed
1433 * in the time taken to setup the interrupt.
1434 *
	 * We need up to 5us to enable the irq, and up to 20us to hide the
1436 * scheduler latency of a context switch, ignoring the secondary
1437 * impacts from a context switch such as cache eviction.
1438 *
1439 * The scheme used for low-latency IO is called "hybrid interrupt
1440 * polling". The suggestion there is to sleep until just before you
1441 * expect to be woken by the device interrupt and then poll for its
1442 * completion. That requires having a good predictor for the request
1443 * duration, which we currently lack.
1444 */
1445 if (CONFIG_DRM_I915_SPIN_REQUEST &&
1446 __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
1447 dma_fence_signal(&rq->fence);
1448 goto out;
1449 }
1450
1451 /*
1452 * This client is about to stall waiting for the GPU. In many cases
1453 * this is undesirable and limits the throughput of the system, as
1454 * many clients cannot continue processing user input/output whilst
1455 * blocked. RPS autotuning may take tens of milliseconds to respond
1456 * to the GPU load and thus incurs additional latency for the client.
1457 * We can circumvent that by promoting the GPU frequency to maximum
1458 * before we sleep. This makes the GPU throttle up much more quickly
1459 * (good for benchmarks and user experience, e.g. window animations),
1460 * but at a cost of spending more power processing the workload
1461 * (bad for battery).
1462 */
1463 if (flags & I915_WAIT_PRIORITY) {
1464 if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
1465 gen6_rps_boost(rq);
1466 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
1467 }
1468
1469 wait.tsk = current;
1470 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1471 goto out;
1472
1473 for (;;) {
1474 set_current_state(state);
1475
1476 if (i915_request_completed(rq)) {
1477 dma_fence_signal(&rq->fence);
1478 break;
1479 }
1480
1481 if (signal_pending_state(state, current)) {
1482 timeout = -ERESTARTSYS;
1483 break;
1484 }
1485
1486 if (!timeout) {
1487 timeout = -ETIME;
1488 break;
1489 }
1490
1491 timeout = io_schedule_timeout(timeout);
1492 }
1493 __set_current_state(TASK_RUNNING);
1494
1495 dma_fence_remove_callback(&rq->fence, &wait.cb);
1496
1497 out:
1498 mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_);
1499 trace_i915_request_wait_end(rq);
1500 return timeout;
1501 }
1502
bool i915_retire_requests(struct drm_i915_private *i915)
1504 {
1505 struct intel_gt_timelines *timelines = &i915->gt.timelines;
1506 struct intel_timeline *tl, *tn;
1507 unsigned long flags;
1508 LIST_HEAD(free);
1509
1510 spin_lock_irqsave(&timelines->lock, flags);
1511 list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
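		/*
		 * This is an opportunistic pass: if the timeline is busy
		 * (its mutex is contended), simply skip it rather than wait.
		 */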
1512 if (!mutex_trylock(&tl->mutex))
1513 continue;
1514
1515 intel_timeline_get(tl);
1516 GEM_BUG_ON(!tl->active_count);
1517 tl->active_count++; /* pin the list element */
1518 spin_unlock_irqrestore(&timelines->lock, flags);
1519
1520 retire_requests(tl);
1521
1522 spin_lock_irqsave(&timelines->lock, flags);
1523
1524 /* Resume iteration after dropping lock */
1525 list_safe_reset_next(tl, tn, link);
1526 if (!--tl->active_count)
1527 list_del(&tl->link);
1528
1529 mutex_unlock(&tl->mutex);
1530
1531 /* Defer the final release to after the spinlock */
1532 if (refcount_dec_and_test(&tl->kref.refcount)) {
1533 GEM_BUG_ON(tl->active_count);
1534 list_add(&tl->link, &free);
1535 }
1536 }
1537 spin_unlock_irqrestore(&timelines->lock, flags);
1538
1539 list_for_each_entry_safe(tl, tn, &free, link)
1540 __intel_timeline_free(&tl->kref);
1541
1542 return !list_empty(&timelines->active_list);
1543 }
1544
1545 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1546 #include "selftests/mock_request.c"
1547 #include "selftests/i915_request.c"
1548 #endif
1549
static void i915_global_request_shrink(void)
1551 {
1552 kmem_cache_shrink(global.slab_dependencies);
1553 kmem_cache_shrink(global.slab_execute_cbs);
1554 kmem_cache_shrink(global.slab_requests);
1555 }
1556
static void i915_global_request_exit(void)
1558 {
1559 kmem_cache_destroy(global.slab_dependencies);
1560 kmem_cache_destroy(global.slab_execute_cbs);
1561 kmem_cache_destroy(global.slab_requests);
1562 }
1563
1564 static struct i915_global_request global = { {
1565 .shrink = i915_global_request_shrink,
1566 .exit = i915_global_request_exit,
1567 } };
1568
int __init i915_global_request_init(void)
1570 {
1571 global.slab_requests = KMEM_CACHE(i915_request,
1572 SLAB_HWCACHE_ALIGN |
1573 SLAB_RECLAIM_ACCOUNT |
1574 SLAB_TYPESAFE_BY_RCU);
1575 if (!global.slab_requests)
1576 return -ENOMEM;
1577
1578 global.slab_execute_cbs = KMEM_CACHE(execute_cb,
1579 SLAB_HWCACHE_ALIGN |
1580 SLAB_RECLAIM_ACCOUNT |
1581 SLAB_TYPESAFE_BY_RCU);
1582 if (!global.slab_execute_cbs)
1583 goto err_requests;
1584
1585 global.slab_dependencies = KMEM_CACHE(i915_dependency,
1586 SLAB_HWCACHE_ALIGN |
1587 SLAB_RECLAIM_ACCOUNT);
1588 if (!global.slab_dependencies)
1589 goto err_execute_cbs;
1590
1591 i915_global_register(&global.base);
1592 return 0;
1593
1594 err_execute_cbs:
1595 kmem_cache_destroy(global.slab_execute_cbs);
1596 err_requests:
1597 kmem_cache_destroy(global.slab_requests);
1598 return -ENOMEM;
1599 }
1600