/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"

#ifdef CONFIG_SMP
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
#else
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
#endif

static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		/*
		 * N.B. Since task_asleep() and ttwu are not atomic, the
		 * waiter may actually go to sleep after the check, causing
		 * us to suppress a valid wakeup. We prefer to reduce the
		 * number of false positive missed_breadcrumb() warnings
		 * at the expense of a few false negatives, as it is easy
		 * to trigger a false positive under heavy load. Enough
		 * signal should remain from genuine missed_breadcrumb()
		 * for us to detect in CI.
		 */
		bool was_asleep = task_asleep(wait->tsk);

		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk) && was_asleep)
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}

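/*
 * intel_engine_wakeup - wake up the current bottom-half waiter, if any
 *
 * Takes b->irq_lock and kicks the waiter currently acting as the interrupt
 * bottom-half. Returns the ENGINE_WAKEUP_* flags reported by
 * __intel_breadcrumbs_wakeup() (zero if there was no waiter).
 */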
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
}

static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

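/*
 * missed_breadcrumb - record that a waiter slept through its interrupt
 *
 * Dumps the engine state when debugging is enabled and sets the engine's
 * bit in gpu_error.missed_irq_rings so that the fake-irq fallback can be
 * selected on subsequent waits.
 */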
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s missed breadcrumb at %pS\n",
				  engine->name, __builtin_return_address(0));
	}

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.hangcheck);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned int irq_count;

	if (!b->irq_armed)
		return;

	irq_count = READ_ONCE(b->irq_count);
	if (b->hangcheck_interrupts != irq_count) {
		b->hangcheck_interrupts = irq_count;
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g., low priority task on a loaded
	 * system) and wait until it sleeps before declaring a missed interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&b->fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}

static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.fake_irq);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
	 * Here the worker will wake up every jiffie in order to kick the
	 * oldest waiter to do the coherent seqno check.
	 */

	spin_lock_irq(&b->irq_lock);
	if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);
	if (!b->irq_armed)
		return;

	/* If the user has disabled the fake-irq, restore the hangchecking */
	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) {
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	mod_timer(&b->fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/*
	 * FIXME: Ideally we want this on the API boundary, but for the
	 * sake of testing with mock breadcrumbs (no HW so unable to
	 * enable irqs) we place it deep within the bowels, at the point
	 * of no return.
	 */
	GEM_BUG_ON(!intel_irqs_enabled(engine->i915));

	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	if (engine->irq_enable) {
		spin_lock(&engine->i915->irq_lock);
		engine->irq_enable(engine);
		spin_unlock(&engine->i915->irq_lock);
	}
}

static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	if (engine->irq_disable) {
		spin_lock(&engine->i915->irq_lock);
		engine->irq_disable(engine);
		spin_unlock(&engine->i915->irq_lock);
	}
}

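/*
 * __intel_engine_disarm_breadcrumbs - disarm the breadcrumb interrupt
 *
 * Caller must hold b->irq_lock, and there must be no waiter installed
 * (b->irq_wait == NULL). Drops the irq reference taken when the breadcrumbs
 * were armed and disables the user interrupt if that was the last reference.
 */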
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);

	GEM_BUG_ON(!b->irq_enabled);
	if (!--b->irq_enabled)
		irq_disable(engine);

	b->irq_armed = false;
}

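/*
 * intel_engine_pin_breadcrumbs_irq - hold the user interrupt enabled
 *
 * Takes an extra reference on b->irq_enabled so that the breadcrumb
 * interrupt stays enabled independently of any waiters, until the matching
 * intel_engine_unpin_breadcrumbs_irq().
 */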
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	if (!b->irq_enabled++)
		irq_enable(engine);
	GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
	spin_unlock_irq(&b->irq_lock);
}

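/*
 * intel_engine_unpin_breadcrumbs_irq - release a pinned breadcrumb irq
 *
 * Drops the reference taken by intel_engine_pin_breadcrumbs_irq() and
 * disables the user interrupt if it was the last reference.
 */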
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
	if (!--b->irq_enabled)
		irq_disable(engine);
	spin_unlock_irq(&b->irq_lock);
}

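/*
 * intel_engine_disarm_breadcrumbs - disarm the irq and flush all waiters
 *
 * Called when the engine is idled: kicks any remaining bottom-half,
 * disarms the breadcrumb interrupt and wakes every waiter left in the
 * rbtree so that each can drop out on its own.
 */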
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n;

	if (!b->irq_armed)
		return;

	/*
	 * We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
		missed_breadcrumb(engine);

	spin_lock_irq(&b->rb_lock);

	spin_lock(&b->irq_lock);
	b->irq_wait = NULL;
	if (b->irq_armed)
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		GEM_BUG_ON(!i915_seqno_passed(intel_engine_get_seqno(engine),
					      wait->seqno));
		RB_CLEAR_NODE(&wait->node);
		wake_up_process(wait->tsk);
	}
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);
}

static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/*
	 * Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
}

static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}

static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;
	bool enabled;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return false;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt.
		 */
		return true;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, we can forgo holding our own wakref
	 * for the interrupt as before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	enabled = false;
	if (!b->irq_enabled++ &&
	    !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
		irq_enable(engine);
		enabled = true;
	}

	enable_fake_irq(b);
	return enabled;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/*
	 * This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task. N.B. when the
	 * task wakes up, it will find the empty rb_node, discern that it
	 * has already been removed from the tree and skip the serialisation
	 * of the b->rb_lock and b->irq_lock. This means that the destruction
	 * of the intel_wait is not serialised with the interrupt handler
	 * by the waiter - it must instead be serialised by the caller.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	if (wait->tsk->state != TASK_RUNNING)
		wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(!b->irq_wait);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	if (next)
		wake_up_process(to_wait(next)->tsk);
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first, armed;
	u32 seqno;

	GEM_BUG_ON(!wait->seqno);

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches; since we hold
	 * the spinlock, we know that the first_waiter must be delayed, so we
	 * can reduce some of the sequential wake up latency if we take action
	 * ourselves and wake up the completed tasks in parallel. Also, by
	 * removing stale elements in the tree, we may be able to reduce the
	 * ping-pong between the old bottom-half and ourselves as first-waiter.
	 */
	armed = false;
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (first) {
		spin_lock(&b->irq_lock);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		armed = __intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	if (completed) {
		/* Advance the bottom-half (b->irq_wait) before we wake up
		 * the waiters who may scribble over their intel_wait
		 * just as the interrupt handler is dereferencing it via
		 * b->irq_wait.
		 */
		if (!first) {
			struct rb_node *next = rb_next(completed);
			GEM_BUG_ON(next == &wait->node);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return armed;
}

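/*
 * intel_engine_add_wait - add a waiter for a breadcrumb (seqno)
 *
 * Inserts @wait into the engine's waiter rbtree, arming the breadcrumb
 * interrupt if this waiter becomes the new bottom-half. Returns true if
 * the caller must perform a coherent seqno check itself: either the irq
 * was freshly armed, or the request appears to have already started.
 */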
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool armed;

	spin_lock_irq(&b->rb_lock);
	armed = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
	if (armed)
		return armed;

	/* Make the caller recheck if its request has already started. */
	return i915_seqno_passed(intel_engine_get_seqno(engine),
				 wait->seqno - 1);
}

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

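/*
 * __intel_engine_remove_wait - remove a waiter from the rbtree
 *
 * Caller must hold b->rb_lock. If @wait is the current bottom-half, any
 * already-completed waiters next in the queue may be woken directly
 * (subject to a priority check) before the bottom-half role is handed
 * over to the next remaining waiter.
 */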
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in parallel
			 * rather than sequentially, which should reduce
			 * the overall latency in waking all the completed
			 * clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}

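/*
 * intel_engine_remove_wait - detach a waiter after its wait completes
 *
 * Fast-path check for waiters that were already decoupled by the
 * bottom-half; otherwise takes b->rb_lock and removes the waiter from
 * the rbtree via __intel_engine_remove_wait().
 */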
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node)) {
		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
		return;
	}

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}

static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

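/*
 * intel_breadcrumbs_signaler - per-engine kthread that signals fences
 *
 * Runs as a SCHED_FIFO kthread. Each iteration it compares the engine's
 * current seqno against the oldest entries on b->signals, signals the
 * dma-fences of completed requests, and then sleeps until woken by a
 * user interrupt or by a new signaler being added.
 */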
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct i915_request *rq, *n;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		bool do_schedule = true;
		LIST_HEAD(list);
		u32 seqno;

		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&b->signals))
			goto sleep;

		/*
		 * We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		seqno = intel_engine_get_seqno(engine);

		spin_lock_irq(&b->rb_lock);
		list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
			u32 this = rq->signaling.wait.seqno;

			GEM_BUG_ON(!rq->signaling.wait.seqno);

			if (!i915_seqno_passed(seqno, this))
				break;

			if (likely(this == i915_request_global_seqno(rq))) {
				__intel_engine_remove_wait(engine,
							   &rq->signaling.wait);

				rq->signaling.wait.seqno = 0;
				__list_del_entry(&rq->signaling.link);

				if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
					      &rq->fence.flags)) {
					list_add_tail(&rq->signaling.link,
						      &list);
					i915_request_get(rq);
				}
			}
		}
		spin_unlock_irq(&b->rb_lock);

		if (!list_empty(&list)) {
			local_bh_disable();
			list_for_each_entry_safe(rq, n, &list, signaling.link) {
				dma_fence_signal(&rq->fence);
				GEM_BUG_ON(!i915_request_completed(rq));
				i915_request_put(rq);
			}
			local_bh_enable(); /* kick start the tasklets */

			/*
			 * If the engine is saturated we may be continually
			 * processing completed requests. This angers the
			 * NMI watchdog if we never let anything else
			 * have access to the CPU. Let's pretend to be nice
			 * and relinquish the CPU if we burn through the
			 * entire RT timeslice!
			 */
			do_schedule = need_resched();
		}

		if (unlikely(do_schedule)) {
			/* Before we sleep, check for a missed seqno */
			if (current->state & TASK_NORMAL &&
			    !list_empty(&b->signals) &&
			    engine->irq_seqno_barrier &&
			    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
					       &engine->irq_posted)) {
				engine->irq_seqno_barrier(engine);
				intel_engine_wakeup(engine);
			}

sleep:
			if (kthread_should_park())
				kthread_parkme();

			if (unlikely(kthread_should_stop()))
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

static void insert_signal(struct intel_breadcrumbs *b,
			  struct i915_request *request,
			  const u32 seqno)
{
	struct i915_request *iter;

	lockdep_assert_held(&b->rb_lock);

	/*
	 * A reasonable assumption is that we are called to add signals
	 * in sequence, as the requests are submitted for execution and
	 * assigned a global_seqno. This will be the case for the majority
	 * of internally generated signals (inter-engine signaling).
	 *
	 * Out of order waiters triggering random signaling enabling will
	 * be more problematic, but hopefully rare enough and the list
	 * small enough that the O(N) insertion sort is not an issue.
	 */

	list_for_each_entry_reverse(iter, &b->signals, signaling.link)
		if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
			break;

	list_add(&request->signaling.link, &iter->signaling.link);
}

bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait = &request->signaling.wait;
	u32 seqno;

	/*
	 * Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_request_global_seqno(request);
	if (!seqno) /* will be enabled later upon execution */
		return true;

	GEM_BUG_ON(wait->seqno);
	wait->tsk = b->signaler;
	wait->request = request;
	wait->seqno = seqno;

	/*
	 * Add ourselves into the list of waiters, registering our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	spin_lock(&b->rb_lock);
	insert_signal(b, request, seqno);
	wakeup &= __intel_engine_add_wait(engine, wait);
	spin_unlock(&b->rb_lock);

	if (wakeup) {
		wake_up_process(b->signaler);
		return !intel_wait_complete(wait);
	}

	return true;
}

void intel_engine_cancel_signaling(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	if (!READ_ONCE(request->signaling.wait.seqno))
		return;

	spin_lock(&b->rb_lock);
	__intel_engine_remove_wait(engine, &request->signaling.wait);
	if (fetch_and_zero(&request->signaling.wait.seqno))
		__list_del_entry(&request->signaling.link);
	spin_unlock(&b->rb_lock);
}

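/*
 * intel_engine_init_breadcrumbs - set up the breadcrumb machinery
 *
 * Initialises the locks, timers and signal list, and spawns the
 * per-engine signaler kthread. Returns 0 on success or the error from
 * kthread_run().
 */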
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
	timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);

	INIT_LIST_HEAD(&b->signals);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */
	del_timer_sync(&b->hangcheck);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;

	spin_lock_irqsave(&b->irq_lock, flags);

	/*
	 * Leave the fake_irq timer enabled (if it is running), but clear the
	 * bit so that it turns itself off on its next wake up and goes back
	 * to the long hangcheck interval if still required.
	 */
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	/*
	 * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is currently
	 * idle (we haven't started it yet), so there is no possibility of a
	 * missed interrupt as we enabled the irq, and we can clear the
	 * immediate wakeup (until a real interrupt arrives for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_unlock_irqrestore(&b->irq_lock, flags);
}

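/*
 * intel_engine_fini_breadcrumbs - tear down the breadcrumb machinery
 *
 * Expects the engine to be idle with no waiters or pending signals;
 * stops the signaler kthread and cancels the fake-irq/hangcheck timers.
 */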
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(!list_empty(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif