// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
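
/*
 * For illustration only (not part of this file in the kernel sources):
 * a minimal userspace sketch of how a task asks for a SCHED_DEADLINE
 * reservation via sched_setattr(2), with made-up parameters (10ms of
 * runtime every 100ms):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// 10ms, in ns
 *		.sched_deadline	= 100 * 1000 * 1000,	// 100ms
 *		.sched_period	= 100 * 1000 * 1000,	// 100ms
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))	// 0 == self
 *		perror("sched_setattr");
 *
 * A task admitted this way is guaranteed its runtime every period as
 * long as it behaves; if it overruns, the CBS logic below throttles it.
 */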
#include "sched.h"
#include "pelt.h"

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	struct root_domain *rd = cpu_rq(i)->rd;
	int cpus = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "sched RCU must be held");
	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
	return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
	return 1;
}
#endif

static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->running_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->running_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
	if (dl_rq->running_bw > old)
		dl_rq->running_bw = 0;
	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw += dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
	u64 old = dl_rq->this_bw;

	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
	dl_rq->this_bw -= dl_bw;
	SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
	if (dl_rq->this_bw > old)
		dl_rq->this_bw = 0;
	SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	if (!dl_entity_is_special(dl_se))
		__sub_running_bw(dl_se->dl_bw, dl_rq);
}

void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);

	if (task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);
}

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so-called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, and which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |    ACTIVE        |
 *          +------------------>+   contending     |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                dequeue |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |              t < 0-lag |      |
 *          |                        |      |
 *          |                        V      |
 *          |                   +----+------+------+
 *          | sub_running_bw    |    ACTIVE        |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *            fired             +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
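
/*
 * Worked example (illustrative numbers, not from the original source):
 * for a task with dl_runtime = 10ms and dl_period = 100ms (utilization
 * 0.1) that blocks with 2ms of runtime left, the 0-lag time is
 *
 *	deadline - runtime / utilization = deadline - 2ms / 0.1
 *					 = deadline - 20ms,
 *
 * which is how task_non_contending() below computes zerolag_time
 * (runtime * dl_period / dl_runtime = 2ms * 100 / 10 = 20ms).
 */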
static void task_non_contending(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->inactive_timer;
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	s64 zerolag_time;

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (dl_entity_is_special(dl_se))
		return;

	WARN_ON(hrtimer_active(&dl_se->inactive_timer));
	WARN_ON(dl_se->dl_non_contending);

	zerolag_time = dl_se->deadline -
		div64_long((dl_se->runtime * dl_se->dl_period),
			   dl_se->dl_runtime);

	/*
	 * Using relative times instead of the absolute "0-lag time"
	 * allows us to simplify the code.
	 */
	zerolag_time -= rq_clock(rq);

	/*
	 * If the "0-lag time" already passed, decrease the active
	 * utilization now, instead of starting a timer.
	 */
	if (zerolag_time < 0) {
		if (dl_task(p))
			sub_running_bw(dl_se, dl_rq);
		if (!dl_task(p) || p->state == TASK_DEAD) {
			struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

			if (p->state == TASK_DEAD)
				sub_rq_bw(&p->dl, &rq->dl);
			raw_spin_lock(&dl_b->lock);
			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
			__dl_clear_params(p);
			raw_spin_unlock(&dl_b->lock);
		}

		return;
	}

	dl_se->dl_non_contending = 1;
	get_task_struct(p);
	hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
}

static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	/*
	 * If this is a non-deadline task that has been boosted,
	 * do nothing.
	 */
	if (dl_se->dl_runtime == 0)
		return;

	if (flags & ENQUEUE_MIGRATED)
		add_rq_bw(dl_se, dl_rq);

	if (dl_se->dl_non_contending) {
		dl_se->dl_non_contending = 0;
		/*
		 * If the timer handler is currently running and the
		 * timer cannot be cancelled, inactive_task_timer()
		 * will see that dl_non_contending is not set, and
		 * will not touch the rq's active utilization,
		 * so we are still safe.
		 */
		if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
			put_task_struct(dl_task_of(dl_se));
	} else {
		/*
		 * Since "dl_non_contending" is not set, the
		 * task's utilization has already been removed from
		 * the active utilization (either when the task blocked,
		 * or when the "inactive timer" fired).
		 * So, add it back.
		 */
		add_running_bw(dl_se, dl_rq);
	}
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}
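
/*
 * Note that to_ratio() returns runtime / period scaled by 2^BW_SHIFT.
 * As an illustration (assuming the default rt limits of 950000us of
 * runtime every 1000000us), dl_b->bw ends up as roughly
 * 0.95 * 2^20 ~= 996147, i.e. -deadline tasks may use at most 95% of
 * the CPU bandwidth.
 */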

void init_dl_rq(struct dl_rq *dl_rq)
{
	dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif

	dl_rq->running_bw = 0;
	dl_rq->this_bw = 0;
	init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	bool leftmost = true;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	if (leftmost)
		dl_rq->earliest_dl.next = p->dl.deadline;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color_cached(&p->pushable_dl_tasks,
			       &dl_rq->pushable_dl_tasks_root, leftmost);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		if (next_node) {
			dl_rq->earliest_dl.next = rb_entry(next_node,
				struct task_struct, pushable_dl_tasks)->dl.deadline;
		}
	}

	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void deadline_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_dl_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
	struct rq *later_rq = NULL;

	later_rq = find_lock_later_rq(p, rq);
	if (!later_rq) {
		int cpu;

		/*
		 * If we cannot preempt any rq, fall back to pick any
		 * online CPU:
		 */
		cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
		if (cpu >= nr_cpu_ids) {
			/*
			 * Failed to find any suitable CPU.
			 * The task will never come back!
			 */
			BUG_ON(dl_bandwidth_enabled());

			/*
			 * If admission control is disabled we
			 * try a little harder to let the task
			 * run.
			 */
			cpu = cpumask_any(cpu_active_mask);
		}
		later_rq = cpu_rq(cpu);
		double_lock_balance(rq, later_rq);
	}

	set_task_cpu(p, later_rq->cpu);
	double_unlock_balance(later_rq, rq);

	return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(dl_se->dl_boosted);
	WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

	/*
	 * We are racing with the deadline timer. So, do nothing because
	 * the deadline timer handler will take care of properly recharging
	 * the runtime and postponing the deadline.
	 */
	if (dl_se->dl_throttled)
		return;

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
	dl_se->runtime = dl_se->dl_runtime;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having just underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded && dl_se->runtime > 0)
		dl_se->runtime = 0;

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}
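
	/*
	 * Illustrative numbers (not from the original source): with
	 * dl_runtime = 10ms and dl_period = 100ms, an entity that ends
	 * up at runtime = -25ms after a long overrun takes three loop
	 * iterations: runtime goes -25ms -> -15ms -> -5ms -> 5ms, while
	 * the deadline is pushed 3 periods (300ms) into the future, so
	 * the reserved bandwidth (10ms/100ms) is still respected.
	 */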

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	if (dl_se->dl_yielded)
		dl_se->dl_yielded = 0;
	if (dl_se->dl_throttled)
		dl_se->dl_throttled = 0;
}

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For
 * tasks with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
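
/*
 * Worked example (illustrative numbers, not from the original source):
 * take dl_runtime = 10ms and dl_deadline = 100ms, i.e. a bandwidth of
 * 0.1. If the task wakes up with runtime = 4ms left and 30ms to its
 * deadline, then left = 100 * 4 = 400 and right = 30 * 10 = 300 (in
 * ms^2, ignoring the DL_SCALE shifts): right < left, so
 * 4ms/30ms > 0.1 and the parameters cannot be recycled. With 50ms to
 * the deadline instead, right = 500 >= 400 = left, and the old
 * deadline and runtime are kept.
 */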

/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime to avoid the task overrunning its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In such a way that runtime will be equal to the maximum density
 * the task can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
	u64 laxity = dl_se->deadline - rq_clock(rq);

	/*
	 * If the task has deadline < period, and the deadline is in the past,
	 * it should already be throttled before this check.
	 *
	 * See update_dl_entity() comments for further details.
	 */
	WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

	dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
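
/*
 * Continuing the example above (illustrative numbers): with a density
 * dl_runtime / dl_deadline = 0.1 and 30ms of laxity, the revised rule
 * keeps the current deadline and trims the runtime to 0.1 * 30ms = 3ms,
 * instead of granting a full 10ms replenishment. dl_density stores the
 * density already scaled by 2^BW_SHIFT, hence the shift back above.
 */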

/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only to tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
	return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revised CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revised CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revised CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments in the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {

		if (unlikely(!dl_is_implicit(dl_se) &&
			     !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
			     !dl_se->dl_boosted)) {
			update_dl_revised_wakeup(dl_se, rq);
			return;
		}

		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
	return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}
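
/*
 * E.g. (illustrative numbers): a constrained task with dl_deadline = 50ms
 * and dl_period = 100ms whose current absolute deadline is at t + 30ms
 * started its current period at t - 20ms, so dl_next_period() returns
 * (t + 30ms) - 50ms + 100ms = t + 80ms.
 */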

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;
	struct hrtimer *timer = &dl_se->dl_timer;
	struct rq *rq = task_rq(p);
	ktime_t now, act;
	s64 delta;

	lockdep_assert_held(&rq->lock);

	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_next_period(dl_se));
	now = hrtimer_cb_get_time(timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	/*
	 * !enqueued will guarantee another callback; even if one is already in
	 * progress. This ensures a balanced {get,put}_task_struct().
	 *
	 * The race against __run_timer() clearing the enqueued state is
	 * harmless because we're holding task_rq()->lock, therefore the timer
	 * expiring after we've done the check will wait on its task_rq_lock()
	 * and observe our state.
	 */
	if (!hrtimer_is_queued(timer)) {
		get_task_struct(p);
		hrtimer_start(timer, act, HRTIMER_MODE_ABS);
	}

	return 1;
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (i.e., it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);

	/*
	 * The task might have changed its scheduling policy to something
	 * different than SCHED_DEADLINE (through switched_from_dl()).
	 */
	if (!dl_task(p))
		goto unlock;

	/*
	 * The task might have been boosted by someone else and might be in the
	 * boosting/deboosting path; in that case it's not throttled.
	 */
	if (dl_se->dl_boosted)
		goto unlock;

	/*
	 * Spurious timer due to start_dl_timer() race; or we already received
	 * a replenishment from rt_mutex_setprio().
	 */
	if (!dl_se->dl_throttled)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);

	/*
	 * If the throttle happened during sched-out; like:
	 *
	 *   schedule()
	 *     deactivate_task()
	 *       dequeue_task_dl()
	 *         update_curr_dl()
	 *           start_dl_timer()
	 *         __dequeue_task_dl()
	 *     prev->on_rq = 0;
	 *
	 * We can be both throttled and !queued. Replenish the counter
	 * but do not enqueue -- wait for our wakeup to do that.
	 */
	if (!task_on_rq_queued(p)) {
		replenish_dl_entity(dl_se, dl_se);
		goto unlock;
	}

#ifdef CONFIG_SMP
	if (unlikely(!rq->online)) {
		/*
		 * If the runqueue is no longer available, migrate the
		 * task elsewhere. This necessarily changes rq.
		 */
		lockdep_unpin_lock(&rq->lock, rf.cookie);
		rq = dl_task_offline_migration(rq, p);
		rf.cookie = lockdep_pin_lock(&rq->lock);
		update_rq_clock(rq);

		/*
		 * Now that the task has been migrated to the new RQ and we
		 * have that locked, proceed as normal and enqueue the task
		 * there.
		 */
	}
#endif

	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
	if (dl_task(rq->curr))
		check_preempt_curr_dl(rq, p, 0);
	else
		resched_curr(rq);

#ifdef CONFIG_SMP
	/*
	 * Queueing this task back might have overloaded rq, check if we need
	 * to kick someone away.
	 */
	if (has_pushable_dl_tasks(rq)) {
		/*
		 * Nothing relies on rq->lock after this, so it's safe to drop
		 * rq->lock.
		 */
		rq_unpin_lock(rq, &rf);
		push_dl_task(rq);
		rq_repin_lock(rq, &rf);
	}
#endif

unlock:
	task_rq_unlock(rq, p, &rf);

	/*
	 * This can free the task_struct, including this hrtimer, do not touch
	 * anything related to that after this.
	 */
	put_task_struct(p);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
	    dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
		if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
			return;
		dl_se->dl_throttled = 1;
		if (dl_se->runtime > 0)
			dl_se->runtime = 0;
	}
}

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
	return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as
 *
 *   "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
 *
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * runqueue active utilization, and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
	u64 u_act;
	u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

	/*
	 * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
	 * we compare u_inact + rq->dl.extra_bw with
	 * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
	 * u_inact + rq->dl.extra_bw can be larger than 1
	 * (so, 1 - u_inact - rq->dl.extra_bw would be negative,
	 * leading to wrong results).
	 */
	if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
		u_act = u_act_min;
	else
		u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

	return (delta * u_act) >> BW_SHIFT;
}
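
/*
 * Worked example (illustrative numbers, not from the original source):
 * suppose this_bw = 0.5 and running_bw = 0.3 (both scaled by BW_UNIT in
 * the code), so u_inact = 0.2, and suppose extra_bw = 0.1. Then a task
 * whose minimum charge u_act_min stays below 0.7 is charged
 * u_act = 1 - 0.2 - 0.1 = 0.7 of its wall-clock execution: running for
 * 1ms consumes only 0.7ms of its runtime budget, reclaiming the
 * bandwidth that inactive entities are not using.
 */
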
1142
1143 /*
1144 * Update the current task's runtime statistics (provided it is still
1145 * a -deadline task and has not been removed from the dl_rq).
1146 */
update_curr_dl(struct rq * rq)1147 static void update_curr_dl(struct rq *rq)
1148 {
1149 struct task_struct *curr = rq->curr;
1150 struct sched_dl_entity *dl_se = &curr->dl;
1151 u64 delta_exec, scaled_delta_exec;
1152 int cpu = cpu_of(rq);
1153 u64 now;
1154
1155 if (!dl_task(curr) || !on_dl_rq(dl_se))
1156 return;
1157
1158 /*
1159 * Consumed budget is computed considering the time as
1160 * observed by schedulable tasks (excluding time spent
1161 * in hardirq context, etc.). Deadlines are instead
1162 * computed using hard walltime. This seems to be the more
1163 * natural solution, but the full ramifications of this
1164 * approach need further study.
1165 */
1166 now = rq_clock_task(rq);
1167 delta_exec = now - curr->se.exec_start;
1168 if (unlikely((s64)delta_exec <= 0)) {
1169 if (unlikely(dl_se->dl_yielded))
1170 goto throttle;
1171 return;
1172 }
1173
1174 schedstat_set(curr->se.statistics.exec_max,
1175 max(curr->se.statistics.exec_max, delta_exec));
1176
1177 curr->se.sum_exec_runtime += delta_exec;
1178 account_group_exec_runtime(curr, delta_exec);
1179
1180 curr->se.exec_start = now;
1181 cgroup_account_cputime(curr, delta_exec);
1182
1183 if (dl_entity_is_special(dl_se))
1184 return;
1185
1186 /*
1187 * For tasks that participate in GRUB, we implement GRUB-PA: the
1188 * spare reclaimed bandwidth is used to clock down frequency.
1189 *
1190 * For the others, we still need to scale reservation parameters
1191 * according to current frequency and CPU maximum capacity.
1192 */
1193 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1194 scaled_delta_exec = grub_reclaim(delta_exec,
1195 rq,
1196 &curr->dl);
1197 } else {
1198 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1199 unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
1200
1201 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1202 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1203 }
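
	/*
	 * As an illustration (made-up numbers): on a CPU at half of its
	 * maximum frequency, scale_freq ~= 512 with scale_cpu = 1024, so
	 * 2ms of wall-clock execution is accounted as only 1ms of
	 * runtime: the reservation is expressed in terms of work done at
	 * maximum frequency and capacity.
	 */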
1204
1205 dl_se->runtime -= scaled_delta_exec;
1206
1207 throttle:
1208 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1209 dl_se->dl_throttled = 1;
1210
1211 /* If requested, inform the user about runtime overruns. */
1212 if (dl_runtime_exceeded(dl_se) &&
1213 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1214 dl_se->dl_overrun = 1;
1215
1216 __dequeue_task_dl(rq, curr, 0);
1217 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1218 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1219
1220 if (!is_leftmost(curr, &rq->dl))
1221 resched_curr(rq);
1222 }
1223
1224 /*
1225 * Because -- for now -- we share the rt bandwidth, we need to
1226 * account our runtime there too, otherwise actual rt tasks
1227 * would be able to exceed the shared quota.
1228 *
1229 * Account to the root rt group for now.
1230 *
1231 * The solution we're working towards is having the RT groups scheduled
1232 * using deadline servers -- however there's a few nasties to figure
1233 * out before that can happen.
1234 */
1235 if (rt_bandwidth_enabled()) {
1236 struct rt_rq *rt_rq = &rq->rt;
1237
1238 raw_spin_lock(&rt_rq->rt_runtime_lock);
1239 /*
1240 * We'll let actual RT tasks worry about the overflow here, we
1241 * have our own CBS to keep us inline; only account when RT
1242 * bandwidth is relevant.
1243 */
1244 if (sched_rt_bandwidth_account(rt_rq))
1245 rt_rq->rt_time += delta_exec;
1246 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1247 }
1248 }
1249
inactive_task_timer(struct hrtimer * timer)1250 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1251 {
1252 struct sched_dl_entity *dl_se = container_of(timer,
1253 struct sched_dl_entity,
1254 inactive_timer);
1255 struct task_struct *p = dl_task_of(dl_se);
1256 struct rq_flags rf;
1257 struct rq *rq;
1258
1259 rq = task_rq_lock(p, &rf);
1260
1261 sched_clock_tick();
1262 update_rq_clock(rq);
1263
1264 if (!dl_task(p) || p->state == TASK_DEAD) {
1265 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1266
1267 if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1268 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1269 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1270 dl_se->dl_non_contending = 0;
1271 }
1272
1273 raw_spin_lock(&dl_b->lock);
1274 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1275 raw_spin_unlock(&dl_b->lock);
1276 __dl_clear_params(p);
1277
1278 goto unlock;
1279 }
1280 if (dl_se->dl_non_contending == 0)
1281 goto unlock;
1282
1283 sub_running_bw(dl_se, &rq->dl);
1284 dl_se->dl_non_contending = 0;
1285 unlock:
1286 task_rq_unlock(rq, p, &rf);
1287 put_task_struct(p);
1288
1289 return HRTIMER_NORESTART;
1290 }
1291
init_dl_inactive_task_timer(struct sched_dl_entity * dl_se)1292 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1293 {
1294 struct hrtimer *timer = &dl_se->inactive_timer;
1295
1296 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1297 timer->function = inactive_task_timer;
1298 }
1299
1300 #ifdef CONFIG_SMP
1301
inc_dl_deadline(struct dl_rq * dl_rq,u64 deadline)1302 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1303 {
1304 struct rq *rq = rq_of_dl_rq(dl_rq);
1305
1306 if (dl_rq->earliest_dl.curr == 0 ||
1307 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1308 dl_rq->earliest_dl.curr = deadline;
1309 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1310 }
1311 }
1312
dec_dl_deadline(struct dl_rq * dl_rq,u64 deadline)1313 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1314 {
1315 struct rq *rq = rq_of_dl_rq(dl_rq);
1316
1317 /*
1318 * Since we may have removed our earliest (and/or next earliest)
1319 * task we must recompute them.
1320 */
1321 if (!dl_rq->dl_nr_running) {
1322 dl_rq->earliest_dl.curr = 0;
1323 dl_rq->earliest_dl.next = 0;
1324 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1325 } else {
1326 struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1327 struct sched_dl_entity *entry;
1328
1329 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1330 dl_rq->earliest_dl.curr = entry->deadline;
1331 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1332 }
1333 }
1334
1335 #else
1336
inc_dl_deadline(struct dl_rq * dl_rq,u64 deadline)1337 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
dec_dl_deadline(struct dl_rq * dl_rq,u64 deadline)1338 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1339
1340 #endif /* CONFIG_SMP */
1341
1342 static inline
inc_dl_tasks(struct sched_dl_entity * dl_se,struct dl_rq * dl_rq)1343 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1344 {
1345 int prio = dl_task_of(dl_se)->prio;
1346 u64 deadline = dl_se->deadline;
1347
1348 WARN_ON(!dl_prio(prio));
1349 dl_rq->dl_nr_running++;
1350 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1351
1352 inc_dl_deadline(dl_rq, deadline);
1353 inc_dl_migration(dl_se, dl_rq);
1354 }
1355
1356 static inline
dec_dl_tasks(struct sched_dl_entity * dl_se,struct dl_rq * dl_rq)1357 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1358 {
1359 int prio = dl_task_of(dl_se)->prio;
1360
1361 WARN_ON(!dl_prio(prio));
1362 WARN_ON(!dl_rq->dl_nr_running);
1363 dl_rq->dl_nr_running--;
1364 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1365
1366 dec_dl_deadline(dl_rq, dl_se->deadline);
1367 dec_dl_migration(dl_se, dl_rq);
1368 }
1369
__enqueue_dl_entity(struct sched_dl_entity * dl_se)1370 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1371 {
1372 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1373 struct rb_node **link = &dl_rq->root.rb_root.rb_node;
1374 struct rb_node *parent = NULL;
1375 struct sched_dl_entity *entry;
1376 int leftmost = 1;
1377
1378 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1379
1380 while (*link) {
1381 parent = *link;
1382 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1383 if (dl_time_before(dl_se->deadline, entry->deadline))
1384 link = &parent->rb_left;
1385 else {
1386 link = &parent->rb_right;
1387 leftmost = 0;
1388 }
1389 }
1390
1391 rb_link_node(&dl_se->rb_node, parent, link);
1392 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1393
1394 inc_dl_tasks(dl_se, dl_rq);
1395 }
1396
__dequeue_dl_entity(struct sched_dl_entity * dl_se)1397 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1398 {
1399 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1400
1401 if (RB_EMPTY_NODE(&dl_se->rb_node))
1402 return;
1403
1404 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1405 RB_CLEAR_NODE(&dl_se->rb_node);
1406
1407 dec_dl_tasks(dl_se, dl_rq);
1408 }
1409
1410 static void
enqueue_dl_entity(struct sched_dl_entity * dl_se,struct sched_dl_entity * pi_se,int flags)1411 enqueue_dl_entity(struct sched_dl_entity *dl_se,
1412 struct sched_dl_entity *pi_se, int flags)
1413 {
1414 BUG_ON(on_dl_rq(dl_se));
1415
1416 /*
1417 * If this is a wakeup or a new instance, the scheduling
1418 * parameters of the task might need updating. Otherwise,
1419 * we want a replenishment of its runtime.
1420 */
1421 if (flags & ENQUEUE_WAKEUP) {
1422 task_contending(dl_se, flags);
1423 update_dl_entity(dl_se, pi_se);
1424 } else if (flags & ENQUEUE_REPLENISH) {
1425 replenish_dl_entity(dl_se, pi_se);
1426 } else if ((flags & ENQUEUE_RESTORE) &&
1427 dl_time_before(dl_se->deadline,
1428 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1429 setup_new_dl_entity(dl_se);
1430 }
1431
1432 __enqueue_dl_entity(dl_se);
1433 }
1434
dequeue_dl_entity(struct sched_dl_entity * dl_se)1435 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1436 {
1437 __dequeue_dl_entity(dl_se);
1438 }
1439
enqueue_task_dl(struct rq * rq,struct task_struct * p,int flags)1440 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1441 {
1442 struct task_struct *pi_task = rt_mutex_get_top_task(p);
1443 struct sched_dl_entity *pi_se = &p->dl;
1444
1445 /*
1446 * Use the scheduling parameters of the top pi-waiter task if:
1447 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1448 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1449 * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1450 * boosted due to a SCHED_DEADLINE pi-waiter).
1451 * Otherwise we keep our runtime and deadline.
1452 */
1453 if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1454 pi_se = &pi_task->dl;
1455 } else if (!dl_prio(p->normal_prio)) {
1456 /*
1457 * Special case in which we have a !SCHED_DEADLINE task
1458 * that is going to be deboosted, but exceeds its
1459 * runtime while doing so. No point in replenishing
1460 * it, as it's going to return back to its original
1461 * scheduling class after this.
1462 */
1463 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1464 return;
1465 }
1466
1467 /*
1468 * Check if a constrained deadline task was activated
1469 * after the deadline but before the next period.
1470 * If that is the case, the task will be throttled and
1471 * the replenishment timer will be set to the next period.
1472 */
1473 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
1474 dl_check_constrained_dl(&p->dl);
1475
1476 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
1477 add_rq_bw(&p->dl, &rq->dl);
1478 add_running_bw(&p->dl, &rq->dl);
1479 }
1480
1481 /*
1482 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1483 * its budget it needs a replenishment and, since it now is on
1484 * its rq, the bandwidth timer callback (which clearly has not
1485 * run yet) will take care of this.
1486 * However, the active utilization does not depend on the fact
1487 * that the task is on the runqueue or not (but depends on the
1488 * task's state - in GRUB parlance, "inactive" vs "active contending").
1489 * In other words, even if a task is throttled its utilization must
1490 * be counted in the active utilization; hence, we need to call
1491 * add_running_bw().
1492 */
1493 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1494 if (flags & ENQUEUE_WAKEUP)
1495 task_contending(&p->dl, flags);
1496
1497 return;
1498 }
1499
1500 enqueue_dl_entity(&p->dl, pi_se, flags);
1501
1502 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1503 enqueue_pushable_dl_task(rq, p);
1504 }
1505
__dequeue_task_dl(struct rq * rq,struct task_struct * p,int flags)1506 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1507 {
1508 dequeue_dl_entity(&p->dl);
1509 dequeue_pushable_dl_task(rq, p);
1510 }
1511
dequeue_task_dl(struct rq * rq,struct task_struct * p,int flags)1512 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1513 {
1514 update_curr_dl(rq);
1515 __dequeue_task_dl(rq, p, flags);
1516
1517 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
1518 sub_running_bw(&p->dl, &rq->dl);
1519 sub_rq_bw(&p->dl, &rq->dl);
1520 }
1521
1522 /*
1523 * This check allows to start the inactive timer (or to immediately
1524 * decrease the active utilization, if needed) in two cases:
1525 * when the task blocks and when it is terminating
1526 * (p->state == TASK_DEAD). We can handle the two cases in the same
1527 * way, because from GRUB's point of view the same thing is happening
1528 * (the task moves from "active contending" to "active non contending"
1529 * or "inactive")
1530 */
1531 if (flags & DEQUEUE_SLEEP)
1532 task_non_contending(p);
1533 }
1534
1535 /*
1536 * Yield task semantic for -deadline tasks is:
1537 *
1538 * get off from the CPU until our next instance, with
1539 * a new runtime. This is of little use now, since we
1540 * don't have a bandwidth reclaiming mechanism. Anyway,
1541 * bandwidth reclaiming is planned for the future, and
1542 * yield_task_dl will indicate that some spare budget
1543 * is available for other task instances to use it.
1544 */
yield_task_dl(struct rq * rq)1545 static void yield_task_dl(struct rq *rq)
1546 {
1547 /*
1548 * We make the task go to sleep until its current deadline by
1549 * forcing its runtime to zero. This way, update_curr_dl() stops
1550 * it and the bandwidth timer will wake it up and will give it
1551 * new scheduling parameters (thanks to dl_yielded=1).
1552 */
1553 rq->curr->dl.dl_yielded = 1;
1554
1555 update_rq_clock(rq);
1556 update_curr_dl(rq);
1557 /*
1558 * Tell update_rq_clock() that we've just updated,
1559 * so we don't do microscopic update in schedule()
1560 * and double the fastpath cost.
1561 */
1562 rq_clock_skip_update(rq);
1563 }
1564
1565 #ifdef CONFIG_SMP
1566
1567 static int find_later_rq(struct task_struct *task);
1568
1569 static int
select_task_rq_dl(struct task_struct * p,int cpu,int sd_flag,int flags)1570 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1571 {
1572 struct task_struct *curr;
1573 struct rq *rq;
1574
1575 if (sd_flag != SD_BALANCE_WAKE)
1576 goto out;
1577
1578 rq = cpu_rq(cpu);
1579
1580 rcu_read_lock();
1581 curr = READ_ONCE(rq->curr); /* unlocked access */
1582
1583 /*
1584 * If we are dealing with a -deadline task, we must
1585 * decide where to wake it up.
1586 * If it has a later deadline and the current task
1587 * on this rq can't move (provided the waking task
1588 * can!) we prefer to send it somewhere else. On the
1589 * other hand, if it has a shorter deadline, we
1590 * try to make it stay here, it might be important.
1591 */
1592 if (unlikely(dl_task(curr)) &&
1593 (curr->nr_cpus_allowed < 2 ||
1594 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1595 (p->nr_cpus_allowed > 1)) {
1596 int target = find_later_rq(p);
1597
1598 if (target != -1 &&
1599 (dl_time_before(p->dl.deadline,
1600 cpu_rq(target)->dl.earliest_dl.curr) ||
1601 (cpu_rq(target)->dl.dl_nr_running == 0)))
1602 cpu = target;
1603 }
1604 rcu_read_unlock();
1605
1606 out:
1607 return cpu;
1608 }
1609
migrate_task_rq_dl(struct task_struct * p,int new_cpu __maybe_unused)1610 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
1611 {
1612 struct rq *rq;
1613
1614 if (p->state != TASK_WAKING)
1615 return;
1616
1617 rq = task_rq(p);
1618 /*
1619 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1620 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1621 * rq->lock is not... So, lock it
1622 */
1623 raw_spin_lock(&rq->lock);
1624 if (p->dl.dl_non_contending) {
1625 sub_running_bw(&p->dl, &rq->dl);
1626 p->dl.dl_non_contending = 0;
1627 /*
1628 * If the timer handler is currently running and the
1629 * timer cannot be cancelled, inactive_task_timer()
1630 * will see that dl_not_contending is not set, and
1631 * will not touch the rq's active utilization,
1632 * so we are still safe.
1633 */
1634 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1635 put_task_struct(p);
1636 }
1637 sub_rq_bw(&p->dl, &rq->dl);
1638 raw_spin_unlock(&rq->lock);
1639 }
1640
check_preempt_equal_dl(struct rq * rq,struct task_struct * p)1641 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1642 {
1643 /*
1644 * Current can't be migrated; rescheduling is useless.
1645 * Let's hope p can move out.
1646 */
1647 if (rq->curr->nr_cpus_allowed == 1 ||
1648 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1649 return;
1650
1651 /*
1652 * p is migratable, so let's not schedule it and
1653 * see if it is pushed or pulled somewhere else.
1654 */
1655 if (p->nr_cpus_allowed != 1 &&
1656 cpudl_find(&rq->rd->cpudl, p, NULL))
1657 return;
1658
1659 resched_curr(rq);
1660 }
1661
1662 #endif /* CONFIG_SMP */
1663
1664 /*
1665 * Only called when both the current and waking task are -deadline
1666 * tasks.
1667 */
1668 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1669 int flags)
1670 {
1671 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1672 resched_curr(rq);
1673 return;
1674 }
1675
1676 #ifdef CONFIG_SMP
1677 /*
1678 * In the unlikely case that current and p have the same deadline,
1679 * let us try to decide what's the best thing to do...
1680 */
1681 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1682 !test_tsk_need_resched(rq->curr))
1683 check_preempt_equal_dl(rq, p);
1684 #endif /* CONFIG_SMP */
1685 }
1686
1687 #ifdef CONFIG_SCHED_HRTICK
1688 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1689 {
1690 hrtick_start(rq, p->dl.runtime);
1691 }
1692 #else /* !CONFIG_SCHED_HRTICK */
1693 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1694 {
1695 }
1696 #endif
1697
1698 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1699 struct dl_rq *dl_rq)
1700 {
1701 struct rb_node *left = rb_first_cached(&dl_rq->root);
1702
1703 if (!left)
1704 return NULL;
1705
1706 return rb_entry(left, struct sched_dl_entity, rb_node);
1707 }
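
/*
 * Note: the rbtree is ordered by absolute deadline and rb_first_cached()
 * returns the cached leftmost node, so the earliest-deadline entity is
 * found in constant time without walking the tree.
 */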
1708
1709 static struct task_struct *
1710 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1711 {
1712 struct sched_dl_entity *dl_se;
1713 struct task_struct *p;
1714 struct dl_rq *dl_rq;
1715
1716 dl_rq = &rq->dl;
1717
1718 if (need_pull_dl_task(rq, prev)) {
1719 /*
1720 * This is OK, because current is on_cpu, which avoids it being
1721 * picked for load-balance; preemption/IRQs are still
1722 * disabled, avoiding further scheduler activity on it; and we're
1723 * being very careful to re-start the picking loop.
1724 */
1725 rq_unpin_lock(rq, rf);
1726 pull_dl_task(rq);
1727 rq_repin_lock(rq, rf);
1728 /*
1729 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1730 * means a stop task can slip in, in which case we need to
1731 * re-start task selection.
1732 */
1733 if (rq->stop && task_on_rq_queued(rq->stop))
1734 return RETRY_TASK;
1735 }
1736
1737 /*
1738 * When prev is DL, we may throttle it in put_prev_task().
1739 * So, we update time before we check for dl_nr_running.
1740 */
1741 if (prev->sched_class == &dl_sched_class)
1742 update_curr_dl(rq);
1743
1744 if (unlikely(!dl_rq->dl_nr_running))
1745 return NULL;
1746
1747 put_prev_task(rq, prev);
1748
1749 dl_se = pick_next_dl_entity(rq, dl_rq);
1750 BUG_ON(!dl_se);
1751
1752 p = dl_task_of(dl_se);
1753 p->se.exec_start = rq_clock_task(rq);
1754
1755 /* Running task will never be pushed. */
1756 dequeue_pushable_dl_task(rq, p);
1757
1758 if (hrtick_enabled(rq))
1759 start_hrtick_dl(rq, p);
1760
1761 deadline_queue_push_tasks(rq);
1762
1763 if (rq->curr->sched_class != &dl_sched_class)
1764 update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
1765
1766 return p;
1767 }
1768
1769 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1770 {
1771 update_curr_dl(rq);
1772
1773 update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
1774 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1775 enqueue_pushable_dl_task(rq, p);
1776 }
1777
1778 /*
1779 * scheduler tick hitting a task of our scheduling class.
1780 *
1781 * NOTE: This function can be called remotely by the tick offload that
1782 * goes along full dynticks. Therefore no local assumption can be made
1783 * and everything must be accessed through the @rq and @curr passed in
1784 * parameters.
1785 */
1786 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1787 {
1788 update_curr_dl(rq);
1789
1790 update_dl_rq_load_avg(rq_clock_task(rq), rq, 1);
1791 /*
1792 * Even when we have runtime, update_curr_dl() might have resulted in us
1793 * not being the leftmost task anymore. In that case NEED_RESCHED will
1794 * be set and schedule() will start a new hrtick for the next task.
1795 */
1796 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1797 is_leftmost(p, &rq->dl))
1798 start_hrtick_dl(rq, p);
1799 }
1800
1801 static void task_fork_dl(struct task_struct *p)
1802 {
1803 /*
1804 * SCHED_DEADLINE tasks cannot fork; this is enforced by
1805 * sched_fork().
1806 */
1807 }
1808
1809 static void set_curr_task_dl(struct rq *rq)
1810 {
1811 struct task_struct *p = rq->curr;
1812
1813 p->se.exec_start = rq_clock_task(rq);
1814
1815 /* You can't push away the running task */
1816 dequeue_pushable_dl_task(rq, p);
1817 }
1818
1819 #ifdef CONFIG_SMP
1820
1821 /* Only try algorithms three times */
1822 #define DL_MAX_TRIES 3
1823
1824 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1825 {
1826 if (!task_running(rq, p) &&
1827 cpumask_test_cpu(cpu, &p->cpus_allowed))
1828 return 1;
1829 return 0;
1830 }
1831
1832 /*
1833 * Return the earliest pushable task on this rq that is suitable
1834 * to be executed on the given CPU, or NULL if there is none:
1835 */
1836 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1837 {
1838 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
1839 struct task_struct *p = NULL;
1840
1841 if (!has_pushable_dl_tasks(rq))
1842 return NULL;
1843
1844 while (next_node) {
1845 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1846 
1847 if (pick_dl_task(rq, p, cpu))
1848 return p;
1849 
1850 next_node = rb_next(next_node);
1851 }
1854
1855 return NULL;
1856 }
1857
1858 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1859
1860 static int find_later_rq(struct task_struct *task)
1861 {
1862 struct sched_domain *sd;
1863 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1864 int this_cpu = smp_processor_id();
1865 int cpu = task_cpu(task);
1866
1867 /* Make sure the mask is initialized first */
1868 if (unlikely(!later_mask))
1869 return -1;
1870
1871 if (task->nr_cpus_allowed == 1)
1872 return -1;
1873
1874 /*
1875 * We have to consider system topology and task affinity
1876 * first, then we can look for a suitable CPU.
1877 */
1878 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1879 return -1;
1880
1881 /*
1882 * If we are here, some targets have been found, including
1883 * the most suitable, which is, among the runqueues where the
1884 * current tasks have later deadlines than the task's one, the
1885 * rq with the latest possible one.
1886 *
1887 * Now we check how well this matches with the task's
1888 * affinity and the system topology.
1889 *
1890 * The last CPU where the task ran is our first
1891 * guess, since it is most likely cache-hot there.
1892 */
1893 if (cpumask_test_cpu(cpu, later_mask))
1894 return cpu;
1895 /*
1896 * Check if this_cpu is to be skipped (i.e., it is
1897 * not in the mask) or not.
1898 */
1899 if (!cpumask_test_cpu(this_cpu, later_mask))
1900 this_cpu = -1;
1901
1902 rcu_read_lock();
1903 for_each_domain(cpu, sd) {
1904 if (sd->flags & SD_WAKE_AFFINE) {
1905 int best_cpu;
1906
1907 /*
1908 * If possible, preempting this_cpu is
1909 * cheaper than migrating.
1910 */
1911 if (this_cpu != -1 &&
1912 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1913 rcu_read_unlock();
1914 return this_cpu;
1915 }
1916
1917 best_cpu = cpumask_first_and(later_mask,
1918 sched_domain_span(sd));
1919 /*
1920 * Last chance: if a CPU is in both later_mask
1921 * and the current sd span, it becomes our
1922 * choice. Of course, the latest possible CPU is
1923 * already under consideration through later_mask.
1924 */
1925 if (best_cpu < nr_cpu_ids) {
1926 rcu_read_unlock();
1927 return best_cpu;
1928 }
1929 }
1930 }
1931 rcu_read_unlock();
1932
1933 /*
1934 * At this point, all our guesses have failed; we just return
1935 * 'something' and let the caller sort things out.
1936 */
1937 if (this_cpu != -1)
1938 return this_cpu;
1939
1940 cpu = cpumask_any(later_mask);
1941 if (cpu < nr_cpu_ids)
1942 return cpu;
1943
1944 return -1;
1945 }
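
/*
 * To summarize the preference order above: (1) the task's previous CPU,
 * if it is in later_mask (likely cache-hot); (2) this_cpu, if it shares
 * an SD_WAKE_AFFINE domain with the task (preempting locally is cheaper
 * than migrating); (3) the first later_mask CPU in such a domain;
 * (4) this_cpu, if it is in later_mask; (5) any later_mask CPU;
 * (6) -1 if everything failed.
 */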
1946
1947 /* Locks the rq it finds */
1948 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1949 {
1950 struct rq *later_rq = NULL;
1951 int tries;
1952 int cpu;
1953
1954 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1955 cpu = find_later_rq(task);
1956
1957 if ((cpu == -1) || (cpu == rq->cpu))
1958 break;
1959
1960 later_rq = cpu_rq(cpu);
1961
1962 if (later_rq->dl.dl_nr_running &&
1963 !dl_time_before(task->dl.deadline,
1964 later_rq->dl.earliest_dl.curr)) {
1965 /*
1966 * Target rq has tasks of equal or earlier deadline;
1967 * retrying does not release any lock and is unlikely
1968 * to yield a different result.
1969 */
1970 later_rq = NULL;
1971 break;
1972 }
1973
1974 /* Retry if something changed. */
1975 if (double_lock_balance(rq, later_rq)) {
1976 if (unlikely(task_rq(task) != rq ||
1977 !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
1978 task_running(rq, task) ||
1979 !dl_task(task) ||
1980 !task_on_rq_queued(task))) {
1981 double_unlock_balance(rq, later_rq);
1982 later_rq = NULL;
1983 break;
1984 }
1985 }
1986
1987 /*
1988 * If the rq we found has no -deadline task, or
1989 * its earliest one has a later deadline than our
1990 * task, the rq is a good one.
1991 */
1992 if (!later_rq->dl.dl_nr_running ||
1993 dl_time_before(task->dl.deadline,
1994 later_rq->dl.earliest_dl.curr))
1995 break;
1996
1997 /* Otherwise we try again. */
1998 double_unlock_balance(rq, later_rq);
1999 later_rq = NULL;
2000 }
2001
2002 return later_rq;
2003 }
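
/*
 * Note: double_lock_balance() may drop rq->lock in order to acquire
 * both locks in the proper order; when that happens, the loop above
 * revalidates the task's rq, affinity, policy and queued state under
 * both locks, giving up if anything changed, and retries a different
 * CPU (up to DL_MAX_TRIES) when the chosen rq is no longer suitable.
 */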
2004
2005 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2006 {
2007 struct task_struct *p;
2008
2009 if (!has_pushable_dl_tasks(rq))
2010 return NULL;
2011
2012 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
2013 struct task_struct, pushable_dl_tasks);
2014
2015 BUG_ON(rq->cpu != task_cpu(p));
2016 BUG_ON(task_current(rq, p));
2017 BUG_ON(p->nr_cpus_allowed <= 1);
2018
2019 BUG_ON(!task_on_rq_queued(p));
2020 BUG_ON(!dl_task(p));
2021
2022 return p;
2023 }
2024
2025 /*
2026 * See if the non-running -deadline tasks on this rq
2027 * can be sent to some other CPU where they can preempt
2028 * and start executing.
2029 */
2030 static int push_dl_task(struct rq *rq)
2031 {
2032 struct task_struct *next_task;
2033 struct rq *later_rq;
2034 int ret = 0;
2035
2036 if (!rq->dl.overloaded)
2037 return 0;
2038
2039 next_task = pick_next_pushable_dl_task(rq);
2040 if (!next_task)
2041 return 0;
2042
2043 retry:
2044 if (unlikely(next_task == rq->curr)) {
2045 WARN_ON(1);
2046 return 0;
2047 }
2048
2049 /*
2050 * If next_task preempts rq->curr, and rq->curr
2051 * can move away, it makes sense to just reschedule
2052 * without going further in pushing next_task.
2053 */
2054 if (dl_task(rq->curr) &&
2055 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2056 rq->curr->nr_cpus_allowed > 1) {
2057 resched_curr(rq);
2058 return 0;
2059 }
2060
2061 /* We might release rq lock */
2062 get_task_struct(next_task);
2063
2064 /* Will lock the rq it'll find */
2065 later_rq = find_lock_later_rq(next_task, rq);
2066 if (!later_rq) {
2067 struct task_struct *task;
2068
2069 /*
2070 * We must check all this again, since
2071 * find_lock_later_rq releases rq->lock and it is
2072 * then possible that next_task has migrated.
2073 */
2074 task = pick_next_pushable_dl_task(rq);
2075 if (task == next_task) {
2076 /*
2077 * The task is still there. We don't try
2078 * again; some other CPU will pull it when ready.
2079 */
2080 goto out;
2081 }
2082
2083 if (!task)
2084 /* No more tasks */
2085 goto out;
2086
2087 put_task_struct(next_task);
2088 next_task = task;
2089 goto retry;
2090 }
2091
2092 deactivate_task(rq, next_task, 0);
2093 sub_running_bw(&next_task->dl, &rq->dl);
2094 sub_rq_bw(&next_task->dl, &rq->dl);
2095 set_task_cpu(next_task, later_rq->cpu);
2096 add_rq_bw(&next_task->dl, &later_rq->dl);
2097
2098 /*
2099 * Update the later_rq clock here, because the clock is used
2100 * by the cpufreq_update_util() inside __add_running_bw().
2101 */
2102 update_rq_clock(later_rq);
2103 add_running_bw(&next_task->dl, &later_rq->dl);
2104 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2105 ret = 1;
2106
2107 resched_curr(later_rq);
2108
2109 double_unlock_balance(rq, later_rq);
2110
2111 out:
2112 put_task_struct(next_task);
2113
2114 return ret;
2115 }
2116
2117 static void push_dl_tasks(struct rq *rq)
2118 {
2119 /* push_dl_task() will return true if it moved a -deadline task */
2120 while (push_dl_task(rq))
2121 ;
2122 }
2123
2124 static void pull_dl_task(struct rq *this_rq)
2125 {
2126 int this_cpu = this_rq->cpu, cpu;
2127 struct task_struct *p;
2128 bool resched = false;
2129 struct rq *src_rq;
2130 u64 dmin = LONG_MAX;
2131
2132 if (likely(!dl_overloaded(this_rq)))
2133 return;
2134
2135 /*
2136 * Match the barrier from dl_set_overload(); this guarantees that if we
2137 * see overloaded we must also see the dlo_mask bit.
2138 */
2139 smp_rmb();
2140
2141 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2142 if (this_cpu == cpu)
2143 continue;
2144
2145 src_rq = cpu_rq(cpu);
2146
2147 /*
2148 * It looks racy, and it is! However, as in sched_rt.c,
2149 * we are fine with this.
2150 */
2151 if (this_rq->dl.dl_nr_running &&
2152 dl_time_before(this_rq->dl.earliest_dl.curr,
2153 src_rq->dl.earliest_dl.next))
2154 continue;
2155
2156 /* Might drop this_rq->lock */
2157 double_lock_balance(this_rq, src_rq);
2158
2159 /*
2160 * If there are no more pullable tasks on the
2161 * rq, we're done with it.
2162 */
2163 if (src_rq->dl.dl_nr_running <= 1)
2164 goto skip;
2165
2166 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2167
2168 /*
2169 * We found a task to be pulled if:
2170 * - it preempts our current (if there's one),
2171 * - it will preempt the last one we pulled (if any).
2172 */
2173 if (p && dl_time_before(p->dl.deadline, dmin) &&
2174 (!this_rq->dl.dl_nr_running ||
2175 dl_time_before(p->dl.deadline,
2176 this_rq->dl.earliest_dl.curr))) {
2177 WARN_ON(p == src_rq->curr);
2178 WARN_ON(!task_on_rq_queued(p));
2179
2180 /*
2181 * Then we pull iff p has actually an earlier
2182 * deadline than the current task of its runqueue.
2183 */
2184 if (dl_time_before(src_rq->curr->dl.deadline,
2185 p->dl.deadline))
2186 goto skip;
2187
2188 resched = true;
2189
2190 deactivate_task(src_rq, p, 0);
2191 sub_running_bw(&p->dl, &src_rq->dl);
2192 sub_rq_bw(&p->dl, &src_rq->dl);
2193 set_task_cpu(p, this_cpu);
2194 add_rq_bw(&p->dl, &this_rq->dl);
2195 add_running_bw(&p->dl, &this_rq->dl);
2196 activate_task(this_rq, p, 0);
2197 dmin = p->dl.deadline;
2198
2199 /* Is there any other task even earlier? */
2200 }
2201 skip:
2202 double_unlock_balance(this_rq, src_rq);
2203 }
2204
2205 if (resched)
2206 resched_curr(this_rq);
2207 }
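
/*
 * Note: dmin tracks the earliest deadline pulled so far, so successive
 * iterations of the loop above only migrate a candidate if its deadline
 * is even earlier than everything pulled before it.
 */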
2208
2209 /*
2210 * Since the task is not running and a reschedule is not going to happen
2211 * anytime soon on its runqueue, we try pushing it away now.
2212 */
2213 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2214 {
2215 if (!task_running(rq, p) &&
2216 !test_tsk_need_resched(rq->curr) &&
2217 p->nr_cpus_allowed > 1 &&
2218 dl_task(rq->curr) &&
2219 (rq->curr->nr_cpus_allowed < 2 ||
2220 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2221 push_dl_tasks(rq);
2222 }
2223 }
2224
2225 static void set_cpus_allowed_dl(struct task_struct *p,
2226 const struct cpumask *new_mask)
2227 {
2228 struct root_domain *src_rd;
2229 struct rq *rq;
2230
2231 BUG_ON(!dl_task(p));
2232
2233 rq = task_rq(p);
2234 src_rd = rq->rd;
2235 /*
2236 * Migrating a SCHED_DEADLINE task between exclusive
2237 * cpusets (different root_domains) entails a bandwidth
2238 * update. We already made space for us in the destination
2239 * domain (see cpuset_can_attach()).
2240 */
2241 if (!cpumask_intersects(src_rd->span, new_mask)) {
2242 struct dl_bw *src_dl_b;
2243
2244 src_dl_b = dl_bw_of(cpu_of(rq));
2245 /*
2246 * We now free resources of the root_domain we are migrating
2247 * off. In the worst case, sched_setattr() may temporarily fail
2248 * until we complete the update.
2249 */
2250 raw_spin_lock(&src_dl_b->lock);
2251 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2252 raw_spin_unlock(&src_dl_b->lock);
2253 }
2254
2255 set_cpus_allowed_common(p, new_mask);
2256 }
2257
2258 /* Assumes rq->lock is held */
2259 static void rq_online_dl(struct rq *rq)
2260 {
2261 if (rq->dl.overloaded)
2262 dl_set_overload(rq);
2263
2264 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2265 if (rq->dl.dl_nr_running > 0)
2266 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2267 }
2268
2269 /* Assumes rq->lock is held */
2270 static void rq_offline_dl(struct rq *rq)
2271 {
2272 if (rq->dl.overloaded)
2273 dl_clear_overload(rq);
2274
2275 cpudl_clear(&rq->rd->cpudl, rq->cpu);
2276 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2277 }
2278
2279 void __init init_sched_dl_class(void)
2280 {
2281 unsigned int i;
2282
2283 for_each_possible_cpu(i)
2284 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2285 GFP_KERNEL, cpu_to_node(i));
2286 }
2287
2288 #endif /* CONFIG_SMP */
2289
2290 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2291 {
2292 /*
2293 * task_non_contending() can start the "inactive timer" (if the 0-lag
2294 * time is in the future). If the task switches back to dl before
2295 * the "inactive timer" fires, it can continue to consume its current
2296 * runtime using its current deadline. If it stays outside of
2297 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2298 * will reset the task parameters.
2299 */
2300 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2301 task_non_contending(p);
2302
2303 if (!task_on_rq_queued(p)) {
2304 /*
2305 * Inactive timer is armed. However, p is leaving DEADLINE and
2306 * might migrate away from this rq while continuing to run on
2307 * some other class. We need to remove its contribution from
2308 * this rq running_bw now, or sub_rq_bw (below) will complain.
2309 */
2310 if (p->dl.dl_non_contending)
2311 sub_running_bw(&p->dl, &rq->dl);
2312 sub_rq_bw(&p->dl, &rq->dl);
2313 }
2314
2315 /*
2316 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2317 * at the 0-lag time, because the task could have been migrated
2318 * while running as SCHED_OTHER in the meantime.
2319 */
2320 if (p->dl.dl_non_contending)
2321 p->dl.dl_non_contending = 0;
2322
2323 /*
2324 * Since this might be the only -deadline task on the rq,
2325 * this is the right place to try to pull some other one
2326 * from an overloaded CPU, if any.
2327 */
2328 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2329 return;
2330
2331 deadline_queue_pull_task(rq);
2332 }
2333
2334 /*
2335 * When switching to -deadline, we may overload the rq, then
2336 * we try to push someone off, if possible.
2337 */
2338 static void switched_to_dl(struct rq *rq, struct task_struct *p)
2339 {
2340 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2341 put_task_struct(p);
2342
2343 /* If p is not queued we will update its parameters at next wakeup. */
2344 if (!task_on_rq_queued(p)) {
2345 add_rq_bw(&p->dl, &rq->dl);
2346
2347 return;
2348 }
2349
2350 if (rq->curr != p) {
2351 #ifdef CONFIG_SMP
2352 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
2353 deadline_queue_push_tasks(rq);
2354 #endif
2355 if (dl_task(rq->curr))
2356 check_preempt_curr_dl(rq, p, 0);
2357 else
2358 resched_curr(rq);
2359 }
2360 }
2361
2362 /*
2363 * If the scheduling parameters of a -deadline task changed,
2364 * a push or pull operation might be needed.
2365 */
2366 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2367 int oldprio)
2368 {
2369 if (task_on_rq_queued(p) || rq->curr == p) {
2370 #ifdef CONFIG_SMP
2371 /*
2372 * This might be too much, but unfortunately
2373 * we don't have the old deadline value, and
2374 * we can't tell whether the task is raising
2375 * or lowering its prio, so...
2376 */
2377 if (!rq->dl.overloaded)
2378 deadline_queue_pull_task(rq);
2379
2380 /*
2381 * If we now have an earlier deadline task than p,
2382 * then reschedule, provided p is still on this
2383 * runqueue.
2384 */
2385 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
2386 resched_curr(rq);
2387 #else
2388 /*
2389 * Again, we don't know if p has an earlier
2390 * or later deadline, so let's blindly set a
2391 * (maybe not needed) rescheduling point.
2392 */
2393 resched_curr(rq);
2394 #endif /* CONFIG_SMP */
2395 }
2396 }
2397
2398 const struct sched_class dl_sched_class = {
2399 .next = &rt_sched_class,
2400 .enqueue_task = enqueue_task_dl,
2401 .dequeue_task = dequeue_task_dl,
2402 .yield_task = yield_task_dl,
2403
2404 .check_preempt_curr = check_preempt_curr_dl,
2405
2406 .pick_next_task = pick_next_task_dl,
2407 .put_prev_task = put_prev_task_dl,
2408
2409 #ifdef CONFIG_SMP
2410 .select_task_rq = select_task_rq_dl,
2411 .migrate_task_rq = migrate_task_rq_dl,
2412 .set_cpus_allowed = set_cpus_allowed_dl,
2413 .rq_online = rq_online_dl,
2414 .rq_offline = rq_offline_dl,
2415 .task_woken = task_woken_dl,
2416 #endif
2417
2418 .set_curr_task = set_curr_task_dl,
2419 .task_tick = task_tick_dl,
2420 .task_fork = task_fork_dl,
2421
2422 .prio_changed = prio_changed_dl,
2423 .switched_from = switched_from_dl,
2424 .switched_to = switched_to_dl,
2425
2426 .update_curr = update_curr_dl,
2427 };
2428
2429 int sched_dl_global_validate(void)
2430 {
2431 u64 runtime = global_rt_runtime();
2432 u64 period = global_rt_period();
2433 u64 new_bw = to_ratio(period, runtime);
2434 struct dl_bw *dl_b;
2435 int cpu, ret = 0;
2436 unsigned long flags;
2437
2438 /*
2439 * Here we want to check that the bandwidth is not being set to a
2440 * value smaller than the currently allocated bandwidth in
2441 * any of the root_domains.
2442 *
2443 * FIXME: Cycling on all the CPUs is overdoing it, but simpler than
2444 * cycling on root_domains... Discussion on different/better
2445 * solutions is welcome!
2446 */
2447 for_each_possible_cpu(cpu) {
2448 rcu_read_lock_sched();
2449 dl_b = dl_bw_of(cpu);
2450
2451 raw_spin_lock_irqsave(&dl_b->lock, flags);
2452 if (new_bw < dl_b->total_bw)
2453 ret = -EBUSY;
2454 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2455
2456 rcu_read_unlock_sched();
2457
2458 if (ret)
2459 break;
2460 }
2461
2462 return ret;
2463 }
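
/*
 * Worked example, assuming the default sysctl values
 * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000:
 * new_bw = to_ratio(1s, 0.95s) ~= 0.95 in BW_SHIFT fixed point. The
 * write is then rejected with -EBUSY if any root_domain has already
 * allocated more than that to -deadline tasks (new_bw < total_bw).
 */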
2464
2465 void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
2466 {
2467 if (global_rt_runtime() == RUNTIME_INF) {
2468 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2469 dl_rq->extra_bw = 1 << BW_SHIFT;
2470 } else {
2471 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2472 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2473 dl_rq->extra_bw = to_ratio(global_rt_period(),
2474 global_rt_runtime());
2475 }
2476 }
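
/*
 * Numeric example with the default 95% rt/dl limit: bw_ratio ~= 1/0.95
 * in RATIO_SHIFT fixed point, while extra_bw ~= 0.95 in BW_SHIFT fixed
 * point. These feed the GRUB reclaiming rule used by grub_reclaim()
 * when SCHED_FLAG_RECLAIM tasks consume their runtime.
 */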
2477
2478 void sched_dl_do_global(void)
2479 {
2480 u64 new_bw = -1;
2481 struct dl_bw *dl_b;
2482 int cpu;
2483 unsigned long flags;
2484
2485 def_dl_bandwidth.dl_period = global_rt_period();
2486 def_dl_bandwidth.dl_runtime = global_rt_runtime();
2487
2488 if (global_rt_runtime() != RUNTIME_INF)
2489 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2490
2491 /*
2492 * FIXME: As above...
2493 */
2494 for_each_possible_cpu(cpu) {
2495 rcu_read_lock_sched();
2496 dl_b = dl_bw_of(cpu);
2497
2498 raw_spin_lock_irqsave(&dl_b->lock, flags);
2499 dl_b->bw = new_bw;
2500 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2501
2502 rcu_read_unlock_sched();
2503 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2504 }
2505 }
2506
2507 /*
2508 * We must be sure that accepting a new task (or allowing changing the
2509 * parameters of an existing one) is consistent with the bandwidth
2510 * constraints. If so, this function also updates the currently
2511 * allocated bandwidth to reflect the new situation.
2512 *
2513 * This function is called while holding p's rq->lock.
2514 */
2515 int sched_dl_overflow(struct task_struct *p, int policy,
2516 const struct sched_attr *attr)
2517 {
2518 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2519 u64 period = attr->sched_period ?: attr->sched_deadline;
2520 u64 runtime = attr->sched_runtime;
2521 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2522 int cpus, err = -1;
2523
2524 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2525 return 0;
2526
2527 /* !deadline task may carry old deadline bandwidth */
2528 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2529 return 0;
2530
2531 /*
2532 * If a task enters, leaves, or stays -deadline but changes
2533 * its parameters, we may need to update the total
2534 * allocated bandwidth of the container accordingly.
2535 */
2536 raw_spin_lock(&dl_b->lock);
2537 cpus = dl_bw_cpus(task_cpu(p));
2538 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2539 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2540 if (hrtimer_active(&p->dl.inactive_timer))
2541 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2542 __dl_add(dl_b, new_bw, cpus);
2543 err = 0;
2544 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2545 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2546 /*
2547 * XXX this is slightly incorrect: when the task
2548 * utilization decreases, we should delay the total
2549 * utilization change until the task's 0-lag point.
2550 * But this would require to set the task's "inactive
2551 * timer" when the task is not inactive.
2552 */
2553 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2554 __dl_add(dl_b, new_bw, cpus);
2555 dl_change_utilization(p, new_bw);
2556 err = 0;
2557 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2558 /*
2559 * Do not decrease the total deadline utilization here,
2560 * switched_from_dl() will take care to do it at the correct
2561 * (0-lag) time.
2562 */
2563 err = 0;
2564 }
2565 raw_spin_unlock(&dl_b->lock);
2566
2567 return err;
2568 }
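
/*
 * Worked example (illustrative): on a 4-CPU root_domain with the
 * default 95% per-CPU limit, admitting a task with runtime = 25ms and
 * period = 50ms adds new_bw ~= 0.5 of a CPU to total_bw; admission
 * fails with -EBUSY only if __dl_overflow() sees
 * total_bw - old_bw + new_bw exceed 0.95 * 4.
 */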
2569
2570 /*
2571 * This function initializes the sched_dl_entity of a task that is
2572 * becoming SCHED_DEADLINE.
2573 *
2574 * Only the static values are considered here; the actual runtime and the
2575 * absolute deadline will be properly calculated when the task is enqueued
2576 * for the first time with its new policy.
2577 */
2578 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2579 {
2580 struct sched_dl_entity *dl_se = &p->dl;
2581
2582 dl_se->dl_runtime = attr->sched_runtime;
2583 dl_se->dl_deadline = attr->sched_deadline;
2584 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2585 dl_se->flags = attr->sched_flags;
2586 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2587 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2588 }
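
/*
 * For example (illustrative values): sched_runtime = 10ms with
 * sched_deadline = sched_period = 100ms yields
 * dl_bw = dl_density = to_ratio(100ms, 10ms) ~= 0.1 in BW_SHIFT
 * fixed point.
 */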
2589
2590 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2591 {
2592 struct sched_dl_entity *dl_se = &p->dl;
2593
2594 attr->sched_priority = p->rt_priority;
2595 attr->sched_runtime = dl_se->dl_runtime;
2596 attr->sched_deadline = dl_se->dl_deadline;
2597 attr->sched_period = dl_se->dl_period;
2598 attr->sched_flags = dl_se->flags;
2599 }
2600
2601 /*
2602 * This function validates the new parameters of a -deadline task.
2603 * We require the deadline to be non-zero and greater than or equal
2604 * to the runtime, and the period to be either zero or no smaller
2605 * than the deadline. Furthermore, we have to be sure that
2606 * user parameters are above the internal resolution of 1us (we
2607 * check sched_runtime only since it is always the smaller one) and
2608 * below 2^63 ns (we have to check both sched_deadline and
2609 * sched_period, as the latter can be zero).
2610 */
2611 bool __checkparam_dl(const struct sched_attr *attr)
2612 {
2613 /* special dl tasks don't actually use any parameter */
2614 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2615 return true;
2616
2617 /* deadline != 0 */
2618 if (attr->sched_deadline == 0)
2619 return false;
2620
2621 /*
2622 * Since we truncate DL_SCALE bits, make sure we're at least
2623 * that big.
2624 */
2625 if (attr->sched_runtime < (1ULL << DL_SCALE))
2626 return false;
2627
2628 /*
2629 * Since we use the MSB for wrap-around and sign issues, make
2630 * sure it's not set (mind that period can be equal to zero).
2631 */
2632 if (attr->sched_deadline & (1ULL << 63) ||
2633 attr->sched_period & (1ULL << 63))
2634 return false;
2635
2636 /* runtime <= deadline <= period (if period != 0) */
2637 if ((attr->sched_period != 0 &&
2638 attr->sched_period < attr->sched_deadline) ||
2639 attr->sched_deadline < attr->sched_runtime)
2640 return false;
2641
2642 return true;
2643 }
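
/*
 * Examples (illustrative): runtime = 10ms, deadline = 30ms,
 * period = 100ms passes every check above. A runtime of 500ns would
 * fail the DL_SCALE test (the ~1us floor), and deadline = 30ms with
 * period = 20ms would fail the runtime <= deadline <= period ordering.
 */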
2644
2645 /*
2646 * This function clears the sched_dl_entity static params.
2647 */
2648 void __dl_clear_params(struct task_struct *p)
2649 {
2650 struct sched_dl_entity *dl_se = &p->dl;
2651
2652 dl_se->dl_runtime = 0;
2653 dl_se->dl_deadline = 0;
2654 dl_se->dl_period = 0;
2655 dl_se->flags = 0;
2656 dl_se->dl_bw = 0;
2657 dl_se->dl_density = 0;
2658
2659 dl_se->dl_throttled = 0;
2660 dl_se->dl_yielded = 0;
2661 dl_se->dl_non_contending = 0;
2662 dl_se->dl_overrun = 0;
2663 }
2664
2665 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2666 {
2667 struct sched_dl_entity *dl_se = &p->dl;
2668
2669 if (dl_se->dl_runtime != attr->sched_runtime ||
2670 dl_se->dl_deadline != attr->sched_deadline ||
2671 dl_se->dl_period != attr->sched_period ||
2672 dl_se->flags != attr->sched_flags)
2673 return true;
2674
2675 return false;
2676 }
2677
2678 #ifdef CONFIG_SMP
2679 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2680 {
2681 unsigned int dest_cpu;
2682 struct dl_bw *dl_b;
2683 bool overflow;
2684 int cpus, ret;
2685 unsigned long flags;
2686
2687 dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2688
2689 rcu_read_lock_sched();
2690 dl_b = dl_bw_of(dest_cpu);
2691 raw_spin_lock_irqsave(&dl_b->lock, flags);
2692 cpus = dl_bw_cpus(dest_cpu);
2693 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
2694 if (overflow) {
2695 ret = -EBUSY;
2696 } else {
2697 /*
2698 * We reserve space for this task in the destination
2699 * root_domain, as we can't fail after this point.
2700 * We will free resources in the source root_domain
2701 * later on (see set_cpus_allowed_dl()).
2702 */
2703 __dl_add(dl_b, p->dl.dl_bw, cpus);
2704 ret = 0;
2705 }
2706 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2707 rcu_read_unlock_sched();
2708
2709 return ret;
2710 }
2711
2712 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2713 const struct cpumask *trial)
2714 {
2715 int ret = 1, trial_cpus;
2716 struct dl_bw *cur_dl_b;
2717 unsigned long flags;
2718
2719 rcu_read_lock_sched();
2720 cur_dl_b = dl_bw_of(cpumask_any(cur));
2721 trial_cpus = cpumask_weight(trial);
2722
2723 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2724 if (cur_dl_b->bw != -1 &&
2725 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2726 ret = 0;
2727 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2728 rcu_read_unlock_sched();
2729
2730 return ret;
2731 }
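
/*
 * Example (illustrative): with the default bw = 0.95 per CPU and
 * total_bw = 1.8 CPUs already allocated in the current set, shrinking
 * to a trial mask of a single CPU fails the check above
 * (0.95 * 1 < 1.8), so the cpuset change is refused.
 */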
2732
2733 bool dl_cpu_busy(unsigned int cpu)
2734 {
2735 unsigned long flags;
2736 struct dl_bw *dl_b;
2737 bool overflow;
2738 int cpus;
2739
2740 rcu_read_lock_sched();
2741 dl_b = dl_bw_of(cpu);
2742 raw_spin_lock_irqsave(&dl_b->lock, flags);
2743 cpus = dl_bw_cpus(cpu);
2744 overflow = __dl_overflow(dl_b, cpus, 0, 0);
2745 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2746 rcu_read_unlock_sched();
2747
2748 return overflow;
2749 }
2750 #endif
2751
2752 #ifdef CONFIG_SCHED_DEBUG
2753 void print_dl_stats(struct seq_file *m, int cpu)
2754 {
2755 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2756 }
2757 #endif /* CONFIG_SCHED_DEBUG */
2758