Lines matching refs:dl_rq in kernel/sched/deadline.c
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq() argument
30 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se) in dl_rq_of_se()
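
rq_of_dl_rq() works because struct rq embeds its struct dl_rq by value, so container_of() can recover the enclosing runqueue with pure pointer arithmetic, no back-pointer needed. A minimal userspace sketch of the idiom, with stand-in struct definitions (the real kernel structs carry many more fields):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dl_rq { unsigned long long running_bw; };
    struct rq   { int cpu; struct dl_rq dl; };    /* dl embedded by value */

    static struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
    {
        return container_of(dl_rq, struct rq, dl);
    }

    int main(void)
    {
        struct rq r = { .cpu = 3 };
        printf("cpu = %d\n", rq_of_dl_rq(&r.dl)->cpu);    /* prints 3 */
        return 0;
    }
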
79 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) in __add_running_bw() argument
81 u64 old = dl_rq->running_bw; in __add_running_bw()
83 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); in __add_running_bw()
84 dl_rq->running_bw += dl_bw; in __add_running_bw()
85 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ in __add_running_bw()
86 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); in __add_running_bw()
88 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); in __add_running_bw()
92 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) in __sub_running_bw() argument
94 u64 old = dl_rq->running_bw; in __sub_running_bw()
96 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); in __sub_running_bw()
97 dl_rq->running_bw -= dl_bw; in __sub_running_bw()
98 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ in __sub_running_bw()
99 if (dl_rq->running_bw > old) in __sub_running_bw()
100 dl_rq->running_bw = 0; in __sub_running_bw()
102 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); in __sub_running_bw()
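
Both helpers update running_bw, the total bandwidth of tasks currently "active" on this runqueue, under the runqueue lock; the result feeds GRUB reclaiming and, via the trailing cpufreq_update_util() calls, schedutil frequency selection. Since the accumulator is an unsigned 64-bit value, the only failure mode is wraparound, so the pattern is: snapshot the old value, update, and warn if the result moved in the impossible direction, with subtraction clamped to zero so a bookkeeping bug cannot leave a huge bogus bandwidth behind. A self-contained sketch of that saturating pattern (locking, SCHED_WARN_ON and the cpufreq hook omitted):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t running_bw;

    static void add_running_bw(uint64_t bw)
    {
        uint64_t old = running_bw;

        running_bw += bw;
        if (running_bw < old)               /* wrapped around: overflow */
            fprintf(stderr, "overflow\n");
    }

    static void sub_running_bw(uint64_t bw)
    {
        uint64_t old = running_bw;

        running_bw -= bw;
        if (running_bw > old) {             /* wrapped around: underflow */
            fprintf(stderr, "underflow\n");
            running_bw = 0;                 /* clamp, as the kernel does */
        }
    }

    int main(void)
    {
        add_running_bw(100);
        sub_running_bw(150);                /* triggers the clamp */
        printf("running_bw = %llu\n", (unsigned long long)running_bw);
        return 0;
    }

__add_rq_bw()/__sub_rq_bw() below apply the same pattern to this_bw, the bandwidth of all tasks attached to the runqueue whether active or not, and both sides check the invariant that running_bw never exceeds this_bw.
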
106 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) in __add_rq_bw() argument
108 u64 old = dl_rq->this_bw; in __add_rq_bw()
110 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); in __add_rq_bw()
111 dl_rq->this_bw += dl_bw; in __add_rq_bw()
112 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ in __add_rq_bw()
116 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) in __sub_rq_bw() argument
118 u64 old = dl_rq->this_bw; in __sub_rq_bw()
120 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); in __sub_rq_bw()
121 dl_rq->this_bw -= dl_bw; in __sub_rq_bw()
122 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ in __sub_rq_bw()
123 if (dl_rq->this_bw > old) in __sub_rq_bw()
124 dl_rq->this_bw = 0; in __sub_rq_bw()
125 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); in __sub_rq_bw()
129 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in add_rq_bw() argument
132 __add_rq_bw(dl_se->dl_bw, dl_rq); in add_rq_bw()
136 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in sub_rq_bw() argument
139 __sub_rq_bw(dl_se->dl_bw, dl_rq); in sub_rq_bw()
143 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in add_running_bw() argument
146 __add_running_bw(dl_se->dl_bw, dl_rq); in add_running_bw()
150 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in sub_running_bw() argument
153 __sub_running_bw(dl_se->dl_bw, dl_rq); in sub_running_bw()
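
These wrappers pass dl_se->dl_bw, the entity's utilization runtime/period stored in fixed point with BW_SHIFT fractional bits (20 in current kernels), down to the double-underscore helpers; the lines elided by the ref match guard against "special" entities (such as the schedutil kthreads in recent kernels) whose bandwidth is deliberately not accounted. A sketch of the fixed-point conversion, assuming the kernel's to_ratio() definition:

    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT 20    /* fixed-point fractional bits, as in kernel/sched */

    /* to_ratio(period, runtime): runtime/period scaled by 2^BW_SHIFT */
    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
        return (runtime << BW_SHIFT) / period;
    }

    int main(void)
    {
        /* a task with 10 ms runtime every 100 ms: utilization 0.1 */
        uint64_t dl_bw = to_ratio(100000000ULL, 10000000ULL);

        printf("dl_bw = %llu (~%.3f)\n",
               (unsigned long long)dl_bw, (double)dl_bw / (1 << BW_SHIFT));
        return 0;
    }
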
241 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in task_non_contending() local
242 struct rq *rq = rq_of_dl_rq(dl_rq); in task_non_contending()
274 sub_running_bw(dl_se, dl_rq); in task_non_contending()
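
task_non_contending() does not drop running_bw the moment a task blocks: under the GRUB model the bandwidth stays reserved until the task's "0-lag" time, the instant at which consuming the remaining runtime at the reserved rate would bring its lag to zero; only then is the sub_running_bw() shown above (source line 274) safe, performed either immediately or deferred to the inactive timer. A hedged sketch of the 0-lag computation, assuming the remaining runtime is scaled by dl_period/dl_runtime as in the kernel:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * 0-lag time: deadline - runtime * (dl_period / dl_runtime).
     * If it is already in the past, the bandwidth can be released at
     * once; otherwise the kernel arms the inactive timer to do it later.
     */
    static int64_t zerolag_time(int64_t deadline, int64_t runtime,
                                int64_t dl_period, int64_t dl_runtime)
    {
        return deadline - (runtime * dl_period) / dl_runtime;
    }

    int main(void)
    {
        /* 4 ms of runtime left out of a 10 ms / 100 ms reservation */
        int64_t t = zerolag_time(500000000LL, 4000000LL,
                                 100000000LL, 10000000LL);

        printf("0-lag at t = %lld ns\n", (long long)t);    /* 460000000 */
        return 0;
    }
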
296 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in task_contending() local
306 add_rq_bw(dl_se, dl_rq); in task_contending()
327 add_running_bw(dl_se, dl_rq); in task_contending()
331 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) in is_leftmost() argument
335 return dl_rq->root.rb_leftmost == &dl_se->rb_node; in is_leftmost()
357 void init_dl_rq(struct dl_rq *dl_rq) in init_dl_rq() argument
359 dl_rq->root = RB_ROOT_CACHED; in init_dl_rq()
363 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; in init_dl_rq()
365 dl_rq->dl_nr_migratory = 0; in init_dl_rq()
366 dl_rq->overloaded = 0; in init_dl_rq()
367 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED; in init_dl_rq()
369 init_dl_bw(&dl_rq->dl_bw); in init_dl_rq()
372 dl_rq->running_bw = 0; in init_dl_rq()
373 dl_rq->this_bw = 0; in init_dl_rq()
374 init_dl_rq_bw_ratio(dl_rq); in init_dl_rq()
409 static void update_dl_migration(struct dl_rq *dl_rq) in update_dl_migration() argument
411 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) { in update_dl_migration()
412 if (!dl_rq->overloaded) { in update_dl_migration()
413 dl_set_overload(rq_of_dl_rq(dl_rq)); in update_dl_migration()
414 dl_rq->overloaded = 1; in update_dl_migration()
416 } else if (dl_rq->overloaded) { in update_dl_migration()
417 dl_clear_overload(rq_of_dl_rq(dl_rq)); in update_dl_migration()
418 dl_rq->overloaded = 0; in update_dl_migration()
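
update_dl_migration() keeps the per-runqueue overloaded flag and the root-domain overload mask consistent with two counters: a runqueue counts as overloaded only while it has more than one runnable deadline task and at least one of them may migrate. The two branches make the transition edge-triggered, so dl_set_overload()/dl_clear_overload() fire once per state change instead of on every enqueue. A small stand-alone state-machine sketch of the same logic:

    #include <stdbool.h>
    #include <stdio.h>

    struct dl_rq_state {
        unsigned int dl_nr_migratory;
        unsigned int dl_nr_running;
        bool overloaded;
    };

    /* edge-triggered overload tracking, mirroring update_dl_migration() */
    static void update_dl_migration(struct dl_rq_state *dl_rq)
    {
        bool should = dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1;

        if (should && !dl_rq->overloaded) {
            printf("set overload\n");       /* dl_set_overload() */
            dl_rq->overloaded = true;
        } else if (!should && dl_rq->overloaded) {
            printf("clear overload\n");     /* dl_clear_overload() */
            dl_rq->overloaded = false;
        }
    }

    int main(void)
    {
        struct dl_rq_state s = { 0 };

        s.dl_nr_running = 2; s.dl_nr_migratory = 1;
        update_dl_migration(&s);            /* prints "set overload" */
        s.dl_nr_running = 1;
        update_dl_migration(&s);            /* prints "clear overload" */
        return 0;
    }
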
422 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in inc_dl_migration() argument
427 dl_rq->dl_nr_migratory++; in inc_dl_migration()
429 update_dl_migration(dl_rq); in inc_dl_migration()
432 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in dec_dl_migration() argument
437 dl_rq->dl_nr_migratory--; in dec_dl_migration()
439 update_dl_migration(dl_rq); in dec_dl_migration()
448 struct dl_rq *dl_rq = &rq->dl; in enqueue_pushable_dl_task() local
449 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node; in enqueue_pushable_dl_task()
469 dl_rq->earliest_dl.next = p->dl.deadline; in enqueue_pushable_dl_task()
473 &dl_rq->pushable_dl_tasks_root, leftmost); in enqueue_pushable_dl_task()
478 struct dl_rq *dl_rq = &rq->dl; in dequeue_pushable_dl_task() local
483 if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) { in dequeue_pushable_dl_task()
488 dl_rq->earliest_dl.next = rb_entry(next_node, in dequeue_pushable_dl_task()
493 rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); in dequeue_pushable_dl_task()
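
The pushable-tasks tree is a second, deadline-ordered rbtree holding only the runnable deadline tasks that are allowed to run elsewhere; earliest_dl.next caches the deadline of its leftmost element so the push/pull balancer can compare runqueues without walking the tree. A deliberately simplified sketch that uses a sorted singly-linked list in place of the cached rbtree (the list head plays the rb_leftmost role; the kernel version is O(log n)):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pushable { uint64_t deadline; struct pushable *next; };

    static struct pushable *pushable_head;   /* plays the rb_leftmost role */
    static uint64_t earliest_dl_next;        /* like dl_rq->earliest_dl.next */

    static void enqueue_pushable(uint64_t deadline)
    {
        struct pushable **link = &pushable_head;
        struct pushable *p = malloc(sizeof(*p));

        p->deadline = deadline;
        while (*link && (*link)->deadline <= deadline)
            link = &(*link)->next;           /* keep the list sorted */
        p->next = *link;
        *link = p;
        earliest_dl_next = pushable_head->deadline;
    }

    static void dequeue_pushable_head(void)
    {
        struct pushable *p = pushable_head;

        if (!p)
            return;
        pushable_head = p->next;
        free(p);
        /* re-derive the cached earliest deadline, 0 when empty */
        earliest_dl_next = pushable_head ? pushable_head->deadline : 0;
    }

    int main(void)
    {
        enqueue_pushable(300);
        enqueue_pushable(100);
        printf("next = %llu\n", (unsigned long long)earliest_dl_next); /* 100 */
        dequeue_pushable_head();
        printf("next = %llu\n", (unsigned long long)earliest_dl_next); /* 300 */
        return 0;
    }
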
580 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in inc_dl_migration() argument
585 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in dec_dl_migration() argument
625 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in setup_new_dl_entity() local
626 struct rq *rq = rq_of_dl_rq(dl_rq); in setup_new_dl_entity()
669 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in replenish_dl_entity() local
670 struct rq *rq = rq_of_dl_rq(dl_rq); in replenish_dl_entity()
855 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in update_dl_entity() local
856 struct rq *rq = rq_of_dl_rq(dl_rq); in update_dl_entity()
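
The locals listed here belong to the CBS (constant bandwidth server) maintenance paths: setup_new_dl_entity(), replenish_dl_entity() and update_dl_entity() decide whether an entity may keep its current (deadline, runtime) pair or must be given deadline = now + dl_deadline and a full runtime refill. The key wakeup test checks whether the leftover density runtime/(deadline - now) would exceed the reserved dl_runtime/dl_deadline, cross-multiplied to avoid a division. A hedged sketch of that check (the kernel additionally scales the operands down to dodge multiplication overflow):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* CBS wakeup rule sketch; names and scaling are simplified */
    static bool dl_entity_overflow(uint64_t runtime, uint64_t deadline,
                                   uint64_t dl_runtime, uint64_t dl_deadline,
                                   uint64_t now)
    {
        /* runtime / (deadline - now) > dl_runtime / dl_deadline ? */
        return runtime * dl_deadline > (deadline - now) * dl_runtime;
    }

    int main(void)
    {
        /* 10 ms budget per 100 ms; 8 ms left but only 20 ms to deadline */
        if (dl_entity_overflow(8000000ULL, 120000000ULL,
                               10000000ULL, 100000000ULL, 100000000ULL))
            printf("overflow: replenish with a fresh deadline\n");
        return 0;
    }
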
1302 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) in inc_dl_deadline() argument
1304 struct rq *rq = rq_of_dl_rq(dl_rq); in inc_dl_deadline()
1306 if (dl_rq->earliest_dl.curr == 0 || in inc_dl_deadline()
1307 dl_time_before(deadline, dl_rq->earliest_dl.curr)) { in inc_dl_deadline()
1308 dl_rq->earliest_dl.curr = deadline; in inc_dl_deadline()
1313 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) in dec_dl_deadline() argument
1315 struct rq *rq = rq_of_dl_rq(dl_rq); in dec_dl_deadline()
1321 if (!dl_rq->dl_nr_running) { in dec_dl_deadline()
1322 dl_rq->earliest_dl.curr = 0; in dec_dl_deadline()
1323 dl_rq->earliest_dl.next = 0; in dec_dl_deadline()
1326 struct rb_node *leftmost = dl_rq->root.rb_leftmost; in dec_dl_deadline()
1330 dl_rq->earliest_dl.curr = entry->deadline; in dec_dl_deadline()
1337 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} in inc_dl_deadline() argument
1338 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} in dec_dl_deadline() argument
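
inc_dl_deadline()/dec_dl_deadline() maintain earliest_dl.curr, the earliest deadline among queued tasks, zeroing it when the runqueue empties and re-deriving it from the leftmost tree entry otherwise; on !CONFIG_SMP they compile to empty stubs since there is no cpudl heap to update. Every deadline comparison goes through dl_time_before(), which works on the signed difference, so it stays correct even if the u64 nanosecond clock wraps, as long as the two times are less than 2^63 ns apart. A sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* wrap-safe "a is earlier than b", like the kernel's dl_time_before() */
    static inline int dl_time_before(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) < 0;
    }

    int main(void)
    {
        printf("%d\n", dl_time_before(100, 200));             /* 1 */
        /* still correct just across a u64 wrap */
        printf("%d\n", dl_time_before(UINT64_MAX - 5, 10));   /* 1 */
        return 0;
    }
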
1343 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in inc_dl_tasks() argument
1349 dl_rq->dl_nr_running++; in inc_dl_tasks()
1350 add_nr_running(rq_of_dl_rq(dl_rq), 1); in inc_dl_tasks()
1352 inc_dl_deadline(dl_rq, deadline); in inc_dl_tasks()
1353 inc_dl_migration(dl_se, dl_rq); in inc_dl_tasks()
1357 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in dec_dl_tasks() argument
1362 WARN_ON(!dl_rq->dl_nr_running); in dec_dl_tasks()
1363 dl_rq->dl_nr_running--; in dec_dl_tasks()
1364 sub_nr_running(rq_of_dl_rq(dl_rq), 1); in dec_dl_tasks()
1366 dec_dl_deadline(dl_rq, dl_se->deadline); in dec_dl_tasks()
1367 dec_dl_migration(dl_se, dl_rq); in dec_dl_tasks()
1372 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in __enqueue_dl_entity() local
1373 struct rb_node **link = &dl_rq->root.rb_root.rb_node; in __enqueue_dl_entity()
1392 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost); in __enqueue_dl_entity()
1394 inc_dl_tasks(dl_se, dl_rq); in __enqueue_dl_entity()
1399 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in __dequeue_dl_entity() local
1404 rb_erase_cached(&dl_se->rb_node, &dl_rq->root); in __dequeue_dl_entity()
1407 dec_dl_tasks(dl_se, dl_rq); in __dequeue_dl_entity()
1699 struct dl_rq *dl_rq) in pick_next_dl_entity() argument
1701 struct rb_node *left = rb_first_cached(&dl_rq->root); in pick_next_dl_entity()
1714 struct dl_rq *dl_rq; in pick_next_task_dl() local
1716 dl_rq = &rq->dl; in pick_next_task_dl()
1744 if (unlikely(!dl_rq->dl_nr_running)) in pick_next_task_dl()
1749 dl_se = pick_next_dl_entity(rq, dl_rq); in pick_next_task_dl()
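
__enqueue_dl_entity() inserts into the deadline-ordered rbtree with the kernel's standard walk: descend from the root comparing deadlines, remember the insertion link, and track whether the walk ever went right, because only a pure-left descent can make the new node the cached leftmost. pick_next_dl_entity() then just reads that cached leftmost (rb_first_cached()), which is exactly the EDF choice. The sketch below reproduces the walk over a plain unbalanced binary search tree, since a self-contained red-black tree would bury the point; in the kernel, rb_insert_color_cached() also rebalances:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { uint64_t deadline; struct node *left, *right; };

    static struct node *root, *leftmost_cache;

    static void enqueue(uint64_t deadline)
    {
        struct node **link = &root;
        struct node *n = calloc(1, sizeof(*n));
        bool leftmost = true;

        n->deadline = deadline;
        while (*link) {
            /* earlier deadlines go left, like dl_time_before() */
            if ((int64_t)(deadline - (*link)->deadline) < 0) {
                link = &(*link)->left;
            } else {
                link = &(*link)->right;
                leftmost = false;   /* went right: cannot be the new minimum */
            }
        }
        *link = n;
        if (leftmost)
            leftmost_cache = n;     /* rb_insert_color_cached() does this */
    }

    int main(void)
    {
        enqueue(300);
        enqueue(100);   /* pure-left descent: new cached leftmost */
        enqueue(200);
        printf("earliest = %llu\n",
               (unsigned long long)leftmost_cache->deadline);   /* 100 */
        return 0;
    }
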
2465 void init_dl_rq_bw_ratio(struct dl_rq *dl_rq) in init_dl_rq_bw_ratio() argument
2468 dl_rq->bw_ratio = 1 << RATIO_SHIFT; in init_dl_rq_bw_ratio()
2469 dl_rq->extra_bw = 1 << BW_SHIFT; in init_dl_rq_bw_ratio()
2471 dl_rq->bw_ratio = to_ratio(global_rt_runtime(), in init_dl_rq_bw_ratio()
2473 dl_rq->extra_bw = to_ratio(global_rt_period(), in init_dl_rq_bw_ratio()
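
init_dl_rq_bw_ratio() precomputes two fixed-point constants for GRUB reclaiming from the global rt_runtime/rt_period limit: bw_ratio, the reciprocal of the maximum deadline utilization in RATIO_SHIFT fractional bits, and extra_bw, the maximum utilization itself in BW_SHIFT bits; the branch at source lines 2468-2469 handles the unlimited (RUNTIME_INF) case, where both collapse to 1.0. A worked sketch with the default 950000/1000000 us limit, assuming BW_SHIFT = 20 and RATIO_SHIFT = 8 as in current kernels; note the argument order, since to_ratio(period, runtime) computes runtime/period:

    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT    20
    #define RATIO_SHIFT 8

    /* to_ratio(period, runtime): runtime/period in BW_SHIFT fixed point */
    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
        return (runtime << BW_SHIFT) / period;
    }

    int main(void)
    {
        uint64_t rt_runtime = 950000, rt_period = 1000000; /* defaults, us */

        /* arguments swapped: rt_period / rt_runtime ~= 1.053 */
        uint64_t bw_ratio = to_ratio(rt_runtime, rt_period)
                                >> (BW_SHIFT - RATIO_SHIFT);
        /* rt_runtime / rt_period ~= 0.95 */
        uint64_t extra_bw = to_ratio(rt_period, rt_runtime);

        printf("bw_ratio = %llu (~%.3f)\n", (unsigned long long)bw_ratio,
               (double)bw_ratio / (1 << RATIO_SHIFT));   /* 269, ~1.051 */
        printf("extra_bw = %llu (~%.3f)\n", (unsigned long long)extra_bw,
               (double)extra_bw / (1 << BW_SHIFT));      /* 996147, ~0.950 */
        return 0;
    }
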