Lines Matching refs:dl_rq
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq() argument
30 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se) in dl_rq_of_se()
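
The two accessors above work because each per-CPU struct rq embeds its deadline runqueue as the dl member, so container_of() can walk back from a dl_rq pointer to the enclosing rq (dl_rq_of_se() goes the other way, via the entity's task and that task's runqueue). A minimal userspace sketch of the pattern, with stand-in struct layouts rather than the kernel definitions:

/*
 * container_of() recovers the enclosing runqueue from its embedded
 * deadline runqueue, exactly as rq_of_dl_rq() does above.  The struct
 * layouts below are illustrative stand-ins, not the kernel ones.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dl_rq { unsigned int dl_nr_running; };
struct rq   { int cpu; struct dl_rq dl; };

static struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}

int main(void)
{
        struct rq rq = { .cpu = 3 };

        printf("cpu = %d\n", rq_of_dl_rq(&rq.dl)->cpu);   /* -> cpu = 3 */
        return 0;
}
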
140 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq) in __add_running_bw() argument
142 u64 old = dl_rq->running_bw; in __add_running_bw()
144 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); in __add_running_bw()
145 dl_rq->running_bw += dl_bw; in __add_running_bw()
146 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */ in __add_running_bw()
147 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); in __add_running_bw()
149 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); in __add_running_bw()
153 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq) in __sub_running_bw() argument
155 u64 old = dl_rq->running_bw; in __sub_running_bw()
157 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); in __sub_running_bw()
158 dl_rq->running_bw -= dl_bw; in __sub_running_bw()
159 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */ in __sub_running_bw()
160 if (dl_rq->running_bw > old) in __sub_running_bw()
161 dl_rq->running_bw = 0; in __sub_running_bw()
163 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0); in __sub_running_bw()
167 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) in __add_rq_bw() argument
169 u64 old = dl_rq->this_bw; in __add_rq_bw()
171 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); in __add_rq_bw()
172 dl_rq->this_bw += dl_bw; in __add_rq_bw()
173 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */ in __add_rq_bw()
177 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq) in __sub_rq_bw() argument
179 u64 old = dl_rq->this_bw; in __sub_rq_bw()
181 lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock); in __sub_rq_bw()
182 dl_rq->this_bw -= dl_bw; in __sub_rq_bw()
183 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */ in __sub_rq_bw()
184 if (dl_rq->this_bw > old) in __sub_rq_bw()
185 dl_rq->this_bw = 0; in __sub_rq_bw()
186 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw); in __sub_rq_bw()
190 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in add_rq_bw() argument
193 __add_rq_bw(dl_se->dl_bw, dl_rq); in add_rq_bw()
197 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in sub_rq_bw() argument
200 __sub_rq_bw(dl_se->dl_bw, dl_rq); in sub_rq_bw()
204 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in add_running_bw() argument
207 __add_running_bw(dl_se->dl_bw, dl_rq); in add_running_bw()
211 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in sub_running_bw() argument
214 __sub_running_bw(dl_se->dl_bw, dl_rq); in sub_running_bw()
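
The eight helpers above implement the deadline-bandwidth bookkeeping: this_bw is the total bandwidth of all deadline entities queued on the runqueue, running_bw the bandwidth of the ones currently contending, so running_bw <= this_bw must hold at all times; additions warn on wrap-around, subtractions warn on underflow and clamp to zero. A minimal userspace model of that accounting, leaving out the lockdep assertion and the cpufreq_update_util() kick, with warn_on() standing in for SCHED_WARN_ON():

/*
 * Userspace model of the running_bw/this_bw accounting listed above.
 * The locking and cpufreq notifications of the real helpers are
 * deliberately omitted; only the arithmetic and sanity checks remain.
 */
#include <stdint.h>
#include <stdio.h>

struct dl_rq_model {
        uint64_t running_bw;    /* bandwidth of contending entities */
        uint64_t this_bw;       /* bandwidth of all queued entities */
};

static void warn_on(int cond, const char *msg)
{
        if (cond)
                fprintf(stderr, "warning: %s\n", msg);
}

static void add_rq_bw(struct dl_rq_model *dl, uint64_t bw)
{
        uint64_t old = dl->this_bw;

        dl->this_bw += bw;
        warn_on(dl->this_bw < old, "this_bw overflow");
}

static void sub_rq_bw(struct dl_rq_model *dl, uint64_t bw)
{
        uint64_t old = dl->this_bw;

        dl->this_bw -= bw;
        warn_on(dl->this_bw > old, "this_bw underflow");
        if (dl->this_bw > old)
                dl->this_bw = 0;
        warn_on(dl->running_bw > dl->this_bw, "running_bw > this_bw");
}

static void add_running_bw(struct dl_rq_model *dl, uint64_t bw)
{
        uint64_t old = dl->running_bw;

        dl->running_bw += bw;
        warn_on(dl->running_bw < old, "running_bw overflow");
        warn_on(dl->running_bw > dl->this_bw, "running_bw > this_bw");
}

static void sub_running_bw(struct dl_rq_model *dl, uint64_t bw)
{
        uint64_t old = dl->running_bw;

        dl->running_bw -= bw;
        warn_on(dl->running_bw > old, "running_bw underflow");
        if (dl->running_bw > old)
                dl->running_bw = 0;
}

int main(void)
{
        struct dl_rq_model dl = { 0, 0 };
        uint64_t bw = 100 << 10;        /* an arbitrary fixed-point bandwidth */

        add_rq_bw(&dl, bw);             /* task joins the runqueue  */
        add_running_bw(&dl, bw);        /* ...and starts contending */
        sub_running_bw(&dl, bw);        /* task blocks              */
        sub_rq_bw(&dl, bw);             /* task leaves the runqueue */
        printf("running_bw=%llu this_bw=%llu\n",
               (unsigned long long)dl.running_bw,
               (unsigned long long)dl.this_bw);
        return 0;
}
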
302 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in task_non_contending() local
303 struct rq *rq = rq_of_dl_rq(dl_rq); in task_non_contending()
334 sub_running_bw(dl_se, dl_rq); in task_non_contending()
356 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in task_contending() local
366 add_rq_bw(dl_se, dl_rq); in task_contending()
387 add_running_bw(dl_se, dl_rq); in task_contending()
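
task_non_contending() and task_contending() decide when a blocking or waking task's bandwidth leaves or re-enters running_bw: on a block, the bandwidth stays in running_bw until the task's zero-lag time and is only subtracted immediately (the sub_running_bw() call at 334 above) if that time has already passed; otherwise an inactive timer is armed. A rough sketch of the zero-lag decision, assuming the usual field names (runtime, deadline, dl_runtime, dl_period) and a nonnegative remaining runtime; the timer side is not modeled:

/*
 * Zero-lag test behind task_non_contending(): the bandwidth can be
 * reclaimed at t0 = deadline - runtime * dl_period / dl_runtime.
 * If t0 already lies in the past, reclaim now; otherwise defer.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct dl_entity_model {
        uint64_t runtime;       /* remaining runtime, assumed >= 0 (ns) */
        uint64_t deadline;      /* current absolute deadline (ns)       */
        uint64_t dl_runtime;    /* reserved runtime per period (ns)     */
        uint64_t dl_period;     /* reservation period (ns)              */
};

/* true if the blocking task's bandwidth can leave running_bw right away */
static bool reclaim_now(const struct dl_entity_model *se, uint64_t now)
{
        uint64_t zerolag = se->deadline -
                           se->runtime * se->dl_period / se->dl_runtime;

        return (int64_t)(zerolag - now) <= 0;
}

int main(void)
{
        /* 10ms every 100ms, 4ms of runtime left, deadline at t = 80ms */
        struct dl_entity_model se = {
                .runtime = 4000000ULL, .deadline = 80000000ULL,
                .dl_runtime = 10000000ULL, .dl_period = 100000000ULL,
        };

        /* zero-lag time: 80ms - 4ms * 100/10 = 40ms */
        printf("blocks at 30ms: reclaim now? %d\n", reclaim_now(&se, 30000000ULL));
        printf("blocks at 50ms: reclaim now? %d\n", reclaim_now(&se, 50000000ULL));
        return 0;
}
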
391 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) in is_leftmost() argument
395 return dl_rq->root.rb_leftmost == &dl_se->rb_node; in is_leftmost()
398 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
419 void init_dl_rq(struct dl_rq *dl_rq) in init_dl_rq() argument
421 dl_rq->root = RB_ROOT_CACHED; in init_dl_rq()
425 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; in init_dl_rq()
427 dl_rq->dl_nr_migratory = 0; in init_dl_rq()
428 dl_rq->overloaded = 0; in init_dl_rq()
429 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED; in init_dl_rq()
431 init_dl_bw(&dl_rq->dl_bw); in init_dl_rq()
434 dl_rq->running_bw = 0; in init_dl_rq()
435 dl_rq->this_bw = 0; in init_dl_rq()
436 init_dl_rq_bw_ratio(dl_rq); in init_dl_rq()
471 static void update_dl_migration(struct dl_rq *dl_rq) in update_dl_migration() argument
473 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) { in update_dl_migration()
474 if (!dl_rq->overloaded) { in update_dl_migration()
475 dl_set_overload(rq_of_dl_rq(dl_rq)); in update_dl_migration()
476 dl_rq->overloaded = 1; in update_dl_migration()
478 } else if (dl_rq->overloaded) { in update_dl_migration()
479 dl_clear_overload(rq_of_dl_rq(dl_rq)); in update_dl_migration()
480 dl_rq->overloaded = 0; in update_dl_migration()
484 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in inc_dl_migration() argument
489 dl_rq->dl_nr_migratory++; in inc_dl_migration()
491 update_dl_migration(dl_rq); in inc_dl_migration()
494 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in dec_dl_migration() argument
499 dl_rq->dl_nr_migratory--; in dec_dl_migration()
501 update_dl_migration(dl_rq); in dec_dl_migration()
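
update_dl_migration() condenses the counters into the per-runqueue overloaded flag: a deadline runqueue is overloaded exactly when it has more than one runnable deadline task and at least one of them may migrate, which is what advertises it as a candidate for pulling. A small model of that hysteresis, with dl_set_overload()/dl_clear_overload() (root-domain cpumask updates in the kernel) reduced to assignments:

/*
 * Model of update_dl_migration().  dl_nr_running is driven by hand in
 * main(); the kernel maintains it in inc_dl_tasks()/dec_dl_tasks(),
 * listed further below, and only dl_nr_migratory in inc/dec_dl_migration().
 */
#include <stdbool.h>
#include <stdio.h>

struct dl_rq_model {
        unsigned int dl_nr_running;
        unsigned int dl_nr_migratory;
        bool overloaded;
};

static void update_dl_migration(struct dl_rq_model *dl)
{
        if (dl->dl_nr_migratory && dl->dl_nr_running > 1) {
                if (!dl->overloaded)
                        dl->overloaded = true;          /* dl_set_overload()   */
        } else if (dl->overloaded) {
                dl->overloaded = false;                 /* dl_clear_overload() */
        }
}

int main(void)
{
        struct dl_rq_model dl = { 0, 0, false };

        dl.dl_nr_running = 1;
        dl.dl_nr_migratory = 1;
        update_dl_migration(&dl);
        printf("one migratory task: overloaded=%d\n", dl.overloaded);

        dl.dl_nr_running = 2;                           /* a pinned task arrives */
        update_dl_migration(&dl);
        printf("plus a second task: overloaded=%d\n", dl.overloaded);
        return 0;
}
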
510 struct dl_rq *dl_rq = &rq->dl; in enqueue_pushable_dl_task() local
511 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node; in enqueue_pushable_dl_task()
531 dl_rq->earliest_dl.next = p->dl.deadline; in enqueue_pushable_dl_task()
535 &dl_rq->pushable_dl_tasks_root, leftmost); in enqueue_pushable_dl_task()
540 struct dl_rq *dl_rq = &rq->dl; in dequeue_pushable_dl_task() local
545 if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) { in dequeue_pushable_dl_task()
550 dl_rq->earliest_dl.next = rb_entry(next_node, in dequeue_pushable_dl_task()
555 rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); in dequeue_pushable_dl_task()
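
The pushable-tasks tree holds, ordered by absolute deadline, the runnable deadline tasks that could also run elsewhere, and earliest_dl.next caches the deadline of its leftmost entry so push/pull decisions can compare runqueues cheaply. The sketch below keeps the same ordering rule but substitutes a sorted singly linked list for the kernel's cached rbtree; dl_time_before() is written out as the wrap-safe signed comparison it is in the kernel:

/*
 * Stand-in for the pushable-tasks bookkeeping: entries are kept sorted
 * by deadline, earliest first, and earliest_dl_next mirrors the head.
 */
#include <stdint.h>
#include <stdio.h>

struct pushable { uint64_t deadline; struct pushable *next; };

struct dl_rq_model {
        struct pushable *pushable_head;   /* sorted, earliest deadline first */
        uint64_t earliest_dl_next;        /* deadline of the head entry      */
};

/* wrap-safe "a is earlier than b", as dl_time_before() does in the kernel */
static int dl_time_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

static void enqueue_pushable(struct dl_rq_model *dl, struct pushable *p)
{
        struct pushable **link = &dl->pushable_head;

        while (*link && !dl_time_before(p->deadline, (*link)->deadline))
                link = &(*link)->next;

        p->next = *link;
        *link = p;
        dl->earliest_dl_next = dl->pushable_head->deadline;
}

int main(void)
{
        struct dl_rq_model dl = { NULL, 0 };
        struct pushable a = { .deadline = 300 }, b = { .deadline = 100 },
                        c = { .deadline = 200 };

        enqueue_pushable(&dl, &a);
        enqueue_pushable(&dl, &b);
        enqueue_pushable(&dl, &c);
        printf("earliest pushable deadline = %llu\n",
               (unsigned long long)dl.earliest_dl_next);   /* 100 */
        return 0;
}
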
675 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in inc_dl_migration() argument
680 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in dec_dl_migration() argument
720 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in setup_new_dl_entity() local
721 struct rq *rq = rq_of_dl_rq(dl_rq); in setup_new_dl_entity()
763 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in replenish_dl_entity() local
764 struct rq *rq = rq_of_dl_rq(dl_rq); in replenish_dl_entity()
947 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in update_dl_entity() local
948 struct rq *rq = rq_of_dl_rq(dl_rq); in update_dl_entity()
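
setup_new_dl_entity(), replenish_dl_entity() and update_dl_entity() all resolve the entity to its dl_rq and rq first; update_dl_entity() then applies the CBS wakeup rule: keep the current (deadline, runtime) pair unless the deadline has passed or the pair would grant more bandwidth than the reservation, in which case the deadline becomes now + dl_deadline and the runtime is refilled. A simplified sketch of that rule, ignoring the DL_SCALE shifting the kernel uses against multiplication overflow and any priority-inheritance indirection:

/*
 * Simplified CBS wakeup rule: replenish when the deadline is in the
 * past or when runtime / (deadline - now) > dl_runtime / dl_deadline
 * (written cross-multiplied below).
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct dl_entity_model {
        uint64_t runtime;       /* remaining runtime (ns)           */
        uint64_t deadline;      /* current absolute deadline (ns)   */
        uint64_t dl_runtime;    /* reserved runtime per period (ns) */
        uint64_t dl_deadline;   /* relative deadline (ns)           */
};

static bool dl_entity_overflow(const struct dl_entity_model *se, uint64_t now)
{
        return se->runtime * se->dl_deadline >
               (se->deadline - now) * se->dl_runtime;
}

static void update_dl_entity(struct dl_entity_model *se, uint64_t now)
{
        if ((int64_t)(se->deadline - now) < 0 || dl_entity_overflow(se, now)) {
                se->deadline = now + se->dl_deadline;
                se->runtime  = se->dl_runtime;
        }
}

int main(void)
{
        /* 10ms every 100ms; 8ms left but only 20ms to the deadline: replenish */
        struct dl_entity_model se = {
                .runtime = 8000000ULL, .deadline = 120000000ULL,
                .dl_runtime = 10000000ULL, .dl_deadline = 100000000ULL,
        };

        update_dl_entity(&se, 100000000ULL);
        printf("deadline=%llums runtime=%llums\n",
               (unsigned long long)(se.deadline / 1000000),
               (unsigned long long)(se.runtime / 1000000));  /* 200ms, 10ms */
        return 0;
}
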
1394 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) in inc_dl_deadline() argument
1396 struct rq *rq = rq_of_dl_rq(dl_rq); in inc_dl_deadline()
1398 if (dl_rq->earliest_dl.curr == 0 || in inc_dl_deadline()
1399 dl_time_before(deadline, dl_rq->earliest_dl.curr)) { in inc_dl_deadline()
1400 dl_rq->earliest_dl.curr = deadline; in inc_dl_deadline()
1405 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) in dec_dl_deadline() argument
1407 struct rq *rq = rq_of_dl_rq(dl_rq); in dec_dl_deadline()
1413 if (!dl_rq->dl_nr_running) { in dec_dl_deadline()
1414 dl_rq->earliest_dl.curr = 0; in dec_dl_deadline()
1415 dl_rq->earliest_dl.next = 0; in dec_dl_deadline()
1418 struct rb_node *leftmost = dl_rq->root.rb_leftmost; in dec_dl_deadline()
1422 dl_rq->earliest_dl.curr = entry->deadline; in dec_dl_deadline()
1429 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} in inc_dl_deadline() argument
1430 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} in dec_dl_deadline() argument
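
inc_dl_deadline()/dec_dl_deadline() keep earliest_dl.curr equal to the earliest queued deadline (0 once the runqueue empties) and propagate it to the root domain's cpudl structure for global EDF decisions. The comparison at 1399 above is dl_time_before(), which stays correct across clock wraparound because it tests the signed difference; a short demonstration:

/*
 * dl_time_before(a, b) is "(s64)(a - b) < 0": a naive "a < b" breaks
 * once the clock value wraps, the signed-difference form does not, as
 * long as the two times are less than 2^63 ns apart.
 */
#include <stdint.h>
#include <stdio.h>

static int dl_time_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

int main(void)
{
        uint64_t near_wrap = UINT64_MAX - 1000;   /* just before wraparound */
        uint64_t wrapped   = 5000;                /* just after wraparound  */

        printf("naive compare:  %d\n", near_wrap < wrapped);                /* 0 (wrong) */
        printf("dl_time_before: %d\n", dl_time_before(near_wrap, wrapped)); /* 1         */
        return 0;
}
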
1435 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in inc_dl_tasks() argument
1441 dl_rq->dl_nr_running++; in inc_dl_tasks()
1442 add_nr_running(rq_of_dl_rq(dl_rq), 1); in inc_dl_tasks()
1444 inc_dl_deadline(dl_rq, deadline); in inc_dl_tasks()
1445 inc_dl_migration(dl_se, dl_rq); in inc_dl_tasks()
1449 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) in dec_dl_tasks() argument
1454 WARN_ON(!dl_rq->dl_nr_running); in dec_dl_tasks()
1455 dl_rq->dl_nr_running--; in dec_dl_tasks()
1456 sub_nr_running(rq_of_dl_rq(dl_rq), 1); in dec_dl_tasks()
1458 dec_dl_deadline(dl_rq, dl_se->deadline); in dec_dl_tasks()
1459 dec_dl_migration(dl_se, dl_rq); in dec_dl_tasks()
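
inc_dl_tasks()/dec_dl_tasks() are where the class counter and the rq-wide nr_running move in lockstep, with the deadline and migration trackers hanging off them; decrementing an already-empty runqueue trips the WARN_ON at 1454 above. A tiny model of that pairing:

/*
 * Minimal model of the counter pairing in inc_dl_tasks()/dec_dl_tasks().
 * assert() stands in for the kernel's WARN_ON().
 */
#include <assert.h>
#include <stdio.h>

struct rq_model { unsigned int nr_running; unsigned int dl_nr_running; };

static void inc_dl_tasks(struct rq_model *rq)
{
        rq->dl_nr_running++;
        rq->nr_running++;            /* add_nr_running(rq, 1) in the kernel */
}

static void dec_dl_tasks(struct rq_model *rq)
{
        assert(rq->dl_nr_running);   /* WARN_ON(!dl_rq->dl_nr_running)      */
        rq->dl_nr_running--;
        rq->nr_running--;            /* sub_nr_running(rq, 1)               */
}

int main(void)
{
        struct rq_model rq = { 0, 0 };

        inc_dl_tasks(&rq);
        dec_dl_tasks(&rq);
        printf("nr_running=%u dl_nr_running=%u\n", rq.nr_running, rq.dl_nr_running);
        return 0;
}
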
1464 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in __enqueue_dl_entity() local
1465 struct rb_node **link = &dl_rq->root.rb_root.rb_node; in __enqueue_dl_entity()
1484 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost); in __enqueue_dl_entity()
1486 inc_dl_tasks(dl_se, dl_rq); in __enqueue_dl_entity()
1491 struct dl_rq *dl_rq = dl_rq_of_se(dl_se); in __dequeue_dl_entity() local
1496 rb_erase_cached(&dl_se->rb_node, &dl_rq->root); in __dequeue_dl_entity()
1499 dec_dl_tasks(dl_se, dl_rq); in __dequeue_dl_entity()
1847 struct dl_rq *dl_rq) in pick_next_dl_entity() argument
1849 struct rb_node *left = rb_first_cached(&dl_rq->root); in pick_next_dl_entity()
1860 struct dl_rq *dl_rq = &rq->dl; in pick_next_task_dl() local
1866 dl_se = pick_next_dl_entity(rq, dl_rq); in pick_next_task_dl()
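
__enqueue_dl_entity() keeps entities in a deadline-ordered cached rbtree and pick_next_dl_entity() takes the leftmost node, which makes the pick an O(1) earliest-deadline-first choice. The sketch below preserves the ordering and the pick-leftmost rule but uses an unbalanced binary search tree instead of rb_root_cached, and walks to the leftmost node instead of caching it:

/*
 * EDF ordering sketch: insert entities keyed by absolute deadline,
 * pick the leftmost (earliest-deadline) one.
 */
#include <stdint.h>
#include <stdio.h>

struct dl_se_model {
        uint64_t deadline;
        struct dl_se_model *left, *right;
};

static int dl_time_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

static void enqueue_dl_entity(struct dl_se_model **root, struct dl_se_model *se)
{
        while (*root) {
                if (dl_time_before(se->deadline, (*root)->deadline))
                        root = &(*root)->left;
                else
                        root = &(*root)->right;
        }
        se->left = se->right = NULL;
        *root = se;
}

/* leftmost node == earliest deadline; the kernel keeps a cached pointer to it */
static struct dl_se_model *pick_next_dl_entity(struct dl_se_model *root)
{
        if (!root)
                return NULL;
        while (root->left)
                root = root->left;
        return root;
}

int main(void)
{
        struct dl_se_model a = { .deadline = 300 }, b = { .deadline = 100 },
                           c = { .deadline = 200 };
        struct dl_se_model *root = NULL;

        enqueue_dl_entity(&root, &a);
        enqueue_dl_entity(&root, &b);
        enqueue_dl_entity(&root, &c);
        printf("next deadline = %llu\n",
               (unsigned long long)pick_next_dl_entity(root)->deadline);  /* 100 */
        return 0;
}
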
2580 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq) in init_dl_rq_bw_ratio() argument
2583 dl_rq->bw_ratio = 1 << RATIO_SHIFT; in init_dl_rq_bw_ratio()
2584 dl_rq->extra_bw = 1 << BW_SHIFT; in init_dl_rq_bw_ratio()
2586 dl_rq->bw_ratio = to_ratio(global_rt_runtime(), in init_dl_rq_bw_ratio()
2588 dl_rq->extra_bw = to_ratio(global_rt_period(), in init_dl_rq_bw_ratio()
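
init_dl_rq_bw_ratio() precomputes two fixed-point factors from the global RT bandwidth limit (both default to 1.0 when that limit is unlimited, as the first pair of assignments above shows): extra_bw is roughly rt_runtime/rt_period in 1<<BW_SHIFT units and bw_ratio its inverse in 1<<RATIO_SHIFT units, both used by the GRUB runtime-depletion math. A worked computation with the default 95% limit, assuming BW_SHIFT = 20, RATIO_SHIFT = 8 and the kernel's to_ratio(period, runtime) = (runtime << BW_SHIFT) / period, without its RUNTIME_INF and zero-period guards:

/*
 * Worked example of init_dl_rq_bw_ratio() with the default
 * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000:
 *   extra_bw ~= 0.95   in 1<<20 fixed point
 *   bw_ratio ~= 1/0.95 in 1<<8  fixed point
 * (the units cancel in the ratios, so microseconds work as well as ns).
 */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT        20
#define RATIO_SHIFT     8

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

int main(void)
{
        uint64_t rt_runtime = 950000, rt_period = 1000000;

        /* note the swapped argument order, exactly as in the lines above */
        uint64_t bw_ratio = to_ratio(rt_runtime, rt_period) >>
                            (BW_SHIFT - RATIO_SHIFT);
        uint64_t extra_bw = to_ratio(rt_period, rt_runtime);

        printf("bw_ratio = %llu (~%.3f * 2^8)\n",
               (unsigned long long)bw_ratio, bw_ratio / 256.0);
        printf("extra_bw = %llu (~%.3f * 2^20)\n",
               (unsigned long long)extra_bw, extra_bw / (double)(1 << BW_SHIFT));
        return 0;
}
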