Lines matching refs: rq (kernel/sched/rt.c)
120 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
122 return rt_rq->rq; in rq_of_rt_rq()
130 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
134 return rt_rq->rq; in rq_of_rt_se()
159 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
163 rt_rq->rq = rq; in init_tg_rt_entry()
173 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
231 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
233 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
236 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
245 struct rq *rq = rq_of_rt_se(rt_se); in rt_rq_of_se() local
247 return &rq->rt; in rt_rq_of_se()
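
The two rq_of_rt_rq()/rq_of_rt_se() variants above differ only in how they get back to the owning runqueue: the RT_GROUP_SCHED build stores a back-pointer in each rt_rq (rt_rq->rq), while the plain build embeds the one rt_rq directly in struct rq and recovers the enclosing structure with container_of(). A minimal, self-contained sketch of that container_of() idiom follows; the types are simplified stand-ins, not the kernel's definitions.

/*
 * Illustrative sketch only: recovering an enclosing structure from a
 * pointer to one of its members, as rq_of_rt_rq() does when struct rt_rq
 * is embedded directly in struct rq.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq { int highest_prio; };

struct rq {
	int cpu;
	struct rt_rq rt;	/* embedded, as in the !RT_GROUP_SCHED build */
};

static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
	struct rq rq = { .cpu = 3 };

	/* Given only &rq.rt, walk back to the enclosing rq. */
	printf("cpu = %d\n", rq_of_rt_rq(&rq.rt)->cpu);
	return 0;
}
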
260 static void pull_rt_task(struct rq *this_rq);
262 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) in need_pull_rt_task() argument
265 return rq->rt.highest_prio.curr > prev->prio; in need_pull_rt_task()
268 static inline int rt_overloaded(struct rq *rq) in rt_overloaded() argument
270 return atomic_read(&rq->rd->rto_count); in rt_overloaded()
273 static inline void rt_set_overload(struct rq *rq) in rt_set_overload() argument
275 if (!rq->online) in rt_set_overload()
278 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); in rt_set_overload()
289 atomic_inc(&rq->rd->rto_count); in rt_set_overload()
292 static inline void rt_clear_overload(struct rq *rq) in rt_clear_overload() argument
294 if (!rq->online) in rt_clear_overload()
298 atomic_dec(&rq->rd->rto_count); in rt_clear_overload()
299 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); in rt_clear_overload()
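
rt_overloaded()/rt_set_overload()/rt_clear_overload() above track, per root domain, how many CPUs currently have pushable RT work (rto_count) and which ones they are (rto_mask), and only publish a CPU while its runqueue is online. A minimal sketch of the same bookkeeping using C11 atomics; the struct layout and bit handling here are invented for the example, not the kernel's root_domain.

/*
 * Illustrative sketch only: per-domain RT overload bookkeeping modelled
 * with C11 atomics. The real code keeps rq->rd->rto_count and
 * rq->rd->rto_mask, and sets the mask bit before bumping the count.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct root_domain {
	atomic_int rto_count;		/* how many CPUs have pushable RT tasks */
	atomic_ulong rto_mask;		/* which CPUs those are */
};

static void rt_set_overload(struct root_domain *rd, int cpu)
{
	/* Make the CPU visible in the mask before the count says "go look". */
	atomic_fetch_or(&rd->rto_mask, 1UL << cpu);
	atomic_fetch_add(&rd->rto_count, 1);
}

static void rt_clear_overload(struct root_domain *rd, int cpu)
{
	atomic_fetch_sub(&rd->rto_count, 1);
	atomic_fetch_and(&rd->rto_mask, ~(1UL << cpu));
}

static bool rt_overloaded(struct root_domain *rd)
{
	return atomic_load(&rd->rto_count) > 0;
}

int main(void)
{
	struct root_domain rd = { 0 };

	rt_set_overload(&rd, 2);
	printf("overloaded: %d\n", rt_overloaded(&rd));
	rt_clear_overload(&rd, 2);
	printf("overloaded: %d\n", rt_overloaded(&rd));
	return 0;
}
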
349 static inline int has_pushable_tasks(struct rq *rq) in has_pushable_tasks() argument
351 return !plist_head_empty(&rq->rt.pushable_tasks); in has_pushable_tasks()
357 static void push_rt_tasks(struct rq *);
358 static void pull_rt_task(struct rq *);
360 static inline void rt_queue_push_tasks(struct rq *rq) in rt_queue_push_tasks() argument
362 if (!has_pushable_tasks(rq)) in rt_queue_push_tasks()
365 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks); in rt_queue_push_tasks()
368 static inline void rt_queue_pull_task(struct rq *rq) in rt_queue_pull_task() argument
370 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task); in rt_queue_pull_task()
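
rt_queue_push_tasks()/rt_queue_pull_task() do not balance immediately; they queue a per-CPU balance callback (rt_push_head/rt_pull_head) that the core scheduler runs once it can safely drop rq->lock. A minimal sketch of that deferred-callback pattern follows; the names and layout are invented for the example and are not the kernel's queue_balance_callback() machinery.

/*
 * Illustrative sketch only: deferring work that cannot run while a lock is
 * held. Callers queue a callback; the owner runs the pending list once it
 * is about to release the lock.
 */
#include <stdio.h>

struct runqueue;

struct balance_callback {
	struct balance_callback *next;
	void (*func)(struct runqueue *rq);
};

struct runqueue {
	int cpu;
	struct balance_callback *balance_head;	/* pending callbacks */
};

static void queue_balance_callback(struct runqueue *rq,
				   struct balance_callback *cb,
				   void (*func)(struct runqueue *))
{
	if (cb->func)			/* already queued, nothing to do */
		return;
	cb->func = func;
	cb->next = rq->balance_head;
	rq->balance_head = cb;
}

static void run_balance_callbacks(struct runqueue *rq)
{
	struct balance_callback *cb;

	while ((cb = rq->balance_head)) {
		rq->balance_head = cb->next;
		cb->func(rq);
		cb->func = NULL;	/* allow it to be queued again */
	}
}

static void push_rt_tasks(struct runqueue *rq)
{
	printf("pushing RT tasks away from CPU %d\n", rq->cpu);
}

int main(void)
{
	struct runqueue rq = { .cpu = 1 };
	struct balance_callback rt_push_head = { 0 };

	/* Inside the scheduler, with the runqueue lock notionally held: */
	queue_balance_callback(&rq, &rt_push_head, push_rt_tasks);

	/* Later, once it is safe to do balancing work: */
	run_balance_callbacks(&rq);
	return 0;
}
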
373 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
375 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
377 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); in enqueue_pushable_task()
380 if (p->prio < rq->rt.highest_prio.next) in enqueue_pushable_task()
381 rq->rt.highest_prio.next = p->prio; in enqueue_pushable_task()
384 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
386 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); in dequeue_pushable_task()
389 if (has_pushable_tasks(rq)) { in dequeue_pushable_task()
390 p = plist_first_entry(&rq->rt.pushable_tasks, in dequeue_pushable_task()
392 rq->rt.highest_prio.next = p->prio; in dequeue_pushable_task()
394 rq->rt.highest_prio.next = MAX_RT_PRIO; in dequeue_pushable_task()
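
enqueue_pushable_task()/dequeue_pushable_task() keep rq->rt.pushable_tasks sorted by priority (lower number wins) and cache the best pushable priority in highest_prio.next, falling back to MAX_RT_PRIO when the list drains. A minimal sketch of that bookkeeping with a plain sorted list standing in for the kernel's plist:

/*
 * Illustrative sketch only: a priority-sorted "pushable" list whose head is
 * always the highest-priority (numerically lowest) entry, plus a cached
 * next-priority value that can be read without walking the list.
 */
#include <stdio.h>

#define MAX_RT_PRIO 100		/* stand-in for "nothing pushable" */

struct task {
	int prio;		/* lower value == higher RT priority */
	struct task *next;
};

struct rt_runqueue {
	struct task *pushable;	/* sorted by prio, ascending */
	int highest_prio_next;	/* prio of the best pushable task */
};

static void enqueue_pushable(struct rt_runqueue *rt, struct task *p)
{
	struct task **pp = &rt->pushable;

	while (*pp && (*pp)->prio <= p->prio)
		pp = &(*pp)->next;
	p->next = *pp;
	*pp = p;

	if (p->prio < rt->highest_prio_next)
		rt->highest_prio_next = p->prio;
}

static void dequeue_pushable(struct rt_runqueue *rt, struct task *p)
{
	struct task **pp = &rt->pushable;

	while (*pp && *pp != p)
		pp = &(*pp)->next;
	if (*pp)
		*pp = p->next;

	/* Recompute the cache from the new head, as the kernel code does. */
	rt->highest_prio_next = rt->pushable ? rt->pushable->prio : MAX_RT_PRIO;
}

int main(void)
{
	struct rt_runqueue rt = { .pushable = NULL, .highest_prio_next = MAX_RT_PRIO };
	struct task a = { .prio = 40 }, b = { .prio = 10 };

	enqueue_pushable(&rt, &a);
	enqueue_pushable(&rt, &b);
	printf("next pushable prio: %d\n", rt.highest_prio_next);	/* 10 */
	dequeue_pushable(&rt, &b);
	printf("next pushable prio: %d\n", rt.highest_prio_next);	/* 40 */
	return 0;
}
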
399 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_task() argument
403 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_task() argument
417 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) in need_pull_rt_task() argument
422 static inline void pull_rt_task(struct rq *this_rq) in pull_rt_task()
426 static inline void rt_queue_push_tasks(struct rq *rq) in rt_queue_push_tasks() argument
469 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
472 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
488 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
491 int cpu = cpu_of(rq); in sched_rt_rq_enqueue()
502 resched_curr(rq); in sched_rt_rq_enqueue()
576 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
577 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
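
The second for_each_rt_rq() definition above, for builds without RT group scheduling, collapses to a loop over the single rt_rq embedded in the runqueue: the cursor starts at &rq->rt, the body runs once, and the cursor is then set to NULL; the (void) iter keeps the otherwise unused parameter from warning. A minimal sketch of that run-exactly-once macro shape:

/*
 * Illustrative sketch only: the "runs exactly once" for-loop used by the
 * !RT_GROUP_SCHED for_each_rt_rq(): point the cursor at the single element,
 * test it for NULL, and NULL it after the first pass.
 */
#include <stdio.h>

struct rt_rq { int nr_running; };
struct rq { struct rt_rq rt; };

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void)(iter), rt_rq = &(rq)->rt; rt_rq; rt_rq = NULL)

int main(void)
{
	struct rq rq = { .rt = { .nr_running = 2 } };
	struct rt_rq *rt_rq;
	int iter = 0;	/* unused here; kept so both macro variants share a call site */

	for_each_rt_rq(rt_rq, iter, &rq)
		printf("rt_nr_running = %d\n", rt_rq->nr_running);
	return 0;
}
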
589 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue() local
595 resched_curr(rq); in sched_rt_rq_enqueue()
690 static void __disable_runtime(struct rq *rq) in __disable_runtime() argument
692 struct root_domain *rd = rq->rd; in __disable_runtime()
699 for_each_rt_rq(rt_rq, iter, rq) { in __disable_runtime()
772 static void __enable_runtime(struct rq *rq) in __enable_runtime() argument
783 for_each_rt_rq(rt_rq, iter, rq) { in __enable_runtime()
833 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer() local
848 raw_spin_lock(&rq->lock); in do_sched_rt_period_timer()
849 update_rq_clock(rq); in do_sched_rt_period_timer()
870 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
871 rq_clock_cancel_skipupdate(rq); in do_sched_rt_period_timer()
886 raw_spin_unlock(&rq->lock); in do_sched_rt_period_timer()
954 static void update_curr_rt(struct rq *rq) in update_curr_rt() argument
956 struct task_struct *curr = rq->curr; in update_curr_rt()
964 now = rq_clock_task(rq); in update_curr_rt()
988 resched_curr(rq); in update_curr_rt()
997 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq() local
999 BUG_ON(&rq->rt != rt_rq); in dequeue_top_rt_rq()
1004 BUG_ON(!rq->nr_running); in dequeue_top_rt_rq()
1006 sub_nr_running(rq, rt_rq->rt_nr_running); in dequeue_top_rt_rq()
1014 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq() local
1016 BUG_ON(&rq->rt != rt_rq); in enqueue_top_rt_rq()
1025 add_nr_running(rq, rt_rq->rt_nr_running); in enqueue_top_rt_rq()
1030 cpufreq_update_util(rq, 0); in enqueue_top_rt_rq()
1038 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_prio_smp() local
1044 if (&rq->rt != rt_rq) in inc_rt_prio_smp()
1047 if (rq->online && prio < prev_prio) in inc_rt_prio_smp()
1048 cpupri_set(&rq->rd->cpupri, rq->cpu, prio); in inc_rt_prio_smp()
1054 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_prio_smp() local
1060 if (&rq->rt != rt_rq) in dec_rt_prio_smp()
1063 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1064 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1301 struct rq *rq = rq_of_rt_se(rt_se); in enqueue_rt_entity() local
1306 enqueue_top_rt_rq(&rq->rt); in enqueue_rt_entity()
1311 struct rq *rq = rq_of_rt_se(rt_se); in dequeue_rt_entity() local
1321 enqueue_top_rt_rq(&rq->rt); in dequeue_rt_entity()
1328 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_rt() argument
1337 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_rt()
1338 enqueue_pushable_task(rq, p); in enqueue_task_rt()
1341 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_rt() argument
1345 update_curr_rt(rq); in dequeue_task_rt()
1348 dequeue_pushable_task(rq, p); in dequeue_task_rt()
1369 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) in requeue_task_rt() argument
1380 static void yield_task_rt(struct rq *rq) in yield_task_rt() argument
1382 requeue_task_rt(rq, rq->curr, 0); in yield_task_rt()
1392 struct rq *rq; in select_task_rq_rt() local
1398 rq = cpu_rq(cpu); in select_task_rq_rt()
1401 curr = READ_ONCE(rq->curr); /* unlocked access */ in select_task_rq_rt()
1444 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) in check_preempt_equal_prio() argument
1450 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_prio()
1451 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) in check_preempt_equal_prio()
1459 && cpupri_find(&rq->rd->cpupri, p, NULL)) in check_preempt_equal_prio()
1467 requeue_task_rt(rq, p, 1); in check_preempt_equal_prio()
1468 resched_curr(rq); in check_preempt_equal_prio()
1476 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_rt() argument
1478 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
1479 resched_curr(rq); in check_preempt_curr_rt()
1496 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) in check_preempt_curr_rt()
1497 check_preempt_equal_prio(rq, p); in check_preempt_curr_rt()
1501 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, in pick_next_rt_entity() argument
1518 static struct task_struct *_pick_next_task_rt(struct rq *rq) in _pick_next_task_rt() argument
1522 struct rt_rq *rt_rq = &rq->rt; in _pick_next_task_rt()
1525 rt_se = pick_next_rt_entity(rq, rt_rq); in _pick_next_task_rt()
1531 p->se.exec_start = rq_clock_task(rq); in _pick_next_task_rt()
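
pick_next_rt_entity()/_pick_next_task_rt() above pick the highest-priority runnable RT entity and stamp its exec_start from the runqueue clock. The pick itself is O(1): find the first set bit in a bitmap of non-empty priority queues and take that queue's head. A minimal sketch of the bitmap-plus-queue idea, simplified to one 64-bit word rather than the kernel's rt_prio_array:

/*
 * Illustrative sketch only: O(1) "pick the highest-priority runnable" using
 * a bitmap of non-empty queues, in the spirit of pick_next_rt_entity().
 */
#include <stdint.h>
#include <stdio.h>

#define NR_PRIO 64			/* one 64-bit word for the sketch */

struct prio_array {
	uint64_t bitmap;		/* bit p set => queue[p] non-empty */
	int queue[NR_PRIO];		/* head task id per priority, -1 if empty */
};

static void enqueue(struct prio_array *a, int prio, int task_id)
{
	a->queue[prio] = task_id;
	a->bitmap |= UINT64_C(1) << prio;
}

static int pick_next(const struct prio_array *a)
{
	if (!a->bitmap)
		return -1;			/* nothing runnable */

	int prio = __builtin_ctzll(a->bitmap);	/* lowest set bit wins */
	return a->queue[prio];
}

int main(void)
{
	struct prio_array a = { 0 };

	for (int i = 0; i < NR_PRIO; i++)
		a.queue[i] = -1;
	enqueue(&a, 40, 1001);
	enqueue(&a, 10, 1002);
	printf("next task: %d\n", pick_next(&a));	/* 1002: prio 10 beats 40 */
	return 0;
}
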
1537 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task_rt() argument
1540 struct rt_rq *rt_rq = &rq->rt; in pick_next_task_rt()
1542 if (need_pull_rt_task(rq, prev)) { in pick_next_task_rt()
1549 rq_unpin_lock(rq, rf); in pick_next_task_rt()
1550 pull_rt_task(rq); in pick_next_task_rt()
1551 rq_repin_lock(rq, rf); in pick_next_task_rt()
1557 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) || in pick_next_task_rt()
1558 rq->dl.dl_nr_running)) in pick_next_task_rt()
1567 update_curr_rt(rq); in pick_next_task_rt()
1572 put_prev_task(rq, prev); in pick_next_task_rt()
1574 p = _pick_next_task_rt(rq); in pick_next_task_rt()
1577 dequeue_pushable_task(rq, p); in pick_next_task_rt()
1579 rt_queue_push_tasks(rq); in pick_next_task_rt()
1586 if (rq->curr->sched_class != &rt_sched_class) in pick_next_task_rt()
1587 update_rt_rq_load_avg(rq_clock_task(rq), rq, 0); in pick_next_task_rt()
1592 static void put_prev_task_rt(struct rq *rq, struct task_struct *p) in put_prev_task_rt() argument
1594 update_curr_rt(rq); in put_prev_task_rt()
1596 update_rt_rq_load_avg(rq_clock_task(rq), rq, 1); in put_prev_task_rt()
1603 enqueue_pushable_task(rq, p); in put_prev_task_rt()
1611 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) in pick_rt_task() argument
1613 if (!task_running(rq, p) && in pick_rt_task()
1624 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) in pick_highest_pushable_task() argument
1626 struct plist_head *head = &rq->rt.pushable_tasks; in pick_highest_pushable_task()
1629 if (!has_pushable_tasks(rq)) in pick_highest_pushable_task()
1633 if (pick_rt_task(rq, p, cpu)) in pick_highest_pushable_task()
1718 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) in find_lock_lowest_rq() argument
1720 struct rq *lowest_rq = NULL; in find_lock_lowest_rq()
1727 if ((cpu == -1) || (cpu == rq->cpu)) in find_lock_lowest_rq()
1743 if (double_lock_balance(rq, lowest_rq)) { in find_lock_lowest_rq()
1750 if (unlikely(task_rq(task) != rq || in find_lock_lowest_rq()
1752 task_running(rq, task) || in find_lock_lowest_rq()
1756 double_unlock_balance(rq, lowest_rq); in find_lock_lowest_rq()
1767 double_unlock_balance(rq, lowest_rq); in find_lock_lowest_rq()
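
find_lock_lowest_rq() has to hold both the source and target runqueue locks before migrating, and double_lock_balance() may drop the source lock to respect lock ordering, so the references above show it rechecking afterwards that the task is still on this runqueue, still queued, not running, and still allowed on the target CPU. A minimal sketch of that lock-in-order-then-revalidate pattern using pthread mutexes; every type and helper here is invented for the example.

/*
 * Illustrative sketch only: take two per-CPU locks in a fixed (address)
 * order, then revalidate, because dropping our own lock lets the state
 * change underneath us.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct runqueue {
	pthread_mutex_t lock;
	int cpu;
};

struct task {
	struct runqueue *rq;	/* runqueue the task is currently on */
	bool queued;
};

/* Lock both runqueues, lowest address first, releasing ours if needed. */
static void double_lock(struct runqueue *mine, struct runqueue *other)
{
	if (other < mine) {
		pthread_mutex_unlock(&mine->lock);	/* state may change here */
		pthread_mutex_lock(&other->lock);
		pthread_mutex_lock(&mine->lock);
	} else {
		pthread_mutex_lock(&other->lock);
	}
}

static bool try_migrate(struct runqueue *mine, struct runqueue *target,
			struct task *p)
{
	double_lock(mine, target);

	/* Revalidate: the task may have moved or been dequeued meanwhile. */
	if (p->rq != mine || !p->queued) {
		pthread_mutex_unlock(&target->lock);
		return false;
	}

	p->rq = target;		/* commit the migration */
	pthread_mutex_unlock(&target->lock);
	return true;
}

int main(void)
{
	struct runqueue rq0 = { .lock = PTHREAD_MUTEX_INITIALIZER, .cpu = 0 };
	struct runqueue rq1 = { .lock = PTHREAD_MUTEX_INITIALIZER, .cpu = 1 };
	struct task p = { .rq = &rq0, .queued = true };

	pthread_mutex_lock(&rq0.lock);		/* we already hold our own lock */
	printf("migrated: %d\n", try_migrate(&rq0, &rq1, &p));
	pthread_mutex_unlock(&rq0.lock);
	return 0;
}
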
1774 static struct task_struct *pick_next_pushable_task(struct rq *rq) in pick_next_pushable_task() argument
1778 if (!has_pushable_tasks(rq)) in pick_next_pushable_task()
1781 p = plist_first_entry(&rq->rt.pushable_tasks, in pick_next_pushable_task()
1784 BUG_ON(rq->cpu != task_cpu(p)); in pick_next_pushable_task()
1785 BUG_ON(task_current(rq, p)); in pick_next_pushable_task()
1799 static int push_rt_task(struct rq *rq) in push_rt_task() argument
1802 struct rq *lowest_rq; in push_rt_task()
1805 if (!rq->rt.overloaded) in push_rt_task()
1808 next_task = pick_next_pushable_task(rq); in push_rt_task()
1813 if (unlikely(next_task == rq->curr)) { in push_rt_task()
1823 if (unlikely(next_task->prio < rq->curr->prio)) { in push_rt_task()
1824 resched_curr(rq); in push_rt_task()
1832 lowest_rq = find_lock_lowest_rq(next_task, rq); in push_rt_task()
1843 task = pick_next_pushable_task(rq); in push_rt_task()
1866 deactivate_task(rq, next_task, 0); in push_rt_task()
1873 double_unlock_balance(rq, lowest_rq); in push_rt_task()
1881 static void push_rt_tasks(struct rq *rq) in push_rt_tasks() argument
1884 while (push_rt_task(rq)) in push_rt_tasks()
1988 static void tell_cpu_to_push(struct rq *rq) in tell_cpu_to_push() argument
1993 atomic_inc(&rq->rd->rto_loop_next); in tell_cpu_to_push()
1996 if (!rto_start_trylock(&rq->rd->rto_loop_start)) in tell_cpu_to_push()
1999 raw_spin_lock(&rq->rd->rto_lock); in tell_cpu_to_push()
2007 if (rq->rd->rto_cpu < 0) in tell_cpu_to_push()
2008 cpu = rto_next_cpu(rq->rd); in tell_cpu_to_push()
2010 raw_spin_unlock(&rq->rd->rto_lock); in tell_cpu_to_push()
2012 rto_start_unlock(&rq->rd->rto_loop_start); in tell_cpu_to_push()
2016 sched_get_rd(rq->rd); in tell_cpu_to_push()
2017 irq_work_queue_on(&rq->rd->rto_push_work, cpu); in tell_cpu_to_push()
2026 struct rq *rq; in rto_push_irq_work_func() local
2029 rq = this_rq(); in rto_push_irq_work_func()
2035 if (has_pushable_tasks(rq)) { in rto_push_irq_work_func()
2036 raw_spin_lock(&rq->lock); in rto_push_irq_work_func()
2037 push_rt_tasks(rq); in rto_push_irq_work_func()
2038 raw_spin_unlock(&rq->lock); in rto_push_irq_work_func()
2058 static void pull_rt_task(struct rq *this_rq) in pull_rt_task()
2063 struct rq *src_rq; in pull_rt_task()
2160 static void task_woken_rt(struct rq *rq, struct task_struct *p) in task_woken_rt() argument
2162 if (!task_running(rq, p) && in task_woken_rt()
2163 !test_tsk_need_resched(rq->curr) && in task_woken_rt()
2165 (dl_task(rq->curr) || rt_task(rq->curr)) && in task_woken_rt()
2166 (rq->curr->nr_cpus_allowed < 2 || in task_woken_rt()
2167 rq->curr->prio <= p->prio)) in task_woken_rt()
2168 push_rt_tasks(rq); in task_woken_rt()
2172 static void rq_online_rt(struct rq *rq) in rq_online_rt() argument
2174 if (rq->rt.overloaded) in rq_online_rt()
2175 rt_set_overload(rq); in rq_online_rt()
2177 __enable_runtime(rq); in rq_online_rt()
2179 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); in rq_online_rt()
2183 static void rq_offline_rt(struct rq *rq) in rq_offline_rt() argument
2185 if (rq->rt.overloaded) in rq_offline_rt()
2186 rt_clear_overload(rq); in rq_offline_rt()
2188 __disable_runtime(rq); in rq_offline_rt()
2190 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); in rq_offline_rt()
2197 static void switched_from_rt(struct rq *rq, struct task_struct *p) in switched_from_rt() argument
2206 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) in switched_from_rt()
2209 rt_queue_pull_task(rq); in switched_from_rt()
2228 static void switched_to_rt(struct rq *rq, struct task_struct *p) in switched_to_rt() argument
2237 if (task_on_rq_queued(p) && rq->curr != p) { in switched_to_rt()
2239 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) in switched_to_rt()
2240 rt_queue_push_tasks(rq); in switched_to_rt()
2242 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
2243 resched_curr(rq); in switched_to_rt()
2252 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) in prio_changed_rt() argument
2257 if (rq->curr == p) { in prio_changed_rt()
2264 rt_queue_pull_task(rq); in prio_changed_rt()
2270 if (p->prio > rq->rt.highest_prio.curr) in prio_changed_rt()
2271 resched_curr(rq); in prio_changed_rt()
2275 resched_curr(rq); in prio_changed_rt()
2283 if (p->prio < rq->curr->prio) in prio_changed_rt()
2284 resched_curr(rq); in prio_changed_rt()
2289 static void watchdog(struct rq *rq, struct task_struct *p) in watchdog() argument
2311 static inline void watchdog(struct rq *rq, struct task_struct *p) { } in watchdog() argument
2322 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) in task_tick_rt() argument
2326 update_curr_rt(rq); in task_tick_rt()
2327 update_rt_rq_load_avg(rq_clock_task(rq), rq, 1); in task_tick_rt()
2329 watchdog(rq, p); in task_tick_rt()
2349 requeue_task_rt(rq, p, 0); in task_tick_rt()
2350 resched_curr(rq); in task_tick_rt()
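
task_tick_rt() above charges runtime, updates the RT load average, runs the RT watchdog, and for SCHED_RR tasks decrements the timeslice; when the slice reaches zero it is refilled and, if the task is not alone at its priority, the task is requeued to the tail of its queue and a reschedule is requested. A minimal sketch of that round-robin part of the tick, with RR_TIMESLICE and the requeue as stand-ins for the kernel's definitions:

/*
 * Illustrative sketch only: the SCHED_RR side of the tick. When the running
 * task's slice runs out, refill it, move the task to the tail of its
 * priority queue and ask for a reschedule.
 */
#include <stdbool.h>
#include <stdio.h>

#define RR_TIMESLICE 100	/* ticks per round-robin slice (stand-in value) */

struct task {
	int policy;		/* 0 = FIFO, 1 = RR for this sketch */
	int time_slice;
	bool need_resched;
};

static void requeue_tail(struct task *p)
{
	/* Placeholder for moving p to the tail of its priority list. */
	printf("requeueing task to tail of its priority queue\n");
}

static void task_tick_rr(struct task *p)
{
	if (p->policy != 1)	/* SCHED_FIFO tasks are not rotated by the tick */
		return;

	if (--p->time_slice)
		return;

	p->time_slice = RR_TIMESLICE;
	requeue_tail(p);
	p->need_resched = true;
}

int main(void)
{
	struct task p = { .policy = 1, .time_slice = 2 };

	task_tick_rr(&p);	/* slice 2 -> 1, nothing else happens */
	task_tick_rr(&p);	/* slice hits 0: refill, requeue, resched */
	printf("need_resched = %d, time_slice = %d\n", p.need_resched, p.time_slice);
	return 0;
}
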
2356 static void set_curr_task_rt(struct rq *rq) in set_curr_task_rt() argument
2358 struct task_struct *p = rq->curr; in set_curr_task_rt()
2360 p->se.exec_start = rq_clock_task(rq); in set_curr_task_rt()
2363 dequeue_pushable_task(rq, p); in set_curr_task_rt()
2366 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) in get_rr_interval_rt() argument