Lines matching refs:rq in kernel/sched/core.c
45 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
180 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
181 __acquires(rq->lock) in __task_rq_lock()
183 struct rq *rq; in __task_rq_lock() local
188 rq = task_rq(p); in __task_rq_lock()
189 raw_spin_lock(&rq->lock); in __task_rq_lock()
190 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
191 rq_pin_lock(rq, rf); in __task_rq_lock()
192 return rq; in __task_rq_lock()
194 raw_spin_unlock(&rq->lock); in __task_rq_lock()
204 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
206 __acquires(rq->lock) in task_rq_lock()
208 struct rq *rq; in task_rq_lock() local
212 rq = task_rq(p); in task_rq_lock()
213 raw_spin_lock(&rq->lock); in task_rq_lock()
231 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
232 rq_pin_lock(rq, rf); in task_rq_lock()
233 return rq; in task_rq_lock()
235 raw_spin_unlock(&rq->lock); in task_rq_lock()
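
The __task_rq_lock()/task_rq_lock() pairs above pin a task to its runqueue across a concurrent migration; most later hits in this listing follow the same lock/operate/unlock shape. A minimal sketch of that usage, under the assumption that it lives inside kernel/sched/ where the sched.h helpers are visible (read_task_rq_clock() is a hypothetical name, not from the file):

        /* Hypothetical helper: sample p's runqueue clock with the task pinned. */
        static u64 read_task_rq_clock(struct task_struct *p)
        {
                struct rq_flags rf;
                struct rq *rq;
                u64 clock;

                rq = task_rq_lock(p, &rf);      /* holds rq->lock, blocks migration */
                update_rq_clock(rq);            /* legal: rq->lock is held */
                clock = rq_clock(rq);
                task_rq_unlock(rq, p, &rf);

                return clock;
        }
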
247 static void update_rq_clock_task(struct rq *rq, s64 delta) in update_rq_clock_task() argument
256 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
276 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
281 steal = paravirt_steal_clock(cpu_of(rq)); in update_rq_clock_task()
282 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
287 rq->prev_steal_time_rq += steal; in update_rq_clock_task()
292 rq->clock_task += delta; in update_rq_clock_task()
296 update_irq_load_avg(rq, irq_delta + steal); in update_rq_clock_task()
298 update_rq_clock_pelt(rq, delta); in update_rq_clock_task()
301 void update_rq_clock(struct rq *rq) in update_rq_clock() argument
305 lockdep_assert_held(&rq->lock); in update_rq_clock()
307 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
312 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
313 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
316 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
319 rq->clock += delta; in update_rq_clock()
320 update_rq_clock_task(rq, delta); in update_rq_clock()
324 rq_csd_init(struct rq *rq, call_single_data_t *csd, smp_call_func_t func) in rq_csd_init() argument
328 csd->info = rq; in rq_csd_init()
336 static void hrtick_clear(struct rq *rq) in hrtick_clear() argument
338 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
339 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
348 struct rq *rq = container_of(timer, struct rq, hrtick_timer); in hrtick() local
351 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); in hrtick()
353 rq_lock(rq, &rf); in hrtick()
354 update_rq_clock(rq); in hrtick()
355 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
356 rq_unlock(rq, &rf); in hrtick()
363 static void __hrtick_restart(struct rq *rq) in __hrtick_restart() argument
365 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
375 struct rq *rq = arg; in __hrtick_start() local
378 rq_lock(rq, &rf); in __hrtick_start()
379 __hrtick_restart(rq); in __hrtick_start()
380 rq_unlock(rq, &rf); in __hrtick_start()
388 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
390 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
403 if (rq == this_rq()) in hrtick_start()
404 __hrtick_restart(rq); in hrtick_start()
406 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
415 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
422 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
428 static void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
431 rq_csd_init(rq, &rq->hrtick_csd, __hrtick_start); in hrtick_rq_init()
433 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in hrtick_rq_init()
434 rq->hrtick_timer.function = hrtick; in hrtick_rq_init()
437 static inline void hrtick_clear(struct rq *rq) in hrtick_clear() argument
441 static inline void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
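
The hrtick_*() lines above implement the optional high-resolution preemption tick; hrtick_start() arms it for a given delay in nanoseconds. A hedged sketch of how a scheduling class might arm it, assuming the hrtick_enabled() check from kernel/sched/sched.h (arm_slice_timer() is a hypothetical name):

        /* Hypothetical: arm the high-res tick for the remainder of a slice. */
        static void arm_slice_timer(struct rq *rq, u64 slice_remaining_ns)
        {
                if (hrtick_enabled(rq))         /* HRTICK feature on, hres timers active */
                        hrtick_start(rq, slice_remaining_ns);
        }
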
609 void resched_curr(struct rq *rq) in resched_curr() argument
611 struct task_struct *curr = rq->curr; in resched_curr()
614 lockdep_assert_held(&rq->lock); in resched_curr()
619 cpu = cpu_of(rq); in resched_curr()
635 struct rq *rq = cpu_rq(cpu); in resched_cpu() local
638 raw_spin_lock_irqsave(&rq->lock, flags); in resched_cpu()
640 resched_curr(rq); in resched_cpu()
641 raw_spin_unlock_irqrestore(&rq->lock, flags); in resched_cpu()
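
resched_curr() must be called with the target runqueue's lock held (note the lockdep_assert_held() at line 614), and resched_cpu() above is the lock-taking wrapper around it. The same wrapper can be written with the rq_lock helpers that appear elsewhere in this listing (kick_cpu_resched() is a hypothetical name):

        /* Hypothetical: request a reschedule on @cpu via the rq_lock helpers. */
        static void kick_cpu_resched(int cpu)
        {
                struct rq *rq = cpu_rq(cpu);
                struct rq_flags rf;

                rq_lock_irqsave(rq, &rf);
                resched_curr(rq);
                rq_unlock_irqrestore(rq, &rf);
        }
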
699 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu() local
704 if (set_nr_and_not_polling(rq->idle)) in wake_up_idle_cpu()
743 struct rq *rq = info; in nohz_csd_func() local
744 int cpu = cpu_of(rq); in nohz_csd_func()
753 rq->idle_balance = idle_cpu(cpu); in nohz_csd_func()
754 if (rq->idle_balance && !need_resched()) { in nohz_csd_func()
755 rq->nohz_idle_balance = flags; in nohz_csd_func()
763 bool sched_can_stop_tick(struct rq *rq) in sched_can_stop_tick() argument
768 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
775 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
776 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
786 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
795 if (rq->nr_running > 1) in sched_can_stop_tick()
960 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, in uclamp_idle_value() argument
969 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; in uclamp_idle_value()
976 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, in uclamp_idle_reset() argument
980 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_idle_reset()
983 WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value); in uclamp_idle_reset()
987 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, in uclamp_rq_max_value() argument
990 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; in uclamp_rq_max_value()
1004 return uclamp_idle_value(rq, clamp_id, clamp_value); in uclamp_rq_max_value()
1027 struct rq *rq; in uclamp_update_util_min_rt_default() local
1033 rq = task_rq_lock(p, &rf); in uclamp_update_util_min_rt_default()
1035 task_rq_unlock(rq, p, &rf); in uclamp_update_util_min_rt_default()
1133 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_inc_id() argument
1136 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_inc_id()
1140 lockdep_assert_held(&rq->lock); in uclamp_rq_inc_id()
1149 uclamp_idle_reset(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1171 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, in uclamp_rq_dec_id() argument
1174 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_dec_id()
1180 lockdep_assert_held(&rq->lock); in uclamp_rq_dec_id()
1232 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); in uclamp_rq_dec_id()
1237 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) in uclamp_rq_inc() argument
1254 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_inc()
1257 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_inc()
1258 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_inc()
1261 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) in uclamp_rq_dec() argument
1278 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_dec()
1285 struct rq *rq; in uclamp_update_active() local
1295 rq = task_rq_lock(p, &rf); in uclamp_update_active()
1304 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_update_active()
1305 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_update_active()
1308 task_rq_unlock(rq, p, &rf); in uclamp_update_active()
1507 static void __init init_uclamp_rq(struct rq *rq) in init_uclamp_rq() argument
1510 struct uclamp_rq *uc_rq = rq->uclamp; in init_uclamp_rq()
1518 rq->uclamp_flags = 0; in init_uclamp_rq()
1547 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } in uclamp_rq_inc() argument
1548 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } in uclamp_rq_dec() argument
1561 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
1564 update_rq_clock(rq); in enqueue_task()
1567 sched_info_queued(rq, p); in enqueue_task()
1571 uclamp_rq_inc(rq, p); in enqueue_task()
1572 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
1575 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
1578 update_rq_clock(rq); in dequeue_task()
1581 sched_info_dequeued(rq, p); in dequeue_task()
1585 uclamp_rq_dec(rq, p); in dequeue_task()
1586 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
1589 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
1591 enqueue_task(rq, p, flags); in activate_task()
1596 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
1600 dequeue_task(rq, p, flags); in deactivate_task()
1669 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
1675 prev_class->switched_from(rq, p); in check_class_changed()
1677 p->sched_class->switched_to(rq, p); in check_class_changed()
1679 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
1682 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
1684 if (p->sched_class == rq->curr->sched_class) in check_preempt_curr()
1685 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
1686 else if (p->sched_class > rq->curr->sched_class) in check_preempt_curr()
1687 resched_curr(rq); in check_preempt_curr()
1693 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in check_preempt_curr()
1694 rq_clock_skip_update(rq); in check_preempt_curr()
1733 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task() argument
1736 lockdep_assert_held(&rq->lock); in move_queued_task()
1738 deactivate_task(rq, p, DEQUEUE_NOCLOCK); in move_queued_task()
1740 rq_unlock(rq, rf); in move_queued_task()
1742 rq = cpu_rq(new_cpu); in move_queued_task()
1744 rq_lock(rq, rf); in move_queued_task()
1746 activate_task(rq, p, 0); in move_queued_task()
1747 check_preempt_curr(rq, p, 0); in move_queued_task()
1749 return rq; in move_queued_task()
1766 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task() argument
1771 return rq; in __migrate_task()
1773 update_rq_clock(rq); in __migrate_task()
1774 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
1776 return rq; in __migrate_task()
1788 struct rq *rq = this_rq(); in migration_cpu_stop() local
1804 rq_lock(rq, &rf); in migration_cpu_stop()
1810 if (task_rq(p) == rq) { in migration_cpu_stop()
1812 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
1816 rq_unlock(rq, &rf); in migration_cpu_stop()
1835 struct rq *rq = task_rq(p); in do_set_cpus_allowed() local
1841 running = task_current(rq, p); in do_set_cpus_allowed()
1848 lockdep_assert_held(&rq->lock); in do_set_cpus_allowed()
1849 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in do_set_cpus_allowed()
1852 put_prev_task(rq, p); in do_set_cpus_allowed()
1857 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in do_set_cpus_allowed()
1859 set_next_task(rq, p); in do_set_cpus_allowed()
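
do_set_cpus_allowed() above is one instance of the requeue idiom this file applies whenever an attribute of a possibly queued or running task changes; rt_mutex_setprio(), set_user_nice(), __sched_setscheduler(), sched_setnuma() and sched_move_task() further down follow the same shape. A condensed, hypothetical sketch of the idiom (the attribute update itself is elided; change_task_attr() is not a real function):

        /* Hypothetical: change a scheduling attribute of @p while keeping its
         * queued/running state consistent. Caller holds rq->lock and has
         * already called update_rq_clock(), hence the NOCLOCK flags. */
        static void change_task_attr(struct rq *rq, struct task_struct *p)
        {
                bool queued = task_on_rq_queued(p);
                bool running = task_current(rq, p);

                lockdep_assert_held(&rq->lock);

                if (queued)
                        dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
                if (running)
                        put_prev_task(rq, p);

                /* ... modify p's scheduling attributes here ... */

                if (queued)
                        enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
                if (running)
                        set_next_task(rq, p);
        }
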
1877 struct rq *rq; in __set_cpus_allowed_ptr() local
1880 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
1881 update_rq_clock(rq); in __set_cpus_allowed_ptr()
1929 if (task_running(rq, p) || p->state == TASK_WAKING) { in __set_cpus_allowed_ptr()
1932 task_rq_unlock(rq, p, &rf); in __set_cpus_allowed_ptr()
1933 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); in __set_cpus_allowed_ptr()
1940 rq = move_queued_task(rq, &rf, p, dest_cpu); in __set_cpus_allowed_ptr()
1943 task_rq_unlock(rq, p, &rf); in __set_cpus_allowed_ptr()
2010 struct rq *src_rq, *dst_rq; in __migrate_swap_task()
2045 struct rq *src_rq, *dst_rq; in migrate_swap_stop()
2144 struct rq *rq; in wait_task_inactive() local
2153 rq = task_rq(p); in wait_task_inactive()
2166 while (task_running(rq, p)) { in wait_task_inactive()
2177 rq = task_rq_lock(p, &rf); in wait_task_inactive()
2179 running = task_running(rq, p); in wait_task_inactive()
2184 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
2419 struct rq *rq; in ttwu_stat() local
2424 rq = this_rq(); in ttwu_stat()
2427 if (cpu == rq->cpu) { in ttwu_stat()
2428 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
2435 for_each_domain(rq->cpu, sd) { in ttwu_stat()
2448 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
2458 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_wakeup() argument
2461 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
2471 rq_unpin_lock(rq, rf); in ttwu_do_wakeup()
2472 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
2473 rq_repin_lock(rq, rf); in ttwu_do_wakeup()
2476 if (rq->idle_stamp) { in ttwu_do_wakeup()
2477 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_wakeup()
2478 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_wakeup()
2480 update_avg(&rq->avg_idle, delta); in ttwu_do_wakeup()
2482 if (rq->avg_idle > max) in ttwu_do_wakeup()
2483 rq->avg_idle = max; in ttwu_do_wakeup()
2485 rq->idle_stamp = 0; in ttwu_do_wakeup()
2491 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_activate() argument
2496 lockdep_assert_held(&rq->lock); in ttwu_do_activate()
2499 rq->nr_uninterruptible--; in ttwu_do_activate()
2511 activate_task(rq, p, en_flags); in ttwu_do_activate()
2512 ttwu_do_wakeup(rq, p, wake_flags, rf); in ttwu_do_activate()
2543 struct rq *rq; in ttwu_runnable() local
2546 rq = __task_rq_lock(p, &rf); in ttwu_runnable()
2549 update_rq_clock(rq); in ttwu_runnable()
2550 ttwu_do_wakeup(rq, p, wake_flags, &rf); in ttwu_runnable()
2553 __task_rq_unlock(rq, &rf); in ttwu_runnable()
2562 struct rq *rq = this_rq(); in sched_ttwu_pending() local
2574 WRITE_ONCE(rq->ttwu_pending, 0); in sched_ttwu_pending()
2576 rq_lock_irqsave(rq, &rf); in sched_ttwu_pending()
2577 update_rq_clock(rq); in sched_ttwu_pending()
2583 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) in sched_ttwu_pending()
2584 set_task_cpu(p, cpu_of(rq)); in sched_ttwu_pending()
2586 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
2589 rq_unlock_irqrestore(rq, &rf); in sched_ttwu_pending()
2594 struct rq *rq = cpu_rq(cpu); in send_call_function_single_ipi() local
2596 if (!set_nr_if_polling(rq->idle)) in send_call_function_single_ipi()
2610 struct rq *rq = cpu_rq(cpu); in __ttwu_queue_wakelist() local
2614 WRITE_ONCE(rq->ttwu_pending, 1); in __ttwu_queue_wakelist()
2620 struct rq *rq = cpu_rq(cpu); in wake_up_if_idle() local
2625 if (!is_idle_task(rcu_dereference(rq->curr))) in wake_up_if_idle()
2628 if (set_nr_if_polling(rq->idle)) { in wake_up_if_idle()
2631 rq_lock_irqsave(rq, &rf); in wake_up_if_idle()
2632 if (is_idle_task(rq->curr)) in wake_up_if_idle()
2635 rq_unlock_irqrestore(rq, &rf); in wake_up_if_idle()
2693 struct rq *rq = cpu_rq(cpu); in ttwu_queue() local
2699 rq_lock(rq, &rf); in ttwu_queue()
2700 update_rq_clock(rq); in ttwu_queue()
2701 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
2702 rq_unlock(rq, &rf); in ttwu_queue()
3012 struct rq *rq; in try_invoke_on_locked_down_task() local
3017 rq = __task_rq_lock(p, &rf); in try_invoke_on_locked_down_task()
3018 if (task_rq(p) == rq) in try_invoke_on_locked_down_task()
3020 rq_unlock(rq, &rf); in try_invoke_on_locked_down_task()
3337 struct rq *rq; in wake_up_new_task() local
3354 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
3355 update_rq_clock(rq); in wake_up_new_task()
3358 activate_task(rq, p, ENQUEUE_NOCLOCK); in wake_up_new_task()
3360 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
3367 rq_unpin_lock(rq, &rf); in wake_up_new_task()
3368 p->sched_class->task_woken(rq, p); in wake_up_new_task()
3369 rq_repin_lock(rq, &rf); in wake_up_new_task()
3372 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
3494 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) in prepare_lock_switch() argument
3502 rq_unpin_lock(rq, rf); in prepare_lock_switch()
3503 spin_release(&rq->lock.dep_map, _THIS_IP_); in prepare_lock_switch()
3506 rq->lock.owner = next; in prepare_lock_switch()
3510 static inline void finish_lock_switch(struct rq *rq) in finish_lock_switch() argument
3517 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
3518 raw_spin_unlock_irq(&rq->lock); in finish_lock_switch()
3547 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
3551 sched_info_switch(rq, prev, next); in prepare_task_switch()
3578 static struct rq *finish_task_switch(struct task_struct *prev) in finish_task_switch()
3579 __releases(rq->lock) in finish_task_switch()
3581 struct rq *rq = this_rq(); in finish_task_switch() local
3582 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
3601 rq->prev_mm = NULL; in finish_task_switch()
3618 finish_lock_switch(rq); in finish_task_switch()
3656 return rq; in finish_task_switch()
3662 static void __balance_callback(struct rq *rq) in __balance_callback() argument
3665 void (*func)(struct rq *rq); in __balance_callback()
3668 raw_spin_lock_irqsave(&rq->lock, flags); in __balance_callback()
3669 head = rq->balance_callback; in __balance_callback()
3670 rq->balance_callback = NULL; in __balance_callback()
3672 func = (void (*)(struct rq *))head->func; in __balance_callback()
3677 func(rq); in __balance_callback()
3679 raw_spin_unlock_irqrestore(&rq->lock, flags); in __balance_callback()
3682 static inline void balance_callback(struct rq *rq) in balance_callback() argument
3684 if (unlikely(rq->balance_callback)) in balance_callback()
3685 __balance_callback(rq); in balance_callback()
3690 static inline void balance_callback(struct rq *rq) in balance_callback() argument
3701 __releases(rq->lock) in schedule_tail()
3703 struct rq *rq; in schedule_tail() local
3714 rq = finish_task_switch(prev); in schedule_tail()
3715 balance_callback(rq); in schedule_tail()
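
__balance_callback()/balance_callback() above drain work that scheduling classes queue while rq->lock is held, so that it runs once the lock can be dropped safely. A hedged sketch of the producer side, assuming the queue_balance_callback() helper from kernel/sched/sched.h (my_balance_fn and the per-CPU callback head are hypothetical):

        static void my_balance_fn(struct rq *rq);       /* hypothetical worker */
        static DEFINE_PER_CPU(struct callback_head, my_balance_head);

        /* Hypothetical: queue deferred balancing work from under rq->lock. */
        static void queue_my_balance_work(struct rq *rq)
        {
                queue_balance_callback(rq, &per_cpu(my_balance_head, cpu_of(rq)),
                                       my_balance_fn);
        }
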
3727 static __always_inline struct rq *
3728 context_switch(struct rq *rq, struct task_struct *prev, in context_switch() argument
3731 prepare_task_switch(rq, prev, next); in context_switch()
3756 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
3769 rq->prev_mm = prev->active_mm; in context_switch()
3774 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in context_switch()
3776 prepare_lock_switch(rq, next, rf); in context_switch()
3944 struct rq *rq; in task_sched_runtime() local
3963 rq = task_rq_lock(p, &rf); in task_sched_runtime()
3969 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
3971 update_rq_clock(rq); in task_sched_runtime()
3972 p->sched_class->update_curr(rq); in task_sched_runtime()
3975 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
3987 struct rq *rq = cpu_rq(cpu); in scheduler_tick() local
3988 struct task_struct *curr = rq->curr; in scheduler_tick()
3995 rq_lock(rq, &rf); in scheduler_tick()
3997 update_rq_clock(rq); in scheduler_tick()
3998 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); in scheduler_tick()
3999 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); in scheduler_tick()
4000 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
4001 calc_global_load_tick(rq); in scheduler_tick()
4002 psi_task_tick(rq); in scheduler_tick()
4004 rq_unlock(rq, &rf); in scheduler_tick()
4009 rq->idle_balance = idle_cpu(cpu); in scheduler_tick()
4010 trigger_load_balance(rq); in scheduler_tick()
4056 struct rq *rq = cpu_rq(cpu); in sched_tick_remote() local
4072 rq_lock_irq(rq, &rf); in sched_tick_remote()
4073 curr = rq->curr; in sched_tick_remote()
4077 update_rq_clock(rq); in sched_tick_remote()
4084 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
4087 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
4089 calc_load_nohz_remote(rq); in sched_tick_remote()
4091 rq_unlock_irq(rq, &rf); in sched_tick_remote()
4305 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, in put_prev_task_balance() argument
4319 if (class->balance(rq, prev, rf)) in put_prev_task_balance()
4324 put_prev_task(rq, prev); in put_prev_task_balance()
4331 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
4343 rq->nr_running == rq->cfs.h_nr_running)) { in pick_next_task()
4345 p = pick_next_task_fair(rq, prev, rf); in pick_next_task()
4351 put_prev_task(rq, prev); in pick_next_task()
4352 p = pick_next_task_idle(rq); in pick_next_task()
4359 put_prev_task_balance(rq, prev, rf); in pick_next_task()
4362 p = class->pick_next_task(rq); in pick_next_task()
4416 struct rq *rq; in __schedule() local
4420 rq = cpu_rq(cpu); in __schedule()
4421 prev = rq->curr; in __schedule()
4426 hrtick_clear(rq); in __schedule()
4446 rq_lock(rq, &rf); in __schedule()
4450 rq->clock_update_flags <<= 1; in __schedule()
4451 update_rq_clock(rq); in __schedule()
4473 rq->nr_uninterruptible++; in __schedule()
4486 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); in __schedule()
4489 atomic_inc(&rq->nr_iowait); in __schedule()
4496 next = pick_next_task(rq, prev, &rf); in __schedule()
4501 rq->nr_switches++; in __schedule()
4506 RCU_INIT_POINTER(rq->curr, next); in __schedule()
4528 rq = context_switch(rq, prev, next, &rf); in __schedule()
4530 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in __schedule()
4531 rq_unlock_irq(rq, &rf); in __schedule()
4534 balance_callback(rq); in __schedule()
4839 struct rq *rq; in rt_mutex_setprio() local
4850 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
4851 update_rq_clock(rq); in rt_mutex_setprio()
4882 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
4883 WARN_ON(p != rq->curr); in rt_mutex_setprio()
4896 running = task_current(rq, p); in rt_mutex_setprio()
4898 dequeue_task(rq, p, queue_flag); in rt_mutex_setprio()
4900 put_prev_task(rq, p); in rt_mutex_setprio()
4938 enqueue_task(rq, p, queue_flag); in rt_mutex_setprio()
4940 set_next_task(rq, p); in rt_mutex_setprio()
4942 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
4946 __task_rq_unlock(rq, &rf); in rt_mutex_setprio()
4948 balance_callback(rq); in rt_mutex_setprio()
4963 struct rq *rq; in set_user_nice() local
4971 rq = task_rq_lock(p, &rf); in set_user_nice()
4972 update_rq_clock(rq); in set_user_nice()
4985 running = task_current(rq, p); in set_user_nice()
4987 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in set_user_nice()
4989 put_prev_task(rq, p); in set_user_nice()
4997 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in set_user_nice()
4999 set_next_task(rq, p); in set_user_nice()
5005 p->sched_class->prio_changed(rq, p, old_prio); in set_user_nice()
5008 task_rq_unlock(rq, p, &rf); in set_user_nice()
5082 struct rq *rq = cpu_rq(cpu); in idle_cpu() local
5084 if (rq->curr != rq->idle) in idle_cpu()
5087 if (rq->nr_running) in idle_cpu()
5091 if (rq->ttwu_pending) in idle_cpu()
5169 static void __setscheduler(struct rq *rq, struct task_struct *p, in __setscheduler() argument
5225 struct rq *rq; in __sched_setscheduler() local
5333 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
5334 update_rq_clock(rq); in __sched_setscheduler()
5339 if (p == rq->stop) { in __sched_setscheduler()
5380 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
5388 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
5399 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
5432 running = task_current(rq, p); in __sched_setscheduler()
5434 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
5436 put_prev_task(rq, p); in __sched_setscheduler()
5440 __setscheduler(rq, p, attr, pi); in __sched_setscheduler()
5451 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
5454 set_next_task(rq, p); in __sched_setscheduler()
5456 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
5460 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
5468 balance_callback(rq); in __sched_setscheduler()
5474 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
6096 struct rq *rq; in do_sched_yield() local
6098 rq = this_rq_lock_irq(&rf); in do_sched_yield()
6100 schedstat_inc(rq->yld_count); in do_sched_yield()
6101 current->sched_class->yield_task(rq); in do_sched_yield()
6108 rq_unlock(rq, &rf); in do_sched_yield()
6208 struct rq *rq, *p_rq; in yield_to() local
6213 rq = this_rq(); in yield_to()
6221 if (rq->nr_running == 1 && p_rq->nr_running == 1) { in yield_to()
6226 double_rq_lock(rq, p_rq); in yield_to()
6228 double_rq_unlock(rq, p_rq); in yield_to()
6241 yielded = curr->sched_class->yield_to_task(rq, p); in yield_to()
6243 schedstat_inc(rq->yld_count); in yield_to()
6248 if (preempt && rq != p_rq) in yield_to()
6253 double_rq_unlock(rq, p_rq); in yield_to()
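
yield_to() above takes both runqueue locks with double_rq_lock()/double_rq_unlock(), which order the two locks to avoid deadlock and expect interrupts to be disabled by the caller. A hedged sketch of that pattern in isolation (busier_than() is a hypothetical helper):

        /* Hypothetical: compare the load of two CPUs with both rq locks held. */
        static bool busier_than(int this_cpu, int that_cpu)
        {
                struct rq *rq_a = cpu_rq(this_cpu), *rq_b = cpu_rq(that_cpu);
                unsigned long flags;
                bool busier;

                local_irq_save(flags);          /* double_rq_lock() expects irqs off */
                double_rq_lock(rq_a, rq_b);
                busier = rq_a->nr_running > rq_b->nr_running;
                double_rq_unlock(rq_a, rq_b);
                local_irq_restore(flags);

                return busier;
        }
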
6364 struct rq *rq; in sched_rr_get_interval() local
6380 rq = task_rq_lock(p, &rf); in sched_rr_get_interval()
6383 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
6384 task_rq_unlock(rq, p, &rf); in sched_rr_get_interval()
6524 struct rq *rq = cpu_rq(cpu); in init_idle() local
6530 raw_spin_lock(&rq->lock); in init_idle()
6562 rq->idle = idle; in init_idle()
6563 rcu_assign_pointer(rq->curr, idle); in init_idle()
6568 raw_spin_unlock(&rq->lock); in init_idle()
6656 struct rq *rq; in sched_setnuma() local
6658 rq = task_rq_lock(p, &rf); in sched_setnuma()
6660 running = task_current(rq, p); in sched_setnuma()
6663 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
6665 put_prev_task(rq, p); in sched_setnuma()
6670 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in sched_setnuma()
6672 set_next_task(rq, p); in sched_setnuma()
6673 task_rq_unlock(rq, p, &rf); in sched_setnuma()
6706 static void calc_load_migrate(struct rq *rq) in calc_load_migrate() argument
6708 long delta = calc_load_fold_active(rq, 1); in calc_load_migrate()
6713 static struct task_struct *__pick_migrate_task(struct rq *rq) in __pick_migrate_task() argument
6719 next = class->pick_next_task(rq); in __pick_migrate_task()
6721 next->sched_class->put_prev_task(rq, next); in __pick_migrate_task()
6738 static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) in migrate_tasks()
6740 struct rq *rq = dead_rq; in migrate_tasks() local
6741 struct task_struct *next, *stop = rq->stop; in migrate_tasks()
6754 rq->stop = NULL; in migrate_tasks()
6761 update_rq_clock(rq); in migrate_tasks()
6768 if (rq->nr_running == 1) in migrate_tasks()
6771 next = __pick_migrate_task(rq); in migrate_tasks()
6782 rq_unlock(rq, rf); in migrate_tasks()
6784 rq_relock(rq, rf); in migrate_tasks()
6791 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { in migrate_tasks()
6798 rq = __migrate_task(rq, rf, next, dest_cpu); in migrate_tasks()
6799 if (rq != dead_rq) { in migrate_tasks()
6800 rq_unlock(rq, rf); in migrate_tasks()
6801 rq = dead_rq; in migrate_tasks()
6803 rq_relock(rq, rf); in migrate_tasks()
6808 rq->stop = stop; in migrate_tasks()
6812 void set_rq_online(struct rq *rq) in set_rq_online() argument
6814 if (!rq->online) { in set_rq_online()
6817 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
6818 rq->online = 1; in set_rq_online()
6822 class->rq_online(rq); in set_rq_online()
6827 void set_rq_offline(struct rq *rq) in set_rq_offline() argument
6829 if (rq->online) { in set_rq_offline()
6834 class->rq_offline(rq); in set_rq_offline()
6837 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
6838 rq->online = 0; in set_rq_offline()
6892 struct rq *rq = cpu_rq(cpu); in sched_cpu_activate() local
6918 rq_lock_irqsave(rq, &rf); in sched_cpu_activate()
6919 if (rq->rd) { in sched_cpu_activate()
6920 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_activate()
6921 set_rq_online(rq); in sched_cpu_activate()
6923 rq_unlock_irqrestore(rq, &rf); in sched_cpu_activate()
6964 struct rq *rq = cpu_rq(cpu); in sched_rq_cpu_starting() local
6966 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
6980 struct rq *rq = cpu_rq(cpu); in sched_cpu_dying() local
6986 rq_lock_irqsave(rq, &rf); in sched_cpu_dying()
6987 if (rq->rd) { in sched_cpu_dying()
6988 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_dying()
6989 set_rq_offline(rq); in sched_cpu_dying()
6991 migrate_tasks(rq, &rf); in sched_cpu_dying()
6992 BUG_ON(rq->nr_running != 1); in sched_cpu_dying()
6993 rq_unlock_irqrestore(rq, &rf); in sched_cpu_dying()
6995 calc_load_migrate(rq); in sched_cpu_dying()
6997 nohz_balance_exit_idle(rq); in sched_cpu_dying()
6998 hrtick_clear(rq); in sched_cpu_dying()
7137 struct rq *rq; in sched_init() local
7139 rq = cpu_rq(i); in sched_init()
7140 raw_spin_lock_init(&rq->lock); in sched_init()
7141 rq->nr_running = 0; in sched_init()
7142 rq->calc_load_active = 0; in sched_init()
7143 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
7144 init_cfs_rq(&rq->cfs); in sched_init()
7145 init_rt_rq(&rq->rt); in sched_init()
7146 init_dl_rq(&rq->dl); in sched_init()
7148 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
7149 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
7169 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
7172 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; in sched_init()
7174 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
7177 rq->sd = NULL; in sched_init()
7178 rq->rd = NULL; in sched_init()
7179 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; in sched_init()
7180 rq->balance_callback = NULL; in sched_init()
7181 rq->active_balance = 0; in sched_init()
7182 rq->next_balance = jiffies; in sched_init()
7183 rq->push_cpu = 0; in sched_init()
7184 rq->cpu = i; in sched_init()
7185 rq->online = 0; in sched_init()
7186 rq->idle_stamp = 0; in sched_init()
7187 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
7188 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
7190 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
7192 rq_attach_root(rq, &def_root_domain); in sched_init()
7194 rq->last_blocked_load_update_tick = jiffies; in sched_init()
7195 atomic_set(&rq->nohz_flags, 0); in sched_init()
7197 rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func); in sched_init()
7200 hrtick_rq_init(rq); in sched_init()
7201 atomic_set(&rq->nr_iowait, 0); in sched_init()
7553 struct rq *rq; in sched_move_task() local
7555 rq = task_rq_lock(tsk, &rf); in sched_move_task()
7556 update_rq_clock(rq); in sched_move_task()
7558 running = task_current(rq, tsk); in sched_move_task()
7562 dequeue_task(rq, tsk, queue_flags); in sched_move_task()
7564 put_prev_task(rq, tsk); in sched_move_task()
7569 enqueue_task(rq, tsk, queue_flags); in sched_move_task()
7571 set_next_task(rq, tsk); in sched_move_task()
7577 resched_curr(rq); in sched_move_task()
7580 task_rq_unlock(rq, tsk, &rf); in sched_move_task()
7647 struct rq *rq; in cpu_cgroup_fork() local
7649 rq = task_rq_lock(task, &rf); in cpu_cgroup_fork()
7651 update_rq_clock(rq); in cpu_cgroup_fork()
7654 task_rq_unlock(rq, task, &rf); in cpu_cgroup_fork()
7960 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth() local
7963 rq_lock_irq(rq, &rf); in tg_set_cfs_bandwidth()
7969 rq_unlock_irq(rq, &rf); in tg_set_cfs_bandwidth()
8486 void call_trace_sched_update_nr_running(struct rq *rq, int count) in call_trace_sched_update_nr_running() argument
8488 trace_sched_update_nr_running_tp(rq, count); in call_trace_sched_update_nr_running()