Lines matching refs: rq

45 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
170 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
172 rq->core->core_task_seq++; in sched_core_enqueue()
177 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
180 void sched_core_dequeue(struct rq *rq, struct task_struct *p) in sched_core_dequeue() argument
182 rq->core->core_task_seq++; in sched_core_dequeue()
187 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
194 static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie) in sched_core_find() argument
198 node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp); in sched_core_find()
203 return idle_sched_class.pick_task(rq); in sched_core_find()
362 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { } in sched_core_enqueue() argument
363 static inline void sched_core_dequeue(struct rq *rq, struct task_struct *p) { } in sched_core_dequeue() argument
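
The paths above build and search a per-runqueue rb-tree of tasks keyed by their core-scheduling cookie; when no runnable task carries the wanted cookie, the pick falls back to the idle class. A minimal user-space sketch of that lookup over a cookie-sorted array (core_find(), struct task and the array layout are illustrative, not kernel API):

    #include <stddef.h>

    struct task {
        unsigned long cookie;   /* core-scheduling cookie, 0 = no cookie */
    };

    /* tasks[] is assumed sorted by cookie, as rq->core_tree is in the listing;
     * return the first task whose cookie matches, or NULL meaning "pick idle" */
    static struct task *core_find(struct task *tasks, size_t n, unsigned long cookie)
    {
        for (size_t i = 0; i < n; i++) {
            if (tasks[i].cookie == cookie)
                return &tasks[i];
            if (tasks[i].cookie > cookie)
                break;          /* sorted: no match further right */
        }
        return NULL;            /* caller falls back to the idle task */
    }
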
467 void raw_spin_rq_lock_nested(struct rq *rq, int subclass) in raw_spin_rq_lock_nested() argument
474 raw_spin_lock_nested(&rq->__lock, subclass); in raw_spin_rq_lock_nested()
481 lock = __rq_lockp(rq); in raw_spin_rq_lock_nested()
483 if (likely(lock == __rq_lockp(rq))) { in raw_spin_rq_lock_nested()
492 bool raw_spin_rq_trylock(struct rq *rq) in raw_spin_rq_trylock() argument
500 ret = raw_spin_trylock(&rq->__lock); in raw_spin_rq_trylock()
506 lock = __rq_lockp(rq); in raw_spin_rq_trylock()
508 if (!ret || (likely(lock == __rq_lockp(rq)))) { in raw_spin_rq_trylock()
516 void raw_spin_rq_unlock(struct rq *rq) in raw_spin_rq_unlock() argument
518 raw_spin_unlock(rq_lockp(rq)); in raw_spin_rq_unlock()
525 void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
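
double_rq_lock() has to take two runqueue locks at once, which is deadlock-prone unless every CPU agrees on an order. A hedged user-space sketch of the idea using pthread mutexes ordered by address (the kernel orders the two runqueues by CPU rather than by pointer):

    #include <pthread.h>
    #include <stdint.h>

    /* lock two locks in a globally consistent order so that two threads
     * locking the same pair can never deadlock */
    static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if (a == b) {
            pthread_mutex_lock(a);
            return;
        }
        if ((uintptr_t)a > (uintptr_t)b) {      /* impose a fixed order */
            pthread_mutex_t *tmp = a;
            a = b;
            b = tmp;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
    }
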
543 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
544 __acquires(rq->lock) in __task_rq_lock()
546 struct rq *rq; in __task_rq_lock() local
551 rq = task_rq(p); in __task_rq_lock()
552 raw_spin_rq_lock(rq); in __task_rq_lock()
553 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
554 rq_pin_lock(rq, rf); in __task_rq_lock()
555 return rq; in __task_rq_lock()
557 raw_spin_rq_unlock(rq); in __task_rq_lock()
567 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
569 __acquires(rq->lock) in task_rq_lock()
571 struct rq *rq; in task_rq_lock() local
575 rq = task_rq(p); in task_rq_lock()
576 raw_spin_rq_lock(rq); in task_rq_lock()
594 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
595 rq_pin_lock(rq, rf); in task_rq_lock()
596 return rq; in task_rq_lock()
598 raw_spin_rq_unlock(rq); in task_rq_lock()
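
Both the raw_spin_rq_lock_nested()/raw_spin_rq_trylock() lines and the __task_rq_lock()/task_rq_lock() lines above follow the same optimistic pattern: read which lock (or which runqueue) currently applies, acquire it, then re-check that nothing changed while the acquisition was in flight, retrying otherwise. A minimal user-space analogue using pthreads and C11 atomics (struct item, struct bucket and item_lock() are placeholders, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>

    struct bucket {
        pthread_mutex_t lock;
        /* ... per-bucket state ... */
    };

    struct item {
        _Atomic(struct bucket *) home;  /* bucket the item currently lives on */
    };

    /* acquire the lock of the item's current bucket, then make sure the item
     * did not migrate to another bucket while we were blocking on the lock */
    static struct bucket *item_lock(struct item *it)
    {
        for (;;) {
            struct bucket *b = atomic_load(&it->home);

            pthread_mutex_lock(&b->lock);
            if (b == atomic_load(&it->home))
                return b;                    /* still the right bucket: done */
            pthread_mutex_unlock(&b->lock);  /* it moved underneath us: retry */
        }
    }
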
610 static void update_rq_clock_task(struct rq *rq, s64 delta) in update_rq_clock_task() argument
619 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
639 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
644 steal = paravirt_steal_clock(cpu_of(rq)); in update_rq_clock_task()
645 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
650 rq->prev_steal_time_rq += steal; in update_rq_clock_task()
655 rq->clock_task += delta; in update_rq_clock_task()
659 update_irq_load_avg(rq, irq_delta + steal); in update_rq_clock_task()
661 update_rq_clock_pelt(rq, delta); in update_rq_clock_task()
664 void update_rq_clock(struct rq *rq) in update_rq_clock() argument
668 lockdep_assert_rq_held(rq); in update_rq_clock()
670 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
675 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
676 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
679 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
682 rq->clock += delta; in update_rq_clock()
683 update_rq_clock_task(rq, delta); in update_rq_clock()
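
update_rq_clock() advances the runqueue clock by the raw time that passed, while update_rq_clock_task() deducts hard/soft-irq time and paravirt steal time before advancing the task clock, so runtime accounting is not inflated by time the CPU spent elsewhere. A hedged standalone sketch of that bookkeeping (struct clk and clock_update() are illustrative; irq_time and steal_time stand for the monotonically increasing counters the kernel reads):

    #include <stdint.h>

    struct clk {
        uint64_t clock;          /* wall clock for the runqueue */
        uint64_t clock_task;     /* clock excluding irq and steal time */
        uint64_t prev_irq_time;
        uint64_t prev_steal_time;
    };

    static void clock_update(struct clk *c, uint64_t now,
                             uint64_t irq_time, uint64_t steal_time)
    {
        uint64_t delta = now - c->clock;
        uint64_t irq_delta = irq_time - c->prev_irq_time;
        uint64_t steal = steal_time - c->prev_steal_time;

        c->clock += delta;

        /* time spent servicing interrupts does not count as task runtime */
        if (irq_delta > delta)
            irq_delta = delta;
        c->prev_irq_time += irq_delta;
        delta -= irq_delta;

        /* nor does time stolen by the hypervisor */
        if (steal > delta)
            steal = delta;
        c->prev_steal_time += steal;
        delta -= steal;

        c->clock_task += delta;
    }
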
691 static void hrtick_clear(struct rq *rq) in hrtick_clear() argument
693 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
694 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
703 struct rq *rq = container_of(timer, struct rq, hrtick_timer); in hrtick() local
706 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); in hrtick()
708 rq_lock(rq, &rf); in hrtick()
709 update_rq_clock(rq); in hrtick()
710 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
711 rq_unlock(rq, &rf); in hrtick()
718 static void __hrtick_restart(struct rq *rq) in __hrtick_restart() argument
720 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
721 ktime_t time = rq->hrtick_time; in __hrtick_restart()
731 struct rq *rq = arg; in __hrtick_start() local
734 rq_lock(rq, &rf); in __hrtick_start()
735 __hrtick_restart(rq); in __hrtick_start()
736 rq_unlock(rq, &rf); in __hrtick_start()
744 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
746 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
754 rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); in hrtick_start()
756 if (rq == this_rq()) in hrtick_start()
757 __hrtick_restart(rq); in hrtick_start()
759 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
768 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
775 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
781 static void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
784 INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); in hrtick_rq_init()
786 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in hrtick_rq_init()
787 rq->hrtick_timer.function = hrtick; in hrtick_rq_init()
790 static inline void hrtick_clear(struct rq *rq) in hrtick_clear() argument
794 static inline void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
961 void resched_curr(struct rq *rq) in resched_curr() argument
963 struct task_struct *curr = rq->curr; in resched_curr()
966 lockdep_assert_rq_held(rq); in resched_curr()
971 cpu = cpu_of(rq); in resched_curr()
987 struct rq *rq = cpu_rq(cpu); in resched_cpu() local
990 raw_spin_rq_lock_irqsave(rq, flags); in resched_cpu()
992 resched_curr(rq); in resched_cpu()
993 raw_spin_rq_unlock_irqrestore(rq, flags); in resched_cpu()
1053 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu() local
1058 if (set_nr_and_not_polling(rq->idle)) in wake_up_idle_cpu()
1097 struct rq *rq = info; in nohz_csd_func() local
1098 int cpu = cpu_of(rq); in nohz_csd_func()
1107 rq->idle_balance = idle_cpu(cpu); in nohz_csd_func()
1108 if (rq->idle_balance && !need_resched()) { in nohz_csd_func()
1109 rq->nohz_idle_balance = flags; in nohz_csd_func()
1117 bool sched_can_stop_tick(struct rq *rq) in sched_can_stop_tick() argument
1122 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
1129 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
1130 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
1140 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
1149 if (rq->nr_running > 1) in sched_can_stop_tick()
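
The checks above decide whether the periodic tick can be stopped on a nohz_full CPU: deadline tasks always need it, a lone SCHED_RR task does not, SCHED_FIFO tasks never do, and CFS needs it only once more than one task is runnable. A compact restatement as a pure function (struct rq_counts and can_stop_tick() are illustrative names):

    #include <stdbool.h>

    struct rq_counts {
        unsigned int dl_nr_running;   /* SCHED_DEADLINE tasks */
        unsigned int rt_nr_running;   /* all realtime tasks */
        unsigned int rr_nr_running;   /* SCHED_RR subset of the above */
        unsigned int nr_running;      /* total runnable tasks */
    };

    static bool can_stop_tick(const struct rq_counts *c)
    {
        if (c->dl_nr_running)                    /* deadline needs the tick */
            return false;
        if (c->rr_nr_running)                    /* RR needs it unless alone */
            return c->rr_nr_running == 1;
        if (c->rt_nr_running - c->rr_nr_running) /* FIFO never needs it */
            return true;
        return c->nr_running <= 1;               /* CFS: only a single task */
    }
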
1314 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, in uclamp_idle_value() argument
1323 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; in uclamp_idle_value()
1330 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, in uclamp_idle_reset() argument
1334 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_idle_reset()
1337 WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value); in uclamp_idle_reset()
1341 unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, in uclamp_rq_max_value() argument
1344 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; in uclamp_rq_max_value()
1358 return uclamp_idle_value(rq, clamp_id, clamp_value); in uclamp_rq_max_value()
1381 struct rq *rq; in uclamp_update_util_min_rt_default() local
1387 rq = task_rq_lock(p, &rf); in uclamp_update_util_min_rt_default()
1389 task_rq_unlock(rq, p, &rf); in uclamp_update_util_min_rt_default()
1490 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_inc_id() argument
1493 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_inc_id()
1497 lockdep_assert_rq_held(rq); in uclamp_rq_inc_id()
1506 uclamp_idle_reset(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
1528 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, in uclamp_rq_dec_id() argument
1531 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_dec_id()
1537 lockdep_assert_rq_held(rq); in uclamp_rq_dec_id()
1589 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); in uclamp_rq_dec_id()
1594 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) in uclamp_rq_inc() argument
1611 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_inc()
1614 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_inc()
1615 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_inc()
1618 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) in uclamp_rq_dec() argument
1635 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_dec()
1638 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_reinc_id() argument
1644 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1645 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1651 if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_rq_reinc_id()
1652 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_reinc_id()
1660 struct rq *rq; in uclamp_update_active() local
1670 rq = task_rq_lock(p, &rf); in uclamp_update_active()
1679 uclamp_rq_reinc_id(rq, p, clamp_id); in uclamp_update_active()
1681 task_rq_unlock(rq, p, &rf); in uclamp_update_active()
1906 static void __init init_uclamp_rq(struct rq *rq) in init_uclamp_rq() argument
1909 struct uclamp_rq *uc_rq = rq->uclamp; in init_uclamp_rq()
1917 rq->uclamp_flags = 0; in init_uclamp_rq()
1946 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } in uclamp_rq_inc() argument
1947 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } in uclamp_rq_dec() argument
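
The uclamp_* lines above maintain per-runqueue min/max utilization clamps aggregated from the runnable tasks; consumers such as schedutil see the CPU's utilization only after it has been squeezed into that window, with the boost (min) winning if the two ever conflict. A hedged sketch of the final clamp step (uclamp_apply() and the uclamp[] array are illustrative stand-ins for rq->uclamp[clamp_id].value):

    #include <stdint.h>

    enum { UCLAMP_MIN, UCLAMP_MAX, UCLAMP_CNT };

    static uint64_t uclamp_apply(uint64_t util, const uint64_t uclamp[UCLAMP_CNT])
    {
        uint64_t lo = uclamp[UCLAMP_MIN];
        uint64_t hi = uclamp[UCLAMP_MAX];

        if (lo > hi)        /* conflicting requests: the boost wins */
            hi = lo;
        if (util < lo)
            util = lo;
        if (util > hi)
            util = hi;
        return util;
    }
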
1965 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
1968 update_rq_clock(rq); in enqueue_task()
1971 sched_info_enqueue(rq, p); in enqueue_task()
1975 uclamp_rq_inc(rq, p); in enqueue_task()
1976 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
1978 if (sched_core_enabled(rq)) in enqueue_task()
1979 sched_core_enqueue(rq, p); in enqueue_task()
1982 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
1984 if (sched_core_enabled(rq)) in dequeue_task()
1985 sched_core_dequeue(rq, p); in dequeue_task()
1988 update_rq_clock(rq); in dequeue_task()
1991 sched_info_dequeue(rq, p); in dequeue_task()
1995 uclamp_rq_dec(rq, p); in dequeue_task()
1996 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
1999 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
2001 enqueue_task(rq, p, flags); in activate_task()
2006 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
2010 dequeue_task(rq, p, flags); in deactivate_task()
2077 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
2083 prev_class->switched_from(rq, p); in check_class_changed()
2085 p->sched_class->switched_to(rq, p); in check_class_changed()
2087 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2090 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
2092 if (p->sched_class == rq->curr->sched_class) in check_preempt_curr()
2093 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
2094 else if (p->sched_class > rq->curr->sched_class) in check_preempt_curr()
2095 resched_curr(rq); in check_preempt_curr()
2101 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in check_preempt_curr()
2102 rq_clock_skip_update(rq); in check_preempt_curr()
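
check_preempt_curr() relies on the strict ordering of scheduling classes: a task from a higher class than the one currently running always triggers a reschedule, a task from the same class lets that class's own hook decide, and a lower class never preempts. A small illustrative restatement (the enum and should_preempt() are placeholders, not kernel definitions):

    #include <stdbool.h>

    /* scheduling classes in increasing priority order */
    enum class_id { CLASS_IDLE, CLASS_FAIR, CLASS_RT, CLASS_DL, CLASS_STOP };

    static bool should_preempt(enum class_id curr, enum class_id woken,
                               bool same_class_decision)
    {
        if (woken > curr)
            return true;                /* higher class: resched_curr() */
        if (woken == curr)
            return same_class_decision; /* delegate to the class hook */
        return false;                   /* lower class never preempts */
    }
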
2114 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) in migrate_disable_switch() argument
2125 __do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE); in migrate_disable_switch()
2172 static inline bool rq_has_pinned_tasks(struct rq *rq) in rq_has_pinned_tasks() argument
2174 return rq->nr_pinned; in rq_has_pinned_tasks()
2226 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task() argument
2229 lockdep_assert_rq_held(rq); in move_queued_task()
2231 deactivate_task(rq, p, DEQUEUE_NOCLOCK); in move_queued_task()
2233 rq_unlock(rq, rf); in move_queued_task()
2235 rq = cpu_rq(new_cpu); in move_queued_task()
2237 rq_lock(rq, rf); in move_queued_task()
2239 activate_task(rq, p, 0); in move_queued_task()
2240 check_preempt_curr(rq, p, 0); in move_queued_task()
2242 return rq; in move_queued_task()
2272 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task() argument
2277 return rq; in __migrate_task()
2279 update_rq_clock(rq); in __migrate_task()
2280 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
2282 return rq; in __migrate_task()
2295 struct rq *rq = this_rq(); in migration_cpu_stop() local
2312 rq_lock(rq, &rf); in migration_cpu_stop()
2325 if (task_rq(p) == rq) { in migration_cpu_stop()
2338 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2376 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2384 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2394 struct rq *lowest_rq = NULL, *rq = this_rq(); in push_cpu_stop() local
2398 raw_spin_rq_lock(rq); in push_cpu_stop()
2400 if (task_rq(p) != rq) in push_cpu_stop()
2411 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2417 if (task_rq(p) == rq) { in push_cpu_stop()
2418 deactivate_task(rq, p, 0); in push_cpu_stop()
2424 double_unlock_balance(rq, lowest_rq); in push_cpu_stop()
2427 rq->push_busy = false; in push_cpu_stop()
2428 raw_spin_rq_unlock(rq); in push_cpu_stop()
2453 struct rq *rq = task_rq(p); in __do_set_cpus_allowed() local
2474 running = task_current(rq, p); in __do_set_cpus_allowed()
2481 lockdep_assert_rq_held(rq); in __do_set_cpus_allowed()
2482 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2485 put_prev_task(rq, p); in __do_set_cpus_allowed()
2490 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2492 set_next_task(rq, p); in __do_set_cpus_allowed()
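
__do_set_cpus_allowed() above shows the scheduler's standard shape for changing an attribute that affects a task's queue position: if the task is queued it is dequeued first, if it is current it is put back with put_prev_task(), the change is applied, and then the task is re-enqueued and re-set as next. The same dance reappears later in rt_mutex_setprio(), set_user_nice(), __sched_setscheduler(), sched_setnuma() and sched_move_task(). A tiny runnable analogue with a sorted list (all names here are illustrative):

    #include <stddef.h>

    struct node {
        int key;
        struct node *next;
    };

    static void list_remove(struct node **head, struct node *n)
    {
        while (*head != n)
            head = &(*head)->next;
        *head = n->next;
    }

    static void list_insert_sorted(struct node **head, struct node *n)
    {
        while (*head && (*head)->key <= n->key)
            head = &(*head)->next;
        n->next = *head;
        *head = n;
    }

    /* an element whose ordering key changes must leave the sorted structure
     * first and rejoin afterwards, exactly as a queued task leaves the
     * runqueue around a priority or class change */
    static void change_key(struct node **head, struct node *n, int new_key)
    {
        list_remove(head, n);          /* dequeue_task() */
        n->key = new_key;              /* the attribute change */
        list_insert_sorted(head, n);   /* enqueue_task() */
    }
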
2604 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, in affine_move_task() argument
2615 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2616 rq->push_busy = true; in affine_move_task()
2630 task_rq_unlock(rq, p, rf); in affine_move_task()
2633 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, in affine_move_task()
2634 p, &rq->push_work); in affine_move_task()
2684 task_rq_unlock(rq, p, rf); in affine_move_task()
2688 if (task_running(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
2701 task_rq_unlock(rq, p, rf); in affine_move_task()
2704 stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, in affine_move_task()
2714 rq = move_queued_task(rq, rf, p, dest_cpu); in affine_move_task()
2721 task_rq_unlock(rq, p, rf); in affine_move_task()
2750 struct rq *rq, in __set_cpus_allowed_ptr_locked() argument
2752 __releases(rq->lock) in __set_cpus_allowed_ptr_locked()
2762 update_rq_clock(rq); in __set_cpus_allowed_ptr_locked()
2820 ret = affine_move_task(rq, p, rf, dest_cpu, flags); in __set_cpus_allowed_ptr_locked()
2827 task_rq_unlock(rq, p, rf); in __set_cpus_allowed_ptr_locked()
2845 struct rq *rq; in __set_cpus_allowed_ptr() local
2847 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
2848 return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf); in __set_cpus_allowed_ptr()
2870 struct rq *rq; in restrict_cpus_allowed_ptr() local
2879 rq = task_rq_lock(p, &rf); in restrict_cpus_allowed_ptr()
2905 return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf); in restrict_cpus_allowed_ptr()
2908 task_rq_unlock(rq, p, &rf); in restrict_cpus_allowed_ptr()
3048 struct rq *src_rq, *dst_rq; in __migrate_swap_task()
3083 struct rq *src_rq, *dst_rq; in migrate_swap_stop()
3182 struct rq *rq; in wait_task_inactive() local
3191 rq = task_rq(p); in wait_task_inactive()
3204 while (task_running(rq, p)) { in wait_task_inactive()
3215 rq = task_rq_lock(p, &rf); in wait_task_inactive()
3217 running = task_running(rq, p); in wait_task_inactive()
3222 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
3470 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } in migrate_disable_switch() argument
3472 static inline bool rq_has_pinned_tasks(struct rq *rq) in rq_has_pinned_tasks() argument
3482 struct rq *rq; in ttwu_stat() local
3487 rq = this_rq(); in ttwu_stat()
3490 if (cpu == rq->cpu) { in ttwu_stat()
3491 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
3498 for_each_domain(rq->cpu, sd) { in ttwu_stat()
3511 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
3521 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_wakeup() argument
3524 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
3534 rq_unpin_lock(rq, rf); in ttwu_do_wakeup()
3535 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
3536 rq_repin_lock(rq, rf); in ttwu_do_wakeup()
3539 if (rq->idle_stamp) { in ttwu_do_wakeup()
3540 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_wakeup()
3541 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_wakeup()
3543 update_avg(&rq->avg_idle, delta); in ttwu_do_wakeup()
3545 if (rq->avg_idle > max) in ttwu_do_wakeup()
3546 rq->avg_idle = max; in ttwu_do_wakeup()
3548 rq->wake_stamp = jiffies; in ttwu_do_wakeup()
3549 rq->wake_avg_idle = rq->avg_idle / 2; in ttwu_do_wakeup()
3551 rq->idle_stamp = 0; in ttwu_do_wakeup()
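
The idle_stamp handling above keeps a decaying average of how long the CPU sleeps between wakeups, clamped to twice max_idle_balance_cost so one very long idle period cannot skew later newidle-balance decisions. A hedged sketch of that update (account_idle_period() is an illustrative name; the /8 weight mirrors the scheduler's update_avg() helper):

    #include <stdint.h>

    static void account_idle_period(uint64_t *avg_idle, uint64_t idle_delta,
                                    uint64_t max_idle_balance_cost)
    {
        uint64_t max = 2 * max_idle_balance_cost;
        int64_t diff = (int64_t)(idle_delta - *avg_idle);

        *avg_idle += diff / 8;          /* exponentially decaying average */
        if (*avg_idle > max)
            *avg_idle = max;            /* cap the influence of long sleeps */
    }
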
3557 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_activate() argument
3562 lockdep_assert_rq_held(rq); in ttwu_do_activate()
3565 rq->nr_uninterruptible--; in ttwu_do_activate()
3577 activate_task(rq, p, en_flags); in ttwu_do_activate()
3578 ttwu_do_wakeup(rq, p, wake_flags, rf); in ttwu_do_activate()
3609 struct rq *rq; in ttwu_runnable() local
3612 rq = __task_rq_lock(p, &rf); in ttwu_runnable()
3615 update_rq_clock(rq); in ttwu_runnable()
3616 ttwu_do_wakeup(rq, p, wake_flags, &rf); in ttwu_runnable()
3619 __task_rq_unlock(rq, &rf); in ttwu_runnable()
3628 struct rq *rq = this_rq(); in sched_ttwu_pending() local
3640 WRITE_ONCE(rq->ttwu_pending, 0); in sched_ttwu_pending()
3642 rq_lock_irqsave(rq, &rf); in sched_ttwu_pending()
3643 update_rq_clock(rq); in sched_ttwu_pending()
3649 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) in sched_ttwu_pending()
3650 set_task_cpu(p, cpu_of(rq)); in sched_ttwu_pending()
3652 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3655 rq_unlock_irqrestore(rq, &rf); in sched_ttwu_pending()
3660 struct rq *rq = cpu_rq(cpu); in send_call_function_single_ipi() local
3662 if (!set_nr_if_polling(rq->idle)) in send_call_function_single_ipi()
3676 struct rq *rq = cpu_rq(cpu); in __ttwu_queue_wakelist() local
3680 WRITE_ONCE(rq->ttwu_pending, 1); in __ttwu_queue_wakelist()
3686 struct rq *rq = cpu_rq(cpu); in wake_up_if_idle() local
3691 if (!is_idle_task(rcu_dereference(rq->curr))) in wake_up_if_idle()
3694 if (set_nr_if_polling(rq->idle)) { in wake_up_if_idle()
3697 rq_lock_irqsave(rq, &rf); in wake_up_if_idle()
3698 if (is_idle_task(rq->curr)) in wake_up_if_idle()
3701 rq_unlock_irqrestore(rq, &rf); in wake_up_if_idle()
3766 struct rq *rq = cpu_rq(cpu); in ttwu_queue() local
3772 rq_lock(rq, &rf); in ttwu_queue()
3773 update_rq_clock(rq); in ttwu_queue()
3774 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
3775 rq_unlock(rq, &rf); in ttwu_queue()
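
ttwu_queue()/sched_ttwu_pending()/__ttwu_queue_wakelist() above implement remote wakeups: the waking CPU pushes the task onto the target's lock-free wake list, marks ttwu_pending and, when needed, sends an IPI; the target later drains the whole list under its own runqueue lock. A simplified user-space analogue with C11 atomics (the struct names and the pending flag handling are illustrative; the memory-ordering details and IPI plumbing of the real code are omitted):

    #include <stdatomic.h>
    #include <stddef.h>

    struct wake_entry {
        struct wake_entry *next;
        /* ... the task being woken ... */
    };

    struct remote_queue {
        _Atomic(struct wake_entry *) head;   /* stands in for the wake list */
        atomic_int pending;                  /* stands in for rq->ttwu_pending */
    };

    /* producer side: push the entry with a lock-free CAS loop and raise
     * "pending" so the target CPU knows there is work to drain */
    static void remote_enqueue(struct remote_queue *q, struct wake_entry *e)
    {
        struct wake_entry *old = atomic_load(&q->head);

        do {
            e->next = old;
        } while (!atomic_compare_exchange_weak(&q->head, &old, e));

        atomic_store(&q->pending, 1);
    }

    /* consumer side: clear "pending", then grab the whole list in one atomic
     * exchange and process it under the local lock (omitted here) */
    static struct wake_entry *remote_drain(struct remote_queue *q)
    {
        atomic_store(&q->pending, 0);
        return atomic_exchange(&q->head, NULL);
    }
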
4130 struct rq *rq; in try_invoke_on_locked_down_task() local
4134 rq = __task_rq_lock(p, &rf); in try_invoke_on_locked_down_task()
4135 if (task_rq(p) == rq) in try_invoke_on_locked_down_task()
4137 rq_unlock(rq, &rf); in try_invoke_on_locked_down_task()
4442 struct rq *rq; in wake_up_new_task() local
4459 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
4460 update_rq_clock(rq); in wake_up_new_task()
4463 activate_task(rq, p, ENQUEUE_NOCLOCK); in wake_up_new_task()
4465 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
4472 rq_unpin_lock(rq, &rf); in wake_up_new_task()
4473 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4474 rq_repin_lock(rq, &rf); in wake_up_new_task()
4477 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
4600 static void do_balance_callbacks(struct rq *rq, struct callback_head *head) in do_balance_callbacks() argument
4602 void (*func)(struct rq *rq); in do_balance_callbacks()
4605 lockdep_assert_rq_held(rq); in do_balance_callbacks()
4608 func = (void (*)(struct rq *))head->func; in do_balance_callbacks()
4613 func(rq); in do_balance_callbacks()
4617 static void balance_push(struct rq *rq);
4624 static inline struct callback_head *splice_balance_callbacks(struct rq *rq) in splice_balance_callbacks() argument
4626 struct callback_head *head = rq->balance_callback; in splice_balance_callbacks()
4628 lockdep_assert_rq_held(rq); in splice_balance_callbacks()
4630 rq->balance_callback = NULL; in splice_balance_callbacks()
4635 static void __balance_callbacks(struct rq *rq) in __balance_callbacks() argument
4637 do_balance_callbacks(rq, splice_balance_callbacks(rq)); in __balance_callbacks()
4640 static inline void balance_callbacks(struct rq *rq, struct callback_head *head) in balance_callbacks() argument
4645 raw_spin_rq_lock_irqsave(rq, flags); in balance_callbacks()
4646 do_balance_callbacks(rq, head); in balance_callbacks()
4647 raw_spin_rq_unlock_irqrestore(rq, flags); in balance_callbacks()
4653 static inline void __balance_callbacks(struct rq *rq) in __balance_callbacks() argument
4657 static inline struct callback_head *splice_balance_callbacks(struct rq *rq) in splice_balance_callbacks() argument
4662 static inline void balance_callbacks(struct rq *rq, struct callback_head *head) in balance_callbacks() argument
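
The balance-callback lines above implement deferred work that must run after the runqueue lock juggling is done: callbacks are queued on rq->balance_callback, then spliced off in one step and executed exactly once. A small illustrative sketch of that splice-and-run shape (struct ctx, struct callback and run_callbacks() are placeholders for the kernel's callback_head machinery):

    #include <stddef.h>

    struct ctx;                         /* stands in for struct rq */

    struct callback {
        struct callback *next;
        void (*func)(struct ctx *);
    };

    /* detach the pending list in one step, then run each entry once; saving
     * "next" before the call lets a callback re-queue itself safely */
    static void run_callbacks(struct callback **list, struct ctx *c)
    {
        struct callback *head = *list;  /* splice: take the whole list */

        *list = NULL;
        while (head) {
            struct callback *next = head->next;

            head->func(c);
            head = next;
        }
    }
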
4669 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) in prepare_lock_switch() argument
4677 rq_unpin_lock(rq, rf); in prepare_lock_switch()
4678 spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); in prepare_lock_switch()
4681 rq_lockp(rq)->owner = next; in prepare_lock_switch()
4685 static inline void finish_lock_switch(struct rq *rq) in finish_lock_switch() argument
4692 spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
4693 __balance_callbacks(rq); in finish_lock_switch()
4694 raw_spin_rq_unlock_irq(rq); in finish_lock_switch()
4739 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
4743 sched_info_switch(rq, prev, next); in prepare_task_switch()
4771 static struct rq *finish_task_switch(struct task_struct *prev) in finish_task_switch()
4772 __releases(rq->lock) in finish_task_switch()
4774 struct rq *rq = this_rq(); in finish_task_switch() local
4775 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
4794 rq->prev_mm = NULL; in finish_task_switch()
4812 finish_lock_switch(rq); in finish_task_switch()
4857 return rq; in finish_task_switch()
4865 __releases(rq->lock) in schedule_tail()
4888 static __always_inline struct rq *
4889 context_switch(struct rq *rq, struct task_struct *prev, in context_switch() argument
4892 prepare_task_switch(rq, prev, next); in context_switch()
4917 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
4930 rq->prev_mm = prev->active_mm; in context_switch()
4935 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in context_switch()
4937 prepare_lock_switch(rq, next, rf); in context_switch()
5105 struct rq *rq; in task_sched_runtime() local
5124 rq = task_rq_lock(p, &rf); in task_sched_runtime()
5130 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
5132 update_rq_clock(rq); in task_sched_runtime()
5133 p->sched_class->update_curr(rq); in task_sched_runtime()
5136 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
5142 static u64 cpu_resched_latency(struct rq *rq) in cpu_resched_latency() argument
5145 u64 resched_latency, now = rq_clock(rq); in cpu_resched_latency()
5157 if (!rq->last_seen_need_resched_ns) { in cpu_resched_latency()
5158 rq->last_seen_need_resched_ns = now; in cpu_resched_latency()
5159 rq->ticks_without_resched = 0; in cpu_resched_latency()
5163 rq->ticks_without_resched++; in cpu_resched_latency()
5164 resched_latency = now - rq->last_seen_need_resched_ns; in cpu_resched_latency()
5187 static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } in cpu_resched_latency() argument
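
cpu_resched_latency() above measures how long a pending reschedule has been ignored: the first tick that sees need_resched set records a timestamp, later ticks report the elapsed time, and the real code warns once a configurable threshold is crossed (__schedule() clears the timestamp again, see line 6255 in the listing). A hedged restatement that folds the clearing into one function for brevity (struct resched_track and resched_latency() are illustrative names; the warn-threshold handling is omitted):

    #include <stdint.h>

    struct resched_track {
        uint64_t last_seen_need_resched_ns;
        unsigned int ticks_without_resched;
    };

    static uint64_t resched_latency(struct resched_track *t, uint64_t now,
                                    int need_resched)
    {
        if (!need_resched) {
            t->last_seen_need_resched_ns = 0;   /* done in __schedule() in the kernel */
            return 0;
        }
        if (!t->last_seen_need_resched_ns) {
            t->last_seen_need_resched_ns = now; /* first tick with resched pending */
            t->ticks_without_resched = 0;
            return 0;
        }
        t->ticks_without_resched++;
        return now - t->last_seen_need_resched_ns;
    }
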
5197 struct rq *rq = cpu_rq(cpu); in scheduler_tick() local
5198 struct task_struct *curr = rq->curr; in scheduler_tick()
5206 rq_lock(rq, &rf); in scheduler_tick()
5208 update_rq_clock(rq); in scheduler_tick()
5209 thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); in scheduler_tick()
5210 update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); in scheduler_tick()
5211 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
5213 resched_latency = cpu_resched_latency(rq); in scheduler_tick()
5214 calc_global_load_tick(rq); in scheduler_tick()
5216 rq_unlock(rq, &rf); in scheduler_tick()
5224 rq->idle_balance = idle_cpu(cpu); in scheduler_tick()
5225 trigger_load_balance(rq); in scheduler_tick()
5271 struct rq *rq = cpu_rq(cpu); in sched_tick_remote() local
5287 rq_lock_irq(rq, &rf); in sched_tick_remote()
5288 curr = rq->curr; in sched_tick_remote()
5292 update_rq_clock(rq); in sched_tick_remote()
5299 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
5302 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
5304 calc_load_nohz_remote(rq); in sched_tick_remote()
5306 rq_unlock_irq(rq, &rf); in sched_tick_remote()
5521 static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, in put_prev_task_balance() argument
5535 if (class->balance(rq, prev, rf)) in put_prev_task_balance()
5540 put_prev_task(rq, prev); in put_prev_task_balance()
5547 __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in __pick_next_task() argument
5559 rq->nr_running == rq->cfs.h_nr_running)) { in __pick_next_task()
5561 p = pick_next_task_fair(rq, prev, rf); in __pick_next_task()
5567 put_prev_task(rq, prev); in __pick_next_task()
5568 p = pick_next_task_idle(rq); in __pick_next_task()
5575 put_prev_task_balance(rq, prev, rf); in __pick_next_task()
5578 p = class->pick_next_task(rq); in __pick_next_task()
5615 pick_task(struct rq *rq, const struct sched_class *class, struct task_struct *max, bool in_fi) in pick_task() argument
5618 unsigned long cookie = rq->core->core_cookie; in pick_task()
5620 class_pick = class->pick_task(rq); in pick_task()
5631 return idle_sched_class.pick_task(rq); in pick_task()
5642 cookie_pick = sched_core_find(rq, cookie); in pick_task()
5656 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
5659 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
5668 if (!sched_core_enabled(rq)) in pick_next_task()
5669 return __pick_next_task(rq, prev, rf); in pick_next_task()
5671 cpu = cpu_of(rq); in pick_next_task()
5680 rq->core_pick = NULL; in pick_next_task()
5681 return __pick_next_task(rq, prev, rf); in pick_next_task()
5693 if (rq->core->core_pick_seq == rq->core->core_task_seq && in pick_next_task()
5694 rq->core->core_pick_seq != rq->core_sched_seq && in pick_next_task()
5695 rq->core_pick) { in pick_next_task()
5696 WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); in pick_next_task()
5698 next = rq->core_pick; in pick_next_task()
5700 put_prev_task(rq, prev); in pick_next_task()
5701 set_next_task(rq, next); in pick_next_task()
5704 rq->core_pick = NULL; in pick_next_task()
5708 put_prev_task_balance(rq, prev, rf); in pick_next_task()
5711 need_sync = !!rq->core->core_cookie; in pick_next_task()
5714 rq->core->core_cookie = 0UL; in pick_next_task()
5715 if (rq->core->core_forceidle) { in pick_next_task()
5718 rq->core->core_forceidle = false; in pick_next_task()
5731 rq->core->core_task_seq++; in pick_next_task()
5739 next = class->pick_task(rq); in pick_next_task()
5745 rq->core_pick = NULL; in pick_next_task()
5751 task_vruntime_update(rq, next, false); in pick_next_task()
5757 struct rq *rq_i = cpu_rq(i); in pick_next_task()
5772 struct rq *rq_i = cpu_rq(i); in pick_next_task()
5793 rq->core->core_forceidle = true; in pick_next_task()
5795 rq->core->core_forceidle_seq++; in pick_next_task()
5811 rq->core->core_cookie = p->core_cookie; in pick_next_task()
5815 rq->core->core_forceidle = false; in pick_next_task()
5829 rq->core->core_pick_seq = rq->core->core_task_seq; in pick_next_task()
5830 next = rq->core_pick; in pick_next_task()
5831 rq->core_sched_seq = rq->core->core_pick_seq; in pick_next_task()
5845 struct rq *rq_i = cpu_rq(i); in pick_next_task()
5865 if (!(fi_before && rq->core->core_forceidle)) in pick_next_task()
5866 task_vruntime_update(rq_i, rq_i->core_pick, rq->core->core_forceidle); in pick_next_task()
5887 set_next_task(rq, next); in pick_next_task()
5893 struct rq *dst = cpu_rq(this), *src = cpu_rq(that); in try_steal_cookie()
5960 static void sched_core_balance(struct rq *rq) in sched_core_balance() argument
5963 int cpu = cpu_of(rq); in sched_core_balance()
5967 raw_spin_rq_unlock_irq(rq); in sched_core_balance()
5975 raw_spin_rq_lock_irq(rq); in sched_core_balance()
5982 void queue_core_balance(struct rq *rq) in queue_core_balance() argument
5984 if (!sched_core_enabled(rq)) in queue_core_balance()
5987 if (!rq->core->core_cookie) in queue_core_balance()
5990 if (!rq->nr_running) /* not forced idle */ in queue_core_balance()
5993 queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); in queue_core_balance()
5999 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; in sched_core_cpu_starting() local
6005 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_starting()
6015 rq = cpu_rq(t); in sched_core_cpu_starting()
6016 if (rq->core == rq) { in sched_core_cpu_starting()
6017 core_rq = rq; in sched_core_cpu_starting()
6027 rq = cpu_rq(t); in sched_core_cpu_starting()
6030 rq->core = core_rq; in sched_core_cpu_starting()
6032 WARN_ON_ONCE(rq->core != core_rq); in sched_core_cpu_starting()
6042 struct rq *rq = cpu_rq(cpu), *core_rq = NULL; in sched_core_cpu_deactivate() local
6050 WARN_ON_ONCE(rq->core != rq); in sched_core_cpu_deactivate()
6055 if (rq->core != rq) in sched_core_cpu_deactivate()
6070 core_rq->core_task_seq = rq->core_task_seq; in sched_core_cpu_deactivate()
6071 core_rq->core_pick_seq = rq->core_pick_seq; in sched_core_cpu_deactivate()
6072 core_rq->core_cookie = rq->core_cookie; in sched_core_cpu_deactivate()
6073 core_rq->core_forceidle = rq->core_forceidle; in sched_core_cpu_deactivate()
6074 core_rq->core_forceidle_seq = rq->core_forceidle_seq; in sched_core_cpu_deactivate()
6078 rq = cpu_rq(t); in sched_core_cpu_deactivate()
6079 rq->core = core_rq; in sched_core_cpu_deactivate()
6088 struct rq *rq = cpu_rq(cpu); in sched_core_cpu_dying() local
6090 if (rq->core != rq) in sched_core_cpu_dying()
6091 rq->core = rq; in sched_core_cpu_dying()
6101 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
6103 return __pick_next_task(rq, prev, rf); in pick_next_task()
6171 struct rq *rq; in __schedule() local
6175 rq = cpu_rq(cpu); in __schedule()
6176 prev = rq->curr; in __schedule()
6181 hrtick_clear(rq); in __schedule()
6201 rq_lock(rq, &rf); in __schedule()
6205 rq->clock_update_flags <<= 1; in __schedule()
6206 update_rq_clock(rq); in __schedule()
6228 rq->nr_uninterruptible++; in __schedule()
6241 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); in __schedule()
6244 atomic_inc(&rq->nr_iowait); in __schedule()
6251 next = pick_next_task(rq, prev, &rf); in __schedule()
6255 rq->last_seen_need_resched_ns = 0; in __schedule()
6259 rq->nr_switches++; in __schedule()
6264 RCU_INIT_POINTER(rq->curr, next); in __schedule()
6281 migrate_disable_switch(rq, prev); in __schedule()
6287 rq = context_switch(rq, prev, next, &rf); in __schedule()
6289 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in __schedule()
6291 rq_unpin_lock(rq, &rf); in __schedule()
6292 __balance_callbacks(rq); in __schedule()
6293 raw_spin_rq_unlock_irq(rq); in __schedule()
6749 struct rq *rq; in rt_mutex_setprio() local
6760 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
6761 update_rq_clock(rq); in rt_mutex_setprio()
6792 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
6793 WARN_ON(p != rq->curr); in rt_mutex_setprio()
6806 running = task_current(rq, p); in rt_mutex_setprio()
6808 dequeue_task(rq, p, queue_flag); in rt_mutex_setprio()
6810 put_prev_task(rq, p); in rt_mutex_setprio()
6845 enqueue_task(rq, p, queue_flag); in rt_mutex_setprio()
6847 set_next_task(rq, p); in rt_mutex_setprio()
6849 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
6854 rq_unpin_lock(rq, &rf); in rt_mutex_setprio()
6855 __balance_callbacks(rq); in rt_mutex_setprio()
6856 raw_spin_rq_unlock(rq); in rt_mutex_setprio()
6872 struct rq *rq; in set_user_nice() local
6880 rq = task_rq_lock(p, &rf); in set_user_nice()
6881 update_rq_clock(rq); in set_user_nice()
6894 running = task_current(rq, p); in set_user_nice()
6896 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in set_user_nice()
6898 put_prev_task(rq, p); in set_user_nice()
6906 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in set_user_nice()
6908 set_next_task(rq, p); in set_user_nice()
6914 p->sched_class->prio_changed(rq, p, old_prio); in set_user_nice()
6917 task_rq_unlock(rq, p, &rf); in set_user_nice()
6995 struct rq *rq = cpu_rq(cpu); in idle_cpu() local
6997 if (rq->curr != rq->idle) in idle_cpu()
7000 if (rq->nr_running) in idle_cpu()
7004 if (rq->ttwu_pending) in idle_cpu()
7065 struct rq *rq = cpu_rq(cpu); in effective_cpu_util() local
7068 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { in effective_cpu_util()
7077 irq = cpu_util_irq(rq); in effective_cpu_util()
7093 util = util_cfs + cpu_util_rt(rq); in effective_cpu_util()
7095 util = uclamp_rq_util_with(rq, util, p); in effective_cpu_util()
7097 dl_util = cpu_util_dl(rq); in effective_cpu_util()
7141 util += cpu_bw_dl(rq); in effective_cpu_util()
7222 struct rq *rq; in __sched_setscheduler() local
7329 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
7330 update_rq_clock(rq); in __sched_setscheduler()
7335 if (p == rq->stop) { in __sched_setscheduler()
7376 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
7384 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
7395 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7429 running = task_current(rq, p); in __sched_setscheduler()
7431 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
7433 put_prev_task(rq, p); in __sched_setscheduler()
7451 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
7454 set_next_task(rq, p); in __sched_setscheduler()
7456 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
7460 head = splice_balance_callbacks(rq); in __sched_setscheduler()
7461 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7469 balance_callbacks(rq, head); in __sched_setscheduler()
7475 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
8119 struct rq *rq; in do_sched_yield() local
8121 rq = this_rq_lock_irq(&rf); in do_sched_yield()
8123 schedstat_inc(rq->yld_count); in do_sched_yield()
8124 current->sched_class->yield_task(rq); in do_sched_yield()
8127 rq_unlock_irq(rq, &rf); in do_sched_yield()
8296 struct rq *rq, *p_rq; in yield_to() local
8301 rq = this_rq(); in yield_to()
8309 if (rq->nr_running == 1 && p_rq->nr_running == 1) { in yield_to()
8314 double_rq_lock(rq, p_rq); in yield_to()
8316 double_rq_unlock(rq, p_rq); in yield_to()
8329 yielded = curr->sched_class->yield_to_task(rq, p); in yield_to()
8331 schedstat_inc(rq->yld_count); in yield_to()
8336 if (preempt && rq != p_rq) in yield_to()
8341 double_rq_unlock(rq, p_rq); in yield_to()
8452 struct rq *rq; in sched_rr_get_interval() local
8468 rq = task_rq_lock(p, &rf); in sched_rr_get_interval()
8471 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
8472 task_rq_unlock(rq, p, &rf); in sched_rr_get_interval()
8615 struct rq *rq = cpu_rq(cpu); in init_idle() local
8629 raw_spin_rq_lock(rq); in init_idle()
8666 rq->idle = idle; in init_idle()
8667 rcu_assign_pointer(rq->curr, idle); in init_idle()
8672 raw_spin_rq_unlock(rq); in init_idle()
8760 struct rq *rq; in sched_setnuma() local
8762 rq = task_rq_lock(p, &rf); in sched_setnuma()
8764 running = task_current(rq, p); in sched_setnuma()
8767 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
8769 put_prev_task(rq, p); in sched_setnuma()
8774 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in sched_setnuma()
8776 set_next_task(rq, p); in sched_setnuma()
8777 task_rq_unlock(rq, p, &rf); in sched_setnuma()
8805 struct rq *rq = this_rq(); in __balance_push_cpu_stop() local
8810 rq_lock(rq, &rf); in __balance_push_cpu_stop()
8812 update_rq_clock(rq); in __balance_push_cpu_stop()
8814 if (task_rq(p) == rq && task_on_rq_queued(p)) { in __balance_push_cpu_stop()
8815 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
8816 rq = __migrate_task(rq, &rf, p, cpu); in __balance_push_cpu_stop()
8819 rq_unlock(rq, &rf); in __balance_push_cpu_stop()
8835 static void balance_push(struct rq *rq) in balance_push() argument
8837 struct task_struct *push_task = rq->curr; in balance_push()
8839 lockdep_assert_rq_held(rq); in balance_push()
8844 rq->balance_callback = &balance_push_callback; in balance_push()
8850 if (!cpu_dying(rq->cpu) || rq != this_rq()) in balance_push()
8871 if (!rq->nr_running && !rq_has_pinned_tasks(rq) && in balance_push()
8872 rcuwait_active(&rq->hotplug_wait)) { in balance_push()
8873 raw_spin_rq_unlock(rq); in balance_push()
8874 rcuwait_wake_up(&rq->hotplug_wait); in balance_push()
8875 raw_spin_rq_lock(rq); in balance_push()
8885 raw_spin_rq_unlock(rq); in balance_push()
8886 stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, in balance_push()
8893 raw_spin_rq_lock(rq); in balance_push()
8898 struct rq *rq = cpu_rq(cpu); in balance_push_set() local
8901 rq_lock_irqsave(rq, &rf); in balance_push_set()
8903 WARN_ON_ONCE(rq->balance_callback); in balance_push_set()
8904 rq->balance_callback = &balance_push_callback; in balance_push_set()
8905 } else if (rq->balance_callback == &balance_push_callback) { in balance_push_set()
8906 rq->balance_callback = NULL; in balance_push_set()
8908 rq_unlock_irqrestore(rq, &rf); in balance_push_set()
8919 struct rq *rq = this_rq(); in balance_hotplug_wait() local
8921 rcuwait_wait_event(&rq->hotplug_wait, in balance_hotplug_wait()
8922 rq->nr_running == 1 && !rq_has_pinned_tasks(rq), in balance_hotplug_wait()
8928 static inline void balance_push(struct rq *rq) in balance_push() argument
8942 void set_rq_online(struct rq *rq) in set_rq_online() argument
8944 if (!rq->online) { in set_rq_online()
8947 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
8948 rq->online = 1; in set_rq_online()
8952 class->rq_online(rq); in set_rq_online()
8957 void set_rq_offline(struct rq *rq) in set_rq_offline() argument
8959 if (rq->online) { in set_rq_offline()
8964 class->rq_offline(rq); in set_rq_offline()
8967 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
8968 rq->online = 0; in set_rq_offline()
9022 struct rq *rq = cpu_rq(cpu); in sched_cpu_activate() local
9054 rq_lock_irqsave(rq, &rf); in sched_cpu_activate()
9055 if (rq->rd) { in sched_cpu_activate()
9056 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_activate()
9057 set_rq_online(rq); in sched_cpu_activate()
9059 rq_unlock_irqrestore(rq, &rf); in sched_cpu_activate()
9066 struct rq *rq = cpu_rq(cpu); in sched_cpu_deactivate() local
9074 nohz_balance_exit_idle(rq); in sched_cpu_deactivate()
9098 rq_lock_irqsave(rq, &rf); in sched_cpu_deactivate()
9099 if (rq->rd) { in sched_cpu_deactivate()
9100 update_rq_clock(rq); in sched_cpu_deactivate()
9101 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_deactivate()
9102 set_rq_offline(rq); in sched_cpu_deactivate()
9104 rq_unlock_irqrestore(rq, &rf); in sched_cpu_deactivate()
9131 struct rq *rq = cpu_rq(cpu); in sched_rq_cpu_starting() local
9133 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
9173 static void calc_load_migrate(struct rq *rq) in calc_load_migrate() argument
9175 long delta = calc_load_fold_active(rq, 1); in calc_load_migrate()
9181 static void dump_rq_tasks(struct rq *rq, const char *loglvl) in dump_rq_tasks() argument
9184 int cpu = cpu_of(rq); in dump_rq_tasks()
9186 lockdep_assert_rq_held(rq); in dump_rq_tasks()
9188 printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); in dump_rq_tasks()
9202 struct rq *rq = cpu_rq(cpu); in sched_cpu_dying() local
9208 rq_lock_irqsave(rq, &rf); in sched_cpu_dying()
9209 if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { in sched_cpu_dying()
9211 dump_rq_tasks(rq, KERN_WARNING); in sched_cpu_dying()
9213 rq_unlock_irqrestore(rq, &rf); in sched_cpu_dying()
9215 calc_load_migrate(rq); in sched_cpu_dying()
9217 hrtick_clear(rq); in sched_cpu_dying()
9358 struct rq *rq; in sched_init() local
9360 rq = cpu_rq(i); in sched_init()
9361 raw_spin_lock_init(&rq->__lock); in sched_init()
9362 rq->nr_running = 0; in sched_init()
9363 rq->calc_load_active = 0; in sched_init()
9364 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
9365 init_cfs_rq(&rq->cfs); in sched_init()
9366 init_rt_rq(&rq->rt); in sched_init()
9367 init_dl_rq(&rq->dl); in sched_init()
9369 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
9370 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
9390 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
9393 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; in sched_init()
9395 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
9398 rq->sd = NULL; in sched_init()
9399 rq->rd = NULL; in sched_init()
9400 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; in sched_init()
9401 rq->balance_callback = &balance_push_callback; in sched_init()
9402 rq->active_balance = 0; in sched_init()
9403 rq->next_balance = jiffies; in sched_init()
9404 rq->push_cpu = 0; in sched_init()
9405 rq->cpu = i; in sched_init()
9406 rq->online = 0; in sched_init()
9407 rq->idle_stamp = 0; in sched_init()
9408 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
9409 rq->wake_stamp = jiffies; in sched_init()
9410 rq->wake_avg_idle = rq->avg_idle; in sched_init()
9411 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
9413 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
9415 rq_attach_root(rq, &def_root_domain); in sched_init()
9417 rq->last_blocked_load_update_tick = jiffies; in sched_init()
9418 atomic_set(&rq->nohz_flags, 0); in sched_init()
9420 INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); in sched_init()
9423 rcuwait_init(&rq->hotplug_wait); in sched_init()
9426 hrtick_rq_init(rq); in sched_init()
9427 atomic_set(&rq->nr_iowait, 0); in sched_init()
9430 rq->core = rq; in sched_init()
9431 rq->core_pick = NULL; in sched_init()
9432 rq->core_enabled = 0; in sched_init()
9433 rq->core_tree = RB_ROOT; in sched_init()
9434 rq->core_forceidle = false; in sched_init()
9436 rq->core_cookie = 0UL; in sched_init()
9821 struct rq *rq; in sched_move_task() local
9823 rq = task_rq_lock(tsk, &rf); in sched_move_task()
9824 update_rq_clock(rq); in sched_move_task()
9826 running = task_current(rq, tsk); in sched_move_task()
9830 dequeue_task(rq, tsk, queue_flags); in sched_move_task()
9832 put_prev_task(rq, tsk); in sched_move_task()
9837 enqueue_task(rq, tsk, queue_flags); in sched_move_task()
9839 set_next_task(rq, tsk); in sched_move_task()
9845 resched_curr(rq); in sched_move_task()
9848 task_rq_unlock(rq, tsk, &rf); in sched_move_task()
9919 struct rq *rq; in cpu_cgroup_fork() local
9921 rq = task_rq_lock(task, &rf); in cpu_cgroup_fork()
9923 update_rq_clock(rq); in cpu_cgroup_fork()
9926 task_rq_unlock(rq, task, &rf); in cpu_cgroup_fork()
10241 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth() local
10244 rq_lock_irq(rq, &rf); in tg_set_cfs_bandwidth()
10250 rq_unlock_irq(rq, &rf); in tg_set_cfs_bandwidth()
10842 void call_trace_sched_update_nr_running(struct rq *rq, int count) in call_trace_sched_update_nr_running() argument
10844 trace_sched_update_nr_running_tp(rq, count); in call_trace_sched_update_nr_running()