Lines matching refs: rq

25 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
66 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
67 __acquires(rq->lock) in __task_rq_lock()
69 struct rq *rq; in __task_rq_lock() local
74 rq = task_rq(p); in __task_rq_lock()
75 raw_spin_lock(&rq->lock); in __task_rq_lock()
76 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
77 rq_pin_lock(rq, rf); in __task_rq_lock()
78 return rq; in __task_rq_lock()
80 raw_spin_unlock(&rq->lock); in __task_rq_lock()
90 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
92 __acquires(rq->lock) in task_rq_lock()
94 struct rq *rq; in task_rq_lock() local
98 rq = task_rq(p); in task_rq_lock()
99 raw_spin_lock(&rq->lock); in task_rq_lock()
116 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
117 rq_pin_lock(rq, rf); in task_rq_lock()
118 return rq; in task_rq_lock()
120 raw_spin_unlock(&rq->lock); in task_rq_lock()
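
The two lock helpers above share one idea: a task's runqueue can change between reading task_rq(p) and acquiring that runqueue's lock, so the lock is taken speculatively and re-validated, retrying if the task migrated (or is mid-migration) in the meantime. A minimal user-space sketch of that recheck loop, assuming pthread mutexes and C11 atomics; the queue/item names are illustrative, not kernel API:

/* Sketch of the lock-and-recheck loop used by __task_rq_lock(): an item's
 * owning queue can change while we wait for that queue's lock, so after
 * acquiring it we verify the item still belongs to the queue and retry
 * otherwise.  All names here are illustrative, not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct queue {
        pthread_mutex_t lock;
};

struct item {
        _Atomic(struct queue *) owner;  /* which queue the item is on */
};

static struct queue *item_queue_lock(struct item *it)
{
        for (;;) {
                struct queue *q = atomic_load(&it->owner);

                pthread_mutex_lock(&q->lock);
                if (q == atomic_load(&it->owner))
                        return q;       /* still ours: return with lock held */
                pthread_mutex_unlock(&q->lock); /* migrated meanwhile: retry */
        }
}

int main(void)
{
        struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct item it;

        atomic_init(&it.owner, &q);
        struct queue *locked = item_queue_lock(&it);
        printf("locked queue %p\n", (void *)locked);
        pthread_mutex_unlock(&locked->lock);
        return 0;
}
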
132 static void update_rq_clock_task(struct rq *rq, s64 delta) in update_rq_clock_task() argument
142 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
162 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
167 steal = paravirt_steal_clock(cpu_of(rq)); in update_rq_clock_task()
168 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
173 rq->prev_steal_time_rq += steal; in update_rq_clock_task()
178 rq->clock_task += delta; in update_rq_clock_task()
182 update_irq_load_avg(rq, irq_delta + steal); in update_rq_clock_task()
186 void update_rq_clock(struct rq *rq) in update_rq_clock() argument
190 lockdep_assert_held(&rq->lock); in update_rq_clock()
192 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
197 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
198 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
201 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
204 rq->clock += delta; in update_rq_clock()
205 update_rq_clock_task(rq, delta); in update_rq_clock()
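
update_rq_clock() advances two clocks from one time source: rq->clock takes the full delta since the last update, while update_rq_clock_task() first subtracts time charged to hard IRQs and paravirt steal time before advancing rq->clock_task. A hedged user-space sketch of that bookkeeping, with CLOCK_MONOTONIC standing in for sched_clock_cpu() and a single stolen_ns counter standing in for the irq/steal terms; all names below are hypothetical:

/* Two-level clock bookkeeping: the raw queue clock advances by the full
 * delta, the task clock by the delta minus time attributed elsewhere. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct clocks {
        uint64_t clock;         /* total time observed */
        uint64_t clock_task;    /* time actually available to tasks */
        uint64_t last_ns;
        uint64_t stolen_ns;     /* accumulated time to subtract */
};

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void update_clock(struct clocks *c)
{
        uint64_t now = now_ns();
        uint64_t delta = now - c->last_ns;
        uint64_t stolen = c->stolen_ns;

        if (stolen > delta)             /* never let clock_task run backwards */
                stolen = delta;

        c->last_ns = now;
        c->clock += delta;              /* like rq->clock += delta */
        c->clock_task += delta - stolen;/* like rq->clock_task += delta - ... */
        c->stolen_ns -= stolen;
}

int main(void)
{
        struct clocks c = { .last_ns = now_ns() };

        c.stolen_ns = 1000;             /* pretend 1us was accounted elsewhere */
        update_clock(&c);
        printf("clock=%llu clock_task=%llu\n",
               (unsigned long long)c.clock,
               (unsigned long long)c.clock_task);
        return 0;
}
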
214 static void hrtick_clear(struct rq *rq) in hrtick_clear() argument
216 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
217 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
226 struct rq *rq = container_of(timer, struct rq, hrtick_timer); in hrtick() local
229 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); in hrtick()
231 rq_lock(rq, &rf); in hrtick()
232 update_rq_clock(rq); in hrtick()
233 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
234 rq_unlock(rq, &rf); in hrtick()
241 static void __hrtick_restart(struct rq *rq) in __hrtick_restart() argument
243 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
253 struct rq *rq = arg; in __hrtick_start() local
256 rq_lock(rq, &rf); in __hrtick_start()
257 __hrtick_restart(rq); in __hrtick_start()
258 rq->hrtick_csd_pending = 0; in __hrtick_start()
259 rq_unlock(rq, &rf); in __hrtick_start()
267 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
269 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
282 if (rq == this_rq()) { in hrtick_start()
283 __hrtick_restart(rq); in hrtick_start()
284 } else if (!rq->hrtick_csd_pending) { in hrtick_start()
285 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
286 rq->hrtick_csd_pending = 1; in hrtick_start()
296 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
303 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
308 static void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
311 rq->hrtick_csd_pending = 0; in hrtick_rq_init()
313 rq->hrtick_csd.flags = 0; in hrtick_rq_init()
314 rq->hrtick_csd.func = __hrtick_start; in hrtick_rq_init()
315 rq->hrtick_csd.info = rq; in hrtick_rq_init()
318 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in hrtick_rq_init()
319 rq->hrtick_timer.function = hrtick; in hrtick_rq_init()
322 static inline void hrtick_clear(struct rq *rq) in hrtick_clear() argument
326 static inline void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
453 void resched_curr(struct rq *rq) in resched_curr() argument
455 struct task_struct *curr = rq->curr; in resched_curr()
458 lockdep_assert_held(&rq->lock); in resched_curr()
463 cpu = cpu_of(rq); in resched_curr()
479 struct rq *rq = cpu_rq(cpu); in resched_cpu() local
482 raw_spin_lock_irqsave(&rq->lock, flags); in resched_cpu()
484 resched_curr(rq); in resched_cpu()
485 raw_spin_unlock_irqrestore(&rq->lock, flags); in resched_cpu()
538 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu() local
543 if (set_nr_and_not_polling(rq->idle)) in wake_up_idle_cpu()
608 bool sched_can_stop_tick(struct rq *rq) in sched_can_stop_tick() argument
613 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
620 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
621 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
631 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
640 if (rq->nr_running > 1) in sched_can_stop_tick()
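
sched_can_stop_tick() is the NO_HZ_FULL policy question in pure form: does this CPU still need its periodic tick given what is runnable? A condensed restatement of that predicate as standalone C, with a flat counter struct standing in for the per-class sub-runqueues (rq->dl, rq->rt) it reads in the kernel:

/* Keep the tick when deadline tasks are runnable, when more than one
 * round-robin task must be time-sliced, or when more than one task is
 * runnable overall; a lone FIFO task runs until it yields. */
#include <stdbool.h>
#include <stdio.h>

struct rq_counts {
        unsigned int dl_nr_running;     /* SCHED_DEADLINE tasks */
        unsigned int rt_nr_running;     /* all realtime tasks */
        unsigned int rr_nr_running;     /* SCHED_RR subset of the above */
        unsigned int nr_running;        /* everything runnable */
};

static bool can_stop_tick(const struct rq_counts *rq)
{
        if (rq->dl_nr_running)
                return false;           /* deadline enforcement needs the tick */

        if (rq->rr_nr_running)          /* RR needs slicing only with peers */
                return rq->rr_nr_running == 1;

        if (rq->rt_nr_running - rq->rr_nr_running)
                return true;            /* FIFO alone runs until it yields */

        return rq->nr_running <= 1;     /* one task: nothing to preempt for */
}

int main(void)
{
        struct rq_counts one_task = { .nr_running = 1 };
        struct rq_counts two_rr = { .rt_nr_running = 2, .rr_nr_running = 2,
                                    .nr_running = 2 };

        printf("one task: %d, two RR tasks: %d\n",
               can_stop_tick(&one_task), can_stop_tick(&two_rr));
        return 0;
}
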
719 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
722 update_rq_clock(rq); in enqueue_task()
725 sched_info_queued(rq, p); in enqueue_task()
727 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
730 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
733 update_rq_clock(rq); in dequeue_task()
736 sched_info_dequeued(rq, p); in dequeue_task()
738 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
741 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
744 rq->nr_uninterruptible--; in activate_task()
746 enqueue_task(rq, p, flags); in activate_task()
749 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
752 rq->nr_uninterruptible++; in deactivate_task()
754 dequeue_task(rq, p, flags); in deactivate_task()
823 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
829 prev_class->switched_from(rq, p); in check_class_changed()
831 p->sched_class->switched_to(rq, p); in check_class_changed()
833 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
836 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
840 if (p->sched_class == rq->curr->sched_class) { in check_preempt_curr()
841 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
844 if (class == rq->curr->sched_class) in check_preempt_curr()
847 resched_curr(rq); in check_preempt_curr()
857 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in check_preempt_curr()
858 rq_clock_skip_update(rq); in check_preempt_curr()
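
enqueue_task(), dequeue_task() and check_preempt_curr() mostly dispatch through the task's scheduling class, and cross-class preemption reduces to walking the classes in priority order: if the waking task's class is reached before the current task's, reschedule. A toy C model of that dispatch under the assumption of a two-entry class table; none of the types below are the kernel's:

#include <stdio.h>

struct toy_rq;
struct toy_task;

struct toy_class {
        const char *name;
        void (*enqueue)(struct toy_rq *rq, struct toy_task *p);
        void (*dequeue)(struct toy_rq *rq, struct toy_task *p);
};

struct toy_task {
        const struct toy_class *class;
        const char *comm;
};

struct toy_rq {
        unsigned int nr_running;
        struct toy_task *curr;
};

static void count_enqueue(struct toy_rq *rq, struct toy_task *p)
{
        rq->nr_running++;
        printf("enqueue %s via %s class\n", p->comm, p->class->name);
}

static void count_dequeue(struct toy_rq *rq, struct toy_task *p)
{
        rq->nr_running--;
        printf("dequeue %s via %s class\n", p->comm, p->class->name);
}

/* Highest priority first, like stop/dl/rt/fair/idle in the kernel. */
static const struct toy_class classes[] = {
        { "rt",   count_enqueue, count_dequeue },
        { "fair", count_enqueue, count_dequeue },
};

/* A task in a higher-priority class always preempts the current task;
 * within the same class the class's own hook would decide. */
static int should_preempt(struct toy_rq *rq, struct toy_task *p)
{
        const struct toy_class *c;

        for (c = classes; c < classes + sizeof(classes) / sizeof(classes[0]); c++) {
                if (c == rq->curr->class)
                        return 0;       /* curr's class reached first: no */
                if (c == p->class)
                        return 1;       /* waking class is higher: preempt */
        }
        return 0;
}

int main(void)
{
        struct toy_task background = { &classes[1], "fair-task" };
        struct toy_task urgent     = { &classes[0], "rt-task" };
        struct toy_rq rq = { .curr = &background };

        urgent.class->enqueue(&rq, &urgent);
        printf("preempt? %d\n", should_preempt(&rq, &urgent));
        return 0;
}
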
908 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task() argument
911 lockdep_assert_held(&rq->lock); in move_queued_task()
914 dequeue_task(rq, p, DEQUEUE_NOCLOCK); in move_queued_task()
916 rq_unlock(rq, rf); in move_queued_task()
918 rq = cpu_rq(new_cpu); in move_queued_task()
920 rq_lock(rq, rf); in move_queued_task()
922 enqueue_task(rq, p, 0); in move_queued_task()
924 check_preempt_curr(rq, p, 0); in move_queued_task()
926 return rq; in move_queued_task()
943 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task() argument
948 return rq; in __migrate_task()
950 update_rq_clock(rq); in __migrate_task()
951 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
953 return rq; in __migrate_task()
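
move_queued_task() and __migrate_task() show the hand-over-hand queue switch: dequeue under the old runqueue lock, drop it, take the destination lock, then enqueue and test for preemption, never holding both locks at once. A hedged user-space sketch of that sequence; the NULL-owner window is what the migrating state (checked by the lock helpers earlier) guards against in the kernel:

#include <pthread.h>
#include <stdio.h>

struct queue {
        pthread_mutex_t lock;
        unsigned int nr;
};

struct item {
        struct queue *owner;
};

static void move_item(struct item *it, struct queue *dst)
{
        struct queue *src = it->owner;

        pthread_mutex_lock(&src->lock);
        src->nr--;                      /* dequeue under the old lock */
        it->owner = NULL;               /* "migrating": on no queue right now */
        pthread_mutex_unlock(&src->lock);

        pthread_mutex_lock(&dst->lock); /* only one queue lock held at a time */
        it->owner = dst;
        dst->nr++;                      /* enqueue under the new lock */
        pthread_mutex_unlock(&dst->lock);
}

int main(void)
{
        struct queue a = { PTHREAD_MUTEX_INITIALIZER, 1 };
        struct queue b = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct item it = { .owner = &a };

        move_item(&it, &b);
        printf("a.nr=%u b.nr=%u\n", a.nr, b.nr);
        return 0;
}
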
965 struct rq *rq = this_rq(); in migration_cpu_stop() local
981 rq_lock(rq, &rf); in migration_cpu_stop()
987 if (task_rq(p) == rq) { in migration_cpu_stop()
989 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
993 rq_unlock(rq, &rf); in migration_cpu_stop()
1012 struct rq *rq = task_rq(p); in do_set_cpus_allowed() local
1018 running = task_current(rq, p); in do_set_cpus_allowed()
1025 lockdep_assert_held(&rq->lock); in do_set_cpus_allowed()
1026 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in do_set_cpus_allowed()
1029 put_prev_task(rq, p); in do_set_cpus_allowed()
1034 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in do_set_cpus_allowed()
1036 set_curr_task(rq, p); in do_set_cpus_allowed()
1054 struct rq *rq; in __set_cpus_allowed_ptr() local
1057 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
1058 update_rq_clock(rq); in __set_cpus_allowed_ptr()
1101 if (task_running(rq, p) || p->state == TASK_WAKING) { in __set_cpus_allowed_ptr()
1104 task_rq_unlock(rq, p, &rf); in __set_cpus_allowed_ptr()
1105 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); in __set_cpus_allowed_ptr()
1113 rq = move_queued_task(rq, &rf, p, dest_cpu); in __set_cpus_allowed_ptr()
1116 task_rq_unlock(rq, p, &rf); in __set_cpus_allowed_ptr()
1183 struct rq *src_rq, *dst_rq; in __migrate_swap_task()
1220 struct rq *src_rq, *dst_rq; in migrate_swap_stop()
1319 struct rq *rq; in wait_task_inactive() local
1328 rq = task_rq(p); in wait_task_inactive()
1341 while (task_running(rq, p)) { in wait_task_inactive()
1352 rq = task_rq_lock(p, &rf); in wait_task_inactive()
1354 running = task_running(rq, p); in wait_task_inactive()
1359 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
1600 struct rq *rq; in ttwu_stat() local
1605 rq = this_rq(); in ttwu_stat()
1608 if (cpu == rq->cpu) { in ttwu_stat()
1609 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
1616 for_each_domain(rq->cpu, sd) { in ttwu_stat()
1629 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
1636 static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags) in ttwu_activate() argument
1638 activate_task(rq, p, en_flags); in ttwu_activate()
1643 wq_worker_waking_up(p, cpu_of(rq)); in ttwu_activate()
1649 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_wakeup() argument
1652 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
1662 rq_unpin_lock(rq, rf); in ttwu_do_wakeup()
1663 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
1664 rq_repin_lock(rq, rf); in ttwu_do_wakeup()
1667 if (rq->idle_stamp) { in ttwu_do_wakeup()
1668 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_wakeup()
1669 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_wakeup()
1671 update_avg(&rq->avg_idle, delta); in ttwu_do_wakeup()
1673 if (rq->avg_idle > max) in ttwu_do_wakeup()
1674 rq->avg_idle = max; in ttwu_do_wakeup()
1676 rq->idle_stamp = 0; in ttwu_do_wakeup()
1682 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_activate() argument
1687 lockdep_assert_held(&rq->lock); in ttwu_do_activate()
1691 rq->nr_uninterruptible--; in ttwu_do_activate()
1697 ttwu_activate(rq, p, en_flags); in ttwu_do_activate()
1698 ttwu_do_wakeup(rq, p, wake_flags, rf); in ttwu_do_activate()
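
When a wakeup arrives on a CPU that was idle, ttwu_do_wakeup() folds the just-ended idle period into rq->avg_idle and clamps it to twice max_idle_balance_cost so one long sleep cannot dominate later idle-balancing decisions. A sketch of that averaging; the 1/8 weight mirrors the kernel's update_avg() helper but should be read as an assumption of this sketch, and the names are placeholders:

#include <stdint.h>
#include <stdio.h>

struct idle_stats {
        uint64_t idle_stamp;            /* when the CPU went idle, ns */
        uint64_t avg_idle;              /* smoothed idle duration, ns */
        uint64_t max_idle_balance_cost; /* cost of one idle-balance pass */
};

static void update_avg(uint64_t *avg, uint64_t sample)
{
        int64_t diff = (int64_t)(sample - *avg);

        *avg += diff / 8;               /* exponential moving average */
}

static void account_wakeup(struct idle_stats *s, uint64_t now)
{
        if (!s->idle_stamp)
                return;                 /* CPU was not idle */

        uint64_t delta = now - s->idle_stamp;
        uint64_t max = 2 * s->max_idle_balance_cost;

        update_avg(&s->avg_idle, delta);
        if (s->avg_idle > max)          /* clamp: don't over-trust long idles */
                s->avg_idle = max;
        s->idle_stamp = 0;
}

int main(void)
{
        struct idle_stats s = {
                .idle_stamp = 1000,
                .avg_idle = 500000,
                .max_idle_balance_cost = 500000,
        };

        account_wakeup(&s, 4001000);    /* ~4ms of idle time ends here */
        printf("avg_idle=%llu\n", (unsigned long long)s.avg_idle);
        return 0;
}
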
1710 struct rq *rq; in ttwu_remote() local
1713 rq = __task_rq_lock(p, &rf); in ttwu_remote()
1716 update_rq_clock(rq); in ttwu_remote()
1717 ttwu_do_wakeup(rq, p, wake_flags, &rf); in ttwu_remote()
1720 __task_rq_unlock(rq, &rf); in ttwu_remote()
1728 struct rq *rq = this_rq(); in sched_ttwu_pending() local
1729 struct llist_node *llist = llist_del_all(&rq->wake_list); in sched_ttwu_pending()
1736 rq_lock_irqsave(rq, &rf); in sched_ttwu_pending()
1737 update_rq_clock(rq); in sched_ttwu_pending()
1740 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
1742 rq_unlock_irqrestore(rq, &rf); in sched_ttwu_pending()
1785 struct rq *rq = cpu_rq(cpu); in ttwu_queue_remote() local
1790 if (!set_nr_if_polling(rq->idle)) in ttwu_queue_remote()
1799 struct rq *rq = cpu_rq(cpu); in wake_up_if_idle() local
1804 if (!is_idle_task(rcu_dereference(rq->curr))) in wake_up_if_idle()
1807 if (set_nr_if_polling(rq->idle)) { in wake_up_if_idle()
1810 rq_lock_irqsave(rq, &rf); in wake_up_if_idle()
1811 if (is_idle_task(rq->curr)) in wake_up_if_idle()
1814 rq_unlock_irqrestore(rq, &rf); in wake_up_if_idle()
1829 struct rq *rq = cpu_rq(cpu); in ttwu_queue() local
1840 rq_lock(rq, &rf); in ttwu_queue()
1841 update_rq_clock(rq); in ttwu_queue()
1842 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
1843 rq_unlock(rq, &rf); in ttwu_queue()
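
The remote half of the wakeup path (ttwu_queue_remote() feeding sched_ttwu_pending()) relies on a lock-free wake list: producers push with a compare-and-swap and the target CPU detaches the whole list with one atomic exchange (llist_del_all() above), then activates the entries under its own runqueue lock. A user-space sketch of that list using C11 atomics; the IPI that actually kicks the remote CPU is omitted and all type names are hypothetical:

#include <stdatomic.h>
#include <stdio.h>

struct wake_node {
        struct wake_node *next;
        const char *comm;
};

static _Atomic(struct wake_node *) wake_list;

static void wake_list_add(struct wake_node *n)
{
        struct wake_node *head = atomic_load(&wake_list);

        do {
                n->next = head;
        } while (!atomic_compare_exchange_weak(&wake_list, &head, n));
}

static struct wake_node *wake_list_del_all(void)
{
        /* One exchange empties the list; no lock needed on either side. */
        return atomic_exchange(&wake_list, NULL);
}

int main(void)
{
        struct wake_node a = { .comm = "task-a" };
        struct wake_node b = { .comm = "task-b" };

        wake_list_add(&a);
        wake_list_add(&b);

        for (struct wake_node *n = wake_list_del_all(); n; n = n->next)
                printf("pending wakeup: %s\n", n->comm); /* LIFO: b then a */
        return 0;
}
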
2071 struct rq *rq = task_rq(p); in try_to_wake_up_local() local
2073 if (WARN_ON_ONCE(rq != this_rq()) || in try_to_wake_up_local()
2077 lockdep_assert_held(&rq->lock); in try_to_wake_up_local()
2086 rq_unlock(rq, rf); in try_to_wake_up_local()
2088 rq_relock(rq, rf); in try_to_wake_up_local()
2099 atomic_dec(&rq->nr_iowait); in try_to_wake_up_local()
2101 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK); in try_to_wake_up_local()
2104 ttwu_do_wakeup(rq, p, 0, rf); in try_to_wake_up_local()
2397 struct rq *rq; in wake_up_new_task() local
2413 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
2414 update_rq_clock(rq); in wake_up_new_task()
2417 activate_task(rq, p, ENQUEUE_NOCLOCK); in wake_up_new_task()
2420 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
2427 rq_unpin_lock(rq, &rf); in wake_up_new_task()
2428 p->sched_class->task_woken(rq, p); in wake_up_new_task()
2429 rq_repin_lock(rq, &rf); in wake_up_new_task()
2432 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
2551 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) in prepare_lock_switch() argument
2559 rq_unpin_lock(rq, rf); in prepare_lock_switch()
2560 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); in prepare_lock_switch()
2563 rq->lock.owner = next; in prepare_lock_switch()
2567 static inline void finish_lock_switch(struct rq *rq) in finish_lock_switch() argument
2574 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
2575 raw_spin_unlock_irq(&rq->lock); in finish_lock_switch()
2604 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
2608 sched_info_switch(rq, prev, next); in prepare_task_switch()
2635 static struct rq *finish_task_switch(struct task_struct *prev) in finish_task_switch()
2636 __releases(rq->lock) in finish_task_switch()
2638 struct rq *rq = this_rq(); in finish_task_switch() local
2639 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
2658 rq->prev_mm = NULL; in finish_task_switch()
2675 finish_lock_switch(rq); in finish_task_switch()
2713 return rq; in finish_task_switch()
2719 static void __balance_callback(struct rq *rq) in __balance_callback() argument
2722 void (*func)(struct rq *rq); in __balance_callback()
2725 raw_spin_lock_irqsave(&rq->lock, flags); in __balance_callback()
2726 head = rq->balance_callback; in __balance_callback()
2727 rq->balance_callback = NULL; in __balance_callback()
2729 func = (void (*)(struct rq *))head->func; in __balance_callback()
2734 func(rq); in __balance_callback()
2736 raw_spin_unlock_irqrestore(&rq->lock, flags); in __balance_callback()
2739 static inline void balance_callback(struct rq *rq) in balance_callback() argument
2741 if (unlikely(rq->balance_callback)) in balance_callback()
2742 __balance_callback(rq); in balance_callback()
2747 static inline void balance_callback(struct rq *rq) in balance_callback() argument
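
balance_callback() lets code that already holds rq->lock defer work that needs other locks: callbacks are chained onto rq->balance_callback, and __balance_callback() later detaches and runs the list with the lock held. A toy single-threaded sketch with a pthread mutex standing in for rq->lock; the callback and type names are placeholders:

#include <pthread.h>
#include <stdio.h>

struct toy_rq;

struct callback_head {
        struct callback_head *next;
        void (*func)(struct toy_rq *rq);
};

struct toy_rq {
        pthread_mutex_t lock;
        struct callback_head *balance_callback;
};

static void queue_balance_callback(struct toy_rq *rq,
                                   struct callback_head *head,
                                   void (*func)(struct toy_rq *rq))
{
        /* The caller already holds rq->lock in the real code. */
        head->func = func;
        head->next = rq->balance_callback;
        rq->balance_callback = head;
}

static void balance_callback(struct toy_rq *rq)
{
        struct callback_head *head, *next;

        pthread_mutex_lock(&rq->lock);
        head = rq->balance_callback;    /* detach the whole list */
        rq->balance_callback = NULL;
        while (head) {
                next = head->next;
                head->next = NULL;
                head->func(rq);         /* the deferred balancing work */
                head = next;
        }
        pthread_mutex_unlock(&rq->lock);
}

static void do_push(struct toy_rq *rq)
{
        printf("deferred balancing work runs for rq %p\n", (void *)rq);
}

int main(void)
{
        struct toy_rq rq = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct callback_head cb = { 0 };

        queue_balance_callback(&rq, &cb, do_push);
        balance_callback(&rq);
        return 0;
}
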
2758 __releases(rq->lock) in schedule_tail()
2760 struct rq *rq; in schedule_tail() local
2771 rq = finish_task_switch(prev); in schedule_tail()
2772 balance_callback(rq); in schedule_tail()
2784 static __always_inline struct rq *
2785 context_switch(struct rq *rq, struct task_struct *prev, in context_switch() argument
2790 prepare_task_switch(rq, prev, next); in context_switch()
2817 rq->prev_mm = oldmm; in context_switch()
2820 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in context_switch()
2822 prepare_lock_switch(rq, next, rf); in context_switch()
2926 struct rq *this = cpu_rq(cpu); in nr_iowait_cpu()
2932 struct rq *rq = this_rq(); in get_iowait_load() local
2933 *nr_waiters = atomic_read(&rq->nr_iowait); in get_iowait_load()
2934 *load = rq->load.weight; in get_iowait_load()
2998 struct rq *rq; in task_sched_runtime() local
3017 rq = task_rq_lock(p, &rf); in task_sched_runtime()
3023 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
3025 update_rq_clock(rq); in task_sched_runtime()
3026 p->sched_class->update_curr(rq); in task_sched_runtime()
3029 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
3041 struct rq *rq = cpu_rq(cpu); in scheduler_tick() local
3042 struct task_struct *curr = rq->curr; in scheduler_tick()
3047 rq_lock(rq, &rf); in scheduler_tick()
3049 update_rq_clock(rq); in scheduler_tick()
3050 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
3051 cpu_load_update_active(rq); in scheduler_tick()
3052 calc_global_load_tick(rq); in scheduler_tick()
3054 rq_unlock(rq, &rf); in scheduler_tick()
3059 rq->idle_balance = idle_cpu(cpu); in scheduler_tick()
3060 trigger_load_balance(rq); in scheduler_tick()
3078 struct rq *rq = cpu_rq(cpu); in sched_tick_remote() local
3093 rq_lock_irq(rq, &rf); in sched_tick_remote()
3094 curr = rq->curr; in sched_tick_remote()
3098 update_rq_clock(rq); in sched_tick_remote()
3099 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
3106 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
3109 rq_unlock_irq(rq, &rf); in sched_tick_remote()
3304 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
3317 rq->nr_running == rq->cfs.h_nr_running)) { in pick_next_task()
3319 p = fair_sched_class.pick_next_task(rq, prev, rf); in pick_next_task()
3325 p = idle_sched_class.pick_next_task(rq, prev, rf); in pick_next_task()
3332 p = class->pick_next_task(rq, prev, rf); in pick_next_task()
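
pick_next_task() is an optimization wrapped around a simple loop: if everything runnable is in the fair class, ask it directly (falling back to idle), otherwise walk the classes highest-priority first and take the first task offered. A toy C restatement of that structure; the pick hooks return a name instead of a task_struct and the class table is an assumption of the sketch:

#include <stdio.h>

struct toy_rq {
        unsigned int nr_running;
        unsigned int cfs_h_nr_running;  /* how many of those are fair tasks */
};

struct toy_class {
        const char *name;
        const char *(*pick_next)(struct toy_rq *rq);
};

static const char *pick_rt(struct toy_rq *rq)
{
        return (rq->nr_running - rq->cfs_h_nr_running) ? "rt-task" : NULL;
}

static const char *pick_fair(struct toy_rq *rq)
{
        return rq->cfs_h_nr_running ? "fair-task" : NULL;
}

static const char *pick_idle(struct toy_rq *rq)
{
        return "idle";                  /* the idle class never fails */
}

static const struct toy_class classes[] = {     /* priority order */
        { "rt",   pick_rt },
        { "fair", pick_fair },
        { "idle", pick_idle },
};

static const char *pick_next_task(struct toy_rq *rq)
{
        const char *p;

        /* Fast path: only fair tasks are runnable. */
        if (rq->nr_running == rq->cfs_h_nr_running) {
                p = pick_fair(rq);
                return p ? p : pick_idle(rq);
        }

        for (unsigned int i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
                p = classes[i].pick_next(rq);
                if (p)
                        return p;
        }
        return "idle";                  /* unreachable: idle always succeeds */
}

int main(void)
{
        struct toy_rq mixed = { .nr_running = 3, .cfs_h_nr_running = 2 };
        struct toy_rq fair_only = { .nr_running = 2, .cfs_h_nr_running = 2 };

        printf("mixed: %s, fair only: %s\n",
               pick_next_task(&mixed), pick_next_task(&fair_only));
        return 0;
}
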
3388 struct rq *rq; in __schedule() local
3392 rq = cpu_rq(cpu); in __schedule()
3393 prev = rq->curr; in __schedule()
3398 hrtick_clear(rq); in __schedule()
3411 rq_lock(rq, &rf); in __schedule()
3415 rq->clock_update_flags <<= 1; in __schedule()
3416 update_rq_clock(rq); in __schedule()
3423 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); in __schedule()
3427 atomic_inc(&rq->nr_iowait); in __schedule()
3447 next = pick_next_task(rq, prev, &rf); in __schedule()
3452 rq->nr_switches++; in __schedule()
3453 rq->curr = next; in __schedule()
3473 rq = context_switch(rq, prev, next, &rf); in __schedule()
3475 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in __schedule()
3476 rq_unlock_irq(rq, &rf); in __schedule()
3479 balance_callback(rq); in __schedule()
3749 struct rq *rq; in rt_mutex_setprio() local
3760 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
3761 update_rq_clock(rq); in rt_mutex_setprio()
3792 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
3793 WARN_ON(p != rq->curr); in rt_mutex_setprio()
3806 running = task_current(rq, p); in rt_mutex_setprio()
3808 dequeue_task(rq, p, queue_flag); in rt_mutex_setprio()
3810 put_prev_task(rq, p); in rt_mutex_setprio()
3846 enqueue_task(rq, p, queue_flag); in rt_mutex_setprio()
3848 set_curr_task(rq, p); in rt_mutex_setprio()
3850 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
3854 __task_rq_unlock(rq, &rf); in rt_mutex_setprio()
3856 balance_callback(rq); in rt_mutex_setprio()
3871 struct rq *rq; in set_user_nice() local
3879 rq = task_rq_lock(p, &rf); in set_user_nice()
3880 update_rq_clock(rq); in set_user_nice()
3893 running = task_current(rq, p); in set_user_nice()
3895 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in set_user_nice()
3897 put_prev_task(rq, p); in set_user_nice()
3906 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in set_user_nice()
3911 if (delta < 0 || (delta > 0 && task_running(rq, p))) in set_user_nice()
3912 resched_curr(rq); in set_user_nice()
3915 set_curr_task(rq, p); in set_user_nice()
3917 task_rq_unlock(rq, p, &rf); in set_user_nice()
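
rt_mutex_setprio() and set_user_nice() above, like __sched_setscheduler(), sched_setnuma() and sched_move_task() further down, all use the same idiom: note whether the task is queued and/or current, take it off the runqueue while its scheduling parameters change, then restore it so the class sees consistent state. A toy sketch of that queued/running bookkeeping; the data structures are placeholders, not kernel types:

#include <stdbool.h>
#include <stdio.h>

struct toy_task {
        int prio;
        bool on_rq;
};

struct toy_rq {
        unsigned int nr_running;
        struct toy_task *curr;
};

static void dequeue(struct toy_rq *rq, struct toy_task *p)
{
        rq->nr_running--;
        p->on_rq = false;
}

static void enqueue(struct toy_rq *rq, struct toy_task *p)
{
        rq->nr_running++;
        p->on_rq = true;
}

static void set_priority(struct toy_rq *rq, struct toy_task *p, int prio)
{
        bool queued = p->on_rq;
        bool running = (rq->curr == p);

        if (queued)
                dequeue(rq, p);         /* DEQUEUE_SAVE in the kernel */
        if (running)
                rq->curr = NULL;        /* stands in for put_prev_task() */

        p->prio = prio;                 /* the actual attribute change */

        if (queued)
                enqueue(rq, p);         /* ENQUEUE_RESTORE */
        if (running)
                rq->curr = p;           /* stands in for set_curr_task() */
}

int main(void)
{
        struct toy_task t = { .prio = 120, .on_rq = true };
        struct toy_rq rq = { .nr_running = 1, .curr = &t };

        set_priority(&rq, &t, 100);
        printf("prio=%d on_rq=%d nr_running=%u\n", t.prio, t.on_rq, rq.nr_running);
        return 0;
}
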
3991 struct rq *rq = cpu_rq(cpu); in idle_cpu() local
3993 if (rq->curr != rq->idle) in idle_cpu()
3996 if (rq->nr_running) in idle_cpu()
4000 if (!llist_empty(&rq->wake_list)) in idle_cpu()
4078 static void __setscheduler(struct rq *rq, struct task_struct *p, in __setscheduler() argument
4127 struct rq *rq; in __sched_setscheduler() local
4225 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
4226 update_rq_clock(rq); in __sched_setscheduler()
4231 if (p == rq->stop) { in __sched_setscheduler()
4232 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
4249 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
4263 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
4270 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
4278 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
4279 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
4289 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
4299 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
4320 running = task_current(rq, p); in __sched_setscheduler()
4322 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
4324 put_prev_task(rq, p); in __sched_setscheduler()
4327 __setscheduler(rq, p, attr, pi); in __sched_setscheduler()
4337 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
4340 set_curr_task(rq, p); in __sched_setscheduler()
4342 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
4346 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
4352 balance_callback(rq); in __sched_setscheduler()
4933 struct rq *rq; in do_sched_yield() local
4936 rq = this_rq(); in do_sched_yield()
4937 rq_lock(rq, &rf); in do_sched_yield()
4939 schedstat_inc(rq->yld_count); in do_sched_yield()
4940 current->sched_class->yield_task(rq); in do_sched_yield()
4947 rq_unlock(rq, &rf); in do_sched_yield()
5047 struct rq *rq, *p_rq; in yield_to() local
5052 rq = this_rq(); in yield_to()
5060 if (rq->nr_running == 1 && p_rq->nr_running == 1) { in yield_to()
5065 double_rq_lock(rq, p_rq); in yield_to()
5067 double_rq_unlock(rq, p_rq); in yield_to()
5080 yielded = curr->sched_class->yield_to_task(rq, p, preempt); in yield_to()
5082 schedstat_inc(rq->yld_count); in yield_to()
5087 if (preempt && rq != p_rq) in yield_to()
5092 double_rq_unlock(rq, p_rq); in yield_to()
5203 struct rq *rq; in sched_rr_get_interval() local
5219 rq = task_rq_lock(p, &rf); in sched_rr_get_interval()
5222 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
5223 task_rq_unlock(rq, p, &rf); in sched_rr_get_interval()
5371 struct rq *rq = cpu_rq(cpu); in init_idle() local
5375 raw_spin_lock(&rq->lock); in init_idle()
5407 rq->curr = rq->idle = idle; in init_idle()
5412 raw_spin_unlock(&rq->lock); in init_idle()
5500 struct rq *rq; in sched_setnuma() local
5502 rq = task_rq_lock(p, &rf); in sched_setnuma()
5504 running = task_current(rq, p); in sched_setnuma()
5507 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
5509 put_prev_task(rq, p); in sched_setnuma()
5514 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in sched_setnuma()
5516 set_curr_task(rq, p); in sched_setnuma()
5517 task_rq_unlock(rq, p, &rf); in sched_setnuma()
5549 static void calc_load_migrate(struct rq *rq) in calc_load_migrate() argument
5551 long delta = calc_load_fold_active(rq, 1); in calc_load_migrate()
5556 static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) in put_prev_task_fake() argument
5580 static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) in migrate_tasks()
5582 struct rq *rq = dead_rq; in migrate_tasks() local
5583 struct task_struct *next, *stop = rq->stop; in migrate_tasks()
5596 rq->stop = NULL; in migrate_tasks()
5603 update_rq_clock(rq); in migrate_tasks()
5610 if (rq->nr_running == 1) in migrate_tasks()
5616 next = pick_next_task(rq, &fake_task, rf); in migrate_tasks()
5618 put_prev_task(rq, next); in migrate_tasks()
5629 rq_unlock(rq, rf); in migrate_tasks()
5631 rq_relock(rq, rf); in migrate_tasks()
5638 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { in migrate_tasks()
5645 rq = __migrate_task(rq, rf, next, dest_cpu); in migrate_tasks()
5646 if (rq != dead_rq) { in migrate_tasks()
5647 rq_unlock(rq, rf); in migrate_tasks()
5648 rq = dead_rq; in migrate_tasks()
5650 rq_relock(rq, rf); in migrate_tasks()
5655 rq->stop = stop; in migrate_tasks()
5659 void set_rq_online(struct rq *rq) in set_rq_online() argument
5661 if (!rq->online) { in set_rq_online()
5664 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
5665 rq->online = 1; in set_rq_online()
5669 class->rq_online(rq); in set_rq_online()
5674 void set_rq_offline(struct rq *rq) in set_rq_offline() argument
5676 if (rq->online) { in set_rq_offline()
5681 class->rq_offline(rq); in set_rq_offline()
5684 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
5685 rq->online = 0; in set_rq_offline()
5739 struct rq *rq = cpu_rq(cpu); in sched_cpu_activate() local
5770 rq_lock_irqsave(rq, &rf); in sched_cpu_activate()
5771 if (rq->rd) { in sched_cpu_activate()
5772 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_activate()
5773 set_rq_online(rq); in sched_cpu_activate()
5775 rq_unlock_irqrestore(rq, &rf); in sched_cpu_activate()
5810 struct rq *rq = cpu_rq(cpu); in sched_rq_cpu_starting() local
5812 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
5826 struct rq *rq = cpu_rq(cpu); in sched_cpu_dying() local
5833 rq_lock_irqsave(rq, &rf); in sched_cpu_dying()
5834 if (rq->rd) { in sched_cpu_dying()
5835 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_dying()
5836 set_rq_offline(rq); in sched_cpu_dying()
5838 migrate_tasks(rq, &rf); in sched_cpu_dying()
5839 BUG_ON(rq->nr_running != 1); in sched_cpu_dying()
5840 rq_unlock_irqrestore(rq, &rf); in sched_cpu_dying()
5842 calc_load_migrate(rq); in sched_cpu_dying()
5844 nohz_balance_exit_idle(rq); in sched_cpu_dying()
5845 hrtick_clear(rq); in sched_cpu_dying()
5974 struct rq *rq; in sched_init() local
5976 rq = cpu_rq(i); in sched_init()
5977 raw_spin_lock_init(&rq->lock); in sched_init()
5978 rq->nr_running = 0; in sched_init()
5979 rq->calc_load_active = 0; in sched_init()
5980 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
5981 init_cfs_rq(&rq->cfs); in sched_init()
5982 init_rt_rq(&rq->rt); in sched_init()
5983 init_dl_rq(&rq->dl); in sched_init()
5986 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
5987 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
6008 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
6011 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; in sched_init()
6013 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
6017 rq->cpu_load[j] = 0; in sched_init()
6020 rq->sd = NULL; in sched_init()
6021 rq->rd = NULL; in sched_init()
6022 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; in sched_init()
6023 rq->balance_callback = NULL; in sched_init()
6024 rq->active_balance = 0; in sched_init()
6025 rq->next_balance = jiffies; in sched_init()
6026 rq->push_cpu = 0; in sched_init()
6027 rq->cpu = i; in sched_init()
6028 rq->online = 0; in sched_init()
6029 rq->idle_stamp = 0; in sched_init()
6030 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
6031 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
6033 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
6035 rq_attach_root(rq, &def_root_domain); in sched_init()
6037 rq->last_load_update_tick = jiffies; in sched_init()
6038 rq->last_blocked_load_update_tick = jiffies; in sched_init()
6039 atomic_set(&rq->nohz_flags, 0); in sched_init()
6042 hrtick_rq_init(rq); in sched_init()
6043 atomic_set(&rq->nr_iowait, 0); in sched_init()
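
The sched_init() lines above are the per-CPU setup loop: each CPU's runqueue gets its lock, counters, class sub-runqueues and balancing fields initialized before the scheduler runs. A minimal sketch of the same shape, with a fixed-size array standing in for DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues) and only a few representative fields:

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct toy_rq {
        pthread_mutex_t lock;
        unsigned int nr_running;
        unsigned int cpu;
        int online;
        unsigned long next_balance;
};

static struct toy_rq runqueues[NR_CPUS];

static void sched_init(unsigned long now_jiffies)
{
        for (unsigned int i = 0; i < NR_CPUS; i++) {
                struct toy_rq *rq = &runqueues[i];

                pthread_mutex_init(&rq->lock, NULL);
                rq->nr_running = 0;
                rq->cpu = i;
                rq->online = 0;         /* brought online by CPU hotplug */
                rq->next_balance = now_jiffies;
        }
}

int main(void)
{
        sched_init(1000);
        printf("cpu0 next_balance=%lu\n", runqueues[0].next_balance);
        return 0;
}
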
6348 struct rq *rq; in sched_move_task() local
6350 rq = task_rq_lock(tsk, &rf); in sched_move_task()
6351 update_rq_clock(rq); in sched_move_task()
6353 running = task_current(rq, tsk); in sched_move_task()
6357 dequeue_task(rq, tsk, queue_flags); in sched_move_task()
6359 put_prev_task(rq, tsk); in sched_move_task()
6364 enqueue_task(rq, tsk, queue_flags); in sched_move_task()
6366 set_curr_task(rq, tsk); in sched_move_task()
6368 task_rq_unlock(rq, tsk, &rf); in sched_move_task()
6429 struct rq *rq; in cpu_cgroup_fork() local
6431 rq = task_rq_lock(task, &rf); in cpu_cgroup_fork()
6433 update_rq_clock(rq); in cpu_cgroup_fork()
6436 task_rq_unlock(rq, task, &rf); in cpu_cgroup_fork()
6562 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth() local
6565 rq_lock_irq(rq, &rf); in tg_set_cfs_bandwidth()
6571 rq_unlock_irq(rq, &rf); in tg_set_cfs_bandwidth()