Lines Matching refs:rq
37 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
78 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock()
79 __acquires(rq->lock) in __task_rq_lock()
81 struct rq *rq; in __task_rq_lock() local
86 rq = task_rq(p); in __task_rq_lock()
87 raw_spin_lock(&rq->lock); in __task_rq_lock()
88 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
89 rq_pin_lock(rq, rf); in __task_rq_lock()
90 return rq; in __task_rq_lock()
92 raw_spin_unlock(&rq->lock); in __task_rq_lock()
102 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock()
104 __acquires(rq->lock) in task_rq_lock()
106 struct rq *rq; in task_rq_lock() local
110 rq = task_rq(p); in task_rq_lock()
111 raw_spin_lock(&rq->lock); in task_rq_lock()
129 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
130 rq_pin_lock(rq, rf); in task_rq_lock()
131 return rq; in task_rq_lock()
133 raw_spin_unlock(&rq->lock); in task_rq_lock()
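Both helpers above use the same lock-and-recheck idiom: the task can migrate between reading task_rq(p) and acquiring that runqueue's lock, so the result is re-validated under the lock and the whole sequence retried otherwise. A condensed sketch of the loop, reconstructed around the listed lines (the pi_lock handling that distinguishes task_rq_lock() is omitted):

struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
        __acquires(rq->lock)
{
        struct rq *rq;

        lockdep_assert_held(&p->pi_lock);

        for (;;) {
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                /* Re-check: did p migrate while we were taking the lock? */
                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
                        rq_pin_lock(rq, rf);
                        return rq;
                }
                raw_spin_unlock(&rq->lock);

                /* Wait out any in-flight migration of p, then try again. */
                while (unlikely(task_on_rq_migrating(p)))
                        cpu_relax();
        }
}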
145 static void update_rq_clock_task(struct rq *rq, s64 delta) in update_rq_clock_task() argument
154 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; in update_rq_clock_task()
174 rq->prev_irq_time += irq_delta; in update_rq_clock_task()
179 steal = paravirt_steal_clock(cpu_of(rq)); in update_rq_clock_task()
180 steal -= rq->prev_steal_time_rq; in update_rq_clock_task()
185 rq->prev_steal_time_rq += steal; in update_rq_clock_task()
190 rq->clock_task += delta; in update_rq_clock_task()
194 update_irq_load_avg(rq, irq_delta + steal); in update_rq_clock_task()
196 update_rq_clock_pelt(rq, delta); in update_rq_clock_task()
199 void update_rq_clock(struct rq *rq) in update_rq_clock() argument
203 lockdep_assert_held(&rq->lock); in update_rq_clock()
205 if (rq->clock_update_flags & RQCF_ACT_SKIP) in update_rq_clock()
210 SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); in update_rq_clock()
211 rq->clock_update_flags |= RQCF_UPDATED; in update_rq_clock()
214 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
217 rq->clock += delta; in update_rq_clock()
218 update_rq_clock_task(rq, delta); in update_rq_clock()
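update_rq_clock() advances the raw runqueue clock by a sched_clock delta and then has update_rq_clock_task() subtract IRQ and paravirt steal time, so rq->clock_task (which feeds PELT) only counts time the task could actually run. A trimmed sketch of the entry point, with the debug-only RQCF_UPDATED bookkeeping left out:

void update_rq_clock(struct rq *rq)
{
        s64 delta;

        lockdep_assert_held(&rq->lock);

        if (rq->clock_update_flags & RQCF_ACT_SKIP)
                return;

        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
        if (delta < 0)
                return;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);        /* irq/steal-adjusted clock_task */
}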
227 static void hrtick_clear(struct rq *rq) in hrtick_clear() argument
229 if (hrtimer_active(&rq->hrtick_timer)) in hrtick_clear()
230 hrtimer_cancel(&rq->hrtick_timer); in hrtick_clear()
239 struct rq *rq = container_of(timer, struct rq, hrtick_timer); in hrtick() local
242 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); in hrtick()
244 rq_lock(rq, &rf); in hrtick()
245 update_rq_clock(rq); in hrtick()
246 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
247 rq_unlock(rq, &rf); in hrtick()
254 static void __hrtick_restart(struct rq *rq) in __hrtick_restart() argument
256 struct hrtimer *timer = &rq->hrtick_timer; in __hrtick_restart()
266 struct rq *rq = arg; in __hrtick_start() local
269 rq_lock(rq, &rf); in __hrtick_start()
270 __hrtick_restart(rq); in __hrtick_start()
271 rq->hrtick_csd_pending = 0; in __hrtick_start()
272 rq_unlock(rq, &rf); in __hrtick_start()
280 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
282 struct hrtimer *timer = &rq->hrtick_timer; in hrtick_start()
295 if (rq == this_rq()) { in hrtick_start()
296 __hrtick_restart(rq); in hrtick_start()
297 } else if (!rq->hrtick_csd_pending) { in hrtick_start()
298 smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); in hrtick_start()
299 rq->hrtick_csd_pending = 1; in hrtick_start()
309 void hrtick_start(struct rq *rq, u64 delay) in hrtick_start() argument
316 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), in hrtick_start()
321 static void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
324 rq->hrtick_csd_pending = 0; in hrtick_rq_init()
326 rq->hrtick_csd.flags = 0; in hrtick_rq_init()
327 rq->hrtick_csd.func = __hrtick_start; in hrtick_rq_init()
328 rq->hrtick_csd.info = rq; in hrtick_rq_init()
331 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); in hrtick_rq_init()
332 rq->hrtick_timer.function = hrtick; in hrtick_rq_init()
335 static inline void hrtick_clear(struct rq *rq) in hrtick_clear() argument
339 static inline void hrtick_rq_init(struct rq *rq) in hrtick_rq_init() argument
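hrtick_start() has to arm rq->hrtick_timer on the CPU that owns the runqueue: locally it restarts the hrtimer directly, while for a remote runqueue it raises a csd so __hrtick_start() arms the timer on the target CPU. A sketch of the SMP variant; the 10 us floor on the delay is recalled from the upstream code of this era rather than from the lines above:

void hrtick_start(struct rq *rq, u64 delay)
{
        struct hrtimer *timer = &rq->hrtick_timer;
        s64 delta;

        /* Don't schedule slices shorter than 10000ns; they only cause timer churn. */
        delta = max_t(s64, delay, 10000LL);
        hrtimer_set_expires(timer, ktime_add_ns(ktime_get(), delta));

        if (rq == this_rq()) {
                __hrtick_restart(rq);                   /* timer base is local */
        } else if (!rq->hrtick_csd_pending) {
                /* Ask the owning CPU to arm its own timer via __hrtick_start(). */
                smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
                rq->hrtick_csd_pending = 1;
        }
}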
507 void resched_curr(struct rq *rq) in resched_curr() argument
509 struct task_struct *curr = rq->curr; in resched_curr()
512 lockdep_assert_held(&rq->lock); in resched_curr()
517 cpu = cpu_of(rq); in resched_curr()
533 struct rq *rq = cpu_rq(cpu); in resched_cpu() local
536 raw_spin_lock_irqsave(&rq->lock, flags); in resched_cpu()
538 resched_curr(rq); in resched_cpu()
539 raw_spin_unlock_irqrestore(&rq->lock, flags); in resched_cpu()
592 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu() local
597 if (set_nr_and_not_polling(rq->idle)) in wake_up_idle_cpu()
662 bool sched_can_stop_tick(struct rq *rq) in sched_can_stop_tick() argument
667 if (rq->dl.dl_nr_running) in sched_can_stop_tick()
674 if (rq->rt.rr_nr_running) { in sched_can_stop_tick()
675 if (rq->rt.rr_nr_running == 1) in sched_can_stop_tick()
685 fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; in sched_can_stop_tick()
694 if (rq->nr_running > 1) in sched_can_stop_tick()
829 uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id, in uclamp_idle_value() argument
838 rq->uclamp_flags |= UCLAMP_FLAG_IDLE; in uclamp_idle_value()
845 static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id, in uclamp_idle_reset() argument
849 if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE)) in uclamp_idle_reset()
852 WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value); in uclamp_idle_reset()
856 enum uclamp_id uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id, in uclamp_rq_max_value() argument
859 struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket; in uclamp_rq_max_value()
873 return uclamp_idle_value(rq, clamp_id, clamp_value); in uclamp_rq_max_value()
944 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_inc_id() argument
947 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_inc_id()
951 lockdep_assert_held(&rq->lock); in uclamp_rq_inc_id()
960 uclamp_idle_reset(rq, clamp_id, uc_se->value); in uclamp_rq_inc_id()
982 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, in uclamp_rq_dec_id() argument
985 struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; in uclamp_rq_dec_id()
991 lockdep_assert_held(&rq->lock); in uclamp_rq_dec_id()
1015 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); in uclamp_rq_dec_id()
1020 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) in uclamp_rq_inc() argument
1028 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_inc()
1031 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_inc()
1032 rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; in uclamp_rq_inc()
1035 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) in uclamp_rq_dec() argument
1043 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_dec()
1050 struct rq *rq; in uclamp_update_active() local
1060 rq = task_rq_lock(p, &rf); in uclamp_update_active()
1069 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_update_active()
1070 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_update_active()
1073 task_rq_unlock(rq, p, &rf); in uclamp_update_active()
1276 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } in uclamp_rq_inc() argument
1277 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } in uclamp_rq_dec() argument
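The uclamp hooks are driven entirely from enqueue/dequeue: each runnable task contributes to a per-runqueue bucket for every clamp index, so the effective clamp follows the currently runnable set, and UCLAMP_FLAG_IDLE is cleared as soon as one task becomes runnable. A sketch of the increment side (the uclamp_enabled early return is not among the listed lines; the decrement side mirrors this with uclamp_rq_dec_id()):

static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
{
        enum uclamp_id clamp_id;

        if (unlikely(!p->sched_class->uclamp_enabled))
                return;

        for_each_clamp_id(clamp_id)
                uclamp_rq_inc_id(rq, p, clamp_id);

        /* Reset clamp idle holding when there is one RUNNABLE task */
        if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
                rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
}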
1289 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
1292 update_rq_clock(rq); in enqueue_task()
1295 sched_info_queued(rq, p); in enqueue_task()
1299 uclamp_rq_inc(rq, p); in enqueue_task()
1300 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
1303 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
1306 update_rq_clock(rq); in dequeue_task()
1309 sched_info_dequeued(rq, p); in dequeue_task()
1313 uclamp_rq_dec(rq, p); in dequeue_task()
1314 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
1317 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
1320 rq->nr_uninterruptible--; in activate_task()
1322 enqueue_task(rq, p, flags); in activate_task()
1327 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
1332 rq->nr_uninterruptible++; in deactivate_task()
1334 dequeue_task(rq, p, flags); in deactivate_task()
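activate_task()/deactivate_task() are thin wrappers that keep nr_uninterruptible and p->on_rq in sync around enqueue_task()/dequeue_task(), which in turn update the clock, sched_info and uclamp accounting before calling into the scheduling class. A sketch of the pair, reconstructed around the listed lines:

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible--;

        enqueue_task(rq, p, flags);

        p->on_rq = TASK_ON_RQ_QUEUED;
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
        p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

        if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;

        dequeue_task(rq, p, flags);
}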
1403 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
1409 prev_class->switched_from(rq, p); in check_class_changed()
1411 p->sched_class->switched_to(rq, p); in check_class_changed()
1413 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
1416 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
1420 if (p->sched_class == rq->curr->sched_class) { in check_preempt_curr()
1421 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
1424 if (class == rq->curr->sched_class) in check_preempt_curr()
1427 resched_curr(rq); in check_preempt_curr()
1437 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in check_preempt_curr()
1438 rq_clock_skip_update(rq); in check_preempt_curr()
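check_preempt_curr() decides preemption in two steps: a wakee of the same class defers to the class's own check_preempt_curr hook, while across classes the fixed class ordering wins (a task of a higher class always preempts the current task of a lower one). Sketch filled in around the listed lines:

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
        const struct sched_class *class;

        if (p->sched_class == rq->curr->sched_class) {
                rq->curr->sched_class->check_preempt_curr(rq, p, flags);
        } else {
                for_each_class(class) {
                        if (class == rq->curr->sched_class)
                                break;                  /* curr's class ranks higher */
                        if (class == p->sched_class) {
                                resched_curr(rq);       /* p's class ranks higher */
                                break;
                        }
                }
        }

        /* We are going to reschedule anyway; skip the back-to-back clock update. */
        if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
                rq_clock_skip_update(rq);
}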
1488 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, in move_queued_task() argument
1491 lockdep_assert_held(&rq->lock); in move_queued_task()
1494 dequeue_task(rq, p, DEQUEUE_NOCLOCK); in move_queued_task()
1496 rq_unlock(rq, rf); in move_queued_task()
1498 rq = cpu_rq(new_cpu); in move_queued_task()
1500 rq_lock(rq, rf); in move_queued_task()
1502 enqueue_task(rq, p, 0); in move_queued_task()
1504 check_preempt_curr(rq, p, 0); in move_queued_task()
1506 return rq; in move_queued_task()
1523 static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, in __migrate_task() argument
1528 return rq; in __migrate_task()
1530 update_rq_clock(rq); in __migrate_task()
1531 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
1533 return rq; in __migrate_task()
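move_queued_task() is the core of every migration of a runnable task: dequeue under the old runqueue lock, drop it, retarget the task with set_task_cpu(), then enqueue under the destination lock and check for preemption there. Sketch around the listed lines:

static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
                                   struct task_struct *p, int new_cpu)
{
        lockdep_assert_held(&rq->lock);

        WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
        dequeue_task(rq, p, DEQUEUE_NOCLOCK);
        set_task_cpu(p, new_cpu);
        rq_unlock(rq, rf);

        rq = cpu_rq(new_cpu);

        rq_lock(rq, rf);
        BUG_ON(task_cpu(p) != new_cpu);
        enqueue_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_QUEUED;
        check_preempt_curr(rq, p, 0);

        return rq;
}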
1545 struct rq *rq = this_rq(); in migration_cpu_stop() local
1561 rq_lock(rq, &rf); in migration_cpu_stop()
1567 if (task_rq(p) == rq) { in migration_cpu_stop()
1569 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
1573 rq_unlock(rq, &rf); in migration_cpu_stop()
1592 struct rq *rq = task_rq(p); in do_set_cpus_allowed() local
1598 running = task_current(rq, p); in do_set_cpus_allowed()
1605 lockdep_assert_held(&rq->lock); in do_set_cpus_allowed()
1606 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in do_set_cpus_allowed()
1609 put_prev_task(rq, p); in do_set_cpus_allowed()
1614 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in do_set_cpus_allowed()
1616 set_next_task(rq, p); in do_set_cpus_allowed()
1634 struct rq *rq; in __set_cpus_allowed_ptr() local
1637 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
1638 update_rq_clock(rq); in __set_cpus_allowed_ptr()
1681 if (task_running(rq, p) || p->state == TASK_WAKING) { in __set_cpus_allowed_ptr()
1684 task_rq_unlock(rq, p, &rf); in __set_cpus_allowed_ptr()
1685 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); in __set_cpus_allowed_ptr()
1692 rq = move_queued_task(rq, &rf, p, dest_cpu); in __set_cpus_allowed_ptr()
1695 task_rq_unlock(rq, p, &rf); in __set_cpus_allowed_ptr()
1762 struct rq *src_rq, *dst_rq; in __migrate_swap_task()
1797 struct rq *src_rq, *dst_rq; in migrate_swap_stop()
1896 struct rq *rq; in wait_task_inactive() local
1905 rq = task_rq(p); in wait_task_inactive()
1918 while (task_running(rq, p)) { in wait_task_inactive()
1929 rq = task_rq_lock(p, &rf); in wait_task_inactive()
1931 running = task_running(rq, p); in wait_task_inactive()
1936 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
2177 struct rq *rq; in ttwu_stat() local
2182 rq = this_rq(); in ttwu_stat()
2185 if (cpu == rq->cpu) { in ttwu_stat()
2186 __schedstat_inc(rq->ttwu_local); in ttwu_stat()
2193 for_each_domain(rq->cpu, sd) { in ttwu_stat()
2206 __schedstat_inc(rq->ttwu_count); in ttwu_stat()
2216 static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_wakeup() argument
2219 check_preempt_curr(rq, p, wake_flags); in ttwu_do_wakeup()
2229 rq_unpin_lock(rq, rf); in ttwu_do_wakeup()
2230 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
2231 rq_repin_lock(rq, rf); in ttwu_do_wakeup()
2234 if (rq->idle_stamp) { in ttwu_do_wakeup()
2235 u64 delta = rq_clock(rq) - rq->idle_stamp; in ttwu_do_wakeup()
2236 u64 max = 2*rq->max_idle_balance_cost; in ttwu_do_wakeup()
2238 update_avg(&rq->avg_idle, delta); in ttwu_do_wakeup()
2240 if (rq->avg_idle > max) in ttwu_do_wakeup()
2241 rq->avg_idle = max; in ttwu_do_wakeup()
2243 rq->idle_stamp = 0; in ttwu_do_wakeup()
2249 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_activate() argument
2254 lockdep_assert_held(&rq->lock); in ttwu_do_activate()
2258 rq->nr_uninterruptible--; in ttwu_do_activate()
2264 activate_task(rq, p, en_flags); in ttwu_do_activate()
2265 ttwu_do_wakeup(rq, p, wake_flags, rf); in ttwu_do_activate()
2277 struct rq *rq; in ttwu_remote() local
2280 rq = __task_rq_lock(p, &rf); in ttwu_remote()
2283 update_rq_clock(rq); in ttwu_remote()
2284 ttwu_do_wakeup(rq, p, wake_flags, &rf); in ttwu_remote()
2287 __task_rq_unlock(rq, &rf); in ttwu_remote()
2295 struct rq *rq = this_rq(); in sched_ttwu_pending() local
2296 struct llist_node *llist = llist_del_all(&rq->wake_list); in sched_ttwu_pending()
2303 rq_lock_irqsave(rq, &rf); in sched_ttwu_pending()
2304 update_rq_clock(rq); in sched_ttwu_pending()
2307 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
2309 rq_unlock_irqrestore(rq, &rf); in sched_ttwu_pending()
2352 struct rq *rq = cpu_rq(cpu); in ttwu_queue_remote() local
2357 if (!set_nr_if_polling(rq->idle)) in ttwu_queue_remote()
2366 struct rq *rq = cpu_rq(cpu); in wake_up_if_idle() local
2371 if (!is_idle_task(rcu_dereference(rq->curr))) in wake_up_if_idle()
2374 if (set_nr_if_polling(rq->idle)) { in wake_up_if_idle()
2377 rq_lock_irqsave(rq, &rf); in wake_up_if_idle()
2378 if (is_idle_task(rq->curr)) in wake_up_if_idle()
2381 rq_unlock_irqrestore(rq, &rf); in wake_up_if_idle()
2396 struct rq *rq = cpu_rq(cpu); in ttwu_queue() local
2407 rq_lock(rq, &rf); in ttwu_queue()
2408 update_rq_clock(rq); in ttwu_queue()
2409 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
2410 rq_unlock(rq, &rf); in ttwu_queue()
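ttwu_queue() is where a wakeup either locks the target runqueue directly or defers the work: when waker and wakee CPUs do not share a cache, the task is pushed onto the remote rq->wake_list and an IPI lets sched_ttwu_pending() activate it locally. A sketch of that fan-out; the sched_feat(TTWU_QUEUE) gate is recalled from the upstream code of this era rather than from the lines above:

static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
{
        struct rq *rq = cpu_rq(cpu);
        struct rq_flags rf;

#if defined(CONFIG_SMP)
        if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
                sched_clock_cpu(cpu);           /* Sync clocks across CPUs */
                ttwu_queue_remote(p, cpu, wake_flags);
                return;
        }
#endif

        rq_lock(rq, &rf);
        update_rq_clock(rq);
        ttwu_do_activate(rq, p, wake_flags, &rf);
        rq_unlock(rq, &rf);
}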
2946 struct rq *rq; in wake_up_new_task() local
2962 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
2963 update_rq_clock(rq); in wake_up_new_task()
2966 activate_task(rq, p, ENQUEUE_NOCLOCK); in wake_up_new_task()
2968 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
2975 rq_unpin_lock(rq, &rf); in wake_up_new_task()
2976 p->sched_class->task_woken(rq, p); in wake_up_new_task()
2977 rq_repin_lock(rq, &rf); in wake_up_new_task()
2980 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
3099 prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) in prepare_lock_switch() argument
3107 rq_unpin_lock(rq, rf); in prepare_lock_switch()
3108 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); in prepare_lock_switch()
3111 rq->lock.owner = next; in prepare_lock_switch()
3115 static inline void finish_lock_switch(struct rq *rq) in finish_lock_switch() argument
3122 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); in finish_lock_switch()
3123 raw_spin_unlock_irq(&rq->lock); in finish_lock_switch()
3152 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
3156 sched_info_switch(rq, prev, next); in prepare_task_switch()
3183 static struct rq *finish_task_switch(struct task_struct *prev) in finish_task_switch()
3184 __releases(rq->lock) in finish_task_switch()
3186 struct rq *rq = this_rq(); in finish_task_switch() local
3187 struct mm_struct *mm = rq->prev_mm; in finish_task_switch()
3206 rq->prev_mm = NULL; in finish_task_switch()
3223 finish_lock_switch(rq); in finish_task_switch()
3261 return rq; in finish_task_switch()
3267 static void __balance_callback(struct rq *rq) in __balance_callback() argument
3270 void (*func)(struct rq *rq); in __balance_callback()
3273 raw_spin_lock_irqsave(&rq->lock, flags); in __balance_callback()
3274 head = rq->balance_callback; in __balance_callback()
3275 rq->balance_callback = NULL; in __balance_callback()
3277 func = (void (*)(struct rq *))head->func; in __balance_callback()
3282 func(rq); in __balance_callback()
3284 raw_spin_unlock_irqrestore(&rq->lock, flags); in __balance_callback()
3287 static inline void balance_callback(struct rq *rq) in balance_callback() argument
3289 if (unlikely(rq->balance_callback)) in balance_callback()
3290 __balance_callback(rq); in balance_callback()
3295 static inline void balance_callback(struct rq *rq) in balance_callback() argument
3306 __releases(rq->lock) in schedule_tail()
3308 struct rq *rq; in schedule_tail() local
3319 rq = finish_task_switch(prev); in schedule_tail()
3320 balance_callback(rq); in schedule_tail()
3332 static __always_inline struct rq *
3333 context_switch(struct rq *rq, struct task_struct *prev, in context_switch() argument
3336 prepare_task_switch(rq, prev, next); in context_switch()
3361 membarrier_switch_mm(rq, prev->active_mm, next->mm); in context_switch()
3374 rq->prev_mm = prev->active_mm; in context_switch()
3379 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in context_switch()
3381 prepare_lock_switch(rq, next, rf); in context_switch()
3549 struct rq *rq; in task_sched_runtime() local
3568 rq = task_rq_lock(p, &rf); in task_sched_runtime()
3574 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
3576 update_rq_clock(rq); in task_sched_runtime()
3577 p->sched_class->update_curr(rq); in task_sched_runtime()
3580 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
3592 struct rq *rq = cpu_rq(cpu); in scheduler_tick() local
3593 struct task_struct *curr = rq->curr; in scheduler_tick()
3598 rq_lock(rq, &rf); in scheduler_tick()
3600 update_rq_clock(rq); in scheduler_tick()
3601 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
3602 calc_global_load_tick(rq); in scheduler_tick()
3603 psi_task_tick(rq); in scheduler_tick()
3605 rq_unlock(rq, &rf); in scheduler_tick()
3610 rq->idle_balance = idle_cpu(cpu); in scheduler_tick()
3611 trigger_load_balance(rq); in scheduler_tick()
3657 struct rq *rq = cpu_rq(cpu); in sched_tick_remote() local
3673 rq_lock_irq(rq, &rf); in sched_tick_remote()
3674 curr = rq->curr; in sched_tick_remote()
3678 update_rq_clock(rq); in sched_tick_remote()
3679 delta = rq_clock_task(rq) - curr->se.exec_start; in sched_tick_remote()
3686 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
3689 rq_unlock_irq(rq, &rf); in sched_tick_remote()
3905 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in pick_next_task() argument
3918 rq->nr_running == rq->cfs.h_nr_running)) { in pick_next_task()
3920 p = fair_sched_class.pick_next_task(rq, prev, rf); in pick_next_task()
3926 p = idle_sched_class.pick_next_task(rq, prev, rf); in pick_next_task()
3942 if (class->balance(rq, prev, rf)) in pick_next_task()
3947 put_prev_task(rq, prev); in pick_next_task()
3950 p = class->pick_next_task(rq, NULL, NULL); in pick_next_task()
4003 struct rq *rq; in __schedule() local
4007 rq = cpu_rq(cpu); in __schedule()
4008 prev = rq->curr; in __schedule()
4013 hrtick_clear(rq); in __schedule()
4026 rq_lock(rq, &rf); in __schedule()
4030 rq->clock_update_flags <<= 1; in __schedule()
4031 update_rq_clock(rq); in __schedule()
4038 deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); in __schedule()
4041 atomic_inc(&rq->nr_iowait); in __schedule()
4048 next = pick_next_task(rq, prev, &rf); in __schedule()
4053 rq->nr_switches++; in __schedule()
4058 RCU_INIT_POINTER(rq->curr, next); in __schedule()
4078 rq = context_switch(rq, prev, next, &rf); in __schedule()
4080 rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP); in __schedule()
4081 rq_unlock_irq(rq, &rf); in __schedule()
4084 balance_callback(rq); in __schedule()
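__schedule() ties the pieces above together: take the runqueue lock, update the clock, deactivate a sleeping prev, pick the next task and context-switch to it, then run any queued balance callbacks. A heavily condensed sketch; error paths, tracing, signal handling and the I/O-wait accounting are omitted:

static void __sched notrace __schedule(bool preempt)
{
        struct task_struct *prev, *next;
        struct rq_flags rf;
        struct rq *rq;
        int cpu;

        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        prev = rq->curr;

        local_irq_disable();
        rq_lock(rq, &rf);
        update_rq_clock(rq);

        if (!preempt && prev->state)            /* voluntary switch of a sleeper */
                deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);

        next = pick_next_task(rq, prev, &rf);
        clear_tsk_need_resched(prev);

        if (likely(prev != next)) {
                rq->nr_switches++;
                RCU_INIT_POINTER(rq->curr, next);
                rq = context_switch(rq, prev, next, &rf);       /* unlocks rq */
        } else {
                rq_unlock_irq(rq, &rf);
        }

        balance_callback(rq);
}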
4377 struct rq *rq; in rt_mutex_setprio() local
4388 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
4389 update_rq_clock(rq); in rt_mutex_setprio()
4420 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
4421 WARN_ON(p != rq->curr); in rt_mutex_setprio()
4434 running = task_current(rq, p); in rt_mutex_setprio()
4436 dequeue_task(rq, p, queue_flag); in rt_mutex_setprio()
4438 put_prev_task(rq, p); in rt_mutex_setprio()
4474 enqueue_task(rq, p, queue_flag); in rt_mutex_setprio()
4476 set_next_task(rq, p); in rt_mutex_setprio()
4478 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
4482 __task_rq_unlock(rq, &rf); in rt_mutex_setprio()
4484 balance_callback(rq); in rt_mutex_setprio()
4499 struct rq *rq; in set_user_nice() local
4507 rq = task_rq_lock(p, &rf); in set_user_nice()
4508 update_rq_clock(rq); in set_user_nice()
4521 running = task_current(rq, p); in set_user_nice()
4523 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in set_user_nice()
4525 put_prev_task(rq, p); in set_user_nice()
4534 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in set_user_nice()
4539 if (delta < 0 || (delta > 0 && task_running(rq, p))) in set_user_nice()
4540 resched_curr(rq); in set_user_nice()
4543 set_next_task(rq, p); in set_user_nice()
4545 task_rq_unlock(rq, p, &rf); in set_user_nice()
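rt_mutex_setprio(), set_user_nice(), __sched_setscheduler(), sched_setnuma() and sched_move_task() above all follow one pattern when changing a scheduling attribute: dequeue (and put_prev) the task if needed, apply the change, then enqueue (and set_next) it again so the class sees consistent state. set_user_nice() as a condensed example of the pattern; the early return for RT/deadline tasks is omitted:

void set_user_nice(struct task_struct *p, long nice)
{
        bool queued, running;
        int old_prio, delta;
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);
        update_rq_clock(rq);

        queued = task_on_rq_queued(p);
        running = task_current(rq, p);
        if (queued)
                dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
        if (running)
                put_prev_task(rq, p);

        p->static_prio = NICE_TO_PRIO(nice);    /* the actual attribute change */
        set_load_weight(p, true);
        old_prio = p->prio;
        p->prio = effective_prio(p);
        delta = p->prio - old_prio;

        if (queued) {
                enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
                /* Reschedule if priority improved, or worsened while running. */
                if (delta < 0 || (delta > 0 && task_running(rq, p)))
                        resched_curr(rq);
        }
        if (running)
                set_next_task(rq, p);

        task_rq_unlock(rq, p, &rf);
}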
4619 struct rq *rq = cpu_rq(cpu); in idle_cpu() local
4621 if (rq->curr != rq->idle) in idle_cpu()
4624 if (rq->nr_running) in idle_cpu()
4628 if (!llist_empty(&rq->wake_list)) in idle_cpu()
4706 static void __setscheduler(struct rq *rq, struct task_struct *p, in __setscheduler() argument
4762 struct rq *rq; in __sched_setscheduler() local
4870 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
4871 update_rq_clock(rq); in __sched_setscheduler()
4876 if (p == rq->stop) { in __sched_setscheduler()
4917 cpumask_t *span = rq->rd->span; in __sched_setscheduler()
4925 rq->rd->dl_bw.bw == 0) { in __sched_setscheduler()
4936 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
4969 running = task_current(rq, p); in __sched_setscheduler()
4971 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
4973 put_prev_task(rq, p); in __sched_setscheduler()
4977 __setscheduler(rq, p, attr, pi); in __sched_setscheduler()
4988 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
4991 set_next_task(rq, p); in __sched_setscheduler()
4993 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
4997 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
5005 balance_callback(rq); in __sched_setscheduler()
5011 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
5584 struct rq *rq; in do_sched_yield() local
5586 rq = this_rq_lock_irq(&rf); in do_sched_yield()
5588 schedstat_inc(rq->yld_count); in do_sched_yield()
5589 current->sched_class->yield_task(rq); in do_sched_yield()
5596 rq_unlock(rq, &rf); in do_sched_yield()
5696 struct rq *rq, *p_rq; in yield_to() local
5701 rq = this_rq(); in yield_to()
5709 if (rq->nr_running == 1 && p_rq->nr_running == 1) { in yield_to()
5714 double_rq_lock(rq, p_rq); in yield_to()
5716 double_rq_unlock(rq, p_rq); in yield_to()
5729 yielded = curr->sched_class->yield_to_task(rq, p, preempt); in yield_to()
5731 schedstat_inc(rq->yld_count); in yield_to()
5736 if (preempt && rq != p_rq) in yield_to()
5741 double_rq_unlock(rq, p_rq); in yield_to()
5852 struct rq *rq; in sched_rr_get_interval() local
5868 rq = task_rq_lock(p, &rf); in sched_rr_get_interval()
5871 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
5872 task_rq_unlock(rq, p, &rf); in sched_rr_get_interval()
6019 struct rq *rq = cpu_rq(cpu); in init_idle() local
6025 raw_spin_lock(&rq->lock); in init_idle()
6056 rq->idle = idle; in init_idle()
6057 rcu_assign_pointer(rq->curr, idle); in init_idle()
6062 raw_spin_unlock(&rq->lock); in init_idle()
6150 struct rq *rq; in sched_setnuma() local
6152 rq = task_rq_lock(p, &rf); in sched_setnuma()
6154 running = task_current(rq, p); in sched_setnuma()
6157 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
6159 put_prev_task(rq, p); in sched_setnuma()
6164 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in sched_setnuma()
6166 set_next_task(rq, p); in sched_setnuma()
6167 task_rq_unlock(rq, p, &rf); in sched_setnuma()
6199 static void calc_load_migrate(struct rq *rq) in calc_load_migrate() argument
6201 long delta = calc_load_fold_active(rq, 1); in calc_load_migrate()
6206 static struct task_struct *__pick_migrate_task(struct rq *rq) in __pick_migrate_task() argument
6212 next = class->pick_next_task(rq, NULL, NULL); in __pick_migrate_task()
6214 next->sched_class->put_prev_task(rq, next); in __pick_migrate_task()
6231 static void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf) in migrate_tasks()
6233 struct rq *rq = dead_rq; in migrate_tasks() local
6234 struct task_struct *next, *stop = rq->stop; in migrate_tasks()
6247 rq->stop = NULL; in migrate_tasks()
6254 update_rq_clock(rq); in migrate_tasks()
6261 if (rq->nr_running == 1) in migrate_tasks()
6264 next = __pick_migrate_task(rq); in migrate_tasks()
6275 rq_unlock(rq, rf); in migrate_tasks()
6277 rq_relock(rq, rf); in migrate_tasks()
6284 if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { in migrate_tasks()
6291 rq = __migrate_task(rq, rf, next, dest_cpu); in migrate_tasks()
6292 if (rq != dead_rq) { in migrate_tasks()
6293 rq_unlock(rq, rf); in migrate_tasks()
6294 rq = dead_rq; in migrate_tasks()
6296 rq_relock(rq, rf); in migrate_tasks()
6301 rq->stop = stop; in migrate_tasks()
6305 void set_rq_online(struct rq *rq) in set_rq_online() argument
6307 if (!rq->online) { in set_rq_online()
6310 cpumask_set_cpu(rq->cpu, rq->rd->online); in set_rq_online()
6311 rq->online = 1; in set_rq_online()
6315 class->rq_online(rq); in set_rq_online()
6320 void set_rq_offline(struct rq *rq) in set_rq_offline() argument
6322 if (rq->online) { in set_rq_offline()
6327 class->rq_offline(rq); in set_rq_offline()
6330 cpumask_clear_cpu(rq->cpu, rq->rd->online); in set_rq_offline()
6331 rq->online = 0; in set_rq_offline()
6385 struct rq *rq = cpu_rq(cpu); in sched_cpu_activate() local
6411 rq_lock_irqsave(rq, &rf); in sched_cpu_activate()
6412 if (rq->rd) { in sched_cpu_activate()
6413 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_activate()
6414 set_rq_online(rq); in sched_cpu_activate()
6416 rq_unlock_irqrestore(rq, &rf); in sched_cpu_activate()
6457 struct rq *rq = cpu_rq(cpu); in sched_rq_cpu_starting() local
6459 rq->calc_load_update = calc_load_update; in sched_rq_cpu_starting()
6473 struct rq *rq = cpu_rq(cpu); in sched_cpu_dying() local
6480 rq_lock_irqsave(rq, &rf); in sched_cpu_dying()
6481 if (rq->rd) { in sched_cpu_dying()
6482 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); in sched_cpu_dying()
6483 set_rq_offline(rq); in sched_cpu_dying()
6485 migrate_tasks(rq, &rf); in sched_cpu_dying()
6486 BUG_ON(rq->nr_running != 1); in sched_cpu_dying()
6487 rq_unlock_irqrestore(rq, &rf); in sched_cpu_dying()
6489 calc_load_migrate(rq); in sched_cpu_dying()
6491 nohz_balance_exit_idle(rq); in sched_cpu_dying()
6492 hrtick_clear(rq); in sched_cpu_dying()
6621 struct rq *rq; in sched_init() local
6623 rq = cpu_rq(i); in sched_init()
6624 raw_spin_lock_init(&rq->lock); in sched_init()
6625 rq->nr_running = 0; in sched_init()
6626 rq->calc_load_active = 0; in sched_init()
6627 rq->calc_load_update = jiffies + LOAD_FREQ; in sched_init()
6628 init_cfs_rq(&rq->cfs); in sched_init()
6629 init_rt_rq(&rq->rt); in sched_init()
6630 init_dl_rq(&rq->dl); in sched_init()
6633 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); in sched_init()
6634 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in sched_init()
6655 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); in sched_init()
6658 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; in sched_init()
6660 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); in sched_init()
6663 rq->sd = NULL; in sched_init()
6664 rq->rd = NULL; in sched_init()
6665 rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; in sched_init()
6666 rq->balance_callback = NULL; in sched_init()
6667 rq->active_balance = 0; in sched_init()
6668 rq->next_balance = jiffies; in sched_init()
6669 rq->push_cpu = 0; in sched_init()
6670 rq->cpu = i; in sched_init()
6671 rq->online = 0; in sched_init()
6672 rq->idle_stamp = 0; in sched_init()
6673 rq->avg_idle = 2*sysctl_sched_migration_cost; in sched_init()
6674 rq->max_idle_balance_cost = sysctl_sched_migration_cost; in sched_init()
6676 INIT_LIST_HEAD(&rq->cfs_tasks); in sched_init()
6678 rq_attach_root(rq, &def_root_domain); in sched_init()
6680 rq->last_load_update_tick = jiffies; in sched_init()
6681 rq->last_blocked_load_update_tick = jiffies; in sched_init()
6682 atomic_set(&rq->nohz_flags, 0); in sched_init()
6685 hrtick_rq_init(rq); in sched_init()
6686 atomic_set(&rq->nr_iowait, 0); in sched_init()
7039 struct rq *rq; in sched_move_task() local
7041 rq = task_rq_lock(tsk, &rf); in sched_move_task()
7042 update_rq_clock(rq); in sched_move_task()
7044 running = task_current(rq, tsk); in sched_move_task()
7048 dequeue_task(rq, tsk, queue_flags); in sched_move_task()
7050 put_prev_task(rq, tsk); in sched_move_task()
7055 enqueue_task(rq, tsk, queue_flags); in sched_move_task()
7057 set_next_task(rq, tsk); in sched_move_task()
7059 task_rq_unlock(rq, tsk, &rf); in sched_move_task()
7120 struct rq *rq; in cpu_cgroup_fork() local
7122 rq = task_rq_lock(task, &rf); in cpu_cgroup_fork()
7124 update_rq_clock(rq); in cpu_cgroup_fork()
7127 task_rq_unlock(rq, task, &rf); in cpu_cgroup_fork()
7423 struct rq *rq = cfs_rq->rq; in tg_set_cfs_bandwidth() local
7426 rq_lock_irq(rq, &rf); in tg_set_cfs_bandwidth()
7432 rq_unlock_irq(rq, &rf); in tg_set_cfs_bandwidth()