Lines matching refs:cfs_rq (identifier cross-reference over kernel/sched/fair.c: each entry gives the source line number, the matching line, and the enclosing function, with "argument"/"local" noting whether cfs_rq is a parameter or a local variable on that line)

253 static inline struct rq *rq_of(struct cfs_rq *cfs_rq)  in rq_of()  argument
255 return cfs_rq->rq; in rq_of()
268 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq()
270 return p->se.cfs_rq; in task_cfs_rq()
274 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
276 return se->cfs_rq; in cfs_rq_of()
280 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
285 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
287 if (!cfs_rq->on_list) { in list_add_leaf_cfs_rq()
288 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq()
299 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
300 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq()
307 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
308 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq()
315 } else if (!cfs_rq->tg->parent) { in list_add_leaf_cfs_rq()
320 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
334 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
340 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
343 cfs_rq->on_list = 1; in list_add_leaf_cfs_rq()
347 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
349 if (cfs_rq->on_list) { in list_del_leaf_cfs_rq()
350 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); in list_del_leaf_cfs_rq()
351 cfs_rq->on_list = 0; in list_del_leaf_cfs_rq()
356 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
357 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
361 static inline struct cfs_rq *
364 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
365 return se->cfs_rq; in is_same_group()
414 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
416 return container_of(cfs_rq, struct rq, cfs); in rq_of()
423 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq()
428 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
437 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
442 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
446 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
450 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
451 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
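Lines 253-365 above are the CONFIG_FAIR_GROUP_SCHED accessors; lines 414-451 are their flat (non-group) counterparts. The contrast is the point: with group scheduling every cfs_rq carries an explicit back-pointer to its rq, while the flat build has exactly one cfs_rq embedded in struct rq and recovers the container arithmetically. A minimal sketch of the two strategies, using toy stand-in types rather than the kernel's definitions:

#include <stddef.h>

struct rq;                              /* forward declaration */
struct cfs_rq { struct rq *rq; };       /* group build keeps a back-pointer */
struct rq { struct cfs_rq cfs; };       /* flat build embeds the one cfs_rq */

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* group-scheduling variant: follow the stored pointer (cf. line 255) */
static struct rq *rq_of_group(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* flat variant: subtract the member offset (cf. line 416) */
static struct rq *rq_of_flat(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}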
466 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
496 static void update_min_vruntime(struct cfs_rq *cfs_rq) in update_min_vruntime() argument
498 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime()
499 struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline); in update_min_vruntime()
501 u64 vruntime = cfs_rq->min_vruntime; in update_min_vruntime()
521 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); in update_min_vruntime()
524 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; in update_min_vruntime()
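update_min_vruntime() (lines 496-524) ratchets cfs_rq->min_vruntime forward to track the smallest vruntime among curr and the leftmost queued entity; the copy on line 524 exists so 32-bit readers can detect torn loads (see lines 6380-6392 below). The clamp on line 521 relies on signed-difference comparison, so vruntime values may wrap. A sketch of the helper it uses:

#include <stdint.h>

/* max_vruntime(): wrap-safe "pick the later vruntime". Interpreting
 * the subtraction as signed keeps the ordering correct across u64
 * wraparound; min_vruntime is only ever ratcheted forward with it. */
static uint64_t max_vruntime(uint64_t max_v, uint64_t v)
{
        int64_t delta = (int64_t)(v - max_v);

        if (delta > 0)
                max_v = v;
        return max_v;
}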
531 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
533 struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node; in __enqueue_entity()
558 &cfs_rq->tasks_timeline, leftmost); in __enqueue_entity()
561 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
563 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
566 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) in __pick_first_entity() argument
568 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); in __pick_first_entity()
587 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) in __pick_last_entity() argument
589 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); in __pick_last_entity()
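Lines 531-589 are the runqueue timeline: a cached red-black tree keyed on vruntime, where rb_first_cached() makes picking the leftmost (smallest-vruntime, most-entitled) entity O(1). The insertion walk in __enqueue_entity() orders nodes with the same wrap-safe signed arithmetic as above; a sketch of the predicate:

#include <stdint.h>

/* entity_before() sketch: true if a should run before b. The signed
 * difference keeps the ordering stable across vruntime wraparound. */
static int entity_before(uint64_t a_vruntime, uint64_t b_vruntime)
{
        return (int64_t)(a_vruntime - b_vruntime) < 0;
}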
658 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
660 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); in sched_slice()
666 cfs_rq = cfs_rq_of(se); in sched_slice()
667 load = &cfs_rq->load; in sched_slice()
670 lw = cfs_rq->load; in sched_slice()
685 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
687 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
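sched_slice() (lines 658-687) hands each entity a wall-clock share of the scheduling period proportional to its weight within its cfs_rq, and sched_vslice() converts that into virtual time, where the weights cancel and every entity's vslice comes out roughly equal. A simplified sketch of the arithmetic (the kernel actually uses __calc_delta() with shift-based fixed point rather than a plain divide):

#include <stdint.h>

#define NICE_0_LOAD 1024UL

/* wall-clock slice: period scaled by this entity's share of queue weight */
static uint64_t sched_slice_sketch(uint64_t period, unsigned long se_weight,
                                   unsigned long queue_weight)
{
        return period * se_weight / queue_weight;
}

/* virtual slice: re-scale the wall-clock slice by NICE_0_LOAD/se_weight,
 * so the entity's own weight cancels out of the result */
static uint64_t sched_vslice_sketch(uint64_t period, unsigned long se_weight,
                                    unsigned long queue_weight)
{
        return sched_slice_sketch(period, se_weight, queue_weight)
                        * NICE_0_LOAD / se_weight;
}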
718 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
749 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg() local
751 long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq))); in post_init_entity_util_avg()
752 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
755 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
756 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
757 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
779 se->avg.last_update_time = cfs_rq_clock_task(cfs_rq); in post_init_entity_util_avg()
794 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) in update_tg_load_avg() argument
802 static void update_curr(struct cfs_rq *cfs_rq) in update_curr() argument
804 struct sched_entity *curr = cfs_rq->curr; in update_curr()
805 u64 now = rq_clock_task(rq_of(cfs_rq)); in update_curr()
821 schedstat_add(cfs_rq->exec_clock, delta_exec); in update_curr()
824 update_min_vruntime(cfs_rq); in update_curr()
834 account_cfs_rq_runtime(cfs_rq, delta_exec); in update_curr()
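update_curr() (lines 802-834) is the heart of CFS accounting: it charges elapsed task clock to the running entity, advances its vruntime at a rate inversely proportional to its weight, refreshes min_vruntime, and charges CFS bandwidth. A compilable skeleton with toy types:

#include <stdint.h>

#define NICE_0_LOAD 1024UL

struct toy_entity {
        uint64_t exec_start;            /* when this entity last started */
        uint64_t sum_exec_runtime;      /* total wall-clock CPU time */
        uint64_t vruntime;              /* weighted virtual runtime */
        unsigned long weight;
};

/* calc_delta_fair() sketch: heavier entities accrue vruntime more slowly */
static uint64_t calc_delta_fair(uint64_t delta, unsigned long weight)
{
        return delta * NICE_0_LOAD / weight;
}

static void update_curr_sketch(struct toy_entity *curr, uint64_t now)
{
        uint64_t delta_exec = now - curr->exec_start;

        curr->exec_start = now;
        curr->sum_exec_runtime += delta_exec;
        curr->vruntime += calc_delta_fair(delta_exec, curr->weight);
        /* the real update_curr() then calls update_min_vruntime() and
         * account_cfs_rq_runtime() (lines 824 and 834) */
}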
843 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
850 wait_start = rq_clock(rq_of(cfs_rq)); in update_stats_wait_start()
861 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
869 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); in update_stats_wait_end()
893 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper() argument
908 u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start; in update_stats_enqueue_sleeper()
925 u64 delta = rq_clock(rq_of(cfs_rq)) - block_start; in update_stats_enqueue_sleeper()
964 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue() argument
973 if (se != cfs_rq->curr) in update_stats_enqueue()
974 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
977 update_stats_enqueue_sleeper(cfs_rq, se); in update_stats_enqueue()
981 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue() argument
991 if (se != cfs_rq->curr) in update_stats_dequeue()
992 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
999 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue()
1002 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue()
1010 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1015 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2691 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
2693 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2695 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_enqueue()
2698 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue()
2704 cfs_rq->nr_running++; in account_entity_enqueue()
2708 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
2710 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
2712 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_dequeue()
2715 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
2719 cfs_rq->nr_running--; in account_entity_dequeue()
2761 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_runnable_load_avg() argument
2763 cfs_rq->runnable_weight += se->runnable_weight; in enqueue_runnable_load_avg()
2765 cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg; in enqueue_runnable_load_avg()
2766 cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum; in enqueue_runnable_load_avg()
2770 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_runnable_load_avg() argument
2772 cfs_rq->runnable_weight -= se->runnable_weight; in dequeue_runnable_load_avg()
2774 sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg); in dequeue_runnable_load_avg()
2775 sub_positive(&cfs_rq->avg.runnable_load_sum, in dequeue_runnable_load_avg()
2780 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
2782 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
2783 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
2787 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
2789 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
2790 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
2794 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_runnable_load_avg() argument
2796 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_runnable_load_avg() argument
2798 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
2800 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
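The enqueue/dequeue pairs above (lines 2761-2790) add an entity's PELT contributions into the cfs_rq aggregates and later remove them. Removal goes through sub_positive() rather than plain subtraction, because concurrent decay can leave the per-entity value momentarily larger than the aggregate; the subtraction is therefore clamped at zero. A sketch of that helper (GNU-C style, as in the kernel, but not the kernel's exact macro):

/* sub_positive() sketch: subtract, but never let the aggregate
 * underflow past zero when the subtrahend has grown stale. */
#define sub_positive_sketch(ptr, val) do {              \
        __typeof__(*(ptr)) _v = (val);                  \
        __typeof__(*(ptr)) _old = *(ptr);               \
        *(ptr) = (_old > _v) ? _old - _v : 0;           \
} while (0)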
2803 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
2808 if (cfs_rq->curr == se) in reweight_entity()
2809 update_curr(cfs_rq); in reweight_entity()
2810 account_entity_dequeue(cfs_rq, se); in reweight_entity()
2811 dequeue_runnable_load_avg(cfs_rq, se); in reweight_entity()
2813 dequeue_load_avg(cfs_rq, se); in reweight_entity()
2828 enqueue_load_avg(cfs_rq, se); in reweight_entity()
2830 account_entity_enqueue(cfs_rq, se); in reweight_entity()
2831 enqueue_runnable_load_avg(cfs_rq, se); in reweight_entity()
2838 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task() local
2842 reweight_entity(cfs_rq, se, weight, weight); in reweight_task()
2921 static long calc_group_shares(struct cfs_rq *cfs_rq) in calc_group_shares() argument
2924 struct task_group *tg = cfs_rq->tg; in calc_group_shares()
2928 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
2933 tg_weight -= cfs_rq->tg_load_avg_contrib; in calc_group_shares()
2982 static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares) in calc_group_runnable() argument
2986 load_avg = max(cfs_rq->avg.load_avg, in calc_group_runnable()
2987 scale_load_down(cfs_rq->load.weight)); in calc_group_runnable()
2989 runnable = max(cfs_rq->avg.runnable_load_avg, in calc_group_runnable()
2990 scale_load_down(cfs_rq->runnable_weight)); in calc_group_runnable()
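calc_group_shares() (lines 2921-2933) decides how much of the task group's total shares this CPU's group entity should weigh: roughly tg->shares scaled by this cfs_rq's fraction of the group-wide load, clamped to a sane range. A heavily simplified sketch (the clamp bounds and the load estimate are illustrative; the real code mixes load.weight with load_avg and adjusts tg_weight by the local contribution):

#define MIN_SHARES_SKETCH 2L

static long calc_group_shares_sketch(long tg_shares, long local_load,
                                     long group_load)
{
        long shares;

        if (group_load <= 0)
                group_load = local_load ? local_load : 1;

        shares = tg_shares * local_load / group_load;

        if (shares < MIN_SHARES_SKETCH)
                shares = MIN_SHARES_SKETCH;     /* never weightless */
        if (shares > tg_shares)
                shares = tg_shares;             /* never exceed the group */
        return shares;
}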
3000 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3008 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3036 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) in cfs_rq_util_change() argument
3038 struct rq *rq = rq_of(cfs_rq); in cfs_rq_util_change()
3040 if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) { in cfs_rq_util_change()
3076 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) in update_tg_load_avg() argument
3078 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
3083 if (cfs_rq->tg == &root_task_group) in update_tg_load_avg()
3086 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { in update_tg_load_avg()
3087 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
3088 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
3098 struct cfs_rq *prev, struct cfs_rq *next) in set_task_rq_fair()
3211 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
3232 add_positive(&cfs_rq->avg.util_avg, delta); in update_tg_cfs_util()
3233 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX; in update_tg_cfs_util()
3237 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
3276 arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq))); in update_tg_cfs_runnable()
3287 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_runnable()
3288 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_runnable()
3299 add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg); in update_tg_cfs_runnable()
3300 add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum); in update_tg_cfs_runnable()
3304 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) in add_tg_cfs_propagate() argument
3306 cfs_rq->propagate = 1; in add_tg_cfs_propagate()
3307 cfs_rq->prop_runnable_sum += runnable_sum; in add_tg_cfs_propagate()
3313 struct cfs_rq *cfs_rq, *gcfs_rq; in propagate_entity_load_avg() local
3324 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
3326 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); in propagate_entity_load_avg()
3328 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3329 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3340 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
3366 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} in update_tg_load_avg() argument
3373 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} in add_tg_cfs_propagate() argument
3394 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) in update_cfs_rq_load_avg() argument
3397 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
3400 if (cfs_rq->removed.nr) { in update_cfs_rq_load_avg()
3404 raw_spin_lock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
3405 swap(cfs_rq->removed.util_avg, removed_util); in update_cfs_rq_load_avg()
3406 swap(cfs_rq->removed.load_avg, removed_load); in update_cfs_rq_load_avg()
3407 swap(cfs_rq->removed.runnable_sum, removed_runnable_sum); in update_cfs_rq_load_avg()
3408 cfs_rq->removed.nr = 0; in update_cfs_rq_load_avg()
3409 raw_spin_unlock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
3419 add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum); in update_cfs_rq_load_avg()
3424 decayed |= __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq); in update_cfs_rq_load_avg()
3428 cfs_rq->load_last_update_time_copy = sa->last_update_time; in update_cfs_rq_load_avg()
3432 cfs_rq_util_change(cfs_rq, 0); in update_cfs_rq_load_avg()
3446 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in attach_entity_load_avg() argument
3448 u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib; in attach_entity_load_avg()
3457 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
3458 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
3476 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
3477 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
3478 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
3480 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
3482 cfs_rq_util_change(cfs_rq, flags); in attach_entity_load_avg()
3493 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
3495 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
3496 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
3497 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
3499 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
3501 cfs_rq_util_change(cfs_rq, 0); in detach_entity_load_avg()
3512 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
3514 u64 now = cfs_rq_clock_task(cfs_rq); in update_load_avg()
3515 struct rq *rq = rq_of(cfs_rq); in update_load_avg()
3524 __update_load_avg_se(now, cpu, cfs_rq, se); in update_load_avg()
3526 decayed = update_cfs_rq_load_avg(now, cfs_rq); in update_load_avg()
3538 attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION); in update_load_avg()
3539 update_tg_load_avg(cfs_rq, 0); in update_load_avg()
3542 update_tg_load_avg(cfs_rq, 0); in update_load_avg()
3546 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) in cfs_rq_last_update_time() argument
3552 last_update_time_copy = cfs_rq->load_last_update_time_copy; in cfs_rq_last_update_time()
3554 last_update_time = cfs_rq->avg.last_update_time; in cfs_rq_last_update_time()
3560 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) in cfs_rq_last_update_time() argument
3562 return cfs_rq->avg.last_update_time; in cfs_rq_last_update_time()
3572 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg() local
3575 last_update_time = cfs_rq_last_update_time(cfs_rq); in sync_entity_load_avg()
3576 __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se); in sync_entity_load_avg()
3585 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg() local
3600 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
3601 ++cfs_rq->removed.nr; in remove_entity_load_avg()
3602 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
3603 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
3604 cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */ in remove_entity_load_avg()
3605 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
3608 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_runnable_load_avg() argument
3610 return cfs_rq->avg.runnable_load_avg; in cfs_rq_runnable_load_avg()
3613 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_load_avg() argument
3615 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
3637 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, in util_est_enqueue() argument
3646 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_enqueue()
3648 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_enqueue()
3665 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) in util_est_dequeue() argument
3674 ue.enqueued = cfs_rq->avg.util_est.enqueued; in util_est_dequeue()
3677 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); in util_est_dequeue()
3732 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
3734 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
3740 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {} in attach_entity_load_avg() argument
3742 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
3750 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_enqueue() argument
3753 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, in util_est_dequeue() argument
3758 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
3761 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
3767 schedstat_inc(cfs_rq->nr_spread_over); in check_spread()
3772 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
3774 u64 vruntime = cfs_rq->min_vruntime; in place_entity()
3783 vruntime += sched_vslice(cfs_rq, se); in place_entity()
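place_entity() (lines 3772-3783) seeds a vruntime for an entity entering the queue: a newly forked task starts one vslice behind min_vruntime so it cannot immediately starve the queue, while a waking sleeper gets a bounded credit below min_vruntime. A sketch of the two cases (the sleeper threshold here is a stand-in for the latency-derived value the kernel computes):

#include <stdint.h>

static uint64_t place_entity_sketch(uint64_t min_vruntime, uint64_t vslice,
                                    uint64_t sleeper_thresh, int initial,
                                    uint64_t se_vruntime)
{
        uint64_t vruntime = min_vruntime;

        if (initial)
                vruntime += vslice;             /* new task: debit one slice */
        else
                vruntime -= sleeper_thresh;     /* sleeper: bounded credit */

        /* never move an entity backwards relative to where it already was */
        return (int64_t)(se_vruntime - vruntime) > 0 ? se_vruntime : vruntime;
}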
3803 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3857 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
3860 bool curr = cfs_rq->curr == se; in enqueue_entity()
3867 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3869 update_curr(cfs_rq); in enqueue_entity()
3878 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3888 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
3890 enqueue_runnable_load_avg(cfs_rq, se); in enqueue_entity()
3891 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
3894 place_entity(cfs_rq, se, 0); in enqueue_entity()
3897 update_stats_enqueue(cfs_rq, se, flags); in enqueue_entity()
3898 check_spread(cfs_rq, se); in enqueue_entity()
3900 __enqueue_entity(cfs_rq, se); in enqueue_entity()
3903 if (cfs_rq->nr_running == 1) { in enqueue_entity()
3904 list_add_leaf_cfs_rq(cfs_rq); in enqueue_entity()
3905 check_enqueue_throttle(cfs_rq); in enqueue_entity()
3912 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last() local
3913 if (cfs_rq->last != se) in __clear_buddies_last()
3916 cfs_rq->last = NULL; in __clear_buddies_last()
3923 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next() local
3924 if (cfs_rq->next != se) in __clear_buddies_next()
3927 cfs_rq->next = NULL; in __clear_buddies_next()
3934 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip() local
3935 if (cfs_rq->skip != se) in __clear_buddies_skip()
3938 cfs_rq->skip = NULL; in __clear_buddies_skip()
3942 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
3944 if (cfs_rq->last == se) in clear_buddies()
3947 if (cfs_rq->next == se) in clear_buddies()
3950 if (cfs_rq->skip == se) in clear_buddies()
3954 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
3957 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
3962 update_curr(cfs_rq); in dequeue_entity()
3972 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entity()
3973 dequeue_runnable_load_avg(cfs_rq, se); in dequeue_entity()
3975 update_stats_dequeue(cfs_rq, se, flags); in dequeue_entity()
3977 clear_buddies(cfs_rq, se); in dequeue_entity()
3979 if (se != cfs_rq->curr) in dequeue_entity()
3980 __dequeue_entity(cfs_rq, se); in dequeue_entity()
3982 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
3991 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
3994 return_cfs_rq_runtime(cfs_rq); in dequeue_entity()
4005 update_min_vruntime(cfs_rq); in dequeue_entity()
4012 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) in check_preempt_tick() argument
4018 ideal_runtime = sched_slice(cfs_rq, curr); in check_preempt_tick()
4021 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
4026 clear_buddies(cfs_rq, curr); in check_preempt_tick()
4038 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
4045 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
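check_preempt_tick() (lines 4012-4045) decides at tick time whether the current entity has outrun its slice: preempt if it has run past its ideal_runtime; otherwise preempt only if it has run at least the minimum granularity and leads the leftmost entity's vruntime by more than the slice. A condensed sketch of the decision:

#include <stdint.h>

static int should_preempt_at_tick(uint64_t delta_exec, uint64_t ideal_runtime,
                                  uint64_t min_granularity,
                                  int64_t vdiff_vs_leftmost)
{
        if (delta_exec > ideal_runtime)
                return 1;       /* weighted slice exhausted */
        if (delta_exec < min_granularity)
                return 0;       /* avoid over-scheduling short runs */
        return vdiff_vs_leftmost > (int64_t)ideal_runtime;
}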
4049 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
4058 update_stats_wait_end(cfs_rq, se); in set_next_entity()
4059 __dequeue_entity(cfs_rq, se); in set_next_entity()
4060 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
4063 update_stats_curr_start(cfs_rq, se); in set_next_entity()
4064 cfs_rq->curr = se; in set_next_entity()
4071 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { in set_next_entity()
4091 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity() argument
4093 struct sched_entity *left = __pick_first_entity(cfs_rq); in pick_next_entity()
4109 if (cfs_rq->skip == se) { in pick_next_entity()
4113 second = __pick_first_entity(cfs_rq); in pick_next_entity()
4127 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) in pick_next_entity()
4128 se = cfs_rq->last; in pick_next_entity()
4133 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) in pick_next_entity()
4134 se = cfs_rq->next; in pick_next_entity()
4136 clear_buddies(cfs_rq, se); in pick_next_entity()
4141 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4143 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity() argument
4150 update_curr(cfs_rq); in put_prev_entity()
4153 check_cfs_rq_runtime(cfs_rq); in put_prev_entity()
4155 check_spread(cfs_rq, prev); in put_prev_entity()
4158 update_stats_wait_start(cfs_rq, prev); in put_prev_entity()
4160 __enqueue_entity(cfs_rq, prev); in put_prev_entity()
4162 update_load_avg(cfs_rq, prev, 0); in put_prev_entity()
4164 cfs_rq->curr = NULL; in put_prev_entity()
4168 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
4173 update_curr(cfs_rq); in entity_tick()
4178 update_load_avg(cfs_rq, curr, UPDATE_TG); in entity_tick()
4187 resched_curr(rq_of(cfs_rq)); in entity_tick()
4194 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) in entity_tick()
4198 if (cfs_rq->nr_running > 1) in entity_tick()
4199 check_preempt_tick(cfs_rq, curr); in entity_tick()
4276 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) in cfs_rq_clock_task() argument
4278 if (unlikely(cfs_rq->throttle_count)) in cfs_rq_clock_task()
4279 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time; in cfs_rq_clock_task()
4281 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time; in cfs_rq_clock_task()
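cfs_rq_clock_task() (lines 4276-4281) is the PELT clock under bandwidth control: while a queue is throttled, its task clock stays frozen at the instant of throttling, and accumulated throttled time is subtracted afterwards, so throttled periods neither age nor decay the queue's averages. A direct sketch of those two lines:

#include <stdint.h>

static uint64_t cfs_rq_clock_task_sketch(int throttle_count,
                                         uint64_t rq_clock_task,
                                         uint64_t throttled_clock_task,
                                         uint64_t throttled_time)
{
        if (throttle_count)                     /* frozen while throttled */
                return throttled_clock_task - throttled_time;
        return rq_clock_task - throttled_time;  /* throttled time elided */
}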
4285 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) in assign_cfs_rq_runtime() argument
4287 struct task_group *tg = cfs_rq->tg; in assign_cfs_rq_runtime()
4293 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; in assign_cfs_rq_runtime()
4311 cfs_rq->runtime_remaining += amount; in assign_cfs_rq_runtime()
4317 if (cfs_rq->expires_seq != expires_seq) { in assign_cfs_rq_runtime()
4318 cfs_rq->expires_seq = expires_seq; in assign_cfs_rq_runtime()
4319 cfs_rq->runtime_expires = expires; in assign_cfs_rq_runtime()
4322 return cfs_rq->runtime_remaining > 0; in assign_cfs_rq_runtime()
4329 static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) in expire_cfs_rq_runtime() argument
4331 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in expire_cfs_rq_runtime()
4334 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0)) in expire_cfs_rq_runtime()
4337 if (cfs_rq->runtime_remaining < 0) in expire_cfs_rq_runtime()
4348 if (cfs_rq->expires_seq == cfs_b->expires_seq) { in expire_cfs_rq_runtime()
4350 cfs_rq->runtime_expires += TICK_NSEC; in expire_cfs_rq_runtime()
4353 cfs_rq->runtime_remaining = 0; in expire_cfs_rq_runtime()
4357 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in __account_cfs_rq_runtime() argument
4360 cfs_rq->runtime_remaining -= delta_exec; in __account_cfs_rq_runtime()
4361 expire_cfs_rq_runtime(cfs_rq); in __account_cfs_rq_runtime()
4363 if (likely(cfs_rq->runtime_remaining > 0)) in __account_cfs_rq_runtime()
4370 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) in __account_cfs_rq_runtime()
4371 resched_curr(rq_of(cfs_rq)); in __account_cfs_rq_runtime()
4375 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in account_cfs_rq_runtime() argument
4377 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) in account_cfs_rq_runtime()
4380 __account_cfs_rq_runtime(cfs_rq, delta_exec); in account_cfs_rq_runtime()
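__account_cfs_rq_runtime() (lines 4357-4371) charges executed time against the per-cfs_rq runtime pool and, when it runs dry, tries to pull another slice from the task group's global pool; if that fails, the current task is rescheduled so the queue can be throttled on the way out. A compact sketch of the local-pool logic, where the refill parameter stands in for what assign_cfs_rq_runtime() obtains:

#include <stdint.h>

/* Returns 1 when the caller should resched (pool exhausted, no refill). */
static int account_runtime_sketch(int64_t *runtime_remaining,
                                  uint64_t delta_exec, int64_t refill)
{
        *runtime_remaining -= (int64_t)delta_exec;
        if (*runtime_remaining > 0)
                return 0;               /* still within the local slice */

        *runtime_remaining += refill;   /* pull from the global pool */
        return *runtime_remaining <= 0;
}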
4383 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
4385 return cfs_bandwidth_used() && cfs_rq->throttled; in cfs_rq_throttled()
4389 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
4391 return cfs_bandwidth_used() && cfs_rq->throttle_count; in throttled_hierarchy()
4402 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; in throttled_lb_pair()
4404 src_cfs_rq = tg->cfs_rq[src_cpu]; in throttled_lb_pair()
4405 dest_cfs_rq = tg->cfs_rq[dest_cpu]; in throttled_lb_pair()
4414 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up() local
4416 cfs_rq->throttle_count--; in tg_unthrottle_up()
4417 if (!cfs_rq->throttle_count) { in tg_unthrottle_up()
4419 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - in tg_unthrottle_up()
4420 cfs_rq->throttled_clock_task; in tg_unthrottle_up()
4429 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down() local
4432 if (!cfs_rq->throttle_count) in tg_throttle_down()
4433 cfs_rq->throttled_clock_task = rq_clock_task(rq); in tg_throttle_down()
4434 cfs_rq->throttle_count++; in tg_throttle_down()
4439 static void throttle_cfs_rq(struct cfs_rq *cfs_rq) in throttle_cfs_rq() argument
4441 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq()
4442 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
4447 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
4451 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
4454 task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
4456 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
4472 cfs_rq->throttled = 1; in throttle_cfs_rq()
4473 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
4483 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); in throttle_cfs_rq()
4485 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); in throttle_cfs_rq()
4497 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq() argument
4499 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq()
4500 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
4505 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
4507 cfs_rq->throttled = 0; in unthrottle_cfs_rq()
4512 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
4513 list_del_rcu(&cfs_rq->throttled_list); in unthrottle_cfs_rq()
4517 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
4519 if (!cfs_rq->load.weight) in unthrottle_cfs_rq()
4522 task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
4527 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
4529 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
4530 cfs_rq->h_nr_running += task_delta; in unthrottle_cfs_rq()
4532 if (cfs_rq_throttled(cfs_rq)) in unthrottle_cfs_rq()
4547 struct cfs_rq *cfs_rq; in distribute_cfs_runtime() local
4552 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, in distribute_cfs_runtime()
4554 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime()
4558 if (!cfs_rq_throttled(cfs_rq)) in distribute_cfs_runtime()
4561 runtime = -cfs_rq->runtime_remaining + 1; in distribute_cfs_runtime()
4566 cfs_rq->runtime_remaining += runtime; in distribute_cfs_runtime()
4567 cfs_rq->runtime_expires = expires; in distribute_cfs_runtime()
4570 if (cfs_rq->runtime_remaining > 0) in distribute_cfs_runtime()
4571 unthrottle_cfs_rq(cfs_rq); in distribute_cfs_runtime()
4703 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in __return_cfs_rq_runtime() argument
4705 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
4706 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; in __return_cfs_rq_runtime()
4713 cfs_rq->runtime_expires == cfs_b->runtime_expires) { in __return_cfs_rq_runtime()
4724 cfs_rq->runtime_remaining -= slack_runtime; in __return_cfs_rq_runtime()
4727 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in return_cfs_rq_runtime() argument
4732 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) in return_cfs_rq_runtime()
4735 __return_cfs_rq_runtime(cfs_rq); in return_cfs_rq_runtime()
4785 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) in check_enqueue_throttle() argument
4791 if (!cfs_rq->runtime_enabled || cfs_rq->curr) in check_enqueue_throttle()
4795 if (cfs_rq_throttled(cfs_rq)) in check_enqueue_throttle()
4799 account_cfs_rq_runtime(cfs_rq, 0); in check_enqueue_throttle()
4800 if (cfs_rq->runtime_remaining <= 0) in check_enqueue_throttle()
4801 throttle_cfs_rq(cfs_rq); in check_enqueue_throttle()
4806 struct cfs_rq *pcfs_rq, *cfs_rq; in sync_throttle() local
4814 cfs_rq = tg->cfs_rq[cpu]; in sync_throttle()
4815 pcfs_rq = tg->parent->cfs_rq[cpu]; in sync_throttle()
4817 cfs_rq->throttle_count = pcfs_rq->throttle_count; in sync_throttle()
4818 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); in sync_throttle()
4822 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) in check_cfs_rq_runtime() argument
4827 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) in check_cfs_rq_runtime()
4834 if (cfs_rq_throttled(cfs_rq)) in check_cfs_rq_runtime()
4837 throttle_cfs_rq(cfs_rq); in check_cfs_rq_runtime()
4888 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) in init_cfs_rq_runtime() argument
4890 cfs_rq->runtime_enabled = 0; in init_cfs_rq_runtime()
4891 INIT_LIST_HEAD(&cfs_rq->throttled_list); in init_cfs_rq_runtime()
4937 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled() local
4940 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; in update_runtime_enabled()
4955 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs() local
4957 if (!cfs_rq->runtime_enabled) in unthrottle_offline_cfs_rqs()
4964 cfs_rq->runtime_remaining = 1; in unthrottle_offline_cfs_rqs()
4969 cfs_rq->runtime_enabled = 0; in unthrottle_offline_cfs_rqs()
4971 if (cfs_rq_throttled(cfs_rq)) in unthrottle_offline_cfs_rqs()
4972 unthrottle_cfs_rq(cfs_rq); in unthrottle_offline_cfs_rqs()
4978 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq) in cfs_rq_clock_task() argument
4980 return rq_clock_task(rq_of(cfs_rq)); in cfs_rq_clock_task()
4983 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} in account_cfs_rq_runtime() argument
4984 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } in check_cfs_rq_runtime() argument
4985 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} in check_enqueue_throttle() argument
4987 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in return_cfs_rq_runtime() argument
4989 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
4994 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
5008 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in init_cfs_rq_runtime() argument
5029 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair() local
5034 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
5081 struct cfs_rq *cfs_rq; in enqueue_task_fair() local
5103 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5104 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
5112 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
5114 cfs_rq->h_nr_running++; in enqueue_task_fair()
5120 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5121 cfs_rq->h_nr_running++; in enqueue_task_fair()
5123 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
5126 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
5145 struct cfs_rq *cfs_rq; in dequeue_task_fair() local
5150 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5151 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
5159 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
5161 cfs_rq->h_nr_running--; in dequeue_task_fair()
5164 if (cfs_rq->load.weight) { in dequeue_task_fair()
5171 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
5179 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5180 cfs_rq->h_nr_running--; in dequeue_task_fair()
5182 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
5185 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
6204 struct cfs_rq *cfs_rq; in cpu_util() local
6207 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util()
6208 util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
6211 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); in cpu_util()
6222 struct cfs_rq *cfs_rq; in cpu_util_wake() local
6229 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_wake()
6230 util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util_wake()
6262 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); in cpu_util_wake()
6380 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair() local
6387 min_vruntime_copy = cfs_rq->min_vruntime_copy; in migrate_task_rq_fair()
6389 min_vruntime = cfs_rq->min_vruntime; in migrate_task_rq_fair()
6392 min_vruntime = cfs_rq->min_vruntime; in migrate_task_rq_fair()
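migrate_task_rq_fair() (lines 6380-6392) reads min_vruntime without holding the rq lock. On 32-bit targets a u64 load can tear, so update_min_vruntime() publishes min_vruntime_copy (line 524) behind a write barrier and the reader retries until both fields agree. A sketch of the read side, with __sync_synchronize() standing in for smp_rmb():

#include <stdint.h>

static uint64_t read_min_vruntime_sketch(const volatile uint64_t *min_vruntime,
                                         const volatile uint64_t *copy)
{
        uint64_t val, seen_copy;

        do {
                seen_copy = *copy;
                __sync_synchronize();   /* pairs with the writer's wmb */
                val = *min_vruntime;
        } while (val != seen_copy);     /* retry on a torn read */

        return val;
}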
6519 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in check_preempt_wakeup() local
6520 int scale = cfs_rq->nr_running >= sched_nr_latency; in check_preempt_wakeup()
6601 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair() local
6607 if (!cfs_rq->nr_running) in pick_next_task_fair()
6623 struct sched_entity *curr = cfs_rq->curr; in pick_next_task_fair()
6633 update_curr(cfs_rq); in pick_next_task_fair()
6643 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { in pick_next_task_fair()
6644 cfs_rq = &rq->cfs; in pick_next_task_fair()
6646 if (!cfs_rq->nr_running) in pick_next_task_fair()
6653 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
6654 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
6655 } while (cfs_rq); in pick_next_task_fair()
6667 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
6681 put_prev_entity(cfs_rq, pse); in pick_next_task_fair()
6682 set_next_entity(cfs_rq, se); in pick_next_task_fair()
6692 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
6693 set_next_entity(cfs_rq, se); in pick_next_task_fair()
6694 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
6695 } while (cfs_rq); in pick_next_task_fair()
6737 struct cfs_rq *cfs_rq; in put_prev_task_fair() local
6740 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
6741 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
6753 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair() local
6762 clear_buddies(cfs_rq, se); in yield_task_fair()
6769 update_curr(cfs_rq); in yield_task_fair()
7311 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) in cfs_rq_has_blocked() argument
7313 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
7316 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
7340 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) in cfs_rq_is_decayed() argument
7342 if (cfs_rq->load.weight) in cfs_rq_is_decayed()
7345 if (cfs_rq->avg.load_sum) in cfs_rq_is_decayed()
7348 if (cfs_rq->avg.util_sum) in cfs_rq_is_decayed()
7351 if (cfs_rq->avg.runnable_load_sum) in cfs_rq_is_decayed()
7360 struct cfs_rq *cfs_rq, *pos; in update_blocked_averages() local
7372 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { in update_blocked_averages()
7376 if (throttled_hierarchy(cfs_rq)) in update_blocked_averages()
7379 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq)) in update_blocked_averages()
7380 update_tg_load_avg(cfs_rq, 0); in update_blocked_averages()
7383 se = cfs_rq->tg->se[cpu]; in update_blocked_averages()
7391 if (cfs_rq_is_decayed(cfs_rq)) in update_blocked_averages()
7392 list_del_leaf_cfs_rq(cfs_rq); in update_blocked_averages()
7395 if (cfs_rq_has_blocked(cfs_rq)) in update_blocked_averages()
7420 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) in update_cfs_rq_h_load() argument
7422 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load()
7423 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
7427 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
7430 cfs_rq->h_load_next = NULL; in update_cfs_rq_h_load()
7432 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
7433 cfs_rq->h_load_next = se; in update_cfs_rq_h_load()
7434 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
7439 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); in update_cfs_rq_h_load()
7440 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
7443 while ((se = cfs_rq->h_load_next) != NULL) { in update_cfs_rq_h_load()
7444 load = cfs_rq->h_load; in update_cfs_rq_h_load()
7446 cfs_rq_load_avg(cfs_rq) + 1); in update_cfs_rq_h_load()
7447 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
7448 cfs_rq->h_load = load; in update_cfs_rq_h_load()
7449 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
7455 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load() local
7457 update_cfs_rq_h_load(cfs_rq); in task_h_load()
7458 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
7459 cfs_rq_load_avg(cfs_rq) + 1); in task_h_load()
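task_h_load() (lines 7455-7459) folds the group hierarchy into one number for load balancing: a task's load as seen from the root is its own load_avg scaled by its cfs_rq's hierarchical load (h_load, propagated top-down by update_cfs_rq_h_load()) over that cfs_rq's load_avg, with +1 guarding the divide. Sketch:

#include <stdint.h>

static uint64_t task_h_load_sketch(uint64_t task_load_avg,
                                   uint64_t cfs_rq_h_load,
                                   uint64_t cfs_rq_load_avg)
{
        return task_load_avg * cfs_rq_h_load / (cfs_rq_load_avg + 1);
}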
7465 struct cfs_rq *cfs_rq = &rq->cfs; in update_blocked_averages() local
7471 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq); in update_blocked_averages()
7479 if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq)) in update_blocked_averages()
9651 struct cfs_rq *cfs_rq; in task_tick_fair() local
9655 cfs_rq = cfs_rq_of(se); in task_tick_fair()
9656 entity_tick(cfs_rq, se, queued); in task_tick_fair()
9670 struct cfs_rq *cfs_rq; in task_fork_fair() local
9678 cfs_rq = task_cfs_rq(current); in task_fork_fair()
9679 curr = cfs_rq->curr; in task_fork_fair()
9681 update_curr(cfs_rq); in task_fork_fair()
9684 place_entity(cfs_rq, se, 1); in task_fork_fair()
9695 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
9756 struct cfs_rq *cfs_rq; in propagate_entity_cfs_rq() local
9762 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
9764 if (cfs_rq_throttled(cfs_rq)) in propagate_entity_cfs_rq()
9767 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
9776 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq() local
9779 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
9780 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
9781 update_tg_load_avg(cfs_rq, false); in detach_entity_cfs_rq()
9787 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq() local
9798 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
9799 attach_entity_load_avg(cfs_rq, se, 0); in attach_entity_cfs_rq()
9800 update_tg_load_avg(cfs_rq, false); in attach_entity_cfs_rq()
9807 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq() local
9814 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
9815 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
9824 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq() local
9829 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
9864 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_curr_task_fair() local
9866 set_next_entity(cfs_rq, se); in set_curr_task_fair()
9868 account_cfs_rq_runtime(cfs_rq, 0); in set_curr_task_fair()
9872 void init_cfs_rq(struct cfs_rq *cfs_rq) in init_cfs_rq() argument
9874 cfs_rq->tasks_timeline = RB_ROOT_CACHED; in init_cfs_rq()
9875 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); in init_cfs_rq()
9877 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; in init_cfs_rq()
9880 raw_spin_lock_init(&cfs_rq->removed.lock); in init_cfs_rq()
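init_cfs_rq() (lines 9872-9880) starts min_vruntime about a millisecond's worth of nanoseconds *before* u64 wraparound, which exercises wraparound almost immediately: any comparison that is not wrap-safe (see the max_vruntime/entity_before sketches above) misbehaves within the first moments of uptime rather than years later. A runnable demonstration of the constant on line 9875:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t min_vruntime = (uint64_t)(-(1LL << 20));   /* line 9875 */

        /* prints 2^64 - 2^20 = 18446744073708503040 */
        printf("initial min_vruntime = %llu\n",
               (unsigned long long)min_vruntime);
        return 0;
}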
9925 if (tg->cfs_rq) in free_fair_sched_group()
9926 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
9931 kfree(tg->cfs_rq); in free_fair_sched_group()
9938 struct cfs_rq *cfs_rq; in alloc_fair_sched_group() local
9941 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); in alloc_fair_sched_group()
9942 if (!tg->cfs_rq) in alloc_fair_sched_group()
9953 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), in alloc_fair_sched_group()
9955 if (!cfs_rq) in alloc_fair_sched_group()
9963 init_cfs_rq(cfs_rq); in alloc_fair_sched_group()
9964 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
9971 kfree(cfs_rq); in alloc_fair_sched_group()
10008 if (!tg->cfs_rq[cpu]->on_list) in unregister_fair_sched_group()
10014 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); in unregister_fair_sched_group()
10019 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
10025 cfs_rq->tg = tg; in init_tg_cfs_entry()
10026 cfs_rq->rq = rq; in init_tg_cfs_entry()
10027 init_cfs_rq_runtime(cfs_rq); in init_tg_cfs_entry()
10029 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
10037 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
10040 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
10044 se->my_q = cfs_rq; in init_tg_cfs_entry()
10165 struct cfs_rq *cfs_rq, *pos; in print_cfs_stats() local
10168 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) in print_cfs_stats()
10169 print_cfs_rq(m, cpu, cfs_rq); in print_cfs_stats()