Lines matching refs: cfs_rq (kernel/sched/fair.c)
271 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq()
273 return p->se.cfs_rq; in task_cfs_rq()
277 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
279 return se->cfs_rq; in cfs_rq_of()
283 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
288 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) in cfs_rq_tg_path() argument
293 if (cfs_rq && task_group_is_autogroup(cfs_rq->tg)) in cfs_rq_tg_path()
294 autogroup_path(cfs_rq->tg, path, len); in cfs_rq_tg_path()
295 else if (cfs_rq && cfs_rq->tg->css.cgroup) in cfs_rq_tg_path()
296 cgroup_path(cfs_rq->tg->css.cgroup, path, len); in cfs_rq_tg_path()
301 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
303 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq()
306 if (cfs_rq->on_list) in list_add_leaf_cfs_rq()
309 cfs_rq->on_list = 1; in list_add_leaf_cfs_rq()
320 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
321 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq()
328 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
329 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq()
339 if (!cfs_rq->tg->parent) { in list_add_leaf_cfs_rq()
344 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
360 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); in list_add_leaf_cfs_rq()
365 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
369 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
371 if (cfs_rq->on_list) { in list_del_leaf_cfs_rq()
372 struct rq *rq = rq_of(cfs_rq); in list_del_leaf_cfs_rq()
381 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) in list_del_leaf_cfs_rq()
382 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; in list_del_leaf_cfs_rq()
384 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); in list_del_leaf_cfs_rq()
385 cfs_rq->on_list = 0; in list_del_leaf_cfs_rq()
395 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
396 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
400 static inline struct cfs_rq *
403 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
404 return se->cfs_rq; in is_same_group()
456 static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) in task_cfs_rq()
461 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
470 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
475 static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len) in cfs_rq_tg_path() argument
481 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
486 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
494 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
495 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
510 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
540 static void update_min_vruntime(struct cfs_rq *cfs_rq) in update_min_vruntime() argument
542 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime()
543 struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline); in update_min_vruntime()
545 u64 vruntime = cfs_rq->min_vruntime; in update_min_vruntime()
565 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime); in update_min_vruntime()
568 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; in update_min_vruntime()
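
The assignment at line 565 relies on wraparound-safe ordering: vruntime is an unsigned 64-bit quantity that is allowed to overflow (init_cfs_rq() at line 10935 even seeds min_vruntime just below the wrap point to flush out comparison bugs early), so maxima are taken on the signed difference rather than with a plain `>`. Below is a minimal standalone sketch of the idiom used by fair.c's max_vruntime() helper; the demo values are invented. The min_vruntime_copy store at line 568 is the writer half of a lockless-read pairing; see the reader sketch after line 6784.

    #include <stdint.h>
    #include <stdio.h>

    /* Wraparound-safe maximum: compare via the signed difference, as
     * fair.c's max_vruntime() does, so the result stays correct when
     * u64 vruntime values overflow. */
    static uint64_t max_vruntime(uint64_t max_v, uint64_t v)
    {
        int64_t delta = (int64_t)(v - max_v);

        if (delta > 0)
            max_v = v;
        return max_v;
    }

    int main(void)
    {
        uint64_t near_wrap = (uint64_t)-(1LL << 20);  /* like init_cfs_rq() */
        uint64_t past_wrap = 1000;                    /* already wrapped */

        /* A plain "past_wrap > near_wrap" would say no; the signed
         * delta correctly treats past_wrap as the later value. */
        printf("max = %llu\n",
               (unsigned long long)max_vruntime(near_wrap, past_wrap));
        return 0;
    }
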
575 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
577 struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node; in __enqueue_entity()
602 &cfs_rq->tasks_timeline, leftmost); in __enqueue_entity()
605 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
607 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
610 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) in __pick_first_entity() argument
612 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); in __pick_first_entity()
631 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) in __pick_last_entity() argument
633 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); in __pick_last_entity()
701 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
703 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); in sched_slice()
709 cfs_rq = cfs_rq_of(se); in sched_slice()
710 load = &cfs_rq->load; in sched_slice()
713 lw = cfs_rq->load; in sched_slice()
728 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
730 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
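
sched_slice() (line 701) sizes an entity's wall-clock slice as its weight share of the scheduling period, where the period stretches once more tasks are runnable than fit at minimum granularity. A self-contained model of that arithmetic, assuming the common unscaled defaults (6 ms latency, 0.75 ms minimum granularity; the real values are sysctls and are scaled by CPU count):

    #include <stdio.h>
    #include <stdint.h>

    /* Invented constants mirroring the usual sysctl defaults. */
    #define SCHED_LATENCY_NS    6000000ULL  /* sysctl_sched_latency */
    #define MIN_GRANULARITY_NS   750000ULL  /* sysctl_sched_min_granularity */
    #define NR_LATENCY (SCHED_LATENCY_NS / MIN_GRANULARITY_NS)

    /* Period grows once nr_running no longer fits at minimum granularity. */
    static uint64_t sched_period(unsigned int nr_running)
    {
        if (nr_running > NR_LATENCY)
            return nr_running * MIN_GRANULARITY_NS;
        return SCHED_LATENCY_NS;
    }

    int main(void)
    {
        /* Three runnable tasks: two at nice-0 weight, one at double. */
        unsigned long w[] = { 1024, 1024, 2048 };
        unsigned long total = w[0] + w[1] + w[2];
        uint64_t period = sched_period(3);

        for (int i = 0; i < 3; i++)
            printf("task %d slice = %llu ns\n", i,
                   (unsigned long long)(period * w[i] / total));
        return 0;
    }

sched_vslice() (line 728) then converts the slice back to virtual time via calc_delta_fair(), so every entity's slice spans the same vruntime distance regardless of weight.
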
790 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg() local
792 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); in post_init_entity_util_avg()
793 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
796 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
797 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
798 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
820 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
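
post_init_entity_util_avg() (lines 790-820) bootstraps a new task's utilization instead of letting it start from zero: a weight-proportional share of the queue's current util_avg, capped at half of the CPU capacity that is still unused. A sketch of just that arithmetic; the sample numbers are invented:

    #include <stdio.h>

    /* Sketch of the bootstrap arithmetic in post_init_entity_util_avg(). */
    static long initial_util(long cpu_scale, long cfs_util_avg,
                             long cfs_load_avg, unsigned long se_weight)
    {
        long cap = (cpu_scale - cfs_util_avg) / 2;
        long util = 0;

        if (cap > 0) {
            if (cfs_util_avg != 0) {
                /* Weight-proportional share of the queue's util. */
                util = cfs_util_avg * (long)se_weight / (cfs_load_avg + 1);
                if (util > cap)
                    util = cap;
            } else {
                util = cap;
            }
        }
        return util;
    }

    int main(void)
    {
        /* 1024-capacity CPU, queue at util 256 / load 512, nice-0 task. */
        printf("initial util_avg = %ld\n", initial_util(1024, 256, 512, 1024));
        return 0;
    }

With an idle queue (util_avg == 0) the task simply gets the cap, i.e. half the spare capacity.
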
834 static void update_tg_load_avg(struct cfs_rq *cfs_rq) in update_tg_load_avg() argument
842 static void update_curr(struct cfs_rq *cfs_rq) in update_curr() argument
844 struct sched_entity *curr = cfs_rq->curr; in update_curr()
845 u64 now = rq_clock_task(rq_of(cfs_rq)); in update_curr()
861 schedstat_add(cfs_rq->exec_clock, delta_exec); in update_curr()
864 update_min_vruntime(cfs_rq); in update_curr()
874 account_cfs_rq_runtime(cfs_rq, delta_exec); in update_curr()
883 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
890 wait_start = rq_clock(rq_of(cfs_rq)); in update_stats_wait_start()
901 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
909 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); in update_stats_wait_end()
933 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper() argument
948 u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start; in update_stats_enqueue_sleeper()
965 u64 delta = rq_clock(rq_of(cfs_rq)) - block_start; in update_stats_enqueue_sleeper()
1004 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue() argument
1013 if (se != cfs_rq->curr) in update_stats_enqueue()
1014 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
1017 update_stats_enqueue_sleeper(cfs_rq, se); in update_stats_enqueue()
1021 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue() argument
1031 if (se != cfs_rq->curr) in update_stats_dequeue()
1032 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
1039 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue()
1042 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue()
1050 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1055 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2989 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
2991 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2994 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue()
3000 cfs_rq->nr_running++; in account_entity_enqueue()
3004 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3006 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3009 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3013 cfs_rq->nr_running--; in account_entity_dequeue()
3066 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3068 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3069 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3073 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3075 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3076 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3080 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3082 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3085 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3090 if (cfs_rq->curr == se) in reweight_entity()
3091 update_curr(cfs_rq); in reweight_entity()
3092 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3094 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3106 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3108 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3115 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task() local
3119 reweight_entity(cfs_rq, se, weight); in reweight_task()
3198 static long calc_group_shares(struct cfs_rq *cfs_rq) in calc_group_shares() argument
3201 struct task_group *tg = cfs_rq->tg; in calc_group_shares()
3205 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
3210 tg_weight -= cfs_rq->tg_load_avg_contrib; in calc_group_shares()
3233 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3241 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3268 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) in cfs_rq_util_change() argument
3270 struct rq *rq = rq_of(cfs_rq); in cfs_rq_util_change()
3272 if (&rq->cfs == cfs_rq) { in cfs_rq_util_change()
3307 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) in update_tg_load_avg() argument
3309 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
3314 if (cfs_rq->tg == &root_task_group) in update_tg_load_avg()
3317 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { in update_tg_load_avg()
3318 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
3319 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
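
update_tg_load_avg() (lines 3307-3319) publishes a cfs_rq's load into the shared, cross-CPU tg->load_avg only when the local value has drifted by more than 1/64 (~1.6%) of the last published contribution, trading a small bounded error for far fewer contended atomic updates. A minimal userspace model of the filter; the variable names are invented:

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <stdio.h>

    static atomic_long tg_load_avg;      /* shared, contended counter */
    static long tg_load_avg_contrib;     /* per-cfs_rq cached contribution */

    /* Publish the local average only when it drifted by more than ~1.6%. */
    static void update_tg_load_avg(long load_avg)
    {
        long delta = load_avg - tg_load_avg_contrib;

        if (labs(delta) > tg_load_avg_contrib / 64) {
            atomic_fetch_add(&tg_load_avg, delta);
            tg_load_avg_contrib = load_avg;
        }
    }

    int main(void)
    {
        update_tg_load_avg(1024);  /* first update always publishes */
        update_tg_load_avg(1030);  /* |6| <= 1024/64: filtered out */
        update_tg_load_avg(1100);  /* |76| > 1024/64: published */
        printf("tg load = %ld, cached contrib = %ld\n",
               atomic_load(&tg_load_avg), tg_load_avg_contrib);
        return 0;
    }

The first call always publishes, since the cached contribution starts at zero.
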
3329 struct cfs_rq *prev, struct cfs_rq *next) in set_task_rq_fair()
3442 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
3455 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_util()
3462 add_positive(&cfs_rq->avg.util_avg, delta); in update_tg_cfs_util()
3463 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider; in update_tg_cfs_util()
3467 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
3480 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_runnable()
3487 add_positive(&cfs_rq->avg.runnable_avg, delta); in update_tg_cfs_runnable()
3488 cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider; in update_tg_cfs_runnable()
3492 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
3509 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_load()
3549 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_load()
3550 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_load()
3553 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) in add_tg_cfs_propagate() argument
3555 cfs_rq->propagate = 1; in add_tg_cfs_propagate()
3556 cfs_rq->prop_runnable_sum += runnable_sum; in add_tg_cfs_propagate()
3562 struct cfs_rq *cfs_rq, *gcfs_rq; in propagate_entity_load_avg() local
3573 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
3575 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); in propagate_entity_load_avg()
3577 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3578 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3579 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3581 trace_pelt_cfs_tp(cfs_rq); in propagate_entity_load_avg()
3593 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
3619 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {} in update_tg_load_avg() argument
3626 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} in add_tg_cfs_propagate() argument
3647 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) in update_cfs_rq_load_avg() argument
3650 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
3653 if (cfs_rq->removed.nr) { in update_cfs_rq_load_avg()
3655 u32 divider = get_pelt_divider(&cfs_rq->avg); in update_cfs_rq_load_avg()
3657 raw_spin_lock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
3658 swap(cfs_rq->removed.util_avg, removed_util); in update_cfs_rq_load_avg()
3659 swap(cfs_rq->removed.load_avg, removed_load); in update_cfs_rq_load_avg()
3660 swap(cfs_rq->removed.runnable_avg, removed_runnable); in update_cfs_rq_load_avg()
3661 cfs_rq->removed.nr = 0; in update_cfs_rq_load_avg()
3662 raw_spin_unlock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
3680 add_tg_cfs_propagate(cfs_rq, in update_cfs_rq_load_avg()
3686 decayed |= __update_load_avg_cfs_rq(now, cfs_rq); in update_cfs_rq_load_avg()
3690 cfs_rq->load_last_update_time_copy = sa->last_update_time; in update_cfs_rq_load_avg()
3704 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
3710 u32 divider = get_pelt_divider(&cfs_rq->avg); in attach_entity_load_avg()
3719 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
3720 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
3738 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
3739 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
3740 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
3741 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
3742 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
3744 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
3746 cfs_rq_util_change(cfs_rq, 0); in attach_entity_load_avg()
3748 trace_pelt_cfs_tp(cfs_rq); in attach_entity_load_avg()
3759 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
3761 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
3762 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
3763 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
3764 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
3765 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
3767 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
3769 cfs_rq_util_change(cfs_rq, 0); in detach_entity_load_avg()
3771 trace_pelt_cfs_tp(cfs_rq); in detach_entity_load_avg()
3782 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
3784 u64 now = cfs_rq_clock_pelt(cfs_rq); in update_load_avg()
3792 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
3794 decayed = update_cfs_rq_load_avg(now, cfs_rq); in update_load_avg()
3806 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
3807 update_tg_load_avg(cfs_rq); in update_load_avg()
3810 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
3813 update_tg_load_avg(cfs_rq); in update_load_avg()
3818 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) in cfs_rq_last_update_time() argument
3824 last_update_time_copy = cfs_rq->load_last_update_time_copy; in cfs_rq_last_update_time()
3826 last_update_time = cfs_rq->avg.last_update_time; in cfs_rq_last_update_time()
3832 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) in cfs_rq_last_update_time() argument
3834 return cfs_rq->avg.last_update_time; in cfs_rq_last_update_time()
3844 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg() local
3847 last_update_time = cfs_rq_last_update_time(cfs_rq); in sync_entity_load_avg()
3857 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg() local
3868 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
3869 ++cfs_rq->removed.nr; in remove_entity_load_avg()
3870 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
3871 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
3872 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
3873 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
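
remove_entity_load_avg() (lines 3857-3873) runs on paths that do not hold the destination rq lock, so rather than editing cfs_rq->avg directly it parks the departing entity's averages in the spinlocked `removed` accumulator; update_cfs_rq_load_avg() (lines 3653-3662) later drains it via swap() under the rq lock. A compact model of the two-phase pattern, using a pthread mutex in place of the raw spinlock and zeroing in place of swap():

    #include <pthread.h>
    #include <stdio.h>

    /* Parking area for contributions removed without the main lock. */
    static struct {
        pthread_mutex_t lock;
        int nr;
        long util_avg, load_avg;
    } removed = { .lock = PTHREAD_MUTEX_INITIALIZER };

    static long cfs_util_avg = 900, cfs_load_avg = 1800;

    /* Migration/exit path: cheap, only the accumulator lock is taken. */
    static void remove_entity(long util, long load)
    {
        pthread_mutex_lock(&removed.lock);
        removed.nr++;
        removed.util_avg += util;
        removed.load_avg += load;
        pthread_mutex_unlock(&removed.lock);
    }

    /* Later, under the rq lock: drain everything in one shot. */
    static void drain_removed(void)
    {
        long util = 0, load = 0;

        pthread_mutex_lock(&removed.lock);
        if (removed.nr) {
            util = removed.util_avg;  removed.util_avg = 0;
            load = removed.load_avg;  removed.load_avg = 0;
            removed.nr = 0;
        }
        pthread_mutex_unlock(&removed.lock);

        cfs_util_avg -= util;
        cfs_load_avg -= load;
    }

    int main(void)
    {
        remove_entity(100, 200);
        remove_entity(50, 100);
        drain_removed();
        printf("util %ld load %ld\n", cfs_util_avg, cfs_load_avg); /* 750 1500 */
        return 0;
    }
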
3876 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) in cfs_rq_runnable_avg() argument
3878 return cfs_rq->avg.runnable_avg; in cfs_rq_runnable_avg()
3881 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_load_avg() argument
3883 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
3919 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, in util_est_enqueue() argument
3928 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_enqueue()
3930 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_enqueue()
3932 trace_sched_util_est_cfs_tp(cfs_rq); in util_est_enqueue()
3949 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) in util_est_dequeue() argument
3959 ue.enqueued = cfs_rq->avg.util_est.enqueued; in util_est_dequeue()
3961 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued); in util_est_dequeue()
3963 trace_sched_util_est_cfs_tp(cfs_rq); in util_est_dequeue()
4004 cpu = cpu_of(rq_of(cfs_rq)); in util_est_dequeue()
4067 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
4069 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
4075 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
4077 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
4085 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_enqueue() argument
4088 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, in util_est_dequeue() argument
4094 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
4097 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
4103 schedstat_inc(cfs_rq->nr_spread_over); in check_spread()
4108 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
4110 u64 vruntime = cfs_rq->min_vruntime; in place_entity()
4119 vruntime += sched_vslice(cfs_rq, se); in place_entity()
4139 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
4194 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
4197 bool curr = cfs_rq->curr == se; in enqueue_entity()
4204 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4206 update_curr(cfs_rq); in enqueue_entity()
4215 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4225 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
4228 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
4231 place_entity(cfs_rq, se, 0); in enqueue_entity()
4234 update_stats_enqueue(cfs_rq, se, flags); in enqueue_entity()
4235 check_spread(cfs_rq, se); in enqueue_entity()
4237 __enqueue_entity(cfs_rq, se); in enqueue_entity()
4245 if (cfs_rq->nr_running == 1 || cfs_bandwidth_used()) in enqueue_entity()
4246 list_add_leaf_cfs_rq(cfs_rq); in enqueue_entity()
4248 if (cfs_rq->nr_running == 1) in enqueue_entity()
4249 check_enqueue_throttle(cfs_rq); in enqueue_entity()
4255 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last() local
4256 if (cfs_rq->last != se) in __clear_buddies_last()
4259 cfs_rq->last = NULL; in __clear_buddies_last()
4266 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next() local
4267 if (cfs_rq->next != se) in __clear_buddies_next()
4270 cfs_rq->next = NULL; in __clear_buddies_next()
4277 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip() local
4278 if (cfs_rq->skip != se) in __clear_buddies_skip()
4281 cfs_rq->skip = NULL; in __clear_buddies_skip()
4285 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
4287 if (cfs_rq->last == se) in clear_buddies()
4290 if (cfs_rq->next == se) in clear_buddies()
4293 if (cfs_rq->skip == se) in clear_buddies()
4297 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4300 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
4305 update_curr(cfs_rq); in dequeue_entity()
4315 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entity()
4318 update_stats_dequeue(cfs_rq, se, flags); in dequeue_entity()
4320 clear_buddies(cfs_rq, se); in dequeue_entity()
4322 if (se != cfs_rq->curr) in dequeue_entity()
4323 __dequeue_entity(cfs_rq, se); in dequeue_entity()
4325 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
4334 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
4337 return_cfs_rq_runtime(cfs_rq); in dequeue_entity()
4348 update_min_vruntime(cfs_rq); in dequeue_entity()
4355 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) in check_preempt_tick() argument
4361 ideal_runtime = sched_slice(cfs_rq, curr); in check_preempt_tick()
4364 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
4369 clear_buddies(cfs_rq, curr); in check_preempt_tick()
4381 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
4388 resched_curr(rq_of(cfs_rq)); in check_preempt_tick()
4392 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
4401 update_stats_wait_end(cfs_rq, se); in set_next_entity()
4402 __dequeue_entity(cfs_rq, se); in set_next_entity()
4403 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
4406 update_stats_curr_start(cfs_rq, se); in set_next_entity()
4407 cfs_rq->curr = se; in set_next_entity()
4415 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
4435 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity() argument
4437 struct sched_entity *left = __pick_first_entity(cfs_rq); in pick_next_entity()
4453 if (cfs_rq->skip == se) { in pick_next_entity()
4457 second = __pick_first_entity(cfs_rq); in pick_next_entity()
4468 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) { in pick_next_entity()
4472 se = cfs_rq->next; in pick_next_entity()
4473 } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) { in pick_next_entity()
4477 se = cfs_rq->last; in pick_next_entity()
4480 clear_buddies(cfs_rq, se); in pick_next_entity()
4485 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
4487 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity() argument
4494 update_curr(cfs_rq); in put_prev_entity()
4497 check_cfs_rq_runtime(cfs_rq); in put_prev_entity()
4499 check_spread(cfs_rq, prev); in put_prev_entity()
4502 update_stats_wait_start(cfs_rq, prev); in put_prev_entity()
4504 __enqueue_entity(cfs_rq, prev); in put_prev_entity()
4506 update_load_avg(cfs_rq, prev, 0); in put_prev_entity()
4508 cfs_rq->curr = NULL; in put_prev_entity()
4512 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
4517 update_curr(cfs_rq); in entity_tick()
4522 update_load_avg(cfs_rq, curr, UPDATE_TG); in entity_tick()
4531 resched_curr(rq_of(cfs_rq)); in entity_tick()
4538 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) in entity_tick()
4542 if (cfs_rq->nr_running > 1) in entity_tick()
4543 check_preempt_tick(cfs_rq, curr); in entity_tick()
4614 struct cfs_rq *cfs_rq, u64 target_runtime) in __assign_cfs_rq_runtime() argument
4621 min_amount = target_runtime - cfs_rq->runtime_remaining; in __assign_cfs_rq_runtime()
4635 cfs_rq->runtime_remaining += amount; in __assign_cfs_rq_runtime()
4637 return cfs_rq->runtime_remaining > 0; in __assign_cfs_rq_runtime()
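
__assign_cfs_rq_runtime() (lines 4614-4637) tops the local runtime_remaining back up to a target (normally sched_cfs_bandwidth_slice(), 5 ms by default) by pulling at most that much from the task group's global pool. A sketch of the refill under the assumption of a finite quota; replenishment, timers and locking are omitted:

    #include <stdio.h>

    static unsigned long long cfs_b_runtime = 3000000;  /* global pool, ns */
    static long long runtime_remaining = -500000;       /* local, overdrawn */

    /* Pull min(needed, pool) out of the global pool; returns nonzero
     * when the cfs_rq ends up with runtime to spend. */
    static int assign_runtime(unsigned long long target)
    {
        unsigned long long amount, min_amount = target - runtime_remaining;

        amount = min_amount < cfs_b_runtime ? min_amount : cfs_b_runtime;
        cfs_b_runtime -= amount;
        runtime_remaining += amount;

        return runtime_remaining > 0;
    }

    int main(void)
    {
        /* 5 ms slice: needs 5.5 ms but only 3 ms is left in the pool. */
        printf("runnable = %d, local = %lld ns, pool = %llu ns\n",
               assign_runtime(5000000), runtime_remaining, cfs_b_runtime);
        return 0;
    }
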
4641 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) in assign_cfs_rq_runtime() argument
4643 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in assign_cfs_rq_runtime()
4647 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); in assign_cfs_rq_runtime()
4653 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in __account_cfs_rq_runtime() argument
4656 cfs_rq->runtime_remaining -= delta_exec; in __account_cfs_rq_runtime()
4658 if (likely(cfs_rq->runtime_remaining > 0)) in __account_cfs_rq_runtime()
4661 if (cfs_rq->throttled) in __account_cfs_rq_runtime()
4667 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) in __account_cfs_rq_runtime()
4668 resched_curr(rq_of(cfs_rq)); in __account_cfs_rq_runtime()
4672 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in account_cfs_rq_runtime() argument
4674 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) in account_cfs_rq_runtime()
4677 __account_cfs_rq_runtime(cfs_rq, delta_exec); in account_cfs_rq_runtime()
4680 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
4682 return cfs_bandwidth_used() && cfs_rq->throttled; in cfs_rq_throttled()
4686 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
4688 return cfs_bandwidth_used() && cfs_rq->throttle_count; in throttled_hierarchy()
4699 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; in throttled_lb_pair()
4701 src_cfs_rq = tg->cfs_rq[src_cpu]; in throttled_lb_pair()
4702 dest_cfs_rq = tg->cfs_rq[dest_cpu]; in throttled_lb_pair()
4711 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up() local
4713 cfs_rq->throttle_count--; in tg_unthrottle_up()
4714 if (!cfs_rq->throttle_count) { in tg_unthrottle_up()
4715 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - in tg_unthrottle_up()
4716 cfs_rq->throttled_clock_task; in tg_unthrottle_up()
4719 if (cfs_rq->nr_running >= 1) in tg_unthrottle_up()
4720 list_add_leaf_cfs_rq(cfs_rq); in tg_unthrottle_up()
4729 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down() local
4732 if (!cfs_rq->throttle_count) { in tg_throttle_down()
4733 cfs_rq->throttled_clock_task = rq_clock_task(rq); in tg_throttle_down()
4734 list_del_leaf_cfs_rq(cfs_rq); in tg_throttle_down()
4736 cfs_rq->throttle_count++; in tg_throttle_down()
4741 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) in throttle_cfs_rq() argument
4743 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq()
4744 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
4750 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { in throttle_cfs_rq()
4761 list_add_tail_rcu(&cfs_rq->throttled_list, in throttle_cfs_rq()
4769 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
4773 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
4776 task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
4777 idle_task_delta = cfs_rq->idle_h_nr_running; in throttle_cfs_rq()
4779 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
4805 cfs_rq->throttled = 1; in throttle_cfs_rq()
4806 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
4810 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq() argument
4812 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq()
4813 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
4817 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
4819 cfs_rq->throttled = 0; in unthrottle_cfs_rq()
4824 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
4825 list_del_rcu(&cfs_rq->throttled_list); in unthrottle_cfs_rq()
4829 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
4831 if (!cfs_rq->load.weight) in unthrottle_cfs_rq()
4834 task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
4835 idle_task_delta = cfs_rq->idle_h_nr_running; in unthrottle_cfs_rq()
4839 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
4840 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
4842 cfs_rq->h_nr_running += task_delta; in unthrottle_cfs_rq()
4843 cfs_rq->idle_h_nr_running += idle_task_delta; in unthrottle_cfs_rq()
4846 if (cfs_rq_throttled(cfs_rq)) in unthrottle_cfs_rq()
4851 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
4853 update_load_avg(cfs_rq, se, UPDATE_TG); in unthrottle_cfs_rq()
4856 cfs_rq->h_nr_running += task_delta; in unthrottle_cfs_rq()
4857 cfs_rq->idle_h_nr_running += idle_task_delta; in unthrottle_cfs_rq()
4861 if (cfs_rq_throttled(cfs_rq)) in unthrottle_cfs_rq()
4868 if (throttled_hierarchy(cfs_rq)) in unthrottle_cfs_rq()
4869 list_add_leaf_cfs_rq(cfs_rq); in unthrottle_cfs_rq()
4882 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
4884 if (list_add_leaf_cfs_rq(cfs_rq)) in unthrottle_cfs_rq()
4897 struct cfs_rq *cfs_rq; in distribute_cfs_runtime() local
4901 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, in distribute_cfs_runtime()
4903 struct rq *rq = rq_of(cfs_rq); in distribute_cfs_runtime()
4907 if (!cfs_rq_throttled(cfs_rq)) in distribute_cfs_runtime()
4911 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); in distribute_cfs_runtime()
4914 runtime = -cfs_rq->runtime_remaining + 1; in distribute_cfs_runtime()
4921 cfs_rq->runtime_remaining += runtime; in distribute_cfs_runtime()
4924 if (cfs_rq->runtime_remaining > 0) in distribute_cfs_runtime()
4925 unthrottle_cfs_rq(cfs_rq); in distribute_cfs_runtime()
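
distribute_cfs_runtime() (lines 4897-4925) walks the throttled list and gives each cfs_rq only what it needs to reach +1 ns of runtime (line 4914), so a single replenishment can unthrottle several queues instead of being swallowed by the first one. Worked numbers, with the RCU list walk and per-rq locking elided:

    #include <stdio.h>

    int main(void)
    {
        long long pool = 2000000;  /* replenished quota, ns */
        long long remaining[3] = { -300000, -1000000, -900000 };

        for (int i = 0; i < 3 && pool > 0; i++) {
            /* Just enough to make runtime_remaining positive. */
            long long runtime = -remaining[i] + 1;

            if (runtime > pool)
                runtime = pool;
            pool -= runtime;
            remaining[i] += runtime;
            printf("cfs_rq %d: remaining %lld -> %s\n", i, remaining[i],
                   remaining[i] > 0 ? "unthrottled" : "still throttled");
        }
        return 0;
    }
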
5047 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in __return_cfs_rq_runtime() argument
5049 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
5050 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; in __return_cfs_rq_runtime()
5067 cfs_rq->runtime_remaining -= slack_runtime; in __return_cfs_rq_runtime()
5070 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in return_cfs_rq_runtime() argument
5075 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) in return_cfs_rq_runtime()
5078 __return_cfs_rq_runtime(cfs_rq); in return_cfs_rq_runtime()
5118 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) in check_enqueue_throttle() argument
5124 if (!cfs_rq->runtime_enabled || cfs_rq->curr) in check_enqueue_throttle()
5128 if (cfs_rq_throttled(cfs_rq)) in check_enqueue_throttle()
5132 account_cfs_rq_runtime(cfs_rq, 0); in check_enqueue_throttle()
5133 if (cfs_rq->runtime_remaining <= 0) in check_enqueue_throttle()
5134 throttle_cfs_rq(cfs_rq); in check_enqueue_throttle()
5139 struct cfs_rq *pcfs_rq, *cfs_rq; in sync_throttle() local
5147 cfs_rq = tg->cfs_rq[cpu]; in sync_throttle()
5148 pcfs_rq = tg->parent->cfs_rq[cpu]; in sync_throttle()
5150 cfs_rq->throttle_count = pcfs_rq->throttle_count; in sync_throttle()
5151 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); in sync_throttle()
5155 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) in check_cfs_rq_runtime() argument
5160 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) in check_cfs_rq_runtime()
5167 if (cfs_rq_throttled(cfs_rq)) in check_cfs_rq_runtime()
5170 return throttle_cfs_rq(cfs_rq); in check_cfs_rq_runtime()
5254 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) in init_cfs_rq_runtime() argument
5256 cfs_rq->runtime_enabled = 0; in init_cfs_rq_runtime()
5257 INIT_LIST_HEAD(&cfs_rq->throttled_list); in init_cfs_rq_runtime()
5299 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled() local
5302 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; in update_runtime_enabled()
5317 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs() local
5319 if (!cfs_rq->runtime_enabled) in unthrottle_offline_cfs_rqs()
5326 cfs_rq->runtime_remaining = 1; in unthrottle_offline_cfs_rqs()
5331 cfs_rq->runtime_enabled = 0; in unthrottle_offline_cfs_rqs()
5333 if (cfs_rq_throttled(cfs_rq)) in unthrottle_offline_cfs_rqs()
5334 unthrottle_cfs_rq(cfs_rq); in unthrottle_offline_cfs_rqs()
5346 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} in account_cfs_rq_runtime() argument
5347 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } in check_cfs_rq_runtime() argument
5348 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} in check_enqueue_throttle() argument
5350 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in return_cfs_rq_runtime() argument
5352 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
5357 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
5371 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in init_cfs_rq_runtime() argument
5392 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair() local
5397 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
5477 struct cfs_rq *cfs_rq; in enqueue_task_fair() local
5501 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5502 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
5504 cfs_rq->h_nr_running++; in enqueue_task_fair()
5505 cfs_rq->idle_h_nr_running += idle_h_nr_running; in enqueue_task_fair()
5508 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
5515 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5517 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
5521 cfs_rq->h_nr_running++; in enqueue_task_fair()
5522 cfs_rq->idle_h_nr_running += idle_h_nr_running; in enqueue_task_fair()
5525 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
5532 if (throttled_hierarchy(cfs_rq)) in enqueue_task_fair()
5533 list_add_leaf_cfs_rq(cfs_rq); in enqueue_task_fair()
5565 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5567 if (list_add_leaf_cfs_rq(cfs_rq)) in enqueue_task_fair()
5586 struct cfs_rq *cfs_rq; in dequeue_task_fair() local
5593 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5594 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
5596 cfs_rq->h_nr_running--; in dequeue_task_fair()
5597 cfs_rq->idle_h_nr_running -= idle_h_nr_running; in dequeue_task_fair()
5600 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
5604 if (cfs_rq->load.weight) { in dequeue_task_fair()
5611 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
5619 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5621 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
5625 cfs_rq->h_nr_running--; in dequeue_task_fair()
5626 cfs_rq->idle_h_nr_running -= idle_h_nr_running; in dequeue_task_fair()
5629 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
5684 struct cfs_rq *cfs_rq; in cpu_load_without() local
5691 cfs_rq = &rq->cfs; in cpu_load_without()
5692 load = READ_ONCE(cfs_rq->avg.load_avg); in cpu_load_without()
5707 struct cfs_rq *cfs_rq; in cpu_runnable_without() local
5714 cfs_rq = &rq->cfs; in cpu_runnable_without()
5715 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_runnable_without()
6349 struct cfs_rq *cfs_rq; in cpu_util() local
6352 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util()
6353 util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
6356 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); in cpu_util()
6376 struct cfs_rq *cfs_rq; in cpu_util_without() local
6383 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_without()
6384 util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util_without()
6417 READ_ONCE(cfs_rq->avg.util_est.enqueued); in cpu_util_without()
6456 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_next() local
6457 unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util_next()
6471 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); in cpu_util_next()
6772 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair() local
6779 min_vruntime_copy = cfs_rq->min_vruntime_copy; in migrate_task_rq_fair()
6781 min_vruntime = cfs_rq->min_vruntime; in migrate_task_rq_fair()
6784 min_vruntime = cfs_rq->min_vruntime; in migrate_task_rq_fair()
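
The retry loop in migrate_task_rq_fair() (lines 6779-6784) is the reader half of the copy written at line 568: on 32-bit kernels a u64 load can tear, so the reader spins until value and copy agree, with smp_rmb() ordering its two loads against the writer's smp_wmb(). A userspace model using C11 acquire/release on the copy in place of the barriers; relaxed atomics stand in for the plain, possibly-tearing accesses:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t min_vruntime;
    static _Atomic uint64_t min_vruntime_copy;

    /* Writer (holds the queue lock): value first, then the copy. */
    static void publish(uint64_t v)
    {
        atomic_store_explicit(&min_vruntime, v, memory_order_relaxed);
        atomic_store_explicit(&min_vruntime_copy, v, memory_order_release);
    }

    /* Lockless reader: retry until the two loads are consistent. */
    static uint64_t read_min_vruntime(void)
    {
        uint64_t copy, val;

        do {
            copy = atomic_load_explicit(&min_vruntime_copy,
                                        memory_order_acquire);
            val = atomic_load_explicit(&min_vruntime,
                                       memory_order_relaxed);
        } while (val != copy);

        return val;
    }

    int main(void)
    {
        publish(42);
        printf("read %llu\n", (unsigned long long)read_min_vruntime());
        return 0;
    }
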
6920 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in check_preempt_wakeup() local
6921 int scale = cfs_rq->nr_running >= sched_nr_latency; in check_preempt_wakeup()
7002 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair() local
7024 struct sched_entity *curr = cfs_rq->curr; in pick_next_task_fair()
7034 update_curr(cfs_rq); in pick_next_task_fair()
7044 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { in pick_next_task_fair()
7045 cfs_rq = &rq->cfs; in pick_next_task_fair()
7047 if (!cfs_rq->nr_running) in pick_next_task_fair()
7054 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
7055 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7056 } while (cfs_rq); in pick_next_task_fair()
7068 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
7082 put_prev_entity(cfs_rq, pse); in pick_next_task_fair()
7083 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7093 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
7094 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7095 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7096 } while (cfs_rq); in pick_next_task_fair()
7154 struct cfs_rq *cfs_rq; in put_prev_task_fair() local
7157 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
7158 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
7170 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair() local
7179 clear_buddies(cfs_rq, se); in yield_task_fair()
7186 update_curr(cfs_rq); in yield_task_fair()
7818 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) in cfs_rq_has_blocked() argument
7820 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
7823 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
7856 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } in cfs_rq_has_blocked() argument
7889 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) in cfs_rq_is_decayed() argument
7891 if (cfs_rq->load.weight) in cfs_rq_is_decayed()
7894 if (cfs_rq->avg.load_sum) in cfs_rq_is_decayed()
7897 if (cfs_rq->avg.util_sum) in cfs_rq_is_decayed()
7900 if (cfs_rq->avg.runnable_sum) in cfs_rq_is_decayed()
7908 struct cfs_rq *cfs_rq, *pos; in __update_blocked_fair() local
7916 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { in __update_blocked_fair()
7919 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { in __update_blocked_fair()
7920 update_tg_load_avg(cfs_rq); in __update_blocked_fair()
7922 if (cfs_rq == &rq->cfs) in __update_blocked_fair()
7927 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
7935 if (cfs_rq_is_decayed(cfs_rq)) in __update_blocked_fair()
7936 list_del_leaf_cfs_rq(cfs_rq); in __update_blocked_fair()
7939 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
7951 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) in update_cfs_rq_h_load() argument
7953 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load()
7954 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
7958 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
7961 WRITE_ONCE(cfs_rq->h_load_next, NULL); in update_cfs_rq_h_load()
7963 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
7964 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
7965 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
7970 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); in update_cfs_rq_h_load()
7971 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
7974 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
7975 load = cfs_rq->h_load; in update_cfs_rq_h_load()
7977 cfs_rq_load_avg(cfs_rq) + 1); in update_cfs_rq_h_load()
7978 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
7979 cfs_rq->h_load = load; in update_cfs_rq_h_load()
7980 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
7986 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load() local
7988 update_cfs_rq_h_load(cfs_rq); in task_h_load()
7989 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
7990 cfs_rq_load_avg(cfs_rq) + 1); in task_h_load()
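
With group scheduling, a task's effective load for load balancing is its own load_avg scaled through each ancestor group's share of its parent queue: update_cfs_rq_h_load() (lines 7951-7980) propagates h_load top-down one level at a time, and task_h_load() (lines 7986-7990) applies the final level. A worked two-level example, with plain division standing in for div64_ul() and the +1 guarding against division by zero:

    #include <stdio.h>

    int main(void)
    {
        /* Root cfs_rq: the group's entity weighs 512 of a 2048 total. */
        unsigned long root_load_avg = 2048;
        unsigned long group_se_load_avg = 512;

        /* The group's own cfs_rq holds two tasks of load 300 each. */
        unsigned long group_cfs_load_avg = 600;
        unsigned long task_load_avg = 300;

        /* Top-down: root h_load is its own load; each level scales by
         * the group entity's share of the parent queue. */
        unsigned long long h_load = root_load_avg;
        h_load = h_load * group_se_load_avg / (root_load_avg + 1);

        /* task_h_load(): the task's share of its group's h_load. */
        unsigned long long task_h_load =
            task_load_avg * h_load / (group_cfs_load_avg + 1);

        printf("group h_load = %llu, task_h_load = %llu\n",
               h_load, task_h_load);
        return 0;
    }

Here the task is half of a group that owns roughly a quarter of the CPU, so its effective load comes out near 256 rather than its raw 300.
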
7995 struct cfs_rq *cfs_rq = &rq->cfs; in __update_blocked_fair() local
7998 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); in __update_blocked_fair()
7999 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
10695 struct cfs_rq *cfs_rq; in task_tick_fair() local
10699 cfs_rq = cfs_rq_of(se); in task_tick_fair()
10700 entity_tick(cfs_rq, se, queued); in task_tick_fair()
10717 struct cfs_rq *cfs_rq; in task_fork_fair() local
10725 cfs_rq = task_cfs_rq(current); in task_fork_fair()
10726 curr = cfs_rq->curr; in task_fork_fair()
10728 update_curr(cfs_rq); in task_fork_fair()
10731 place_entity(cfs_rq, se, 1); in task_fork_fair()
10742 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
10806 struct cfs_rq *cfs_rq; in propagate_entity_cfs_rq() local
10812 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
10814 if (cfs_rq_throttled(cfs_rq)) in propagate_entity_cfs_rq()
10817 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
10826 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq() local
10829 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
10830 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
10831 update_tg_load_avg(cfs_rq); in detach_entity_cfs_rq()
10837 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq() local
10848 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
10849 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
10850 update_tg_load_avg(cfs_rq); in attach_entity_cfs_rq()
10857 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq() local
10864 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
10865 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
10874 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq() local
10879 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
10924 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair() local
10926 set_next_entity(cfs_rq, se); in set_next_task_fair()
10928 account_cfs_rq_runtime(cfs_rq, 0); in set_next_task_fair()
10932 void init_cfs_rq(struct cfs_rq *cfs_rq) in init_cfs_rq() argument
10934 cfs_rq->tasks_timeline = RB_ROOT_CACHED; in init_cfs_rq()
10935 cfs_rq->min_vruntime = (u64)(-(1LL << 20)); in init_cfs_rq()
10937 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; in init_cfs_rq()
10940 raw_spin_lock_init(&cfs_rq->removed.lock); in init_cfs_rq()
10985 if (tg->cfs_rq) in free_fair_sched_group()
10986 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
10991 kfree(tg->cfs_rq); in free_fair_sched_group()
10998 struct cfs_rq *cfs_rq; in alloc_fair_sched_group() local
11001 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); in alloc_fair_sched_group()
11002 if (!tg->cfs_rq) in alloc_fair_sched_group()
11013 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), in alloc_fair_sched_group()
11015 if (!cfs_rq) in alloc_fair_sched_group()
11023 init_cfs_rq(cfs_rq); in alloc_fair_sched_group()
11024 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
11031 kfree(cfs_rq); in alloc_fair_sched_group()
11068 if (!tg->cfs_rq[cpu]->on_list) in unregister_fair_sched_group()
11074 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); in unregister_fair_sched_group()
11079 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
11085 cfs_rq->tg = tg; in init_tg_cfs_entry()
11086 cfs_rq->rq = rq; in init_tg_cfs_entry()
11087 init_cfs_rq_runtime(cfs_rq); in init_tg_cfs_entry()
11089 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
11097 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
11100 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
11104 se->my_q = cfs_rq; in init_tg_cfs_entry()
11230 struct cfs_rq *cfs_rq, *pos; in print_cfs_stats() local
11233 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) in print_cfs_stats()
11234 print_cfs_rq(m, cpu, cfs_rq); in print_cfs_stats()
11281 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq) in sched_trace_cfs_rq_avg() argument
11284 return cfs_rq ? &cfs_rq->avg : NULL; in sched_trace_cfs_rq_avg()
11291 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len) in sched_trace_cfs_rq_path() argument
11293 if (!cfs_rq) { in sched_trace_cfs_rq_path()
11300 cfs_rq_tg_path(cfs_rq, str, len); in sched_trace_cfs_rq_path()
11305 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq) in sched_trace_cfs_rq_cpu() argument
11307 return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1; in sched_trace_cfs_rq_cpu()