Lines matching references to the identifier se (struct sched_entity) in the CFS scheduler, kernel/sched/fair.c
258 static inline struct task_struct *task_of(struct sched_entity *se) in task_of() argument
260 SCHED_WARN_ON(!entity_is_task(se)); in task_of()
261 return container_of(se, struct task_struct, se); in task_of()
265 #define for_each_sched_entity(se) \ argument
266 for (; se; se = se->parent)
270 return p->se.cfs_rq; in task_cfs_rq()
274 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
276 return se->cfs_rq; in cfs_rq_of()
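task_of() recovers the task from its embedded sched_entity via container_of(), and for_each_sched_entity() walks an entity up through its group parents (or, without CONFIG_FAIR_GROUP_SCHED, visits only the entity itself). A minimal userspace sketch of the two patterns; the structs only mirror the kernel's layout and are not kernel code:

/* Minimal userspace sketch, not kernel code. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_entity {
	struct sched_entity *parent;	/* NULL at the top of the hierarchy */
};

struct task_struct {
	int pid;
	struct sched_entity se;		/* embedded, which is what makes task_of() work */
};

#define for_each_sched_entity(se) \
	for (; se; se = se->parent)

int main(void)
{
	struct task_struct p = { .pid = 42 };
	struct sched_entity *se = &p.se;

	/* task_of(): step back from the member to the containing task. */
	printf("pid=%d\n", container_of(se, struct task_struct, se)->pid);

	/* Walk toward the root, as for_each_sched_entity() does. */
	for_each_sched_entity(se)
		printf("visiting entity %p\n", (void *)se);
	return 0;
}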
362 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
364 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
365 return se->cfs_rq; in is_same_group()
370 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
372 return se->parent; in parent_entity()
376 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
388 se_depth = (*se)->depth; in find_matching_se()
393 *se = parent_entity(*se); in find_matching_se()
401 while (!is_same_group(*se, *pse)) { in find_matching_se()
402 *se = parent_entity(*se); in find_matching_se()
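find_matching_se() first equalizes the tree depth of the two entities, then climbs both in lockstep until they sit on the same cfs_rq, so the caller can compare vruntimes that share a baseline. A standalone sketch with a toy two-level hierarchy; the cfs_rq pointers are arbitrary tags, not real runqueues:

#include <stdio.h>

struct sched_entity {
	struct sched_entity *parent;
	void *cfs_rq;		/* runqueue this entity is queued on */
	int depth;
};

static struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

static void find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth = (*se)->depth;
	int pse_depth = (*pse)->depth;

	/* First bring both entities to the same depth... */
	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}
	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}
	/* ...then climb in lockstep until they share a runqueue. */
	while ((*se)->cfs_rq != (*pse)->cfs_rq) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

int main(void)
{
	int rq_root, rq_a, rq_b;			/* stand-in runqueue identities */
	struct sched_entity ga = { NULL, &rq_root, 0 };	/* group A on the root */
	struct sched_entity gb = { NULL, &rq_root, 0 };	/* group B on the root */
	struct sched_entity ta = { &ga, &rq_a, 1 };	/* task in group A */
	struct sched_entity tb = { &gb, &rq_b, 1 };	/* task in group B */
	struct sched_entity *x = &ta, *y = &tb;

	find_matching_se(&x, &y);
	printf("matched at depth %d\n", x->depth);	/* prints 0 */
	return 0;
}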
409 static inline struct task_struct *task_of(struct sched_entity *se) in task_of() argument
411 return container_of(se, struct task_struct, se); in task_of()
420 #define for_each_sched_entity(se) \ argument
421 for (; se; se = NULL)
428 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
430 struct task_struct *p = task_of(se); in cfs_rq_of()
453 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
459 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
511 struct sched_entity *se; in update_min_vruntime() local
512 se = rb_entry(leftmost, struct sched_entity, run_node); in update_min_vruntime()
515 vruntime = se->vruntime; in update_min_vruntime()
517 vruntime = min_vruntime(vruntime, se->vruntime); in update_min_vruntime()
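update_min_vruntime() folds curr and the leftmost entity into a per-runqueue minimum, but the final store goes through max_vruntime(), so min_vruntime only ever moves forward. A small sketch of that monotonic baseline, using the wrap-safe signed comparison the kernel's max_vruntime()/min_vruntime() helpers rely on:

#include <stdio.h>
#include <stdint.h>

static uint64_t max_vruntime(uint64_t max_vr, uint64_t vr)
{
	if ((int64_t)(vr - max_vr) > 0)	/* wrap-safe signed compare */
		max_vr = vr;
	return max_vr;
}

int main(void)
{
	uint64_t min_vruntime = 1000;

	/* The leftmost entity has advanced: the baseline follows it. */
	min_vruntime = max_vruntime(min_vruntime, 1500);
	/* A newly placed entity behind the baseline must not drag it back. */
	min_vruntime = max_vruntime(min_vruntime, 900);
	printf("min_vruntime=%llu\n", (unsigned long long)min_vruntime); /* 1500 */
	return 0;
}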
531 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
548 if (entity_before(se, entry)) { in __enqueue_entity()
556 rb_link_node(&se->run_node, parent, link); in __enqueue_entity()
557 rb_insert_color_cached(&se->run_node, in __enqueue_entity()
561 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
563 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
576 static struct sched_entity *__pick_next_entity(struct sched_entity *se) in __pick_next_entity() argument
578 struct rb_node *next = rb_next(&se->run_node); in __pick_next_entity()
628 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
630 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
631 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
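calc_delta_fair() converts wall-clock runtime into weighted virtual time, delta * NICE_0_LOAD / weight, so heavier entities accrue vruntime more slowly. A worked example with the conceptual NICE_0_LOAD of 1024; the kernel's __calc_delta() computes the same ratio in fixed point without the plain division shown here:

#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024

static uint64_t calc_delta_fair(uint64_t delta, unsigned long weight)
{
	if (weight != NICE_0_LOAD)
		delta = delta * NICE_0_LOAD / weight;
	return delta;
}

int main(void)
{
	/* 6 ms of runtime for a nice-0 task vs a weight-2048 task:
	 * the heavier task's vruntime advances half as fast. */
	printf("nice0: %llu ns\n", (unsigned long long)calc_delta_fair(6000000, 1024));
	printf("heavy: %llu ns\n", (unsigned long long)calc_delta_fair(6000000, 2048));
	return 0;
}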
658 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
660 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); in sched_slice()
662 for_each_sched_entity(se) { in sched_slice()
666 cfs_rq = cfs_rq_of(se); in sched_slice()
669 if (unlikely(!se->on_rq)) { in sched_slice()
672 update_load_add(&lw, se->load.weight); in sched_slice()
675 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
685 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
687 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
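sched_slice() hands each entity a share of the scheduling period proportional to its weight on the runqueue, and sched_vslice() converts that slice to virtual time via calc_delta_fair(). A worked example with assumed toy values (24 ms period, one nice-0 entity against a weight-2048 one); the kernel's __calc_delta() does this division in fixed point:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period = 24000000;		/* assumed 24 ms __sched_period() */
	unsigned long w_se = 1024;		/* nice-0 entity weight */
	unsigned long w_rq = 1024 + 2048;	/* total cfs_rq->load.weight */

	/* slice = period * se->load.weight / cfs_rq->load.weight */
	uint64_t slice = period * w_se / w_rq;
	printf("slice = %llu ns (8 ms of the 24 ms period)\n",
	       (unsigned long long)slice);
	return 0;
}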
698 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
700 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
710 if (entity_is_task(se)) in init_entity_runnable_average()
711 sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
713 se->runnable_weight = se->load.weight; in init_entity_runnable_average()
719 static void attach_entity_cfs_rq(struct sched_entity *se);
747 void post_init_entity_util_avg(struct sched_entity *se) in post_init_entity_util_avg() argument
749 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg()
750 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
756 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
766 if (entity_is_task(se)) { in post_init_entity_util_avg()
767 struct task_struct *p = task_of(se); in post_init_entity_util_avg()
779 se->avg.last_update_time = cfs_rq_clock_task(cfs_rq); in post_init_entity_util_avg()
784 attach_entity_cfs_rq(se); in post_init_entity_util_avg()
788 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
791 void post_init_entity_util_avg(struct sched_entity *se) in post_init_entity_util_avg() argument
839 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
843 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
851 prev_wait_start = schedstat_val(se->statistics.wait_start); in update_stats_wait_start()
853 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && in update_stats_wait_start()
857 __schedstat_set(se->statistics.wait_start, wait_start); in update_stats_wait_start()
861 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
869 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); in update_stats_wait_end()
871 if (entity_is_task(se)) { in update_stats_wait_end()
872 p = task_of(se); in update_stats_wait_end()
879 __schedstat_set(se->statistics.wait_start, delta); in update_stats_wait_end()
885 __schedstat_set(se->statistics.wait_max, in update_stats_wait_end()
886 max(schedstat_val(se->statistics.wait_max), delta)); in update_stats_wait_end()
887 __schedstat_inc(se->statistics.wait_count); in update_stats_wait_end()
888 __schedstat_add(se->statistics.wait_sum, delta); in update_stats_wait_end()
889 __schedstat_set(se->statistics.wait_start, 0); in update_stats_wait_end()
893 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper() argument
901 sleep_start = schedstat_val(se->statistics.sleep_start); in update_stats_enqueue_sleeper()
902 block_start = schedstat_val(se->statistics.block_start); in update_stats_enqueue_sleeper()
904 if (entity_is_task(se)) in update_stats_enqueue_sleeper()
905 tsk = task_of(se); in update_stats_enqueue_sleeper()
913 if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) in update_stats_enqueue_sleeper()
914 __schedstat_set(se->statistics.sleep_max, delta); in update_stats_enqueue_sleeper()
916 __schedstat_set(se->statistics.sleep_start, 0); in update_stats_enqueue_sleeper()
917 __schedstat_add(se->statistics.sum_sleep_runtime, delta); in update_stats_enqueue_sleeper()
930 if (unlikely(delta > schedstat_val(se->statistics.block_max))) in update_stats_enqueue_sleeper()
931 __schedstat_set(se->statistics.block_max, delta); in update_stats_enqueue_sleeper()
933 __schedstat_set(se->statistics.block_start, 0); in update_stats_enqueue_sleeper()
934 __schedstat_add(se->statistics.sum_sleep_runtime, delta); in update_stats_enqueue_sleeper()
938 __schedstat_add(se->statistics.iowait_sum, delta); in update_stats_enqueue_sleeper()
939 __schedstat_inc(se->statistics.iowait_count); in update_stats_enqueue_sleeper()
964 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue() argument
973 if (se != cfs_rq->curr) in update_stats_enqueue()
974 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
977 update_stats_enqueue_sleeper(cfs_rq, se); in update_stats_enqueue()
981 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue() argument
991 if (se != cfs_rq->curr) in update_stats_dequeue()
992 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
994 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { in update_stats_dequeue()
995 struct task_struct *tsk = task_of(se); in update_stats_dequeue()
998 __schedstat_set(se->statistics.sleep_start, in update_stats_dequeue()
1001 __schedstat_set(se->statistics.block_start, in update_stats_dequeue()
1010 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1015 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2014 now = p->se.exec_start; in numa_get_avg_runtime()
2015 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2021 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
2466 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
2597 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
2598 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
2623 now = curr->se.sum_exec_runtime; in task_tick_numa()
2691 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
2693 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2694 if (!parent_entity(se)) in account_entity_enqueue()
2695 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_enqueue()
2697 if (entity_is_task(se)) { in account_entity_enqueue()
2700 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
2701 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
2708 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
2710 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
2711 if (!parent_entity(se)) in account_entity_dequeue()
2712 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); in account_entity_dequeue()
2714 if (entity_is_task(se)) { in account_entity_dequeue()
2715 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
2716 list_del_init(&se->group_node); in account_entity_dequeue()
2761 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_runnable_load_avg() argument
2763 cfs_rq->runnable_weight += se->runnable_weight; in enqueue_runnable_load_avg()
2765 cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg; in enqueue_runnable_load_avg()
2766 cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum; in enqueue_runnable_load_avg()
2770 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_runnable_load_avg() argument
2772 cfs_rq->runnable_weight -= se->runnable_weight; in dequeue_runnable_load_avg()
2774 sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg); in dequeue_runnable_load_avg()
2776 se_runnable(se) * se->avg.runnable_load_sum); in dequeue_runnable_load_avg()
2780 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
2782 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
2783 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
2787 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
2789 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
2790 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
2794 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_runnable_load_avg() argument
2796 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_runnable_load_avg() argument
2798 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
2800 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
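The dequeue paths above go through sub_positive() so that removing a possibly stale per-entity contribution can never drive the runqueue aggregates negative. A simplified version of that clamped subtraction (GNU C typeof, as in the kernel; the real macro also uses READ_ONCE):

#include <stdio.h>

/* Detect unsigned underflow (res > var after subtracting) and clamp. */
#define sub_positive(_ptr, _val) do {			\
	typeof(*(_ptr)) var = *(_ptr);			\
	typeof(*(_ptr)) res = var - (_val);		\
	if (res > var)	/* wrapped around: clamp */	\
		res = 0;				\
	*(_ptr) = res;					\
} while (0)

int main(void)
{
	unsigned long load_avg = 100;

	sub_positive(&load_avg, 60);	/* normal case: 40 */
	sub_positive(&load_avg, 75);	/* stale value: clamps to 0 */
	printf("load_avg=%lu\n", load_avg);
	return 0;
}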
2803 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
2806 if (se->on_rq) { in reweight_entity()
2808 if (cfs_rq->curr == se) in reweight_entity()
2810 account_entity_dequeue(cfs_rq, se); in reweight_entity()
2811 dequeue_runnable_load_avg(cfs_rq, se); in reweight_entity()
2813 dequeue_load_avg(cfs_rq, se); in reweight_entity()
2815 se->runnable_weight = runnable; in reweight_entity()
2816 update_load_set(&se->load, weight); in reweight_entity()
2820 u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib; in reweight_entity()
2822 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
2823 se->avg.runnable_load_avg = in reweight_entity()
2824 div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider); in reweight_entity()
2828 enqueue_load_avg(cfs_rq, se); in reweight_entity()
2829 if (se->on_rq) { in reweight_entity()
2830 account_entity_enqueue(cfs_rq, se); in reweight_entity()
2831 enqueue_runnable_load_avg(cfs_rq, se); in reweight_entity()
2837 struct sched_entity *se = &p->se; in reweight_task() local
2838 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task()
2839 struct load_weight *load = &se->load; in reweight_task()
2842 reweight_entity(cfs_rq, se, weight, weight); in reweight_task()
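reweight_entity() withdraws the entity's old contributions, installs the new weight, and rebuilds load_avg from the weight-free load_sum using the divider shown at line 2820, so the PELT average reflects the new weight immediately. A condensed arithmetic sketch with assumed values; LOAD_AVG_MAX matches the kernel constant:

#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX 47742	/* maximum attainable PELT sum */

int main(void)
{
	uint32_t period_contrib = 512;	/* assumed se->avg.period_contrib */
	uint32_t divider = LOAD_AVG_MAX - 1024 + period_contrib;
	uint64_t load_sum = divider;	/* entity runnable for its whole history */

	/* Same history under two weights: load_avg scales with weight. */
	for (unsigned long w = 1024; w <= 2048; w += 1024)
		printf("weight=%lu -> load_avg=%llu\n", w,
		       (unsigned long long)(w * load_sum / divider));
	return 0;
}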
3006 static void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3008 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3020 if (likely(se->load.weight == shares)) in update_cfs_group()
3027 reweight_entity(cfs_rq_of(se), se, shares, runnable); in update_cfs_group()
3031 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3097 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
3113 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
3137 __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se); in set_task_rq_fair()
3138 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
3211 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
3213 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
3228 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
3229 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX; in update_tg_cfs_util()
3237 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
3254 runnable_sum += se->avg.load_sum; in update_tg_cfs_runnable()
3267 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_runnable()
3275 running_sum = se->avg.util_sum / in update_tg_cfs_runnable()
3279 load_sum = (s64)se_weight(se) * runnable_sum; in update_tg_cfs_runnable()
3282 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_runnable()
3283 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_runnable()
3285 se->avg.load_sum = runnable_sum; in update_tg_cfs_runnable()
3286 se->avg.load_avg = load_avg; in update_tg_cfs_runnable()
3290 runnable_load_sum = (s64)se_runnable(se) * runnable_sum; in update_tg_cfs_runnable()
3292 delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum; in update_tg_cfs_runnable()
3293 delta_avg = runnable_load_avg - se->avg.runnable_load_avg; in update_tg_cfs_runnable()
3295 se->avg.runnable_load_sum = runnable_sum; in update_tg_cfs_runnable()
3296 se->avg.runnable_load_avg = runnable_load_avg; in update_tg_cfs_runnable()
3298 if (se->on_rq) { in update_tg_cfs_runnable()
3311 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3315 if (entity_is_task(se)) in propagate_entity_load_avg()
3318 gcfs_rq = group_cfs_rq(se); in propagate_entity_load_avg()
3324 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
3328 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3329 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3338 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update() argument
3340 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
3346 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
3368 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3446 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in attach_entity_load_avg() argument
3457 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
3458 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
3466 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
3468 se->avg.load_sum = divider; in attach_entity_load_avg()
3469 if (se_weight(se)) { in attach_entity_load_avg()
3470 se->avg.load_sum = in attach_entity_load_avg()
3471 div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
3474 se->avg.runnable_load_sum = se->avg.load_sum; in attach_entity_load_avg()
3476 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
3477 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
3478 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
3480 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
3493 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
3495 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
3496 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
3497 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
3499 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
3512 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
3523 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
3524 __update_load_avg_se(now, cpu, cfs_rq, se); in update_load_avg()
3527 decayed |= propagate_entity_load_avg(se); in update_load_avg()
3529 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
3538 attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION); in update_load_avg()
3570 void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg() argument
3572 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg()
3576 __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se); in sync_entity_load_avg()
3583 void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
3585 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
3598 sync_entity_load_avg(se); in remove_entity_load_avg()
3602 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
3603 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
3604 cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */ in remove_entity_load_avg()
3622 return READ_ONCE(p->se.avg.util_avg); in task_util()
3627 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
3690 ue = p->se.avg.util_est; in util_est_dequeue()
3723 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_dequeue()
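util_est keeps a per-task estimate of utilization that util_est_dequeue() updates at dequeue time, moving the EWMA a fraction of the way toward the utilization just observed. A sketch of that update step, assuming the kernel's UTIL_EST_WEIGHT_SHIFT of 2 and toy utilization values:

#include <stdio.h>

#define UTIL_EST_WEIGHT_SHIFT 2

int main(void)
{
	long ewma = 400, enqueued = 800;	/* toy utilization values */
	long diff = enqueued - ewma;

	/* ewma += (enqueued - ewma) / 4, done with shifts as in the kernel */
	ewma = ((ewma << UTIL_EST_WEIGHT_SHIFT) + diff) >> UTIL_EST_WEIGHT_SHIFT;
	printf("ewma=%ld\n", ewma);		/* 500 */
	return 0;
}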
3732 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
3737 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
3740 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {} in attach_entity_load_avg() argument
3742 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
3758 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
3761 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
3772 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
3783 vruntime += sched_vslice(cfs_rq, se); in place_entity()
3800 se->vruntime = max_vruntime(se->vruntime, vruntime); in place_entity()
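place_entity() pushes a newly forked entity one vslice past min_vruntime (initial=1) and grants a waking sleeper a bounded credit below it (initial=0); the max_vruntime() at line 3800 keeps a long sleeper from banking unlimited runtime. A sketch of the placement arithmetic with toy values; the sleeper credit here assumes the gentle-sleeper halving of sched_latency:

#include <stdio.h>
#include <stdint.h>

static uint64_t max_vruntime(uint64_t a, uint64_t b)
{
	return (int64_t)(b - a) > 0 ? b : a;
}

int main(void)
{
	uint64_t min_vruntime = 10000000;
	uint64_t vslice = 3000000;		/* assumed sched_vslice() result */
	uint64_t latency_half = 3000000;	/* assumed sysctl_sched_latency / 2 */

	/* initial=1: fork places the child past the current left edge. */
	uint64_t fork_v = min_vruntime + vslice;

	/* initial=0: a wakeup credits at most half a latency period... */
	uint64_t wake_v = min_vruntime - latency_half;
	uint64_t se_vruntime = 2000000;		/* entity slept a long time */

	/* ...but the clamp stops it from keeping an ancient vruntime. */
	se_vruntime = max_vruntime(se_vruntime, wake_v);
	printf("fork=%llu wake=%llu\n",
	       (unsigned long long)fork_v, (unsigned long long)se_vruntime);
	return 0;
}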
3857 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
3860 bool curr = cfs_rq->curr == se; in enqueue_entity()
3867 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3878 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3888 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
3889 update_cfs_group(se); in enqueue_entity()
3890 enqueue_runnable_load_avg(cfs_rq, se); in enqueue_entity()
3891 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
3894 place_entity(cfs_rq, se, 0); in enqueue_entity()
3897 update_stats_enqueue(cfs_rq, se, flags); in enqueue_entity()
3898 check_spread(cfs_rq, se); in enqueue_entity()
3900 __enqueue_entity(cfs_rq, se); in enqueue_entity()
3901 se->on_rq = 1; in enqueue_entity()
3909 static void __clear_buddies_last(struct sched_entity *se) in __clear_buddies_last() argument
3911 for_each_sched_entity(se) { in __clear_buddies_last()
3912 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last()
3913 if (cfs_rq->last != se) in __clear_buddies_last()
3920 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
3922 for_each_sched_entity(se) { in __clear_buddies_next()
3923 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
3924 if (cfs_rq->next != se) in __clear_buddies_next()
3931 static void __clear_buddies_skip(struct sched_entity *se) in __clear_buddies_skip() argument
3933 for_each_sched_entity(se) { in __clear_buddies_skip()
3934 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip()
3935 if (cfs_rq->skip != se) in __clear_buddies_skip()
3942 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
3944 if (cfs_rq->last == se) in clear_buddies()
3945 __clear_buddies_last(se); in clear_buddies()
3947 if (cfs_rq->next == se) in clear_buddies()
3948 __clear_buddies_next(se); in clear_buddies()
3950 if (cfs_rq->skip == se) in clear_buddies()
3951 __clear_buddies_skip(se); in clear_buddies()
3957 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
3972 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entity()
3973 dequeue_runnable_load_avg(cfs_rq, se); in dequeue_entity()
3975 update_stats_dequeue(cfs_rq, se, flags); in dequeue_entity()
3977 clear_buddies(cfs_rq, se); in dequeue_entity()
3979 if (se != cfs_rq->curr) in dequeue_entity()
3980 __dequeue_entity(cfs_rq, se); in dequeue_entity()
3981 se->on_rq = 0; in dequeue_entity()
3982 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
3991 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
3996 update_cfs_group(se); in dequeue_entity()
4015 struct sched_entity *se; in check_preempt_tick() local
4038 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
4039 delta = curr->vruntime - se->vruntime; in check_preempt_tick()
4049 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
4052 if (se->on_rq) { in set_next_entity()
4058 update_stats_wait_end(cfs_rq, se); in set_next_entity()
4059 __dequeue_entity(cfs_rq, se); in set_next_entity()
4060 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
4063 update_stats_curr_start(cfs_rq, se); in set_next_entity()
4064 cfs_rq->curr = se; in set_next_entity()
4071 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { in set_next_entity()
4072 schedstat_set(se->statistics.slice_max, in set_next_entity()
4073 max((u64)schedstat_val(se->statistics.slice_max), in set_next_entity()
4074 se->sum_exec_runtime - se->prev_sum_exec_runtime)); in set_next_entity()
4077 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
4081 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4094 struct sched_entity *se; in pick_next_entity() local
4103 se = left; /* ideally we run the leftmost entity */ in pick_next_entity()
4109 if (cfs_rq->skip == se) { in pick_next_entity()
4112 if (se == curr) { in pick_next_entity()
4115 second = __pick_next_entity(se); in pick_next_entity()
4121 se = second; in pick_next_entity()
4128 se = cfs_rq->last; in pick_next_entity()
4134 se = cfs_rq->next; in pick_next_entity()
4136 clear_buddies(cfs_rq, se); in pick_next_entity()
4138 return se; in pick_next_entity()
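pick_next_entity() starts from the leftmost entity, diverts around the skip buddy (taking the second-leftmost), and lets the last/next buddies override the pick only when wakeup_preempt_entity() reports they are within one wakeup granularity of the left entity. A sketch of just that override test, with stubbed entities and an assumed 1 ms granularity:

#include <stdio.h>
#include <stdint.h>

struct entity { const char *name; uint64_t vruntime; };

/* <1 means "close enough in vruntime to allow the buddy override". */
static int wakeup_preempt_entity(struct entity *curr, struct entity *se)
{
	int64_t vdiff = curr->vruntime - se->vruntime;
	int64_t gran = 1000000;		/* stand-in for wakeup_gran() */

	if (vdiff <= 0)
		return -1;
	return vdiff > gran ? 1 : 0;
}

int main(void)
{
	struct entity left = { "left", 5000000 };
	struct entity next = { "next-buddy", 5400000 };
	struct entity *se = &left;

	/* A wakee marked as next buddy wins if it sits within one
	 * wakeup granularity of the leftmost entity. */
	if (wakeup_preempt_entity(&next, &left) < 1)
		se = &next;
	printf("picked %s\n", se->name);	/* picked next-buddy */
	return 0;
}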
4443 struct sched_entity *se; in throttle_cfs_rq() local
4447 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
4455 for_each_sched_entity(se) { in throttle_cfs_rq()
4456 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
4458 if (!se->on_rq) in throttle_cfs_rq()
4462 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); in throttle_cfs_rq()
4469 if (!se) in throttle_cfs_rq()
4501 struct sched_entity *se; in unthrottle_cfs_rq() local
4505 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
4523 for_each_sched_entity(se) { in unthrottle_cfs_rq()
4524 if (se->on_rq) in unthrottle_cfs_rq()
4527 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
4529 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
4536 if (!se) in unthrottle_cfs_rq()
5028 struct sched_entity *se = &p->se; in hrtick_start_fair() local
5029 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair()
5034 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
5035 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
5059 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) in hrtick_update()
5082 struct sched_entity *se = &p->se; in enqueue_task_fair() local
5100 for_each_sched_entity(se) { in enqueue_task_fair()
5101 if (se->on_rq) in enqueue_task_fair()
5103 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5104 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
5119 for_each_sched_entity(se) { in enqueue_task_fair()
5120 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5126 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
5127 update_cfs_group(se); in enqueue_task_fair()
5130 if (!se) in enqueue_task_fair()
5136 static void set_next_buddy(struct sched_entity *se);
5146 struct sched_entity *se = &p->se; in dequeue_task_fair() local
5149 for_each_sched_entity(se) { in dequeue_task_fair()
5150 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5151 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
5166 se = parent_entity(se); in dequeue_task_fair()
5171 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
5172 set_next_buddy(se); in dequeue_task_fair()
5178 for_each_sched_entity(se) { in dequeue_task_fair()
5179 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5185 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
5186 update_cfs_group(se); in dequeue_task_fair()
5189 if (!se) in dequeue_task_fair()
5666 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); in wake_affine()
5671 schedstat_inc(p->se.statistics.nr_wakeups_affine); in wake_affine()
5894 sync_entity_load_avg(&p->se); in find_idlest_cpu()
6226 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_wake()
6291 sync_entity_load_avg(&p->se); in wake_cap()
6363 static void detach_entity_cfs_rq(struct sched_entity *se);
6379 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
6380 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair()
6395 se->vruntime -= min_vruntime; in migrate_task_rq_fair()
6404 detach_entity_cfs_rq(&p->se); in migrate_task_rq_fair()
6415 remove_entity_load_avg(&p->se); in migrate_task_rq_fair()
6419 p->se.avg.last_update_time = 0; in migrate_task_rq_fair()
6422 p->se.exec_start = 0; in migrate_task_rq_fair()
6429 remove_entity_load_avg(&p->se); in task_dead_fair()
6433 static unsigned long wakeup_gran(struct sched_entity *se) in wakeup_gran() argument
6450 return calc_delta_fair(gran, se); in wakeup_gran()
6468 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity() argument
6470 s64 gran, vdiff = curr->vruntime - se->vruntime; in wakeup_preempt_entity()
6475 gran = wakeup_gran(se); in wakeup_preempt_entity()
6482 static void set_last_buddy(struct sched_entity *se) in set_last_buddy() argument
6484 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) in set_last_buddy()
6487 for_each_sched_entity(se) { in set_last_buddy()
6488 if (SCHED_WARN_ON(!se->on_rq)) in set_last_buddy()
6490 cfs_rq_of(se)->last = se; in set_last_buddy()
6494 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
6496 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE)) in set_next_buddy()
6499 for_each_sched_entity(se) { in set_next_buddy()
6500 if (SCHED_WARN_ON(!se->on_rq)) in set_next_buddy()
6502 cfs_rq_of(se)->next = se; in set_next_buddy()
6506 static void set_skip_buddy(struct sched_entity *se) in set_skip_buddy() argument
6508 for_each_sched_entity(se) in set_skip_buddy()
6509 cfs_rq_of(se)->skip = se; in set_skip_buddy()
6518 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup() local
6523 if (unlikely(se == pse)) in check_preempt_wakeup()
6565 find_matching_se(&se, &pse); in check_preempt_wakeup()
6566 update_curr(cfs_rq_of(se)); in check_preempt_wakeup()
6568 if (wakeup_preempt_entity(se, pse) == 1) { in check_preempt_wakeup()
6591 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
6594 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) in check_preempt_wakeup()
6595 set_last_buddy(se); in check_preempt_wakeup()
6602 struct sched_entity *se; in pick_next_task_fair() local
6653 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
6654 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
6657 p = task_of(se); in pick_next_task_fair()
6665 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
6667 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
6668 int se_depth = se->depth; in pick_next_task_fair()
6676 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
6677 se = parent_entity(se); in pick_next_task_fair()
6682 set_next_entity(cfs_rq, se); in pick_next_task_fair()
6692 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
6693 set_next_entity(cfs_rq, se); in pick_next_task_fair()
6694 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
6697 p = task_of(se); in pick_next_task_fair()
6706 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
6736 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
6739 for_each_sched_entity(se) { in put_prev_task_fair()
6740 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
6741 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
6754 struct sched_entity *se = &curr->se; in yield_task_fair() local
6762 clear_buddies(cfs_rq, se); in yield_task_fair()
6778 set_skip_buddy(se); in yield_task_fair()
6783 struct sched_entity *se = &p->se; in yield_to_task_fair() local
6786 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
6790 set_next_buddy(se); in yield_to_task_fair()
6972 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
6973 &p->se == cfs_rq_of(&p->se)->last)) in task_hot()
6981 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
7069 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); in can_migrate_task()
7100 schedstat_inc(p->se.statistics.nr_failed_migrations_running); in can_migrate_task()
7118 schedstat_inc(p->se.statistics.nr_forced_migrations); in can_migrate_task()
7123 schedstat_inc(p->se.statistics.nr_failed_migrations_hot); in can_migrate_task()
7152 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
7198 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
7224 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
7248 list_move(&p->se.group_node, tasks); in detach_tasks()
7302 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
7303 list_del_init(&p->se.group_node); in attach_tasks()
7373 struct sched_entity *se; in update_blocked_averages() local
7383 se = cfs_rq->tg->se[cpu]; in update_blocked_averages()
7384 if (se && !skip_blocked_update(se)) in update_blocked_averages()
7385 update_load_avg(cfs_rq_of(se), se, 0); in update_blocked_averages()
7423 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
7431 for_each_sched_entity(se) { in update_cfs_rq_h_load()
7432 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
7433 cfs_rq->h_load_next = se; in update_cfs_rq_h_load()
7438 if (!se) { in update_cfs_rq_h_load()
7443 while ((se = cfs_rq->h_load_next) != NULL) { in update_cfs_rq_h_load()
7445 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
7447 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
7458 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
7487 return p->se.avg.load_avg; in task_h_load()
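task_h_load() scales a task's load_avg by each ancestor group's share at every level, using the h_load values folded top-down in update_cfs_rq_h_load(). A worked example with assumed loads and one group between the task and the root:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t root_h_load = 2048;	/* root starts from its own load */
	uint64_t ge_load_avg = 256;	/* group entity's se->avg.load_avg */
	uint64_t root_load = 2048;	/* root cfs_rq_load_avg() */

	uint64_t task_load_avg = 128;	/* p->se.avg.load_avg */
	uint64_t grp_load = 512;	/* group cfs_rq_load_avg() */

	/* Top-down fold done by update_cfs_rq_h_load()... */
	uint64_t grp_h_load = root_h_load * ge_load_avg / root_load;	/* 256 */
	/* ...then task_h_load() scales the task into that budget. */
	printf("task_h_load = %llu\n",
	       (unsigned long long)(task_load_avg * grp_h_load / grp_load)); /* 64 */
	return 0;
}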
9652 struct sched_entity *se = &curr->se; in task_tick_fair() local
9654 for_each_sched_entity(se) { in task_tick_fair()
9655 cfs_rq = cfs_rq_of(se); in task_tick_fair()
9656 entity_tick(cfs_rq, se, queued); in task_tick_fair()
9671 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
9682 se->vruntime = curr->vruntime; in task_fork_fair()
9684 place_entity(cfs_rq, se, 1); in task_fork_fair()
9686 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { in task_fork_fair()
9691 swap(curr->vruntime, se->vruntime); in task_fork_fair()
9695 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
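task_fork_fair() starts the child at the parent's vruntime, applies the fork penalty via place_entity(), and with sysctl_sched_child_runs_first swaps the two vruntimes when the parent would otherwise still run first; the final subtraction of min_vruntime at line 9695 makes the value relocatable until the child is enqueued. A sketch of the swap with toy values:

#include <stdio.h>
#include <stdint.h>

static void swap_u64(uint64_t *a, uint64_t *b)
{
	uint64_t t = *a; *a = *b; *b = t;
}

int main(void)
{
	int child_runs_first = 1;	/* sysctl_sched_child_runs_first */
	uint64_t curr_v = 10000000;	/* parent (curr) vruntime */
	uint64_t se_v = curr_v + 3000000; /* child, after the fork penalty */

	if (child_runs_first && curr_v < se_v)	/* entity_before(curr, se) */
		swap_u64(&curr_v, &se_v);	/* child now runs first */
	printf("parent=%llu child=%llu\n",
	       (unsigned long long)curr_v, (unsigned long long)se_v);
	return 0;
}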
9723 struct sched_entity *se = &p->se; in vruntime_normalized() local
9742 if (!se->sum_exec_runtime || in vruntime_normalized()
9754 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq() argument
9759 se = se->parent; in propagate_entity_cfs_rq()
9761 for_each_sched_entity(se) { in propagate_entity_cfs_rq()
9762 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
9767 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
9771 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq() argument
9774 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq() argument
9776 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq()
9779 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
9780 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
9782 propagate_entity_cfs_rq(se); in detach_entity_cfs_rq()
9785 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq() argument
9787 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq()
9794 se->depth = se->parent ? se->parent->depth + 1 : 0; in attach_entity_cfs_rq()
9798 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
9799 attach_entity_load_avg(cfs_rq, se, 0); in attach_entity_cfs_rq()
9801 propagate_entity_cfs_rq(se); in attach_entity_cfs_rq()
9806 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
9807 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq()
9814 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
9815 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
9818 detach_entity_cfs_rq(se); in detach_task_cfs_rq()
9823 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
9824 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq()
9826 attach_entity_cfs_rq(se); in attach_task_cfs_rq()
9829 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
9861 struct sched_entity *se = &rq->curr->se; in set_curr_task_fair() local
9863 for_each_sched_entity(se) { in set_curr_task_fair()
9864 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_curr_task_fair()
9866 set_next_entity(cfs_rq, se); in set_curr_task_fair()
9887 struct sched_entity *se = &p->se; in task_set_group_fair() local
9890 se->depth = se->parent ? se->parent->depth + 1 : 0; in task_set_group_fair()
9900 p->se.avg.last_update_time = 0; in task_move_group_fair()
9927 if (tg->se) in free_fair_sched_group()
9928 kfree(tg->se[i]); in free_fair_sched_group()
9932 kfree(tg->se); in free_fair_sched_group()
9937 struct sched_entity *se; in alloc_fair_sched_group() local
9944 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
9945 if (!tg->se) in alloc_fair_sched_group()
9958 se = kzalloc_node(sizeof(struct sched_entity), in alloc_fair_sched_group()
9960 if (!se) in alloc_fair_sched_group()
9964 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
9965 init_entity_runnable_average(se); in alloc_fair_sched_group()
9978 struct sched_entity *se; in online_fair_sched_group() local
9984 se = tg->se[i]; in online_fair_sched_group()
9988 attach_entity_cfs_rq(se); in online_fair_sched_group()
10001 if (tg->se[cpu]) in unregister_fair_sched_group()
10002 remove_entity_load_avg(tg->se[cpu]); in unregister_fair_sched_group()
10020 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
10030 tg->se[cpu] = se; in init_tg_cfs_entry()
10033 if (!se) in init_tg_cfs_entry()
10037 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
10038 se->depth = 0; in init_tg_cfs_entry()
10040 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
10041 se->depth = parent->depth + 1; in init_tg_cfs_entry()
10044 se->my_q = cfs_rq; in init_tg_cfs_entry()
10046 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
10047 se->parent = parent; in init_tg_cfs_entry()
10059 if (!tg->se[0]) in sched_group_set_shares()
10071 struct sched_entity *se = tg->se[i]; in sched_group_set_shares() local
10077 for_each_sched_entity(se) { in sched_group_set_shares()
10078 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in sched_group_set_shares()
10079 update_cfs_group(se); in sched_group_set_shares()
10106 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
10114 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); in get_rr_interval_fair()