Lines matching refs: se (kernel/sched/fair.c)

251 static inline struct task_struct *task_of(struct sched_entity *se)  in task_of()  argument
253 SCHED_WARN_ON(!entity_is_task(se)); in task_of()
254 return container_of(se, struct task_struct, se); in task_of()
258 #define for_each_sched_entity(se) \ argument
259 for (; se; se = se->parent)
263 return p->se.cfs_rq; in task_cfs_rq()
267 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
269 return se->cfs_rq; in cfs_rq_of()
391 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
393 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
394 return se->cfs_rq; in is_same_group()
399 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
401 return se->parent; in parent_entity()
405 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
417 se_depth = (*se)->depth; in find_matching_se()
422 *se = parent_entity(*se); in find_matching_se()
430 while (!is_same_group(*se, *pse)) { in find_matching_se()
431 *se = parent_entity(*se); in find_matching_se()
438 static inline struct task_struct *task_of(struct sched_entity *se) in task_of() argument
440 return container_of(se, struct task_struct, se); in task_of()
443 #define for_each_sched_entity(se) \ argument
444 for (; se; se = NULL)
451 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of() argument
453 struct task_struct *p = task_of(se); in cfs_rq_of()
487 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
493 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
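The entries above are the two build variants of the entity-hierarchy helpers: with CONFIG_FAIR_GROUP_SCHED a task's sched_entity chains to its group's entity through ->parent and task_of() recovers the owning task_struct with container_of(); without group scheduling the walk degenerates to a single step. A minimal userspace sketch of the same pattern follows; the structures and values are illustrative stand-ins, not the kernel's definitions.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct sched_entity {
	struct sched_entity *parent;   /* NULL at the top of the hierarchy */
	unsigned long weight;
};

struct task_struct {
	int pid;
	struct sched_entity se;        /* embedded, as in the kernel */
};

/* container_of(): recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

/* Walk from a task's entity up through its group entities. */
#define for_each_sched_entity(se) \
	for (; se; se = se->parent)

int main(void)
{
	struct sched_entity group = { .parent = NULL, .weight = 1024 };
	struct task_struct p = { .pid = 42, .se = { .parent = &group, .weight = 1024 } };
	struct sched_entity *se = &p.se;

	printf("task pid=%d\n", task_of(se)->pid);
	for_each_sched_entity(se)
		printf("entity weight=%lu\n", se->weight);
	return 0;
}

The ->parent walk is what lets per-entity work (enqueue, load tracking, slice computation) be applied once per hierarchy level.
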
545 struct sched_entity *se; in update_min_vruntime() local
546 se = rb_entry(leftmost, struct sched_entity, run_node); in update_min_vruntime()
549 vruntime = se->vruntime; in update_min_vruntime()
551 vruntime = min_vruntime(vruntime, se->vruntime); in update_min_vruntime()
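update_min_vruntime() pulls cfs_rq->min_vruntime forward toward the smallest vruntime on the queue but never lets it move backwards, so it stays a monotonic baseline for placing and renormalizing entities. A simplified model of that clamping; names follow the kernel, the rest is illustrative.

#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Wraparound-safe "later of two vruntimes", as CFS compares them. */
static inline u64 max_vruntime(u64 max_vr, u64 vr)
{
	if ((s64)(vr - max_vr) > 0)
		max_vr = vr;
	return max_vr;
}

/* min_vruntime only advances, toward the leftmost (smallest) vruntime. */
static void update_min_vruntime(u64 *min_vruntime, u64 leftmost_vruntime)
{
	*min_vruntime = max_vruntime(*min_vruntime, leftmost_vruntime);
}
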
565 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
582 if (entity_before(se, entry)) { in __enqueue_entity()
590 rb_link_node(&se->run_node, parent, link); in __enqueue_entity()
591 rb_insert_color_cached(&se->run_node, in __enqueue_entity()
595 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
597 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
610 static struct sched_entity *__pick_next_entity(struct sched_entity *se) in __pick_next_entity() argument
612 struct rb_node *next = rb_next(&se->run_node); in __pick_next_entity()
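__enqueue_entity() inserts the entity into the vruntime-ordered timeline (a cached rbtree in the kernel) using entity_before(), and the leftmost node, i.e. the least-run entity, is what __pick_first_entity()/rb_next() walk from. The sketch below deliberately substitutes a sorted singly linked list for the rbtree, to show only the ordering logic.

#include <stdint.h>
#include <stddef.h>

typedef uint64_t u64;
typedef int64_t s64;

struct sched_entity {
	u64 vruntime;
	struct sched_entity *next;   /* list link standing in for run_node */
};

/* Same ordering test CFS uses when walking the timeline tree. */
static inline int entity_before(struct sched_entity *a, struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

/* Keep the queue sorted by vruntime; the head plays the role of rb_first_cached(). */
static void __enqueue_entity(struct sched_entity **timeline, struct sched_entity *se)
{
	struct sched_entity **link = timeline;

	while (*link && !entity_before(se, *link))
		link = &(*link)->next;
	se->next = *link;
	*link = se;
}

static struct sched_entity *__pick_first_entity(struct sched_entity *timeline)
{
	return timeline;   /* leftmost == smallest vruntime */
}
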
662 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
664 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
665 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
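calc_delta_fair() converts executed wall-clock time into vruntime: heavier entities accrue vruntime more slowly, in proportion NICE_0_LOAD/weight. The kernel performs this with precomputed inverse weights in __calc_delta(); the sketch below uses plain division purely to show the arithmetic, and the NICE_0_LOAD value is illustrative.

#include <stdint.h>

typedef uint64_t u64;

#define NICE_0_LOAD 1024UL   /* illustrative scale for a nice-0 task */

/*
 * vruntime advances slower for heavier entities:
 *   delta_fair = delta_exec * NICE_0_LOAD / weight
 */
static u64 calc_delta_fair(u64 delta_exec, unsigned long weight)
{
	if (weight != NICE_0_LOAD)
		delta_exec = delta_exec * NICE_0_LOAD / weight;
	return delta_exec;
}
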
692 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
694 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq); in sched_slice()
696 for_each_sched_entity(se) { in sched_slice()
700 cfs_rq = cfs_rq_of(se); in sched_slice()
703 if (unlikely(!se->on_rq)) { in sched_slice()
706 update_load_add(&lw, se->load.weight); in sched_slice()
709 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
719 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
721 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
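sched_slice() gives each entity a wall-clock slice proportional to its share of the queue's weight within one scheduling period, applied at every level via for_each_sched_entity(); sched_vslice() is the same slice expressed in vruntime through calc_delta_fair(). A single-level sketch of the arithmetic, with illustrative tunables (the kernel additionally scales them by CPU count):

#include <stdint.h>

typedef uint64_t u64;

static const u64 sysctl_sched_latency         = 6000000ULL;  /* 6 ms */
static const u64 sysctl_sched_min_granularity =  750000ULL;  /* 0.75 ms */
static const unsigned int sched_nr_latency    = 8;

/* One full round of the queue: the latency target, stretched when overloaded. */
static u64 __sched_period(unsigned long nr_running)
{
	if (nr_running > sched_nr_latency)
		return nr_running * sysctl_sched_min_granularity;
	return sysctl_sched_latency;
}

/* An entity's wall-clock slice is its weight's share of the period. */
static u64 sched_slice(u64 entity_weight, u64 queue_weight, unsigned long nr_running)
{
	return __sched_period(nr_running) * entity_weight / queue_weight;
}
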
732 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
734 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
744 if (entity_is_task(se)) in init_entity_runnable_average()
745 sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
747 se->runnable_weight = se->load.weight; in init_entity_runnable_average()
752 static void attach_entity_cfs_rq(struct sched_entity *se);
782 struct sched_entity *se = &p->se; in post_init_entity_util_avg() local
783 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg()
784 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
790 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
811 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
815 attach_entity_cfs_rq(se); in post_init_entity_util_avg()
819 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
870 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
874 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
882 prev_wait_start = schedstat_val(se->statistics.wait_start); in update_stats_wait_start()
884 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && in update_stats_wait_start()
888 __schedstat_set(se->statistics.wait_start, wait_start); in update_stats_wait_start()
892 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
900 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); in update_stats_wait_end()
902 if (entity_is_task(se)) { in update_stats_wait_end()
903 p = task_of(se); in update_stats_wait_end()
910 __schedstat_set(se->statistics.wait_start, delta); in update_stats_wait_end()
916 __schedstat_set(se->statistics.wait_max, in update_stats_wait_end()
917 max(schedstat_val(se->statistics.wait_max), delta)); in update_stats_wait_end()
918 __schedstat_inc(se->statistics.wait_count); in update_stats_wait_end()
919 __schedstat_add(se->statistics.wait_sum, delta); in update_stats_wait_end()
920 __schedstat_set(se->statistics.wait_start, 0); in update_stats_wait_end()
924 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper() argument
932 sleep_start = schedstat_val(se->statistics.sleep_start); in update_stats_enqueue_sleeper()
933 block_start = schedstat_val(se->statistics.block_start); in update_stats_enqueue_sleeper()
935 if (entity_is_task(se)) in update_stats_enqueue_sleeper()
936 tsk = task_of(se); in update_stats_enqueue_sleeper()
944 if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) in update_stats_enqueue_sleeper()
945 __schedstat_set(se->statistics.sleep_max, delta); in update_stats_enqueue_sleeper()
947 __schedstat_set(se->statistics.sleep_start, 0); in update_stats_enqueue_sleeper()
948 __schedstat_add(se->statistics.sum_sleep_runtime, delta); in update_stats_enqueue_sleeper()
961 if (unlikely(delta > schedstat_val(se->statistics.block_max))) in update_stats_enqueue_sleeper()
962 __schedstat_set(se->statistics.block_max, delta); in update_stats_enqueue_sleeper()
964 __schedstat_set(se->statistics.block_start, 0); in update_stats_enqueue_sleeper()
965 __schedstat_add(se->statistics.sum_sleep_runtime, delta); in update_stats_enqueue_sleeper()
969 __schedstat_add(se->statistics.iowait_sum, delta); in update_stats_enqueue_sleeper()
970 __schedstat_inc(se->statistics.iowait_count); in update_stats_enqueue_sleeper()
995 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue() argument
1004 if (se != cfs_rq->curr) in update_stats_enqueue()
1005 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
1008 update_stats_enqueue_sleeper(cfs_rq, se); in update_stats_enqueue()
1012 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue() argument
1022 if (se != cfs_rq->curr) in update_stats_dequeue()
1023 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
1025 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { in update_stats_dequeue()
1026 struct task_struct *tsk = task_of(se); in update_stats_dequeue()
1029 __schedstat_set(se->statistics.sleep_start, in update_stats_dequeue()
1032 __schedstat_set(se->statistics.block_start, in update_stats_dequeue()
1041 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1046 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2014 now = p->se.exec_start; in numa_get_avg_runtime()
2015 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2025 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
2489 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
2620 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
2621 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
2690 now = curr->se.sum_exec_runtime; in task_tick_numa()
2757 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
2759 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2761 if (entity_is_task(se)) { in account_entity_enqueue()
2764 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
2765 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
2772 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
2774 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
2776 if (entity_is_task(se)) { in account_entity_dequeue()
2777 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
2778 list_del_init(&se->group_node); in account_entity_dequeue()
2834 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_runnable_load_avg() argument
2836 cfs_rq->runnable_weight += se->runnable_weight; in enqueue_runnable_load_avg()
2838 cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg; in enqueue_runnable_load_avg()
2839 cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum; in enqueue_runnable_load_avg()
2843 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_runnable_load_avg() argument
2845 cfs_rq->runnable_weight -= se->runnable_weight; in dequeue_runnable_load_avg()
2847 sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg); in dequeue_runnable_load_avg()
2849 se_runnable(se) * se->avg.runnable_load_sum); in dequeue_runnable_load_avg()
2853 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
2855 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
2856 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
2860 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
2862 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
2863 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
2867 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_runnable_load_avg() argument
2869 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_runnable_load_avg() argument
2871 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
2873 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
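The enqueue/dequeue pairs above fold an entity's PELT sums and averages into, or out of, the cfs_rq aggregates; the dequeue side uses sub_positive() so rounding differences can never drive an unsigned aggregate below zero. A reduced model of the load_avg pair, with stand-in types rather than the kernel's sched_avg:

#include <stdint.h>

typedef uint64_t u64;

struct avg { u64 load_avg; u64 load_sum; };

/* Never let an unsigned aggregate underflow when removing a contribution. */
static inline void sub_positive(u64 *val, u64 sub)
{
	*val = (*val > sub) ? *val - sub : 0;
}

static void enqueue_load_avg(struct avg *cfs_rq, const struct avg *se, unsigned long se_weight)
{
	cfs_rq->load_avg += se->load_avg;
	cfs_rq->load_sum += (u64)se_weight * se->load_sum;
}

static void dequeue_load_avg(struct avg *cfs_rq, const struct avg *se, unsigned long se_weight)
{
	sub_positive(&cfs_rq->load_avg, se->load_avg);
	sub_positive(&cfs_rq->load_sum, (u64)se_weight * se->load_sum);
}
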
2876 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
2879 if (se->on_rq) { in reweight_entity()
2881 if (cfs_rq->curr == se) in reweight_entity()
2883 account_entity_dequeue(cfs_rq, se); in reweight_entity()
2884 dequeue_runnable_load_avg(cfs_rq, se); in reweight_entity()
2886 dequeue_load_avg(cfs_rq, se); in reweight_entity()
2888 se->runnable_weight = runnable; in reweight_entity()
2889 update_load_set(&se->load, weight); in reweight_entity()
2893 u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib; in reweight_entity()
2895 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
2896 se->avg.runnable_load_avg = in reweight_entity()
2897 div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider); in reweight_entity()
2901 enqueue_load_avg(cfs_rq, se); in reweight_entity()
2902 if (se->on_rq) { in reweight_entity()
2903 account_entity_enqueue(cfs_rq, se); in reweight_entity()
2904 enqueue_runnable_load_avg(cfs_rq, se); in reweight_entity()
2910 struct sched_entity *se = &p->se; in reweight_task() local
2911 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task()
2912 struct load_weight *load = &se->load; in reweight_task()
2915 reweight_entity(cfs_rq, se, weight, weight); in reweight_task()
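reweight_entity() changes an entity's weight in place: it backs the old contribution out of the queue (accounting plus dequeue_load_avg), installs the new weight, rebuilds the averages from the unchanged PELT sums, and re-adds everything. The key step is the rescale avg = weight * sum / divider, sketched below; LOAD_AVG_MAX is PELT's geometric-series maximum, and the constants should be read as illustrative.

#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

#define LOAD_AVG_MAX 47742  /* sum of the PELT geometric series at saturation */

/*
 * After a weight change the per-entity sums are still valid, so the averages
 * are recomputed against the current PELT divider, which accounts for the
 * partially elapsed 1024us period via period_contrib.
 */
static unsigned long rescale_load_avg(unsigned long new_weight, u64 load_sum,
				      u32 period_contrib)
{
	u32 divider = LOAD_AVG_MAX - 1024 + period_contrib;

	return (unsigned long)((u64)new_weight * load_sum / divider);
}
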
3079 static void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3081 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3093 if (likely(se->load.weight == shares)) in update_cfs_group()
3100 reweight_entity(cfs_rq_of(se), se, shares, runnable); in update_cfs_group()
3104 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3170 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
3186 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
3210 __update_load_avg_blocked_se(p_last_update_time, se); in set_task_rq_fair()
3211 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
3284 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
3286 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
3301 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
3302 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX; in update_tg_cfs_util()
3310 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
3327 runnable_sum += se->avg.load_sum; in update_tg_cfs_runnable()
3340 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_runnable()
3349 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_runnable()
3352 load_sum = (s64)se_weight(se) * runnable_sum; in update_tg_cfs_runnable()
3355 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_runnable()
3356 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_runnable()
3358 se->avg.load_sum = runnable_sum; in update_tg_cfs_runnable()
3359 se->avg.load_avg = load_avg; in update_tg_cfs_runnable()
3363 runnable_load_sum = (s64)se_runnable(se) * runnable_sum; in update_tg_cfs_runnable()
3365 delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum; in update_tg_cfs_runnable()
3366 delta_avg = runnable_load_avg - se->avg.runnable_load_avg; in update_tg_cfs_runnable()
3368 se->avg.runnable_load_sum = runnable_sum; in update_tg_cfs_runnable()
3369 se->avg.runnable_load_avg = runnable_load_avg; in update_tg_cfs_runnable()
3371 if (se->on_rq) { in update_tg_cfs_runnable()
3384 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3388 if (entity_is_task(se)) in propagate_entity_load_avg()
3391 gcfs_rq = group_cfs_rq(se); in propagate_entity_load_avg()
3397 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
3401 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3402 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3405 trace_pelt_se_tp(se); in propagate_entity_load_avg()
3414 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update() argument
3416 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
3422 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
3444 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3522 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in attach_entity_load_avg() argument
3533 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
3534 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
3542 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
3544 se->avg.load_sum = divider; in attach_entity_load_avg()
3545 if (se_weight(se)) { in attach_entity_load_avg()
3546 se->avg.load_sum = in attach_entity_load_avg()
3547 div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
3550 se->avg.runnable_load_sum = se->avg.load_sum; in attach_entity_load_avg()
3552 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
3553 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
3554 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
3556 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
3571 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
3573 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
3574 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
3575 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
3577 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
3592 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
3601 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
3602 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
3605 decayed |= propagate_entity_load_avg(se); in update_load_avg()
3607 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
3616 attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION); in update_load_avg()
3648 static void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg() argument
3650 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg()
3654 __update_load_avg_blocked_se(last_update_time, se); in sync_entity_load_avg()
3661 static void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
3663 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
3672 sync_entity_load_avg(se); in remove_entity_load_avg()
3676 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
3677 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
3678 cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */ in remove_entity_load_avg()
3694 return READ_ONCE(p->se.avg.util_avg); in task_util()
3699 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
3762 ue = p->se.avg.util_est; in util_est_dequeue()
3803 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_dequeue()
3835 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
3840 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
3843 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {} in attach_entity_load_avg() argument
3845 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
3862 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
3865 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
3876 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
3887 vruntime += sched_vslice(cfs_rq, se); in place_entity()
3904 se->vruntime = max_vruntime(se->vruntime, vruntime); in place_entity()
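place_entity() chooses where on the virtual timeline an entity lands: a newly forked entity is pushed one vslice ahead of min_vruntime so it cannot immediately preempt everyone (START_DEBIT), while a waking sleeper gets back at most half a latency period of credit (GENTLE_FAIR_SLEEPERS) and is never placed earlier than its own previous vruntime. A condensed model of that placement, with an illustrative latency value:

#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

static const u64 sched_latency_ns = 6000000ULL;  /* illustrative: 6 ms */

static inline u64 max_vruntime(u64 a, u64 b)
{
	return ((s64)(b - a) > 0) ? b : a;
}

static u64 place_entity(u64 min_vruntime, u64 prev_vruntime, u64 vslice, int initial)
{
	u64 vruntime = min_vruntime;

	if (initial)
		vruntime += vslice;                   /* START_DEBIT: new tasks wait a slice */
	else
		vruntime -= sched_latency_ns / 2;     /* bounded credit for waking sleepers */

	/* never place an entity earlier than its own previous vruntime */
	return max_vruntime(prev_vruntime, vruntime);
}
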
3961 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
3964 bool curr = cfs_rq->curr == se; in enqueue_entity()
3971 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3982 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
3992 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
3993 update_cfs_group(se); in enqueue_entity()
3994 enqueue_runnable_load_avg(cfs_rq, se); in enqueue_entity()
3995 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
3998 place_entity(cfs_rq, se, 0); in enqueue_entity()
4001 update_stats_enqueue(cfs_rq, se, flags); in enqueue_entity()
4002 check_spread(cfs_rq, se); in enqueue_entity()
4004 __enqueue_entity(cfs_rq, se); in enqueue_entity()
4005 se->on_rq = 1; in enqueue_entity()
4013 static void __clear_buddies_last(struct sched_entity *se) in __clear_buddies_last() argument
4015 for_each_sched_entity(se) { in __clear_buddies_last()
4016 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last()
4017 if (cfs_rq->last != se) in __clear_buddies_last()
4024 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
4026 for_each_sched_entity(se) { in __clear_buddies_next()
4027 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
4028 if (cfs_rq->next != se) in __clear_buddies_next()
4035 static void __clear_buddies_skip(struct sched_entity *se) in __clear_buddies_skip() argument
4037 for_each_sched_entity(se) { in __clear_buddies_skip()
4038 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip()
4039 if (cfs_rq->skip != se) in __clear_buddies_skip()
4046 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
4048 if (cfs_rq->last == se) in clear_buddies()
4049 __clear_buddies_last(se); in clear_buddies()
4051 if (cfs_rq->next == se) in clear_buddies()
4052 __clear_buddies_next(se); in clear_buddies()
4054 if (cfs_rq->skip == se) in clear_buddies()
4055 __clear_buddies_skip(se); in clear_buddies()
4061 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
4076 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entity()
4077 dequeue_runnable_load_avg(cfs_rq, se); in dequeue_entity()
4079 update_stats_dequeue(cfs_rq, se, flags); in dequeue_entity()
4081 clear_buddies(cfs_rq, se); in dequeue_entity()
4083 if (se != cfs_rq->curr) in dequeue_entity()
4084 __dequeue_entity(cfs_rq, se); in dequeue_entity()
4085 se->on_rq = 0; in dequeue_entity()
4086 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
4095 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
4100 update_cfs_group(se); in dequeue_entity()
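enqueue_entity() and dequeue_entity() also keep vruntime meaningful across queues: when an entity leaves a runqueue other than for a plain sleep (for example when migrating), its vruntime is made relative by subtracting the source queue's min_vruntime, and it is made absolute again against the destination's min_vruntime on enqueue. A two-line model of that renormalization, with illustrative helper names:

#include <stdint.h>

typedef uint64_t u64;

/* dequeue: drop the old queue's baseline, keeping only the entity's lag */
static u64 dequeue_normalize(u64 vruntime, u64 src_min_vruntime)
{
	return vruntime - src_min_vruntime;
}

/* enqueue: rebase the lag onto the destination queue's timeline */
static u64 enqueue_renormalize(u64 rel_vruntime, u64 dst_min_vruntime)
{
	return rel_vruntime + dst_min_vruntime;
}
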
4119 struct sched_entity *se; in check_preempt_tick() local
4142 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
4143 delta = curr->vruntime - se->vruntime; in check_preempt_tick()
4153 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
4156 if (se->on_rq) { in set_next_entity()
4162 update_stats_wait_end(cfs_rq, se); in set_next_entity()
4163 __dequeue_entity(cfs_rq, se); in set_next_entity()
4164 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
4167 update_stats_curr_start(cfs_rq, se); in set_next_entity()
4168 cfs_rq->curr = se; in set_next_entity()
4176 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
4177 schedstat_set(se->statistics.slice_max, in set_next_entity()
4178 max((u64)schedstat_val(se->statistics.slice_max), in set_next_entity()
4179 se->sum_exec_runtime - se->prev_sum_exec_runtime)); in set_next_entity()
4182 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
4186 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4199 struct sched_entity *se; in pick_next_entity() local
4208 se = left; /* ideally we run the leftmost entity */ in pick_next_entity()
4214 if (cfs_rq->skip == se) { in pick_next_entity()
4217 if (se == curr) { in pick_next_entity()
4220 second = __pick_next_entity(se); in pick_next_entity()
4226 se = second; in pick_next_entity()
4233 se = cfs_rq->last; in pick_next_entity()
4239 se = cfs_rq->next; in pick_next_entity()
4241 clear_buddies(cfs_rq, se); in pick_next_entity()
4243 return se; in pick_next_entity()
4494 struct sched_entity *se; in throttle_cfs_rq() local
4498 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
4507 for_each_sched_entity(se) { in throttle_cfs_rq()
4508 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
4510 if (!se->on_rq) in throttle_cfs_rq()
4514 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); in throttle_cfs_rq()
4522 if (!se) in throttle_cfs_rq()
4554 struct sched_entity *se; in unthrottle_cfs_rq() local
4558 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
4577 for_each_sched_entity(se) { in unthrottle_cfs_rq()
4578 if (se->on_rq) in unthrottle_cfs_rq()
4581 cfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
4583 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
4593 if (!se) in unthrottle_cfs_rq()
5118 struct sched_entity *se = &p->se; in hrtick_start_fair() local
5119 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair()
5124 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
5125 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
5149 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) in hrtick_update()
5191 struct sched_entity *se = &p->se; in enqueue_task_fair() local
5210 for_each_sched_entity(se) { in enqueue_task_fair()
5211 if (se->on_rq) in enqueue_task_fair()
5213 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5214 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
5230 for_each_sched_entity(se) { in enqueue_task_fair()
5231 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5238 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
5239 update_cfs_group(se); in enqueue_task_fair()
5242 if (!se) { in enqueue_task_fair()
5270 for_each_sched_entity(se) { in enqueue_task_fair()
5271 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5283 static void set_next_buddy(struct sched_entity *se);
5293 struct sched_entity *se = &p->se; in dequeue_task_fair() local
5297 for_each_sched_entity(se) { in dequeue_task_fair()
5298 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5299 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
5315 se = parent_entity(se); in dequeue_task_fair()
5320 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
5321 set_next_buddy(se); in dequeue_task_fair()
5327 for_each_sched_entity(se) { in dequeue_task_fair()
5328 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5335 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
5336 update_cfs_group(se); in dequeue_task_fair()
5339 if (!se) in dequeue_task_fair()
5532 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); in wake_affine()
5537 schedstat_inc(p->se.statistics.nr_wakeups_affine); in wake_affine()
5760 sync_entity_load_avg(&p->se); in find_idlest_cpu()
6108 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
6200 sync_entity_load_avg(&p->se); in wake_cap()
6358 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
6505 static void detach_entity_cfs_rq(struct sched_entity *se);
6521 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
6522 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair()
6537 se->vruntime -= min_vruntime; in migrate_task_rq_fair()
6546 detach_entity_cfs_rq(&p->se); in migrate_task_rq_fair()
6557 remove_entity_load_avg(&p->se); in migrate_task_rq_fair()
6561 p->se.avg.last_update_time = 0; in migrate_task_rq_fair()
6564 p->se.exec_start = 0; in migrate_task_rq_fair()
6571 remove_entity_load_avg(&p->se); in task_dead_fair()
6584 static unsigned long wakeup_gran(struct sched_entity *se) in wakeup_gran() argument
6601 return calc_delta_fair(gran, se); in wakeup_gran()
6619 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity() argument
6621 s64 gran, vdiff = curr->vruntime - se->vruntime; in wakeup_preempt_entity()
6626 gran = wakeup_gran(se); in wakeup_preempt_entity()
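wakeup_preempt_entity() decides whether a waking entity should preempt the current one: only if curr's vruntime leads the waker's by more than one wakeup granularity, where wakeup_gran() scales the granularity into the waker's vruntime units via calc_delta_fair(). A compact model of the three-way result:

#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

/*
 * Returns  1: preempt curr (the waker is far enough behind to deserve the CPU),
 *          0: within one wakeup granularity, leave curr alone for now,
 *         -1: curr is not ahead of the waker at all.
 */
static int wakeup_preempt_entity(u64 curr_vruntime, u64 se_vruntime, u64 gran)
{
	s64 vdiff = (s64)(curr_vruntime - se_vruntime);

	if (vdiff <= 0)
		return -1;
	if ((u64)vdiff > gran)
		return 1;
	return 0;
}
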
6633 static void set_last_buddy(struct sched_entity *se) in set_last_buddy() argument
6635 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) in set_last_buddy()
6638 for_each_sched_entity(se) { in set_last_buddy()
6639 if (SCHED_WARN_ON(!se->on_rq)) in set_last_buddy()
6641 cfs_rq_of(se)->last = se; in set_last_buddy()
6645 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
6647 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) in set_next_buddy()
6650 for_each_sched_entity(se) { in set_next_buddy()
6651 if (SCHED_WARN_ON(!se->on_rq)) in set_next_buddy()
6653 cfs_rq_of(se)->next = se; in set_next_buddy()
6657 static void set_skip_buddy(struct sched_entity *se) in set_skip_buddy() argument
6659 for_each_sched_entity(se) in set_skip_buddy()
6660 cfs_rq_of(se)->skip = se; in set_skip_buddy()
6669 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup() local
6674 if (unlikely(se == pse)) in check_preempt_wakeup()
6716 find_matching_se(&se, &pse); in check_preempt_wakeup()
6717 update_curr(cfs_rq_of(se)); in check_preempt_wakeup()
6719 if (wakeup_preempt_entity(se, pse) == 1) { in check_preempt_wakeup()
6742 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
6745 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) in check_preempt_wakeup()
6746 set_last_buddy(se); in check_preempt_wakeup()
6753 struct sched_entity *se; in pick_next_task_fair() local
6804 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
6805 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
6808 p = task_of(se); in pick_next_task_fair()
6816 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
6818 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
6819 int se_depth = se->depth; in pick_next_task_fair()
6827 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
6828 se = parent_entity(se); in pick_next_task_fair()
6833 set_next_entity(cfs_rq, se); in pick_next_task_fair()
6843 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
6844 set_next_entity(cfs_rq, se); in pick_next_task_fair()
6845 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
6848 p = task_of(se); in pick_next_task_fair()
6857 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
6898 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
6901 for_each_sched_entity(se) { in put_prev_task_fair()
6902 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
6903 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
6916 struct sched_entity *se = &curr->se; in yield_task_fair() local
6924 clear_buddies(cfs_rq, se); in yield_task_fair()
6940 set_skip_buddy(se); in yield_task_fair()
6945 struct sched_entity *se = &p->se; in yield_to_task_fair() local
6948 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
6952 set_next_buddy(se); in yield_to_task_fair()
7142 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
7143 &p->se == cfs_rq_of(&p->se)->last)) in task_hot()
7151 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
7239 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); in can_migrate_task()
7270 schedstat_inc(p->se.statistics.nr_failed_migrations_running); in can_migrate_task()
7288 schedstat_inc(p->se.statistics.nr_forced_migrations); in can_migrate_task()
7293 schedstat_inc(p->se.statistics.nr_failed_migrations_hot); in can_migrate_task()
7321 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
7367 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
7393 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
7417 list_move(&p->se.group_node, tasks); in detach_tasks()
7470 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
7471 list_del_init(&p->se.group_node); in attach_tasks()
7568 struct sched_entity *se; in update_blocked_averages() local
7574 se = cfs_rq->tg->se[cpu]; in update_blocked_averages()
7575 if (se && !skip_blocked_update(se)) in update_blocked_averages()
7576 update_load_avg(cfs_rq_of(se), se, 0); in update_blocked_averages()
7602 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
7610 for_each_sched_entity(se) { in update_cfs_rq_h_load()
7611 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
7612 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
7617 if (!se) { in update_cfs_rq_h_load()
7622 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
7624 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
7626 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
7637 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
7668 return p->se.avg.load_avg; in task_h_load()
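task_h_load() folds the group-scheduling hierarchy into a single number for load balancing: a task's effective contribution to the root runqueue is its own load_avg scaled by the cached hierarchical share (cfs_rq->h_load) of the queue it sits on; without group scheduling it is simply p->se.avg.load_avg. A one-line model of the group-scheduling case, where the +1 avoids division by zero as in the kernel:

#include <stdint.h>

typedef uint64_t u64;

static unsigned long task_h_load_sketch(unsigned long task_load_avg,
					unsigned long cfs_rq_h_load,
					unsigned long cfs_rq_load_avg)
{
	return (unsigned long)((u64)task_load_avg * cfs_rq_h_load /
			       (cfs_rq_load_avg + 1));
}
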
9944 struct sched_entity *se = &curr->se; in task_tick_fair() local
9946 for_each_sched_entity(se) { in task_tick_fair()
9947 cfs_rq = cfs_rq_of(se); in task_tick_fair()
9948 entity_tick(cfs_rq, se, queued); in task_tick_fair()
9966 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
9977 se->vruntime = curr->vruntime; in task_fork_fair()
9979 place_entity(cfs_rq, se, 1); in task_fork_fair()
9981 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { in task_fork_fair()
9986 swap(curr->vruntime, se->vruntime); in task_fork_fair()
9990 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
10018 struct sched_entity *se = &p->se; in vruntime_normalized() local
10037 if (!se->sum_exec_runtime || in vruntime_normalized()
10049 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq() argument
10054 se = se->parent; in propagate_entity_cfs_rq()
10056 for_each_sched_entity(se) { in propagate_entity_cfs_rq()
10057 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
10062 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
10066 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq() argument
10069 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq() argument
10071 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq()
10074 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
10075 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
10077 propagate_entity_cfs_rq(se); in detach_entity_cfs_rq()
10080 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq() argument
10082 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq()
10089 se->depth = se->parent ? se->parent->depth + 1 : 0; in attach_entity_cfs_rq()
10093 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
10094 attach_entity_load_avg(cfs_rq, se, 0); in attach_entity_cfs_rq()
10096 propagate_entity_cfs_rq(se); in attach_entity_cfs_rq()
10101 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
10102 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq()
10109 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
10110 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
10113 detach_entity_cfs_rq(se); in detach_task_cfs_rq()
10118 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
10119 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq()
10121 attach_entity_cfs_rq(se); in attach_task_cfs_rq()
10124 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
10156 struct sched_entity *se = &p->se; in set_next_task_fair() local
10164 list_move(&se->group_node, &rq->cfs_tasks); in set_next_task_fair()
10168 for_each_sched_entity(se) { in set_next_task_fair()
10169 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair()
10171 set_next_entity(cfs_rq, se); in set_next_task_fair()
10192 struct sched_entity *se = &p->se; in task_set_group_fair() local
10195 se->depth = se->parent ? se->parent->depth + 1 : 0; in task_set_group_fair()
10205 p->se.avg.last_update_time = 0; in task_move_group_fair()
10232 if (tg->se) in free_fair_sched_group()
10233 kfree(tg->se[i]); in free_fair_sched_group()
10237 kfree(tg->se); in free_fair_sched_group()
10242 struct sched_entity *se; in alloc_fair_sched_group() local
10249 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
10250 if (!tg->se) in alloc_fair_sched_group()
10263 se = kzalloc_node(sizeof(struct sched_entity), in alloc_fair_sched_group()
10265 if (!se) in alloc_fair_sched_group()
10269 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
10270 init_entity_runnable_average(se); in alloc_fair_sched_group()
10283 struct sched_entity *se; in online_fair_sched_group() local
10290 se = tg->se[i]; in online_fair_sched_group()
10293 attach_entity_cfs_rq(se); in online_fair_sched_group()
10306 if (tg->se[cpu]) in unregister_fair_sched_group()
10307 remove_entity_load_avg(tg->se[cpu]); in unregister_fair_sched_group()
10325 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
10335 tg->se[cpu] = se; in init_tg_cfs_entry()
10338 if (!se) in init_tg_cfs_entry()
10342 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
10343 se->depth = 0; in init_tg_cfs_entry()
10345 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
10346 se->depth = parent->depth + 1; in init_tg_cfs_entry()
10349 se->my_q = cfs_rq; in init_tg_cfs_entry()
10351 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
10352 se->parent = parent; in init_tg_cfs_entry()
10364 if (!tg->se[0]) in sched_group_set_shares()
10376 struct sched_entity *se = tg->se[i]; in sched_group_set_shares() local
10382 for_each_sched_entity(se) { in sched_group_set_shares()
10383 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in sched_group_set_shares()
10384 update_cfs_group(se); in sched_group_set_shares()
10411 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
10419 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); in get_rr_interval_fair()