Lines matching full:se in kernel/sched/fair.c (the CFS scheduler)

273 #define for_each_sched_entity(se) \  argument
274 for (; se; se = se->parent)
389 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
391 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
392 return se->cfs_rq; in is_same_group()
397 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
399 return se->parent; in parent_entity()
403 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
415 se_depth = (*se)->depth; in find_matching_se()
420 *se = parent_entity(*se); in find_matching_se()
428 while (!is_same_group(*se, *pse)) { in find_matching_se()
429 *se = parent_entity(*se); in find_matching_se()
444 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
446 if (entity_is_task(se)) in se_is_idle()
447 return task_has_idle_policy(task_of(se)); in se_is_idle()
448 return cfs_rq_is_idle(group_cfs_rq(se)); in se_is_idle()
453 #define for_each_sched_entity(se) \ argument
454 for (; se; se = NULL)
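
The two macro definitions above (lines 273-274 and 453-454) are the group-scheduling and flat variants of the same walk: with CONFIG_FAIR_GROUP_SCHED the loop climbs from a task's entity through its parent group entities, without it the body runs exactly once. Below is a minimal userspace model of the hierarchical variant; the struct layout and names are illustrative, not the kernel's.

/* Userspace model of for_each_sched_entity() walking up the group hierarchy. */
#include <stdio.h>

struct sched_entity {
    struct sched_entity *parent;   /* NULL at the root of the hierarchy */
    const char *name;
};

#define for_each_sched_entity(se) \
    for (; se; se = se->parent)    /* CONFIG_FAIR_GROUP_SCHED variant */

int main(void)
{
    struct sched_entity root  = { .parent = NULL,   .name = "root group"  };
    struct sched_entity group = { .parent = &root,  .name = "child group" };
    struct sched_entity task  = { .parent = &group, .name = "task"        };
    struct sched_entity *se = &task;

    /* Visits task -> child group -> root group, which is how the enqueue,
     * dequeue and tick paths listed here climb the hierarchy. */
    for_each_sched_entity(se)
        printf("visiting %s\n", se->name);
    return 0;
}
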
478 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
484 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
498 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
554 struct sched_entity *se = __node_2_se(leftmost); in update_min_vruntime() local
557 vruntime = se->vruntime; in update_min_vruntime()
559 vruntime = min_vruntime(vruntime, se->vruntime); in update_min_vruntime()
578 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
580 rb_add_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less); in __enqueue_entity()
583 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
585 rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline); in __dequeue_entity()
598 static struct sched_entity *__pick_next_entity(struct sched_entity *se) in __pick_next_entity() argument
600 struct rb_node *next = rb_next(&se->run_node); in __pick_next_entity()
644 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair() argument
646 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
647 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
674 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice() argument
682 slice = __sched_period(nr_running + !se->on_rq); in sched_slice()
684 for_each_sched_entity(se) { in sched_slice()
688 cfs_rq = cfs_rq_of(se); in sched_slice()
691 if (unlikely(!se->on_rq)) { in sched_slice()
694 update_load_add(&lw, se->load.weight); in sched_slice()
697 slice = __calc_delta(slice, se->load.weight, load); in sched_slice()
711 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice() argument
713 return calc_delta_fair(sched_slice(cfs_rq, se), se); in sched_vslice()
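
sched_slice() (line 674) hands each entity a share of the scheduling period proportional to its weight on the cfs_rq, and sched_vslice() (line 711) converts that wall-clock slice into virtual time through calc_delta_fair(), which scales by NICE_0_LOAD/weight. The sketch below works one example; the 6 ms period, the weights, and the plain division standing in for __calc_delta()'s fixed-point inverse-weight math are all assumptions.

/* Illustrative model of sched_slice() and sched_vslice(); not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD 1024ULL          /* assumed nice-0 weight */

int main(void)
{
    uint64_t period_ns = 6000000;    /* assumed scheduling period: 6 ms */
    uint64_t w_se = 2048;            /* this entity's weight (heavier than nice 0) */
    uint64_t w_rq = 2048 + 1024;     /* total weight queued on the cfs_rq */

    /* slice = period * se->load.weight / cfs_rq->load.weight */
    uint64_t slice = period_ns * w_se / w_rq;

    /* vslice = calc_delta_fair(slice, se) = slice * NICE_0_LOAD / weight */
    uint64_t vslice = slice * NICE_0_LOAD / w_se;

    printf("wall-clock slice: %llu ns, virtual slice: %llu ns\n",
           (unsigned long long)slice, (unsigned long long)vslice);
    return 0;
}
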
724 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
726 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
736 if (entity_is_task(se)) in init_entity_runnable_average()
737 sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
742 static void attach_entity_cfs_rq(struct sched_entity *se);
748 * util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
772 struct sched_entity *se = &p->se; in post_init_entity_util_avg() local
773 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg()
774 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
780 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
797 attach_entity_load_avg(cfs_rq, se); in post_init_entity_util_avg()
803 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
807 attach_entity_cfs_rq(se); in post_init_entity_util_avg()
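
post_init_entity_util_avg() seeds a new task's utilization from the formula quoted at line 748: util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight. A quick worked example of that formula follows; the runqueue numbers are made up for illustration.

/* Worked example of the util_avg seeding formula quoted above. */
#include <stdio.h>

int main(void)
{
    unsigned long cfs_rq_util_avg = 300;   /* current rq utilization (invented) */
    unsigned long cfs_rq_load_avg = 2048;  /* current rq load (invented) */
    unsigned long se_weight       = 1024;  /* new task at nice 0 */

    unsigned long util_avg = cfs_rq_util_avg * se_weight / (cfs_rq_load_avg + 1);

    printf("new task starts with util_avg = %lu\n", util_avg);
    return 0;
}
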
811 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
862 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
866 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start() argument
874 prev_wait_start = schedstat_val(se->statistics.wait_start); in update_stats_wait_start()
876 if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) && in update_stats_wait_start()
880 __schedstat_set(se->statistics.wait_start, wait_start); in update_stats_wait_start()
884 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end() argument
893 * When the sched_schedstat changes from 0 to 1, some sched se in update_stats_wait_end()
894 * maybe already in the runqueue, the se->statistics.wait_start in update_stats_wait_end()
898 if (unlikely(!schedstat_val(se->statistics.wait_start))) in update_stats_wait_end()
901 delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start); in update_stats_wait_end()
903 if (entity_is_task(se)) { in update_stats_wait_end()
904 p = task_of(se); in update_stats_wait_end()
911 __schedstat_set(se->statistics.wait_start, delta); in update_stats_wait_end()
917 __schedstat_set(se->statistics.wait_max, in update_stats_wait_end()
918 max(schedstat_val(se->statistics.wait_max), delta)); in update_stats_wait_end()
919 __schedstat_inc(se->statistics.wait_count); in update_stats_wait_end()
920 __schedstat_add(se->statistics.wait_sum, delta); in update_stats_wait_end()
921 __schedstat_set(se->statistics.wait_start, 0); in update_stats_wait_end()
925 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper() argument
933 sleep_start = schedstat_val(se->statistics.sleep_start); in update_stats_enqueue_sleeper()
934 block_start = schedstat_val(se->statistics.block_start); in update_stats_enqueue_sleeper()
936 if (entity_is_task(se)) in update_stats_enqueue_sleeper()
937 tsk = task_of(se); in update_stats_enqueue_sleeper()
945 if (unlikely(delta > schedstat_val(se->statistics.sleep_max))) in update_stats_enqueue_sleeper()
946 __schedstat_set(se->statistics.sleep_max, delta); in update_stats_enqueue_sleeper()
948 __schedstat_set(se->statistics.sleep_start, 0); in update_stats_enqueue_sleeper()
949 __schedstat_add(se->statistics.sum_sleep_runtime, delta); in update_stats_enqueue_sleeper()
962 if (unlikely(delta > schedstat_val(se->statistics.block_max))) in update_stats_enqueue_sleeper()
963 __schedstat_set(se->statistics.block_max, delta); in update_stats_enqueue_sleeper()
965 __schedstat_set(se->statistics.block_start, 0); in update_stats_enqueue_sleeper()
966 __schedstat_add(se->statistics.sum_sleep_runtime, delta); in update_stats_enqueue_sleeper()
970 __schedstat_add(se->statistics.iowait_sum, delta); in update_stats_enqueue_sleeper()
971 __schedstat_inc(se->statistics.iowait_count); in update_stats_enqueue_sleeper()
996 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue() argument
1005 if (se != cfs_rq->curr) in update_stats_enqueue()
1006 update_stats_wait_start(cfs_rq, se); in update_stats_enqueue()
1009 update_stats_enqueue_sleeper(cfs_rq, se); in update_stats_enqueue()
1013 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue() argument
1023 if (se != cfs_rq->curr) in update_stats_dequeue()
1024 update_stats_wait_end(cfs_rq, se); in update_stats_dequeue()
1026 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { in update_stats_dequeue()
1027 struct task_struct *tsk = task_of(se); in update_stats_dequeue()
1033 __schedstat_set(se->statistics.sleep_start, in update_stats_dequeue()
1036 __schedstat_set(se->statistics.block_start, in update_stats_dequeue()
1045 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1050 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2243 now = p->se.exec_start; in numa_get_avg_runtime()
2244 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2254 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
2718 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
2849 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
2850 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
2919 now = curr->se.sum_exec_runtime; in task_tick_numa()
2986 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
2988 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
2990 if (entity_is_task(se)) { in account_entity_enqueue()
2993 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
2994 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
3001 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3003 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3005 if (entity_is_task(se)) { in account_entity_dequeue()
3006 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3007 list_del_init(&se->group_node); in account_entity_dequeue()
3063 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3065 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3066 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3070 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3072 u32 divider = get_pelt_divider(&se->avg); in dequeue_load_avg()
3073 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3078 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3080 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3083 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3086 if (se->on_rq) { in reweight_entity()
3088 if (cfs_rq->curr == se) in reweight_entity()
3090 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3092 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3094 update_load_set(&se->load, weight); in reweight_entity()
3098 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3100 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3104 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3105 if (se->on_rq) in reweight_entity()
3106 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3112 struct sched_entity *se = &p->se; in reweight_task() local
3113 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task()
3114 struct load_weight *load = &se->load; in reweight_task()
3117 reweight_entity(cfs_rq, se, weight); in reweight_task()
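
reweight_entity() (line 3083) temporarily takes the entity out of the runqueue accounting, installs the new weight, rescales load_avg from the unchanged load_sum (line 3100), and adds the contribution back. A toy model of that rescale is below; the load_sum and the PELT divider are invented values, whereas the kernel takes the divider from get_pelt_divider().

/* Illustrative rescale of load_avg when only the weight changes. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t load_sum   = 20000;   /* PELT running sum (invented) */
    uint32_t divider    = 47742;   /* assumed PELT divider */
    uint64_t old_weight = 1024, new_weight = 512;

    uint64_t old_avg = old_weight * load_sum / divider;
    uint64_t new_avg = new_weight * load_sum / divider;

    /* Halving the weight halves the contribution the entity adds back to
     * cfs_rq->avg.load_avg via enqueue_load_avg(). */
    printf("load_avg: %llu -> %llu\n",
           (unsigned long long)old_avg, (unsigned long long)new_avg);
    return 0;
}
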
3237 static void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3239 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3251 if (likely(se->load.weight == shares)) in update_cfs_group()
3257 reweight_entity(cfs_rq_of(se), se, shares); in update_cfs_group()
3261 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3381 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
3397 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
3421 __update_load_avg_blocked_se(p_last_update_time, se); in set_task_rq_fair()
3422 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
3495 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
3497 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
3505 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_util()
3511 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
3512 se->avg.util_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
3520 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
3522 long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
3530 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_runnable()
3536 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
3537 se->avg.runnable_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
3545 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
3558 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in update_tg_cfs_load()
3568 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
3580 /* But make sure to not inflate se's runnable */ in update_tg_cfs_load()
3581 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
3590 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
3593 load_sum = (s64)se_weight(se) * runnable_sum; in update_tg_cfs_load()
3596 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
3598 delta = load_avg - se->avg.load_avg; in update_tg_cfs_load()
3602 se->avg.load_avg = load_avg; in update_tg_cfs_load()
3615 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3619 if (entity_is_task(se)) in propagate_entity_load_avg()
3622 gcfs_rq = group_cfs_rq(se); in propagate_entity_load_avg()
3628 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
3632 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3633 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3634 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
3637 trace_pelt_se_tp(se); in propagate_entity_load_avg()
3646 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update() argument
3648 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
3654 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
3676 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
3754 * @se: sched_entity to attach
3759 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
3762 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in attach_entity_load_avg()
3768 * When we attach the @se to the @cfs_rq, we must align the decay in attach_entity_load_avg()
3774 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
3775 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
3783 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
3785 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
3787 se->avg.load_sum = divider; in attach_entity_load_avg()
3788 if (se_weight(se)) { in attach_entity_load_avg()
3789 se->avg.load_sum = in attach_entity_load_avg()
3790 div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
3793 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
3794 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
3795 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
3796 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
3797 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
3799 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
3809 * @se: sched_entity to detach
3814 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
3817 * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. in detach_entity_load_avg()
3822 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
3823 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
3825 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
3828 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
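
attach_entity_load_avg() and detach_entity_load_avg() are symmetric: attach adds the entity's util, runnable and load signals into the cfs_rq averages (lines 3793-3799), detach subtracts them with sub_positive() (lines 3822-3825) so a stale entity signal larger than the runqueue's cannot underflow it. Below is a simplified userspace model of that clamped subtraction; the real helper operates on the PELT fields in place and uses READ_ONCE/WRITE_ONCE.

/* Simplified model of the sub_positive() pattern used on detach. */
#include <stdio.h>

static void sub_positive(unsigned long *ptr, unsigned long val)
{
    *ptr = (*ptr > val) ? *ptr - val : 0;   /* clamp at zero, never wrap */
}

int main(void)
{
    unsigned long rq_util_avg = 120, se_util_avg = 150;   /* invented values */

    sub_positive(&rq_util_avg, se_util_avg);
    printf("cfs_rq util_avg after detach: %lu\n", rq_util_avg);
    return 0;
}
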
3843 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
3852 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
3853 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
3856 decayed |= propagate_entity_load_avg(se); in update_load_avg()
3858 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
3867 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
3903 static void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg() argument
3905 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg()
3909 __update_load_avg_blocked_se(last_update_time, se); in sync_entity_load_avg()
3916 static void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
3918 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
3927 sync_entity_load_avg(se); in remove_entity_load_avg()
3931 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
3932 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
3933 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
3951 return READ_ONCE(p->se.avg.util_avg); in task_util()
3956 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4048 ue = p->se.avg.util_est; in util_est_update()
4108 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
4110 trace_sched_util_est_se_tp(&p->se); in util_est_update()
4151 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
4156 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
4159 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
4161 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
4181 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread() argument
4184 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread()
4195 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity() argument
4206 vruntime += sched_vslice(cfs_rq, se); in place_entity()
4223 se->vruntime = max_vruntime(se->vruntime, vruntime); in place_entity()
4281 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
4284 bool curr = cfs_rq->curr == se; in enqueue_entity()
4291 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4302 se->vruntime += cfs_rq->min_vruntime; in enqueue_entity()
4312 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
4313 se_update_runnable(se); in enqueue_entity()
4314 update_cfs_group(se); in enqueue_entity()
4315 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
4318 place_entity(cfs_rq, se, 0); in enqueue_entity()
4321 update_stats_enqueue(cfs_rq, se, flags); in enqueue_entity()
4322 check_spread(cfs_rq, se); in enqueue_entity()
4324 __enqueue_entity(cfs_rq, se); in enqueue_entity()
4325 se->on_rq = 1; in enqueue_entity()
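
When an entity leaves a runqueue for a sleep or a migration, dequeue_entity() subtracts cfs_rq->min_vruntime (line 4421), and enqueue_entity() adds the destination queue's min_vruntime back where renormalization applies (lines 4291/4302), so off-queue vruntime is stored relative to the queue it came from. The small illustration below shows why that keeps cross-runqueue moves fair; all numbers are invented.

/* Illustration of vruntime renormalization across a migration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t src_min_vruntime = 1000000, dst_min_vruntime = 5000000;
    uint64_t vruntime = 1003000;          /* absolute, on the source rq */

    vruntime -= src_min_vruntime;         /* dequeue: make it relative (3000) */
    vruntime += dst_min_vruntime;         /* enqueue on the destination rq */

    /* The entity lands just ahead of the destination's min_vruntime instead
     * of far behind or far ahead of everyone already queued there. */
    printf("vruntime after migration: %llu\n", (unsigned long long)vruntime);
    return 0;
}
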
4339 static void __clear_buddies_last(struct sched_entity *se) in __clear_buddies_last() argument
4341 for_each_sched_entity(se) { in __clear_buddies_last()
4342 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_last()
4343 if (cfs_rq->last != se) in __clear_buddies_last()
4350 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
4352 for_each_sched_entity(se) { in __clear_buddies_next()
4353 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
4354 if (cfs_rq->next != se) in __clear_buddies_next()
4361 static void __clear_buddies_skip(struct sched_entity *se) in __clear_buddies_skip() argument
4363 for_each_sched_entity(se) { in __clear_buddies_skip()
4364 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_skip()
4365 if (cfs_rq->skip != se) in __clear_buddies_skip()
4372 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
4374 if (cfs_rq->last == se) in clear_buddies()
4375 __clear_buddies_last(se); in clear_buddies()
4377 if (cfs_rq->next == se) in clear_buddies()
4378 __clear_buddies_next(se); in clear_buddies()
4380 if (cfs_rq->skip == se) in clear_buddies()
4381 __clear_buddies_skip(se); in clear_buddies()
4387 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
4402 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_entity()
4403 se_update_runnable(se); in dequeue_entity()
4405 update_stats_dequeue(cfs_rq, se, flags); in dequeue_entity()
4407 clear_buddies(cfs_rq, se); in dequeue_entity()
4409 if (se != cfs_rq->curr) in dequeue_entity()
4410 __dequeue_entity(cfs_rq, se); in dequeue_entity()
4411 se->on_rq = 0; in dequeue_entity()
4412 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
4416 * min_vruntime if @se is the one holding it back. But before doing in dequeue_entity()
4417 * update_min_vruntime() again, which will discount @se's position and in dequeue_entity()
4421 se->vruntime -= cfs_rq->min_vruntime; in dequeue_entity()
4426 update_cfs_group(se); in dequeue_entity()
4429 * Now advance min_vruntime if @se was the entity holding it back, in dequeue_entity()
4445 struct sched_entity *se; in check_preempt_tick() local
4468 se = __pick_first_entity(cfs_rq); in check_preempt_tick()
4469 delta = curr->vruntime - se->vruntime; in check_preempt_tick()
4479 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
4481 clear_buddies(cfs_rq, se); in set_next_entity()
4484 if (se->on_rq) { in set_next_entity()
4490 update_stats_wait_end(cfs_rq, se); in set_next_entity()
4491 __dequeue_entity(cfs_rq, se); in set_next_entity()
4492 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
4495 update_stats_curr_start(cfs_rq, se); in set_next_entity()
4496 cfs_rq->curr = se; in set_next_entity()
4504 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
4505 schedstat_set(se->statistics.slice_max, in set_next_entity()
4506 max((u64)schedstat_val(se->statistics.slice_max), in set_next_entity()
4507 se->sum_exec_runtime - se->prev_sum_exec_runtime)); in set_next_entity()
4510 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
4514 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4527 struct sched_entity *se; in pick_next_entity() local
4536 se = left; /* ideally we run the leftmost entity */ in pick_next_entity()
4542 if (cfs_rq->skip && cfs_rq->skip == se) { in pick_next_entity()
4545 if (se == curr) { in pick_next_entity()
4548 second = __pick_next_entity(se); in pick_next_entity()
4554 se = second; in pick_next_entity()
4561 se = cfs_rq->next; in pick_next_entity()
4566 se = cfs_rq->last; in pick_next_entity()
4569 return se; in pick_next_entity()
4835 struct sched_entity *se; in throttle_cfs_rq() local
4859 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
4868 for_each_sched_entity(se) { in throttle_cfs_rq()
4869 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
4871 if (!se->on_rq) in throttle_cfs_rq()
4874 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); in throttle_cfs_rq()
4876 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
4884 se = parent_entity(se); in throttle_cfs_rq()
4889 for_each_sched_entity(se) { in throttle_cfs_rq()
4890 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
4892 if (!se->on_rq) in throttle_cfs_rq()
4895 update_load_avg(qcfs_rq, se, 0); in throttle_cfs_rq()
4896 se_update_runnable(se); in throttle_cfs_rq()
4898 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
4905 /* At this point se is NULL and we are at root level*/ in throttle_cfs_rq()
4922 struct sched_entity *se; in unthrottle_cfs_rq() local
4925 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
4948 for_each_sched_entity(se) { in unthrottle_cfs_rq()
4949 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
4951 if (se->on_rq) in unthrottle_cfs_rq()
4953 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
4955 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
4966 for_each_sched_entity(se) { in unthrottle_cfs_rq()
4967 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
4969 update_load_avg(qcfs_rq, se, UPDATE_TG); in unthrottle_cfs_rq()
4970 se_update_runnable(se); in unthrottle_cfs_rq()
4972 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
4990 /* At this point se is NULL and we are at root level*/ in unthrottle_cfs_rq()
4999 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5000 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5509 struct sched_entity *se = &p->se; in hrtick_start_fair() local
5510 struct cfs_rq *cfs_rq = cfs_rq_of(se); in hrtick_start_fair()
5515 u64 slice = sched_slice(cfs_rq, se); in hrtick_start_fair()
5516 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
5540 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) in hrtick_update()
5596 struct sched_entity *se = &p->se; in enqueue_task_fair() local
5616 for_each_sched_entity(se) { in enqueue_task_fair()
5617 if (se->on_rq) in enqueue_task_fair()
5619 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5620 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
5635 for_each_sched_entity(se) { in enqueue_task_fair()
5636 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5638 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
5639 se_update_runnable(se); in enqueue_task_fair()
5640 update_cfs_group(se); in enqueue_task_fair()
5660 /* At this point se is NULL and we are at root level*/ in enqueue_task_fair()
5688 for_each_sched_entity(se) { in enqueue_task_fair()
5689 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
5701 static void set_next_buddy(struct sched_entity *se);
5711 struct sched_entity *se = &p->se; in dequeue_task_fair() local
5718 for_each_sched_entity(se) { in dequeue_task_fair()
5719 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5720 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
5735 se = parent_entity(se); in dequeue_task_fair()
5740 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
5741 set_next_buddy(se); in dequeue_task_fair()
5747 for_each_sched_entity(se) { in dequeue_task_fair()
5748 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
5750 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
5751 se_update_runnable(se); in dequeue_task_fair()
5752 update_cfs_group(se); in dequeue_task_fair()
5766 /* At this point se is NULL and we are at root level*/ in dequeue_task_fair()
5820 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
5843 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
5850 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
6000 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); in wake_affine()
6005 schedstat_inc(p->se.statistics.nr_wakeups_affine); in wake_affine()
6085 sync_entity_load_avg(&p->se); in find_idlest_cpu()
6402 sync_entity_load_avg(&p->se); in select_idle_sibling()
6566 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
6812 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
6972 static void detach_entity_cfs_rq(struct sched_entity *se);
6988 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
6989 struct cfs_rq *cfs_rq = cfs_rq_of(se); in migrate_task_rq_fair()
7004 se->vruntime -= min_vruntime; in migrate_task_rq_fair()
7013 detach_entity_cfs_rq(&p->se); in migrate_task_rq_fair()
7024 remove_entity_load_avg(&p->se); in migrate_task_rq_fair()
7028 p->se.avg.last_update_time = 0; in migrate_task_rq_fair()
7031 p->se.exec_start = 0; in migrate_task_rq_fair()
7038 remove_entity_load_avg(&p->se); in task_dead_fair()
7051 static unsigned long wakeup_gran(struct sched_entity *se) in wakeup_gran() argument
7059 * By using 'se' instead of 'curr' we penalize light tasks, so in wakeup_gran()
7060 * they get preempted easier. That is, if 'se' < 'curr' then in wakeup_gran()
7062 * lighter, if otoh 'se' > 'curr' then the resulting gran will in wakeup_gran()
7068 return calc_delta_fair(gran, se); in wakeup_gran()
7072 * Should 'se' preempt 'curr'.
7086 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity() argument
7088 s64 gran, vdiff = curr->vruntime - se->vruntime; in wakeup_preempt_entity()
7093 gran = wakeup_gran(se); in wakeup_preempt_entity()
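
wakeup_gran() (line 7051) scales the wakeup granularity by the waking entity's weight, and wakeup_preempt_entity() (line 7086) turns the vruntime difference into the -1/0/+1 answer consumed by check_preempt_wakeup() and the buddy checks in pick_next_entity(). A minimal model of that decision follows; the granularity scaling is omitted and all values are invented.

/* Model of the -1 / 0 / +1 convention of wakeup_preempt_entity(). */
#include <stdio.h>

static int wakeup_preempt(long long curr_vruntime, long long se_vruntime,
                          long long gran)
{
    long long vdiff = curr_vruntime - se_vruntime;

    if (vdiff <= 0)
        return -1;            /* current entity is not behind: keep running */
    if (vdiff > gran)
        return 1;             /* waker is far enough ahead in vruntime: preempt */
    return 0;                 /* within one granularity: leave curr alone */
}

int main(void)
{
    long long gran = 1000000; /* ~1 ms of virtual time, assumed */

    printf("%d\n", wakeup_preempt(5000000, 3000000, gran)); /* prints 1  */
    printf("%d\n", wakeup_preempt(5000000, 4500000, gran)); /* prints 0  */
    printf("%d\n", wakeup_preempt(5000000, 6000000, gran)); /* prints -1 */
    return 0;
}
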
7100 static void set_last_buddy(struct sched_entity *se) in set_last_buddy() argument
7102 for_each_sched_entity(se) { in set_last_buddy()
7103 if (SCHED_WARN_ON(!se->on_rq)) in set_last_buddy()
7105 if (se_is_idle(se)) in set_last_buddy()
7107 cfs_rq_of(se)->last = se; in set_last_buddy()
7111 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
7113 for_each_sched_entity(se) { in set_next_buddy()
7114 if (SCHED_WARN_ON(!se->on_rq)) in set_next_buddy()
7116 if (se_is_idle(se)) in set_next_buddy()
7118 cfs_rq_of(se)->next = se; in set_next_buddy()
7122 static void set_skip_buddy(struct sched_entity *se) in set_skip_buddy() argument
7124 for_each_sched_entity(se) in set_skip_buddy()
7125 cfs_rq_of(se)->skip = se; in set_skip_buddy()
7134 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup() local
7140 if (unlikely(se == pse)) in check_preempt_wakeup()
7182 find_matching_se(&se, &pse); in check_preempt_wakeup()
7185 cse_is_idle = se_is_idle(se); in check_preempt_wakeup()
7197 update_curr(cfs_rq_of(se)); in check_preempt_wakeup()
7198 if (wakeup_preempt_entity(se, pse) == 1) { in check_preempt_wakeup()
7221 if (unlikely(!se->on_rq || curr == rq->idle)) in check_preempt_wakeup()
7224 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) in check_preempt_wakeup()
7225 set_last_buddy(se); in check_preempt_wakeup()
7231 struct sched_entity *se; in pick_task_fair() local
7253 se = pick_next_entity(cfs_rq, curr); in pick_task_fair()
7254 cfs_rq = group_cfs_rq(se); in pick_task_fair()
7257 return task_of(se); in pick_task_fair()
7265 struct sched_entity *se; in pick_next_task_fair() local
7316 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
7317 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7320 p = task_of(se); in pick_next_task_fair()
7328 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
7330 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
7331 int se_depth = se->depth; in pick_next_task_fair()
7339 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
7340 se = parent_entity(se); in pick_next_task_fair()
7345 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7355 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
7356 set_next_entity(cfs_rq, se); in pick_next_task_fair()
7357 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
7360 p = task_of(se); in pick_next_task_fair()
7369 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
7415 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
7418 for_each_sched_entity(se) { in put_prev_task_fair()
7419 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
7420 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
7433 struct sched_entity *se = &curr->se; in yield_task_fair() local
7441 clear_buddies(cfs_rq, se); in yield_task_fair()
7457 set_skip_buddy(se); in yield_task_fair()
7462 struct sched_entity *se = &p->se; in yield_to_task_fair() local
7465 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
7469 set_next_buddy(se); in yield_to_task_fair()
7700 (&p->se == cfs_rq_of(&p->se)->next || in task_hot()
7701 &p->se == cfs_rq_of(&p->se)->last)) in task_hot()
7717 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
7809 schedstat_inc(p->se.statistics.nr_failed_migrations_affine); in can_migrate_task()
7843 schedstat_inc(p->se.statistics.nr_failed_migrations_running); in can_migrate_task()
7865 schedstat_inc(p->se.statistics.nr_forced_migrations); in can_migrate_task()
7870 schedstat_inc(p->se.statistics.nr_failed_migrations_hot); in can_migrate_task()
7898 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
7953 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
8020 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
8043 list_move(&p->se.group_node, tasks); in detach_tasks()
8096 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
8097 list_del_init(&p->se.group_node); in attach_tasks()
8192 struct sched_entity *se; in __update_blocked_fair() local
8202 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
8203 if (se && !skip_blocked_update(se)) in __update_blocked_fair()
8204 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __update_blocked_fair()
8229 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
8237 for_each_sched_entity(se) { in update_cfs_rq_h_load()
8238 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
8239 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
8244 if (!se) { in update_cfs_rq_h_load()
8249 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
8251 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
8253 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
8264 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
8282 return p->se.avg.load_avg; in task_h_load()
8844 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
10945 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) in __entity_slice_used() argument
10947 u64 slice = sched_slice(cfs_rq_of(se), se); in __entity_slice_used()
10948 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; in __entity_slice_used()
10974 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) in task_tick_core()
10981 static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle) in se_fi_update() argument
10983 for_each_sched_entity(se) { in se_fi_update()
10984 struct cfs_rq *cfs_rq = cfs_rq_of(se); in se_fi_update()
10998 struct sched_entity *se = &p->se; in task_vruntime_update() local
11003 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); in task_vruntime_update()
11009 struct sched_entity *sea = &a->se; in cfs_prio_less()
11010 struct sched_entity *seb = &b->se; in cfs_prio_less()
11019 * Find an se in the hierarchy for tasks a and b, such that the se's in cfs_prio_less()
11043 * Find delta after normalizing se's vruntime with its cfs_rq's in cfs_prio_less()
11067 struct sched_entity *se = &curr->se; in task_tick_fair() local
11069 for_each_sched_entity(se) { in task_tick_fair()
11070 cfs_rq = cfs_rq_of(se); in task_tick_fair()
11071 entity_tick(cfs_rq, se, queued); in task_tick_fair()
11091 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
11102 se->vruntime = curr->vruntime; in task_fork_fair()
11104 place_entity(cfs_rq, se, 1); in task_fork_fair()
11106 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { in task_fork_fair()
11111 swap(curr->vruntime, se->vruntime); in task_fork_fair()
11115 se->vruntime -= cfs_rq->min_vruntime; in task_fork_fair()
11146 struct sched_entity *se = &p->se; in vruntime_normalized() local
11165 if (!se->sum_exec_runtime || in vruntime_normalized()
11177 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq() argument
11181 list_add_leaf_cfs_rq(cfs_rq_of(se)); in propagate_entity_cfs_rq()
11184 se = se->parent; in propagate_entity_cfs_rq()
11186 for_each_sched_entity(se) { in propagate_entity_cfs_rq()
11187 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
11190 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
11200 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq() argument
11203 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq() argument
11205 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq()
11208 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
11209 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
11211 propagate_entity_cfs_rq(se); in detach_entity_cfs_rq()
11214 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq() argument
11216 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq()
11223 se->depth = se->parent ? se->parent->depth + 1 : 0; in attach_entity_cfs_rq()
11227 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
11228 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
11230 propagate_entity_cfs_rq(se); in attach_entity_cfs_rq()
11235 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
11236 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_task_cfs_rq()
11243 place_entity(cfs_rq, se, 0); in detach_task_cfs_rq()
11244 se->vruntime -= cfs_rq->min_vruntime; in detach_task_cfs_rq()
11247 detach_entity_cfs_rq(se); in detach_task_cfs_rq()
11252 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
11253 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_task_cfs_rq()
11255 attach_entity_cfs_rq(se); in attach_task_cfs_rq()
11258 se->vruntime += cfs_rq->min_vruntime; in attach_task_cfs_rq()
11290 struct sched_entity *se = &p->se; in set_next_task_fair() local
11298 list_move(&se->group_node, &rq->cfs_tasks); in set_next_task_fair()
11302 for_each_sched_entity(se) { in set_next_task_fair()
11303 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair()
11305 set_next_entity(cfs_rq, se); in set_next_task_fair()
11326 struct sched_entity *se = &p->se; in task_set_group_fair() local
11329 se->depth = se->parent ? se->parent->depth + 1 : 0; in task_set_group_fair()
11338 /* Tell se's cfs_rq has been changed -- migrated */ in task_move_group_fair()
11339 p->se.avg.last_update_time = 0; in task_move_group_fair()
11366 if (tg->se) in free_fair_sched_group()
11367 kfree(tg->se[i]); in free_fair_sched_group()
11371 kfree(tg->se); in free_fair_sched_group()
11376 struct sched_entity *se; in alloc_fair_sched_group() local
11383 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
11384 if (!tg->se) in alloc_fair_sched_group()
11397 se = kzalloc_node(sizeof(struct sched_entity), in alloc_fair_sched_group()
11399 if (!se) in alloc_fair_sched_group()
11403 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
11404 init_entity_runnable_average(se); in alloc_fair_sched_group()
11417 struct sched_entity *se; in online_fair_sched_group() local
11424 se = tg->se[i]; in online_fair_sched_group()
11427 attach_entity_cfs_rq(se); in online_fair_sched_group()
11440 if (tg->se[cpu]) in unregister_fair_sched_group()
11441 remove_entity_load_avg(tg->se[cpu]); in unregister_fair_sched_group()
11459 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
11469 tg->se[cpu] = se; in init_tg_cfs_entry()
11471 /* se could be NULL for root_task_group */ in init_tg_cfs_entry()
11472 if (!se) in init_tg_cfs_entry()
11476 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
11477 se->depth = 0; in init_tg_cfs_entry()
11479 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
11480 se->depth = parent->depth + 1; in init_tg_cfs_entry()
11483 se->my_q = cfs_rq; in init_tg_cfs_entry()
11485 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
11486 se->parent = parent; in init_tg_cfs_entry()
11500 if (!tg->se[0]) in __sched_group_set_shares()
11511 struct sched_entity *se = tg->se[i]; in __sched_group_set_shares() local
11517 for_each_sched_entity(se) { in __sched_group_set_shares()
11518 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __sched_group_set_shares()
11519 update_cfs_group(se); in __sched_group_set_shares()
11562 struct sched_entity *se = tg->se[i]; in sched_group_set_idle() local
11579 for_each_sched_entity(se) { in sched_group_set_idle()
11580 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sched_group_set_idle()
11582 if (!se->on_rq) in sched_group_set_idle()
11624 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
11632 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); in get_rr_interval_fair()