Lines matching references to 'se' (struct sched_entity) in kernel/sched/fair.c

311 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)  in calc_delta_fair()  argument
313 if (unlikely(se->load.weight != NICE_0_LOAD)) in calc_delta_fair()
314 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); in calc_delta_fair()
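
Note: calc_delta_fair() converts wall-clock runtime into weighted virtual time: nice-0 tasks pass through unchanged, heavier tasks accrue vruntime more slowly. A minimal userspace sketch of that proportion (the real __calc_delta() multiplies by a precomputed inverse weight instead of dividing; NICE_0_LOAD is assumed here to be the scaled-down nice-0 weight, 1024):

    #include <stdint.h>
    #include <stdio.h>

    #define NICE_0_LOAD 1024ULL   /* assumed scaled-down nice-0 weight */

    /* delta_fair = delta * NICE_0_LOAD / weight */
    static uint64_t calc_delta_fair_sketch(uint64_t delta_ns, uint64_t weight)
    {
        if (weight != NICE_0_LOAD)
            delta_ns = delta_ns * NICE_0_LOAD / weight;
        return delta_ns;
    }

    int main(void)
    {
        /* a nice -5 task (weight 3121 in the kernel's table) ages ~1/3 as fast */
        printf("%llu\n", (unsigned long long)calc_delta_fair_sketch(3000000, 3121));
        return 0;
    }
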
328 #define for_each_sched_entity(se) \ argument
329 for (; se; se = se->parent)
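
Note: under CONFIG_FAIR_GROUP_SCHED this walker climbs from a task's entity through its task-group entities up to the root; the enqueue/dequeue and throttle paths further down rely on it touching every level. The CONFIG-off variant at line 495 degenerates to a single iteration (se becomes NULL after one pass). A hypothetical userspace rendering of the walk:

    struct entity { struct entity *parent; };

    #define for_each_entity(se) for (; (se); (se) = (se)->parent)

    /* e.g. counting hierarchy depth from a task entity up to the root */
    static int entity_depth(struct entity *se)
    {
        int d = 0;
        for_each_entity(se)
            d++;
        return d;
    }
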
431 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group() argument
433 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
434 return se->cfs_rq; in is_same_group()
439 static inline struct sched_entity *parent_entity(const struct sched_entity *se) in parent_entity() argument
441 return se->parent; in parent_entity()
445 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
457 se_depth = (*se)->depth; in find_matching_se()
462 *se = parent_entity(*se); in find_matching_se()
470 while (!is_same_group(*se, *pse)) { in find_matching_se()
471 *se = parent_entity(*se); in find_matching_se()
486 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
488 if (entity_is_task(se)) in se_is_idle()
489 return task_has_idle_policy(task_of(se)); in se_is_idle()
490 return cfs_rq_is_idle(group_cfs_rq(se)); in se_is_idle()
495 #define for_each_sched_entity(se) \ argument
496 for (; se; se = NULL)
514 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity() argument
520 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se() argument
534 static int se_is_idle(struct sched_entity *se) in se_is_idle() argument
572 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_key() argument
574 return (s64)(se->vruntime - cfs_rq->min_vruntime); in entity_key()
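
Note: entity_key() orders the timeline by vruntime relative to cfs_rq->min_vruntime; the s64 cast keeps comparisons correct even if the u64 vruntime counters wrap. A sketch of why the signed relative key is the safe comparison:

    #include <stdint.h>
    #include <stdio.h>

    /* key = (s64)(vruntime - min_vruntime): a signed delta survives u64 wrap */
    static int64_t entity_key_sketch(uint64_t vruntime, uint64_t min_vruntime)
    {
        return (int64_t)(vruntime - min_vruntime);
    }

    int main(void)
    {
        uint64_t min = UINT64_MAX - 5;  /* counter about to wrap */
        uint64_t v   = 10;              /* already wrapped past min */
        /* naive "v < min" claims v is far behind; the key says 16 ahead */
        printf("%lld\n", (long long)entity_key_sketch(v, min));
        return 0;
    }
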
639 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_add() argument
641 unsigned long weight = scale_load_down(se->load.weight); in avg_vruntime_add()
642 s64 key = entity_key(cfs_rq, se); in avg_vruntime_add()
649 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_sub() argument
651 unsigned long weight = scale_load_down(se->load.weight); in avg_vruntime_sub()
652 s64 key = entity_key(cfs_rq, se); in avg_vruntime_sub()
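
Note: avg_vruntime_add()/avg_vruntime_sub() maintain running sums so the EEVDF zero-lag point V can be read in O(1): the cfs_rq accumulates sum(w_i * key_i) and sum(w_i) (the kernel's avg_vruntime/avg_load fields), and avg_vruntime() returns min_vruntime plus their quotient. A sketch with hypothetical names:

    #include <stdint.h>

    struct rq_sketch {
        int64_t  avg_vruntime;  /* sum of w_i * key_i */
        uint64_t avg_load;      /* sum of w_i         */
        uint64_t min_vruntime;
    };

    static void avg_add(struct rq_sketch *rq, uint64_t w, int64_t key)
    {
        rq->avg_vruntime += (int64_t)w * key;
        rq->avg_load     += w;
    }

    static void avg_sub(struct rq_sketch *rq, uint64_t w, int64_t key)
    {
        rq->avg_vruntime -= (int64_t)w * key;
        rq->avg_load     -= w;
    }

    /* weighted-average vruntime: the zero-lag point V */
    static uint64_t avg_vruntime_sketch(const struct rq_sketch *rq)
    {
        int64_t avg = rq->avg_load ? rq->avg_vruntime / (int64_t)rq->avg_load : 0;
        return rq->min_vruntime + avg;
    }
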
710 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_entity_lag() argument
714 SCHED_WARN_ON(!se->on_rq); in update_entity_lag()
715 lag = avg_vruntime(cfs_rq) - se->vruntime; in update_entity_lag()
717 limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se); in update_entity_lag()
718 se->vlag = clamp(lag, -limit, limit); in update_entity_lag()
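
Note: on dequeue, update_entity_lag() records how far the zero-lag point V has drifted from the entity's own vruntime, clamped to roughly two fair-scaled slices (at least one tick) so no entity can bank unbounded credit or debt across a sleep. Sketch:

    #include <stdint.h>

    static int64_t clamp_s64(int64_t v, int64_t lo, int64_t hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    /* lag = V - vruntime, clamped to +/- limit (~ two fair-scaled slices) */
    static int64_t update_lag_sketch(uint64_t V, uint64_t vruntime, int64_t limit)
    {
        return clamp_s64((int64_t)(V - vruntime), -limit, limit);
    }
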
738 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_eligible() argument
751 return avg >= entity_key(cfs_rq, se) * load; in entity_eligible()
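
Note: entity_eligible() is the EEVDF eligibility test, lag(i) = V - v_i >= 0, rearranged to avoid a division: with avg = sum(w_j * key_j) and load = sum(w_j), v_i <= V exactly when key_i * load <= avg. Sketch:

    #include <stdint.h>

    /* eligible iff key_i * load <= avg, i.e. vruntime_i <= V */
    static int entity_eligible_sketch(int64_t key, int64_t avg, uint64_t load)
    {
        return avg >= key * (int64_t)load;
    }
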
770 struct sched_entity *se = __pick_first_entity(cfs_rq); in update_min_vruntime() local
782 if (se) { in update_min_vruntime()
784 vruntime = se->vruntime; in update_min_vruntime()
786 vruntime = min_vruntime(vruntime, se->vruntime); in update_min_vruntime()
801 static inline void __update_min_deadline(struct sched_entity *se, struct rb_node *node) in __update_min_deadline() argument
805 if (deadline_gt(min_deadline, se, rse)) in __update_min_deadline()
806 se->min_deadline = rse->min_deadline; in __update_min_deadline()
813 static inline bool min_deadline_update(struct sched_entity *se, bool exit) in min_deadline_update() argument
815 u64 old_min_deadline = se->min_deadline; in min_deadline_update()
816 struct rb_node *node = &se->run_node; in min_deadline_update()
818 se->min_deadline = se->deadline; in min_deadline_update()
819 __update_min_deadline(se, node->rb_right); in min_deadline_update()
820 __update_min_deadline(se, node->rb_left); in min_deadline_update()
822 return se->min_deadline == old_min_deadline; in min_deadline_update()
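
Note: min_deadline is an augmented-rbtree field: each node caches the minimum virtual deadline in its subtree, which is what lets __pick_eevdf() below find the earliest eligible deadline in O(log n). The recurrence the callbacks maintain (the kernel compares wrap-safely via deadline_gt(); plain < assumed here):

    #include <stdint.h>

    struct node_sketch {
        uint64_t deadline, min_deadline;
        struct node_sketch *left, *right;
    };

    /* min_deadline = min(own deadline, children's subtree minima) */
    static void min_deadline_update_sketch(struct node_sketch *n)
    {
        uint64_t m = n->deadline;
        if (n->left && n->left->min_deadline < m)
            m = n->left->min_deadline;
        if (n->right && n->right->min_deadline < m)
            m = n->right->min_deadline;
        n->min_deadline = m;
    }
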
831 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
833 avg_vruntime_add(cfs_rq, se); in __enqueue_entity()
834 se->min_deadline = se->deadline; in __enqueue_entity()
835 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __enqueue_entity()
839 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
841 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __dequeue_entity()
843 avg_vruntime_sub(cfs_rq, se); in __dequeue_entity()
894 struct sched_entity *se = __node_2_se(node); in __pick_eevdf() local
899 if (!entity_eligible(cfs_rq, se)) { in __pick_eevdf()
907 if (!best || deadline_gt(deadline, best, se)) in __pick_eevdf()
908 best = se; in __pick_eevdf()
925 if (left->min_deadline == se->min_deadline) in __pick_eevdf()
930 if (se->deadline == se->min_deadline) in __pick_eevdf()
950 struct sched_entity *se = __node_2_se(node); in __pick_eevdf() local
953 if (se->deadline == se->min_deadline) in __pick_eevdf()
954 return se; in __pick_eevdf()
958 __node_2_se(node->rb_left)->min_deadline == se->min_deadline) { in __pick_eevdf()
971 struct sched_entity *se = __pick_eevdf(cfs_rq); in pick_eevdf() local
973 if (!se) { in pick_eevdf()
981 return se; in pick_eevdf()
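
Note: __pick_eevdf() searches the timeline for the eligible entity (vruntime <= V) with the earliest virtual deadline, steering through subtrees via the min_deadline augment. An O(n) reference of the same semantics, with equal weights assumed so that V is a plain average:

    #include <stdint.h>
    #include <stddef.h>

    struct ent { int64_t key; uint64_t deadline; };

    static struct ent *pick_eevdf_sketch(struct ent *v, int n)
    {
        int64_t sum = 0;
        struct ent *best = NULL;
        int i;

        for (i = 0; i < n; i++)
            sum += v[i].key;
        for (i = 0; i < n; i++) {
            if (v[i].key * n > sum)
                continue;        /* ineligible: already ahead of its share */
            if (!best || v[i].deadline < best->deadline)
                best = &v[i];
        }
        return best;             /* NULL only when the timeline is empty */
    }
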
1013 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
1019 static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_deadline() argument
1021 if ((s64)(se->vruntime - se->deadline) < 0) in update_deadline()
1029 se->slice = sysctl_sched_base_slice; in update_deadline()
1034 se->deadline = se->vruntime + calc_delta_fair(se->slice, se); in update_deadline()
1041 clear_buddies(cfs_rq, se); in update_deadline()
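
Note: once vruntime crosses the current deadline, update_deadline() grants a fresh request: the slice resets to sysctl_sched_base_slice and the new virtual deadline is vruntime + calc_delta_fair(slice, se). Since virtual time runs slower for heavy entities, their deadlines land closer together in virtual time and they get picked more often. Sketch (NICE_0_LOAD again assumed to be the scaled-down nice-0 weight):

    #include <stdint.h>

    #define NICE_0_LOAD 1024ULL

    /* deadline = vruntime + slice * NICE_0_LOAD / weight */
    static uint64_t update_deadline_sketch(uint64_t vruntime, uint64_t slice_ns,
                                           uint64_t weight)
    {
        return vruntime + slice_ns * NICE_0_LOAD / weight;
    }
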
1053 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
1055 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
1065 if (entity_is_task(se)) in init_entity_runnable_average()
1066 sa->load_avg = scale_load_down(se->load.weight); in init_entity_runnable_average()
1099 struct sched_entity *se = &p->se; in post_init_entity_util_avg() local
1100 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg()
1101 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
1116 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1122 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
1136 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average() argument
1193 update_curr(cfs_rq_of(&rq->curr->se)); in update_curr_fair()
1197 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start_fair() argument
1205 stats = __schedstats_from_se(se); in update_stats_wait_start_fair()
1207 if (entity_is_task(se)) in update_stats_wait_start_fair()
1208 p = task_of(se); in update_stats_wait_start_fair()
1214 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end_fair() argument
1222 stats = __schedstats_from_se(se); in update_stats_wait_end_fair()
1233 if (entity_is_task(se)) in update_stats_wait_end_fair()
1234 p = task_of(se); in update_stats_wait_end_fair()
1240 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper_fair() argument
1248 stats = __schedstats_from_se(se); in update_stats_enqueue_sleeper_fair()
1250 if (entity_is_task(se)) in update_stats_enqueue_sleeper_fair()
1251 tsk = task_of(se); in update_stats_enqueue_sleeper_fair()
1260 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue_fair() argument
1269 if (se != cfs_rq->curr) in update_stats_enqueue_fair()
1270 update_stats_wait_start_fair(cfs_rq, se); in update_stats_enqueue_fair()
1273 update_stats_enqueue_sleeper_fair(cfs_rq, se); in update_stats_enqueue_fair()
1277 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue_fair() argument
1287 if (se != cfs_rq->curr) in update_stats_dequeue_fair()
1288 update_stats_wait_end_fair(cfs_rq, se); in update_stats_dequeue_fair()
1290 if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) { in update_stats_dequeue_fair()
1291 struct task_struct *tsk = task_of(se); in update_stats_dequeue_fair()
1309 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1314 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
2697 now = p->se.exec_start; in numa_get_avg_runtime()
2698 runtime = p->se.sum_exec_runtime; in numa_get_avg_runtime()
2708 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
3212 u64 runtime = p->se.sum_exec_runtime; in task_numa_work()
3386 if (unlikely(p->se.sum_exec_runtime != runtime)) { in task_numa_work()
3387 u64 diff = p->se.sum_exec_runtime - runtime; in task_numa_work()
3459 now = curr->se.sum_exec_runtime; in task_tick_numa()
3526 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
3528 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
3530 if (entity_is_task(se)) { in account_entity_enqueue()
3533 account_numa_enqueue(rq, task_of(se)); in account_entity_enqueue()
3534 list_add(&se->group_node, &rq->cfs_tasks); in account_entity_enqueue()
3538 if (se_is_idle(se)) in account_entity_enqueue()
3543 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3545 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3547 if (entity_is_task(se)) { in account_entity_dequeue()
3548 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3549 list_del_init(&se->group_node); in account_entity_dequeue()
3553 if (se_is_idle(se)) in account_entity_dequeue()
3607 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3609 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3610 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3614 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3616 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3617 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3624 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3626 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3629 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3632 unsigned long old_weight = se->load.weight; in reweight_entity()
3634 if (se->on_rq) { in reweight_entity()
3636 if (cfs_rq->curr == se) in reweight_entity()
3639 avg_vruntime_sub(cfs_rq, se); in reweight_entity()
3640 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3642 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3644 update_load_set(&se->load, weight); in reweight_entity()
3646 if (!se->on_rq) { in reweight_entity()
3651 se->vlag = div_s64(se->vlag * old_weight, weight); in reweight_entity()
3653 s64 deadline = se->deadline - se->vruntime; in reweight_entity()
3659 se->deadline = se->vruntime + deadline; in reweight_entity()
3660 if (se != cfs_rq->curr) in reweight_entity()
3661 min_deadline_cb_propagate(&se->run_node, NULL); in reweight_entity()
3666 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3668 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
3672 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3673 if (se->on_rq) { in reweight_entity()
3674 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3675 if (cfs_rq->curr != se) in reweight_entity()
3676 avg_vruntime_add(cfs_rq, se); in reweight_entity()
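
Note: reweight_entity() removes the entity's stats, swaps the weight, then rescales its virtual-time state. Because virtual time advances at NICE_0_LOAD/weight, keeping weight * vlag (and likewise weight * relative-deadline) constant preserves the wall-clock meaning of the stored lag and request across the reweight, which is what line 3651 does. Sketch:

    #include <stdint.h>

    /* preserve wall-clock lag: new_vlag * new_w == old_vlag * old_w */
    static int64_t rescale_vlag_sketch(int64_t vlag, uint64_t old_w, uint64_t new_w)
    {
        return vlag * (int64_t)old_w / (int64_t)new_w;
    }
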
3682 struct sched_entity *se = &p->se; in reweight_task() local
3683 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task()
3684 struct load_weight *load = &se->load; in reweight_task()
3687 reweight_entity(cfs_rq, se, weight); in reweight_task()
3807 static void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3809 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3821 if (likely(se->load.weight == shares)) in update_cfs_group()
3827 reweight_entity(cfs_rq_of(se), se, shares); in update_cfs_group()
3831 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group() argument
3964 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair() argument
3980 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
3986 __update_load_avg_blocked_se(p_last_update_time, se); in set_task_rq_fair()
3987 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
4058 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
4060 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
4075 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
4076 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
4077 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
4078 se->avg.util_sum = new_sum; in update_tg_cfs_util()
4090 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
4092 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
4106 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
4107 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
4108 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
4109 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
4120 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
4144 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
4157 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
4166 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
4169 load_sum = se_weight(se) * runnable_sum; in update_tg_cfs_load()
4172 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
4176 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
4178 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
4179 se->avg.load_avg = load_avg; in update_tg_cfs_load()
4194 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
4198 if (entity_is_task(se)) in propagate_entity_load_avg()
4201 gcfs_rq = group_cfs_rq(se); in propagate_entity_load_avg()
4207 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
4211 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4212 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4213 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4216 trace_pelt_se_tp(se); in propagate_entity_load_avg()
4225 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update() argument
4227 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
4233 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
4255 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg() argument
4265 static inline void migrate_se_pelt_lag(struct sched_entity *se) in migrate_se_pelt_lag() argument
4272 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
4275 cfs_rq = cfs_rq_of(se); in migrate_se_pelt_lag()
4341 __update_load_avg_blocked_se(now, se); in migrate_se_pelt_lag()
4344 static void migrate_se_pelt_lag(struct sched_entity *se) {} in migrate_se_pelt_lag() argument
4434 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
4449 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4450 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4458 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4460 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4462 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4463 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4464 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4466 se->avg.load_sum = 1; in attach_entity_load_avg()
4468 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
4469 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4470 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4471 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4472 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4474 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4489 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
4491 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
4492 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4493 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4498 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4499 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4504 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4520 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
4529 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4530 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
4533 decayed |= propagate_entity_load_avg(se); in update_load_avg()
4535 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4544 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
4552 detach_entity_load_avg(cfs_rq, se); in update_load_avg()
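
Note: update_load_avg() is the PELT entry point: it ages the entity's and the cfs_rq's load sums, propagates group-hierarchy changes, and attaches or detaches averages around migration. PELT's aging is a geometric series over 1024-us periods in which a contribution halves every 32 periods; the kernel uses fixed-point lookup tables, but the continuous form is (floating-point sketch, illustration only; build with -lm):

    #include <math.h>
    #include <stdio.h>

    /* PELT decay: y^32 = 1/2, so a contribution p periods old weighs 0.5^(p/32) */
    static double pelt_decay_sketch(double contrib, unsigned periods_old)
    {
        return contrib * pow(0.5, periods_old / 32.0);
    }

    int main(void)
    {
        /* after ~32ms of idleness, a task's load contribution has halved */
        printf("%.3f\n", pelt_decay_sketch(1.0, 32));   /* 0.500 */
        return 0;
    }
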
4566 static void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg() argument
4568 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg()
4572 __update_load_avg_blocked_se(last_update_time, se); in sync_entity_load_avg()
4579 static void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg() argument
4581 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg()
4590 sync_entity_load_avg(se); in remove_entity_load_avg()
4594 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4595 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4596 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4614 return READ_ONCE(p->se.avg.util_avg); in task_util()
4619 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4713 ue = p->se.avg.util_est; in util_est_update()
4773 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
4775 trace_sched_util_est_se_tp(&p->se); in util_est_update()
4943 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
4948 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg() argument
4951 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
4953 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
4974 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in place_entity() argument
4979 se->slice = sysctl_sched_base_slice; in place_entity()
4980 vslice = calc_delta_fair(se->slice, se); in place_entity()
4994 lag = se->vlag; in place_entity()
5052 lag *= load + scale_load_down(se->load.weight); in place_entity()
5058 se->vruntime = vruntime - lag; in place_entity()
5071 se->deadline = se->vruntime + vslice; in place_entity()
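
Note: place_entity() re-inserts a waking entity around the zero-lag point, v = V - lag. But inserting the entity itself drags the weighted average toward it, so the stored lag is first scaled by (load + w)/load (line 5052); without the correction the entity would recover only part of its lag, as the comment block in fair.c derives. Sketch:

    #include <stdint.h>

    /* v = V - lag * (sum_w + w) / sum_w: compensate for the shift the
     * insertion itself causes in the weighted average */
    static uint64_t place_entity_sketch(uint64_t V, int64_t lag,
                                        uint64_t sum_w, uint64_t w)
    {
        if (sum_w)
            lag = lag * (int64_t)(sum_w + w) / (int64_t)sum_w;
        return V - lag;
    }
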
5080 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
5082 bool curr = cfs_rq->curr == se; in enqueue_entity()
5089 place_entity(cfs_rq, se, flags); in enqueue_entity()
5102 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
5103 se_update_runnable(se); in enqueue_entity()
5109 update_cfs_group(se); in enqueue_entity()
5116 place_entity(cfs_rq, se, flags); in enqueue_entity()
5118 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
5122 se->exec_start = 0; in enqueue_entity()
5125 update_stats_enqueue_fair(cfs_rq, se, flags); in enqueue_entity()
5127 __enqueue_entity(cfs_rq, se); in enqueue_entity()
5128 se->on_rq = 1; in enqueue_entity()
5147 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next() argument
5149 for_each_sched_entity(se) { in __clear_buddies_next()
5150 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next()
5151 if (cfs_rq->next != se) in __clear_buddies_next()
5158 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
5160 if (cfs_rq->next == se) in clear_buddies()
5161 __clear_buddies_next(se); in clear_buddies()
5167 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
5171 if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) in dequeue_entity()
5188 update_load_avg(cfs_rq, se, action); in dequeue_entity()
5189 se_update_runnable(se); in dequeue_entity()
5191 update_stats_dequeue_fair(cfs_rq, se, flags); in dequeue_entity()
5193 clear_buddies(cfs_rq, se); in dequeue_entity()
5195 update_entity_lag(cfs_rq, se); in dequeue_entity()
5196 if (se != cfs_rq->curr) in dequeue_entity()
5197 __dequeue_entity(cfs_rq, se); in dequeue_entity()
5198 se->on_rq = 0; in dequeue_entity()
5199 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
5204 update_cfs_group(se); in dequeue_entity()
5220 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
5222 clear_buddies(cfs_rq, se); in set_next_entity()
5225 if (se->on_rq) { in set_next_entity()
5231 update_stats_wait_end_fair(cfs_rq, se); in set_next_entity()
5232 __dequeue_entity(cfs_rq, se); in set_next_entity()
5233 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
5238 se->vlag = se->deadline; in set_next_entity()
5241 update_stats_curr_start(cfs_rq, se); in set_next_entity()
5242 cfs_rq->curr = se; in set_next_entity()
5250 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
5253 stats = __schedstats_from_se(se); in set_next_entity()
5256 se->sum_exec_runtime - se->prev_sum_exec_runtime)); in set_next_entity()
5259 se->prev_sum_exec_runtime = se->sum_exec_runtime; in set_next_entity()
5564 struct sched_entity *se; in throttle_cfs_rq() local
5588 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
5597 for_each_sched_entity(se) { in throttle_cfs_rq()
5598 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5600 if (!se->on_rq) in throttle_cfs_rq()
5603 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); in throttle_cfs_rq()
5605 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
5613 se = parent_entity(se); in throttle_cfs_rq()
5618 for_each_sched_entity(se) { in throttle_cfs_rq()
5619 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5621 if (!se->on_rq) in throttle_cfs_rq()
5624 update_load_avg(qcfs_rq, se, 0); in throttle_cfs_rq()
5625 se_update_runnable(se); in throttle_cfs_rq()
5627 if (cfs_rq_is_idle(group_cfs_rq(se))) in throttle_cfs_rq()
5653 struct sched_entity *se; in unthrottle_cfs_rq() local
5656 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
5680 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5681 if (list_add_leaf_cfs_rq(cfs_rq_of(se))) in unthrottle_cfs_rq()
5689 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5690 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5692 if (se->on_rq) in unthrottle_cfs_rq()
5694 enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); in unthrottle_cfs_rq()
5696 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
5707 for_each_sched_entity(se) { in unthrottle_cfs_rq()
5708 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5710 update_load_avg(qcfs_rq, se, UPDATE_TG); in unthrottle_cfs_rq()
5711 se_update_runnable(se); in unthrottle_cfs_rq()
5713 if (cfs_rq_is_idle(group_cfs_rq(se))) in unthrottle_cfs_rq()
6426 struct sched_entity *se = &p->se; in hrtick_start_fair() local
6431 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; in hrtick_start_fair()
6432 u64 slice = se->slice; in hrtick_start_fair()
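
Note: set_next_entity() snapshots prev_sum_exec_runtime at pick time (line 5259 above), so slice consumption is a single subtraction. hrtick_start_fair() uses it to program a timer for the remainder of the slice, and __entity_slice_used() (line 12255 below) reuses the same arithmetic for core scheduling. Sketch:

    #include <stdint.h>

    /* ran = sum_exec_runtime - prev_sum_exec_runtime (snapshot taken at pick) */
    static uint64_t slice_remaining_sketch(uint64_t sum_exec, uint64_t pick_snap,
                                           uint64_t slice_ns)
    {
        uint64_t ran = sum_exec - pick_snap;
        return ran < slice_ns ? slice_ns - ran : 0;
    }
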
6513 struct sched_entity *se = &p->se; in enqueue_task_fair() local
6533 for_each_sched_entity(se) { in enqueue_task_fair()
6534 if (se->on_rq) in enqueue_task_fair()
6536 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6537 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
6552 for_each_sched_entity(se) { in enqueue_task_fair()
6553 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6555 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
6556 se_update_runnable(se); in enqueue_task_fair()
6557 update_cfs_group(se); in enqueue_task_fair()
6596 static void set_next_buddy(struct sched_entity *se);
6606 struct sched_entity *se = &p->se; in dequeue_task_fair() local
6613 for_each_sched_entity(se) { in dequeue_task_fair()
6614 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
6615 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
6630 se = parent_entity(se); in dequeue_task_fair()
6635 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
6636 set_next_buddy(se); in dequeue_task_fair()
6642 for_each_sched_entity(se) { in dequeue_task_fair()
6643 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
6645 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
6646 se_update_runnable(se); in dequeue_task_fair()
6647 update_cfs_group(se); in dequeue_task_fair()
6717 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6740 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6747 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
6982 sync_entity_load_avg(&p->se); in find_idlest_cpu()
7333 sync_entity_load_avg(&p->se); in select_idle_sibling()
7557 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
7758 sync_entity_load_avg(&p->se); in find_energy_efficient_cpu()
8002 struct sched_entity *se = &p->se; in migrate_task_rq_fair() local
8005 remove_entity_load_avg(se); in migrate_task_rq_fair()
8017 migrate_se_pelt_lag(se); in migrate_task_rq_fair()
8021 se->avg.last_update_time = 0; in migrate_task_rq_fair()
8028 remove_entity_load_avg(&p->se); in task_dead_fair()
8041 static void set_next_buddy(struct sched_entity *se) in set_next_buddy() argument
8043 for_each_sched_entity(se) { in set_next_buddy()
8044 if (SCHED_WARN_ON(!se->on_rq)) in set_next_buddy()
8046 if (se_is_idle(se)) in set_next_buddy()
8048 cfs_rq_of(se)->next = se; in set_next_buddy()
8058 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup() local
8063 if (unlikely(se == pse)) in check_preempt_wakeup()
8105 find_matching_se(&se, &pse); in check_preempt_wakeup()
8108 cse_is_idle = se_is_idle(se); in check_preempt_wakeup()
8120 cfs_rq = cfs_rq_of(se); in check_preempt_wakeup()
8138 struct sched_entity *se; in pick_task_fair() local
8160 se = pick_next_entity(cfs_rq, curr); in pick_task_fair()
8161 cfs_rq = group_cfs_rq(se); in pick_task_fair()
8164 return task_of(se); in pick_task_fair()
8172 struct sched_entity *se; in pick_next_task_fair() local
8223 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
8224 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
8227 p = task_of(se); in pick_next_task_fair()
8235 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
8237 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
8238 int se_depth = se->depth; in pick_next_task_fair()
8246 set_next_entity(cfs_rq_of(se), se); in pick_next_task_fair()
8247 se = parent_entity(se); in pick_next_task_fair()
8252 set_next_entity(cfs_rq, se); in pick_next_task_fair()
8262 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
8263 set_next_entity(cfs_rq, se); in pick_next_task_fair()
8264 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
8267 p = task_of(se); in pick_next_task_fair()
8276 list_move(&p->se.group_node, &rq->cfs_tasks); in pick_next_task_fair()
8323 struct sched_entity *se = &prev->se; in put_prev_task_fair() local
8326 for_each_sched_entity(se) { in put_prev_task_fair()
8327 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
8328 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
8339 struct sched_entity *se = &curr->se; in yield_task_fair() local
8347 clear_buddies(cfs_rq, se); in yield_task_fair()
8361 se->deadline += calc_delta_fair(se->slice, se); in yield_task_fair()
8366 struct sched_entity *se = &p->se; in yield_to_task_fair() local
8369 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) in yield_to_task_fair()
8373 set_next_buddy(se); in yield_to_task_fair()
8609 (&p->se == cfs_rq_of(&p->se)->next)) in task_hot()
8625 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
8806 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
8875 p = list_last_entry(tasks, struct task_struct, se.group_node); in detach_tasks()
8930 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
8953 list_move(&p->se.group_node, tasks); in detach_tasks()
9006 p = list_first_entry(tasks, struct task_struct, se.group_node); in attach_tasks()
9007 list_del_init(&p->se.group_node); in attach_tasks()
9102 struct sched_entity *se; in __update_blocked_fair() local
9115 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
9116 if (se && !skip_blocked_update(se)) in __update_blocked_fair()
9117 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __update_blocked_fair()
9142 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load() local
9150 for_each_sched_entity(se) { in update_cfs_rq_h_load()
9151 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
9152 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
9157 if (!se) { in update_cfs_rq_h_load()
9162 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
9164 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
9166 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
9177 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9195 return p->se.avg.load_avg; in task_h_load()
9947 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
12253 __entity_slice_used(struct sched_entity *se, int min_nr_tasks) in __entity_slice_used() argument
12255 u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime; in __entity_slice_used()
12256 u64 slice = se->slice; in __entity_slice_used()
12282 __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) in task_tick_core()
12289 static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq, in se_fi_update() argument
12292 for_each_sched_entity(se) { in se_fi_update()
12293 struct cfs_rq *cfs_rq = cfs_rq_of(se); in se_fi_update()
12307 struct sched_entity *se = &p->se; in task_vruntime_update() local
12312 se_fi_update(se, rq->core->core_forceidle_seq, in_fi); in task_vruntime_update()
12319 const struct sched_entity *sea = &a->se; in cfs_prio_less()
12320 const struct sched_entity *seb = &b->se; in cfs_prio_less()
12389 struct sched_entity *se = &curr->se; in task_tick_fair() local
12391 for_each_sched_entity(se) { in task_tick_fair()
12392 cfs_rq = cfs_rq_of(se); in task_tick_fair()
12393 entity_tick(cfs_rq, se, queued); in task_tick_fair()
12412 struct sched_entity *se = &p->se, *curr; in task_fork_fair() local
12424 place_entity(cfs_rq, se, ENQUEUE_INITIAL); in task_fork_fair()
12458 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq() argument
12460 struct cfs_rq *cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
12469 se = se->parent; in propagate_entity_cfs_rq()
12471 for_each_sched_entity(se) { in propagate_entity_cfs_rq()
12472 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
12474 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
12484 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq() argument
12487 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq() argument
12489 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq()
12498 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
12503 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
12504 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
12506 propagate_entity_cfs_rq(se); in detach_entity_cfs_rq()
12509 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq() argument
12511 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq()
12514 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
12515 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
12517 propagate_entity_cfs_rq(se); in attach_entity_cfs_rq()
12522 struct sched_entity *se = &p->se; in detach_task_cfs_rq() local
12524 detach_entity_cfs_rq(se); in detach_task_cfs_rq()
12529 struct sched_entity *se = &p->se; in attach_task_cfs_rq() local
12531 attach_entity_cfs_rq(se); in attach_task_cfs_rq()
12563 struct sched_entity *se = &p->se; in set_next_task_fair() local
12571 list_move(&se->group_node, &rq->cfs_tasks); in set_next_task_fair()
12575 for_each_sched_entity(se) { in set_next_task_fair()
12576 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair()
12578 set_next_entity(cfs_rq, se); in set_next_task_fair()
12607 p->se.avg.last_update_time = 0; in task_change_group_fair()
12620 if (tg->se) in free_fair_sched_group()
12621 kfree(tg->se[i]); in free_fair_sched_group()
12625 kfree(tg->se); in free_fair_sched_group()
12630 struct sched_entity *se; in alloc_fair_sched_group() local
12637 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); in alloc_fair_sched_group()
12638 if (!tg->se) in alloc_fair_sched_group()
12651 se = kzalloc_node(sizeof(struct sched_entity_stats), in alloc_fair_sched_group()
12653 if (!se) in alloc_fair_sched_group()
12657 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
12658 init_entity_runnable_average(se); in alloc_fair_sched_group()
12671 struct sched_entity *se; in online_fair_sched_group() local
12678 se = tg->se[i]; in online_fair_sched_group()
12681 attach_entity_cfs_rq(se); in online_fair_sched_group()
12696 if (tg->se[cpu]) in unregister_fair_sched_group()
12697 remove_entity_load_avg(tg->se[cpu]); in unregister_fair_sched_group()
12715 struct sched_entity *se, int cpu, in init_tg_cfs_entry() argument
12725 tg->se[cpu] = se; in init_tg_cfs_entry()
12728 if (!se) in init_tg_cfs_entry()
12732 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
12733 se->depth = 0; in init_tg_cfs_entry()
12735 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
12736 se->depth = parent->depth + 1; in init_tg_cfs_entry()
12739 se->my_q = cfs_rq; in init_tg_cfs_entry()
12741 update_load_set(&se->load, NICE_0_LOAD); in init_tg_cfs_entry()
12742 se->parent = parent; in init_tg_cfs_entry()
12756 if (!tg->se[0]) in __sched_group_set_shares()
12767 struct sched_entity *se = tg->se[i]; in __sched_group_set_shares() local
12773 for_each_sched_entity(se) { in __sched_group_set_shares()
12774 update_load_avg(cfs_rq_of(se), se, UPDATE_TG); in __sched_group_set_shares()
12775 update_cfs_group(se); in __sched_group_set_shares()
12818 struct sched_entity *se = tg->se[i]; in sched_group_set_idle() local
12830 if (se->on_rq) { in sched_group_set_idle()
12831 parent_cfs_rq = cfs_rq_of(se); in sched_group_set_idle()
12843 for_each_sched_entity(se) { in sched_group_set_idle()
12844 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sched_group_set_idle()
12846 if (!se->on_rq) in sched_group_set_idle()
12888 struct sched_entity *se = &task->se; in get_rr_interval_fair() local
12896 rr_interval = NS_TO_JIFFIES(se->slice); in get_rr_interval_fair()