Lines matching refs: cfs_rq (kernel/sched/fair.c)
331 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
333 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq()
336 if (cfs_rq->on_list) in list_add_leaf_cfs_rq()
339 cfs_rq->on_list = 1; in list_add_leaf_cfs_rq()
350 if (cfs_rq->tg->parent && in list_add_leaf_cfs_rq()
351 cfs_rq->tg->parent->cfs_rq[cpu]->on_list) { in list_add_leaf_cfs_rq()
358 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
359 &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list)); in list_add_leaf_cfs_rq()
369 if (!cfs_rq->tg->parent) { in list_add_leaf_cfs_rq()
374 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list, in list_add_leaf_cfs_rq()
390 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); in list_add_leaf_cfs_rq()
395 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
399 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
401 if (cfs_rq->on_list) { in list_del_leaf_cfs_rq()
402 struct rq *rq = rq_of(cfs_rq); in list_del_leaf_cfs_rq()
411 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) in list_del_leaf_cfs_rq()
412 rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev; in list_del_leaf_cfs_rq()
414 list_del_rcu(&cfs_rq->leaf_cfs_rq_list); in list_del_leaf_cfs_rq()
415 cfs_rq->on_list = 0; in list_del_leaf_cfs_rq()
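Note: the two helpers above maintain rq->leaf_cfs_rq_list with the invariant that a child cfs_rq is always linked ahead of its parent, so the blocked-load walk in __update_blocked_fair() (line 9101 below) visits children before parents and can propagate bottom-up. rq->tmp_alone_branch marks the head of a partially built branch: it is started when a cfs_rq is added while its parent is not yet on the list (lines 369-390) and closed once a later addition connects to a parent that is already listed (lines 350-358). list_del_leaf_cfs_rq() has to repair tmp_alone_branch for the same reason (lines 411-412).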
425 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
426 list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list, \
430 static inline struct cfs_rq *
433 if (se->cfs_rq == pse->cfs_rq) in is_same_group()
434 return se->cfs_rq; in is_same_group()
481 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq) in cfs_rq_is_idle() argument
483 return cfs_rq->idle > 0; in cfs_rq_is_idle()
498 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_add_leaf_cfs_rq() argument
503 static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq) in list_del_leaf_cfs_rq() argument
511 #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) \ argument
512 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
529 static int cfs_rq_is_idle(struct cfs_rq *cfs_rq) in cfs_rq_is_idle() argument
542 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
572 static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_key() argument
574 return (s64)(se->vruntime - cfs_rq->min_vruntime); in entity_key()
639 avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_add() argument
642 s64 key = entity_key(cfs_rq, se); in avg_vruntime_add()
644 cfs_rq->avg_vruntime += key * weight; in avg_vruntime_add()
645 cfs_rq->avg_load += weight; in avg_vruntime_add()
649 avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se) in avg_vruntime_sub() argument
652 s64 key = entity_key(cfs_rq, se); in avg_vruntime_sub()
654 cfs_rq->avg_vruntime -= key * weight; in avg_vruntime_sub()
655 cfs_rq->avg_load -= weight; in avg_vruntime_sub()
659 void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta) in avg_vruntime_update() argument
664 cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta; in avg_vruntime_update()
671 u64 avg_vruntime(struct cfs_rq *cfs_rq) in avg_vruntime() argument
673 struct sched_entity *curr = cfs_rq->curr; in avg_vruntime()
674 s64 avg = cfs_rq->avg_vruntime; in avg_vruntime()
675 long load = cfs_rq->avg_load; in avg_vruntime()
680 avg += entity_key(cfs_rq, curr) * weight; in avg_vruntime()
691 return cfs_rq->min_vruntime + avg; in avg_vruntime()
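The avg_vruntime helpers above (lines 639-691) keep sum(w_i * (v_i - min_vruntime)) in cfs_rq->avg_vruntime and sum(w_i) in cfs_rq->avg_load, so the weighted mean V = min_vruntime + avg_vruntime / avg_load comes out in O(1); keys are taken relative to min_vruntime (line 574) to keep the s64 sums small. A minimal userspace sketch with hypothetical toy structs (not the kernel's types) checking the incremental sums against the direct average:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the avg_vruntime bookkeeping. */
    struct toy_rq { uint64_t min_vruntime; int64_t avg_vruntime; long avg_load; };
    struct toy_se { uint64_t vruntime; long weight; };

    static int64_t entity_key(struct toy_rq *rq, struct toy_se *se)
    {
            return (int64_t)(se->vruntime - rq->min_vruntime);
    }

    static void avg_vruntime_add(struct toy_rq *rq, struct toy_se *se)
    {
            rq->avg_vruntime += entity_key(rq, se) * se->weight;
            rq->avg_load += se->weight;
    }

    int main(void)
    {
            struct toy_rq rq = { .min_vruntime = 1000 };
            struct toy_se e[] = { { 1100, 2 }, { 1300, 1 }, { 1000, 1 } };

            for (int i = 0; i < 3; i++)
                    avg_vruntime_add(&rq, &e[i]);

            /* V = min_vruntime + sum(w_i * key_i) / sum(w_i) */
            int64_t V = (int64_t)rq.min_vruntime + rq.avg_vruntime / rq.avg_load;

            /* direct weighted mean: (2*1100 + 1300 + 1000) / 4 = 1125 */
            assert(V == 1125);
            printf("V = %lld\n", (long long)V);
            return 0;
    }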
710 static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_entity_lag() argument
715 lag = avg_vruntime(cfs_rq) - se->vruntime; in update_entity_lag()
738 int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se) in entity_eligible() argument
740 struct sched_entity *curr = cfs_rq->curr; in entity_eligible()
741 s64 avg = cfs_rq->avg_vruntime; in entity_eligible()
742 long load = cfs_rq->avg_load; in entity_eligible()
747 avg += entity_key(cfs_rq, curr) * weight; in entity_eligible()
751 return avg >= entity_key(cfs_rq, se) * load; in entity_eligible()
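entity_eligible() (line 738) tests v_se <= V without dividing: v_se - min_vruntime <= avg_vruntime / avg_load is cross-multiplied into entity_key(se) * load <= avg, which is exactly line 751. With the toy numbers above (avg = 500, load = 4): the entity at vruntime 1300 has key 300 and 300 * 4 = 1200 > 500, so it is not eligible; the one at 1100 has key 100 and 100 * 4 = 400 <= 500, so it is. curr is folded into avg and load first (lines 740-747) because the running entity is not in the tree.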
754 static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime) in __update_min_vruntime() argument
756 u64 min_vruntime = cfs_rq->min_vruntime; in __update_min_vruntime()
762 avg_vruntime_update(cfs_rq, delta); in __update_min_vruntime()
768 static void update_min_vruntime(struct cfs_rq *cfs_rq) in update_min_vruntime() argument
770 struct sched_entity *se = __pick_first_entity(cfs_rq); in update_min_vruntime()
771 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime()
773 u64 vruntime = cfs_rq->min_vruntime; in update_min_vruntime()
790 u64_u32_store(cfs_rq->min_vruntime, in update_min_vruntime()
791 __update_min_vruntime(cfs_rq, vruntime)); in update_min_vruntime()
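When update_min_vruntime() advances min_vruntime by delta (line 762), every stored key v_i - min_vruntime shrinks by delta, so sum(w_i * key_i) must drop by delta * sum(w_i); that is exactly the avg_vruntime -= avg_load * delta adjustment at line 664. __update_min_vruntime() only ever moves min_vruntime forward, which keeps the relative keys small enough that the s64 sums do not overflow.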
831 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity() argument
833 avg_vruntime_add(cfs_rq, se); in __enqueue_entity()
835 rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __enqueue_entity()
839 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity() argument
841 rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, in __dequeue_entity()
843 avg_vruntime_sub(cfs_rq, se); in __dequeue_entity()
846 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) in __pick_first_entity() argument
848 struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline); in __pick_first_entity()
875 static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq) in __pick_eevdf() argument
877 struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node; in __pick_eevdf()
878 struct sched_entity *curr = cfs_rq->curr; in __pick_eevdf()
882 if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr))) in __pick_eevdf()
899 if (!entity_eligible(cfs_rq, se)) { in __pick_eevdf()
969 static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq) in pick_eevdf() argument
971 struct sched_entity *se = __pick_eevdf(cfs_rq); in pick_eevdf()
974 struct sched_entity *left = __pick_first_entity(cfs_rq); in pick_eevdf()
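__pick_eevdf()/pick_eevdf() (lines 875-984) implement the EEVDF choice: among eligible entities, pick the earliest virtual deadline, with a defensive fallback via __pick_first_entity() (line 974) if the tree walk comes back empty. The kernel answers this with an augmented-rbtree descent; a hypothetical O(n) userspace model (toy struct, not the kernel's walk) gives the same answer and makes the policy explicit:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_se { int64_t vruntime; uint64_t deadline; };

    /* O(n) model of the EEVDF pick: eligible (vruntime <= V) entity
     * with the earliest virtual deadline; NULL if none is eligible. */
    static struct toy_se *pick_eevdf_model(struct toy_se *v, int n, int64_t V)
    {
            struct toy_se *best = NULL;

            for (int i = 0; i < n; i++) {
                    if (v[i].vruntime > V)          /* negative lag: not eligible */
                            continue;
                    if (!best || v[i].deadline < best->deadline)
                            best = &v[i];
            }
            return best;
    }

    int main(void)
    {
            struct toy_se v[] = { { 90, 200 }, { 100, 150 }, { 140, 120 } };
            struct toy_se *p = pick_eevdf_model(v, 3, 110);     /* V = 110 */

            /* 140 is ineligible; of {90, 100}, 100 has the earlier deadline */
            printf("picked deadline %llu\n", (unsigned long long)p->deadline);
            return 0;
    }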
985 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) in __pick_last_entity() argument
987 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root); in __pick_last_entity()
1013 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se);
1019 static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_deadline() argument
1039 if (cfs_rq->nr_running > 1) { in update_deadline()
1040 resched_curr(rq_of(cfs_rq)); in update_deadline()
1041 clear_buddies(cfs_rq, se); in update_deadline()
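update_deadline() (line 1019) refreshes the virtual deadline once the current request is used up; in the EEVDF scheme the deadline sits one weight-scaled slice past vruntime, roughly d = v + slice * NICE_0_LOAD / w (the assignment itself is not in this listing), so a 3 ms slice at twice nice-0 weight (w = 2048) spans only 1.5 ms of virtual time. When more than one entity is runnable, crossing the deadline forces a resched (lines 1039-1041) so the next-earliest deadline can run.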
1100 struct cfs_rq *cfs_rq = cfs_rq_of(se); in post_init_entity_util_avg() local
1102 long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq))); in post_init_entity_util_avg()
1103 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
1116 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1121 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
1122 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
1123 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
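post_init_entity_util_avg() (lines 1100-1123) seeds a new task's util_avg from its cfs_rq: util = cfs_util_avg * weight / (load_avg + 1) (lines 1122-1123), capped (in lines not shown here) at cap = (cpu_scale - cfs_util_avg) / 2 from line 1103, i.e. half the spare capacity. For example, with cpu_scale = 1024 and a runqueue already at util_avg = 512, a fork can start with at most (1024 - 512) / 2 = 256.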
1142 static void update_tg_load_avg(struct cfs_rq *cfs_rq) in update_tg_load_avg() argument
1150 static void update_curr(struct cfs_rq *cfs_rq) in update_curr() argument
1152 struct sched_entity *curr = cfs_rq->curr; in update_curr()
1153 u64 now = rq_clock_task(rq_of(cfs_rq)); in update_curr()
1174 schedstat_add(cfs_rq->exec_clock, delta_exec); in update_curr()
1177 update_deadline(cfs_rq, curr); in update_curr()
1178 update_min_vruntime(cfs_rq); in update_curr()
1188 account_cfs_rq_runtime(cfs_rq, delta_exec); in update_curr()
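update_curr() (line 1150) charges the wall-time delta since exec_start to curr; vruntime then advances by the weight-scaled amount, roughly dv = dt * NICE_0_LOAD / w (the scaling itself is not in this listing), so 1 ms of CPU at w = 2048 costs only 0.5 ms of vruntime. The deadline and min_vruntime bookkeeping at lines 1177-1178 runs on every update, and line 1188 feeds the same delta into bandwidth accounting.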
1197 update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start_fair() argument
1210 __update_stats_wait_start(rq_of(cfs_rq), p, stats); in update_stats_wait_start_fair()
1214 update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end_fair() argument
1236 __update_stats_wait_end(rq_of(cfs_rq), p, stats); in update_stats_wait_end_fair()
1240 update_stats_enqueue_sleeper_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper_fair() argument
1253 __update_stats_enqueue_sleeper(rq_of(cfs_rq), tsk, stats); in update_stats_enqueue_sleeper_fair()
1260 update_stats_enqueue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue_fair() argument
1269 if (se != cfs_rq->curr) in update_stats_enqueue_fair()
1270 update_stats_wait_start_fair(cfs_rq, se); in update_stats_enqueue_fair()
1273 update_stats_enqueue_sleeper_fair(cfs_rq, se); in update_stats_enqueue_fair()
1277 update_stats_dequeue_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue_fair() argument
1287 if (se != cfs_rq->curr) in update_stats_dequeue_fair()
1288 update_stats_wait_end_fair(cfs_rq, se); in update_stats_dequeue_fair()
1298 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue_fair()
1301 rq_clock(rq_of(cfs_rq))); in update_stats_dequeue_fair()
1309 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start() argument
1314 se->exec_start = rq_clock_task(rq_of(cfs_rq)); in update_stats_curr_start()
3526 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue() argument
3528 update_load_add(&cfs_rq->load, se->load.weight); in account_entity_enqueue()
3531 struct rq *rq = rq_of(cfs_rq); in account_entity_enqueue()
3537 cfs_rq->nr_running++; in account_entity_enqueue()
3539 cfs_rq->idle_nr_running++; in account_entity_enqueue()
3543 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue() argument
3545 update_load_sub(&cfs_rq->load, se->load.weight); in account_entity_dequeue()
3548 account_numa_dequeue(rq_of(cfs_rq), task_of(se)); in account_entity_dequeue()
3552 cfs_rq->nr_running--; in account_entity_dequeue()
3554 cfs_rq->idle_nr_running--; in account_entity_dequeue()
3607 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg() argument
3609 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3610 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3614 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg() argument
3616 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3617 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3619 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in dequeue_load_avg()
3620 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in dequeue_load_avg()
3624 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg() argument
3626 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg() argument
3629 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity() argument
3636 if (cfs_rq->curr == se) in reweight_entity()
3637 update_curr(cfs_rq); in reweight_entity()
3639 avg_vruntime_sub(cfs_rq, se); in reweight_entity()
3640 update_load_sub(&cfs_rq->load, se->load.weight); in reweight_entity()
3642 dequeue_load_avg(cfs_rq, se); in reweight_entity()
3660 if (se != cfs_rq->curr) in reweight_entity()
3672 enqueue_load_avg(cfs_rq, se); in reweight_entity()
3674 update_load_add(&cfs_rq->load, se->load.weight); in reweight_entity()
3675 if (cfs_rq->curr != se) in reweight_entity()
3676 avg_vruntime_add(cfs_rq, se); in reweight_entity()
3683 struct cfs_rq *cfs_rq = cfs_rq_of(se); in reweight_task() local
3687 reweight_entity(cfs_rq, se, weight); in reweight_task()
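reweight_entity() (line 3629) changes a queued entity's weight by removing it from every aggregate it belongs to (lines 3639-3642), updating the weight, and re-adding it (lines 3671-3676); update_curr() runs first when the entity is current (lines 3636-3637) so time already consumed is charged at the old weight before the new one takes effect.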
3691 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
3768 static long calc_group_shares(struct cfs_rq *cfs_rq) in calc_group_shares() argument
3771 struct task_group *tg = cfs_rq->tg; in calc_group_shares()
3775 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
3780 tg_weight -= cfs_rq->tg_load_avg_contrib; in calc_group_shares()
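calc_group_shares() (line 3768) approximates the group entity's weight on this CPU as shares * load / tg_weight, where load = max(cfs_rq->load.weight, cfs_rq->avg.load_avg) (line 3775) and tg_weight replaces this CPU's stale contribution with the fresh load (line 3780): tg_weight = tg->load_avg - tg_load_avg_contrib + load. Worked example: a group with shares = 1024 whose load splits 3:1 across two CPUs gets per-CPU weights of 1024 * 3/4 = 768 and 1024 * 1/4 = 256.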
3809 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in update_cfs_group()
3836 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) in cfs_rq_util_change() argument
3838 struct rq *rq = rq_of(cfs_rq); in cfs_rq_util_change()
3840 if (&rq->cfs == cfs_rq) { in cfs_rq_util_change()
3883 static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) in cfs_rq_last_update_time() argument
3885 return u64_u32_load_copy(cfs_rq->avg.last_update_time, in cfs_rq_last_update_time()
3886 cfs_rq->last_update_time_copy); in cfs_rq_last_update_time()
3897 static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) in child_cfs_rq_on_list() argument
3899 struct cfs_rq *prev_cfs_rq; in child_cfs_rq_on_list()
3902 if (cfs_rq->on_list) { in child_cfs_rq_on_list()
3903 prev = cfs_rq->leaf_cfs_rq_list.prev; in child_cfs_rq_on_list()
3905 struct rq *rq = rq_of(cfs_rq); in child_cfs_rq_on_list()
3910 prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); in child_cfs_rq_on_list()
3912 return (prev_cfs_rq->tg->parent == cfs_rq->tg); in child_cfs_rq_on_list()
3915 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) in cfs_rq_is_decayed() argument
3917 if (cfs_rq->load.weight) in cfs_rq_is_decayed()
3920 if (!load_avg_is_decayed(&cfs_rq->avg)) in cfs_rq_is_decayed()
3923 if (child_cfs_rq_on_list(cfs_rq)) in cfs_rq_is_decayed()
3943 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) in update_tg_load_avg() argument
3945 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
3950 if (cfs_rq->tg == &root_task_group) in update_tg_load_avg()
3953 if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { in update_tg_load_avg()
3954 atomic_long_add(delta, &cfs_rq->tg->load_avg); in update_tg_load_avg()
3955 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
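update_tg_load_avg() (line 3943) folds this cfs_rq's load_avg delta into the shared atomic tg->load_avg, but only when the delta exceeds 1/64 of the last contribution (line 3953): with tg_load_avg_contrib = 6400, changes of 100 or less are deferred. That bounds the per-CPU error at about 1.6% while keeping the cross-CPU cache line mostly quiet; the root group is skipped entirely (line 3950) since nothing consumes its aggregate.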
3965 struct cfs_rq *prev, struct cfs_rq *next) in set_task_rq_fair()
4058 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util() argument
4071 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_util()
4081 add_positive(&cfs_rq->avg.util_avg, delta_avg); in update_tg_cfs_util()
4082 add_positive(&cfs_rq->avg.util_sum, delta_sum); in update_tg_cfs_util()
4085 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in update_tg_cfs_util()
4086 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in update_tg_cfs_util()
4090 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable() argument
4103 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_runnable()
4112 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); in update_tg_cfs_runnable()
4113 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); in update_tg_cfs_runnable()
4115 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in update_tg_cfs_runnable()
4116 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in update_tg_cfs_runnable()
4120 update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_load() argument
4137 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_load()
4180 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_load()
4181 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_load()
4183 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in update_tg_cfs_load()
4184 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in update_tg_cfs_load()
4187 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) in add_tg_cfs_propagate() argument
4189 cfs_rq->propagate = 1; in add_tg_cfs_propagate()
4190 cfs_rq->prop_runnable_sum += runnable_sum; in add_tg_cfs_propagate()
4196 struct cfs_rq *cfs_rq, *gcfs_rq; in propagate_entity_load_avg() local
4207 cfs_rq = cfs_rq_of(se); in propagate_entity_load_avg()
4209 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); in propagate_entity_load_avg()
4211 update_tg_cfs_util(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4212 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4213 update_tg_cfs_load(cfs_rq, se, gcfs_rq); in propagate_entity_load_avg()
4215 trace_pelt_cfs_tp(cfs_rq); in propagate_entity_load_avg()
4227 struct cfs_rq *gcfs_rq = group_cfs_rq(se); in skip_blocked_update()
4253 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {} in update_tg_load_avg() argument
4260 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} in add_tg_cfs_propagate() argument
4268 struct cfs_rq *cfs_rq; in migrate_se_pelt_lag() local
4275 cfs_rq = cfs_rq_of(se); in migrate_se_pelt_lag()
4276 rq = rq_of(cfs_rq); in migrate_se_pelt_lag()
4316 throttled = u64_u32_load(cfs_rq->throttled_pelt_idle); in migrate_se_pelt_lag()
4329 lut = cfs_rq_last_update_time(cfs_rq); in migrate_se_pelt_lag()
4363 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) in update_cfs_rq_load_avg() argument
4366 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
4369 if (cfs_rq->removed.nr) { in update_cfs_rq_load_avg()
4371 u32 divider = get_pelt_divider(&cfs_rq->avg); in update_cfs_rq_load_avg()
4373 raw_spin_lock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
4374 swap(cfs_rq->removed.util_avg, removed_util); in update_cfs_rq_load_avg()
4375 swap(cfs_rq->removed.load_avg, removed_load); in update_cfs_rq_load_avg()
4376 swap(cfs_rq->removed.runnable_avg, removed_runnable); in update_cfs_rq_load_avg()
4377 cfs_rq->removed.nr = 0; in update_cfs_rq_load_avg()
4378 raw_spin_unlock(&cfs_rq->removed.lock); in update_cfs_rq_load_avg()
4413 add_tg_cfs_propagate(cfs_rq, in update_cfs_rq_load_avg()
4419 decayed |= __update_load_avg_cfs_rq(now, cfs_rq); in update_cfs_rq_load_avg()
4421 cfs_rq->last_update_time_copy, in update_cfs_rq_load_avg()
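update_cfs_rq_load_avg() (line 4363) first folds in load removed by remote CPUs: remove_entity_load_avg() (line 4581 below) only accumulates into cfs_rq->removed under its raw spinlock, and lines 4373-4378 swap those accumulators out and zero them in one short critical section, so the sub_positive() and propagate work happens outside the lock.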
4434 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in attach_entity_load_avg() argument
4440 u32 divider = get_pelt_divider(&cfs_rq->avg); in attach_entity_load_avg()
4449 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4450 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4468 enqueue_load_avg(cfs_rq, se); in attach_entity_load_avg()
4469 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4470 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4471 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4472 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4474 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
4476 cfs_rq_util_change(cfs_rq, 0); in attach_entity_load_avg()
4478 trace_pelt_cfs_tp(cfs_rq); in attach_entity_load_avg()
4489 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg() argument
4491 dequeue_load_avg(cfs_rq, se); in detach_entity_load_avg()
4492 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4493 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4495 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in detach_entity_load_avg()
4496 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4498 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4499 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4501 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in detach_entity_load_avg()
4502 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4504 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4506 cfs_rq_util_change(cfs_rq, 0); in detach_entity_load_avg()
4508 trace_pelt_cfs_tp(cfs_rq); in detach_entity_load_avg()
4520 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg() argument
4522 u64 now = cfs_rq_clock_pelt(cfs_rq); in update_load_avg()
4530 __update_load_avg_se(now, cfs_rq, se); in update_load_avg()
4532 decayed = update_cfs_rq_load_avg(now, cfs_rq); in update_load_avg()
4544 attach_entity_load_avg(cfs_rq, se); in update_load_avg()
4545 update_tg_load_avg(cfs_rq); in update_load_avg()
4552 detach_entity_load_avg(cfs_rq, se); in update_load_avg()
4553 update_tg_load_avg(cfs_rq); in update_load_avg()
4555 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
4558 update_tg_load_avg(cfs_rq); in update_load_avg()
4568 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sync_entity_load_avg() local
4571 last_update_time = cfs_rq_last_update_time(cfs_rq); in sync_entity_load_avg()
4581 struct cfs_rq *cfs_rq = cfs_rq_of(se); in remove_entity_load_avg() local
4592 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
4593 ++cfs_rq->removed.nr; in remove_entity_load_avg()
4594 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4595 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4596 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
4597 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); in remove_entity_load_avg()
4600 static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) in cfs_rq_runnable_avg() argument
4602 return cfs_rq->avg.runnable_avg; in cfs_rq_runnable_avg()
4605 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) in cfs_rq_load_avg() argument
4607 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
4645 static inline void util_est_enqueue(struct cfs_rq *cfs_rq, in util_est_enqueue() argument
4654 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_enqueue()
4656 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_enqueue()
4658 trace_sched_util_est_cfs_tp(cfs_rq); in util_est_enqueue()
4661 static inline void util_est_dequeue(struct cfs_rq *cfs_rq, in util_est_dequeue() argument
4670 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_dequeue()
4672 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_dequeue()
4674 trace_sched_util_est_cfs_tp(cfs_rq); in util_est_dequeue()
4692 static inline void util_est_update(struct cfs_rq *cfs_rq, in util_est_update() argument
4748 if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) in util_est_update()
4933 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) in cfs_rq_is_decayed() argument
4943 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg() argument
4945 cfs_rq_util_change(cfs_rq, 0); in update_load_avg()
4951 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in attach_entity_load_avg() argument
4953 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg() argument
4961 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_enqueue() argument
4964 util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} in util_est_dequeue() argument
4967 util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, in util_est_update() argument
4974 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in place_entity() argument
4976 u64 vslice, vruntime = avg_vruntime(cfs_rq); in place_entity()
4990 if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) { in place_entity()
4991 struct sched_entity *curr = cfs_rq->curr; in place_entity()
5048 load = cfs_rq->avg_load; in place_entity()
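place_entity() (line 4974) re-inserts an entity so that the lag saved by update_entity_lag() at dequeue (line 5195 below) survives the insertion; the adjustment runs when PLACE_LAG is enabled and the queue is non-empty (line 4990). Adding weight w at vruntime v drags the average down: V' = (W*V + w*v) / (W + w), so placing at v = V - lag would leave a post-insert lag of only lag * W / (W + w). Solving for a placement that lands on the saved value gives lag' = lag * (W + w) / W, which is why line 5048 reads the current avg_load (W) before the adjustment.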
5074 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
5075 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
5080 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity() argument
5082 bool curr = cfs_rq->curr == se; in enqueue_entity()
5089 place_entity(cfs_rq, se, flags); in enqueue_entity()
5091 update_curr(cfs_rq); in enqueue_entity()
5102 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); in enqueue_entity()
5116 place_entity(cfs_rq, se, flags); in enqueue_entity()
5118 account_entity_enqueue(cfs_rq, se); in enqueue_entity()
5125 update_stats_enqueue_fair(cfs_rq, se, flags); in enqueue_entity()
5127 __enqueue_entity(cfs_rq, se); in enqueue_entity()
5130 if (cfs_rq->nr_running == 1) { in enqueue_entity()
5131 check_enqueue_throttle(cfs_rq); in enqueue_entity()
5132 if (!throttled_hierarchy(cfs_rq)) { in enqueue_entity()
5133 list_add_leaf_cfs_rq(cfs_rq); in enqueue_entity()
5136 struct rq *rq = rq_of(cfs_rq); in enqueue_entity()
5138 if (cfs_rq_throttled(cfs_rq) && !cfs_rq->throttled_clock) in enqueue_entity()
5139 cfs_rq->throttled_clock = rq_clock(rq); in enqueue_entity()
5140 if (!cfs_rq->throttled_clock_self) in enqueue_entity()
5141 cfs_rq->throttled_clock_self = rq_clock(rq); in enqueue_entity()
5150 struct cfs_rq *cfs_rq = cfs_rq_of(se); in __clear_buddies_next() local
5151 if (cfs_rq->next != se) in __clear_buddies_next()
5154 cfs_rq->next = NULL; in __clear_buddies_next()
5158 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies() argument
5160 if (cfs_rq->next == se) in clear_buddies()
5164 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5167 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity() argument
5177 update_curr(cfs_rq); in dequeue_entity()
5188 update_load_avg(cfs_rq, se, action); in dequeue_entity()
5191 update_stats_dequeue_fair(cfs_rq, se, flags); in dequeue_entity()
5193 clear_buddies(cfs_rq, se); in dequeue_entity()
5195 update_entity_lag(cfs_rq, se); in dequeue_entity()
5196 if (se != cfs_rq->curr) in dequeue_entity()
5197 __dequeue_entity(cfs_rq, se); in dequeue_entity()
5199 account_entity_dequeue(cfs_rq, se); in dequeue_entity()
5202 return_cfs_rq_runtime(cfs_rq); in dequeue_entity()
5213 update_min_vruntime(cfs_rq); in dequeue_entity()
5215 if (cfs_rq->nr_running == 0) in dequeue_entity()
5216 update_idle_cfs_rq_clock_pelt(cfs_rq); in dequeue_entity()
5220 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity() argument
5222 clear_buddies(cfs_rq, se); in set_next_entity()
5231 update_stats_wait_end_fair(cfs_rq, se); in set_next_entity()
5232 __dequeue_entity(cfs_rq, se); in set_next_entity()
5233 update_load_avg(cfs_rq, se, UPDATE_TG); in set_next_entity()
5241 update_stats_curr_start(cfs_rq, se); in set_next_entity()
5242 cfs_rq->curr = se; in set_next_entity()
5250 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
5270 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity() argument
5276 cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) in pick_next_entity()
5277 return cfs_rq->next; in pick_next_entity()
5279 return pick_eevdf(cfs_rq); in pick_next_entity()
5282 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
5284 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity() argument
5291 update_curr(cfs_rq); in put_prev_entity()
5294 check_cfs_rq_runtime(cfs_rq); in put_prev_entity()
5297 update_stats_wait_start_fair(cfs_rq, prev); in put_prev_entity()
5299 __enqueue_entity(cfs_rq, prev); in put_prev_entity()
5301 update_load_avg(cfs_rq, prev, 0); in put_prev_entity()
5303 cfs_rq->curr = NULL; in put_prev_entity()
5307 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
5312 update_curr(cfs_rq); in entity_tick()
5317 update_load_avg(cfs_rq, curr, UPDATE_TG); in entity_tick()
5326 resched_curr(rq_of(cfs_rq)); in entity_tick()
5333 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) in entity_tick()
5418 struct cfs_rq *cfs_rq, u64 target_runtime) in __assign_cfs_rq_runtime() argument
5425 min_amount = target_runtime - cfs_rq->runtime_remaining; in __assign_cfs_rq_runtime()
5439 cfs_rq->runtime_remaining += amount; in __assign_cfs_rq_runtime()
5441 return cfs_rq->runtime_remaining > 0; in __assign_cfs_rq_runtime()
5445 static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) in assign_cfs_rq_runtime() argument
5447 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in assign_cfs_rq_runtime()
5451 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); in assign_cfs_rq_runtime()
5457 static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in __account_cfs_rq_runtime() argument
5460 cfs_rq->runtime_remaining -= delta_exec; in __account_cfs_rq_runtime()
5462 if (likely(cfs_rq->runtime_remaining > 0)) in __account_cfs_rq_runtime()
5465 if (cfs_rq->throttled) in __account_cfs_rq_runtime()
5471 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) in __account_cfs_rq_runtime()
5472 resched_curr(rq_of(cfs_rq)); in __account_cfs_rq_runtime()
5476 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) in account_cfs_rq_runtime() argument
5478 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) in account_cfs_rq_runtime()
5481 __account_cfs_rq_runtime(cfs_rq, delta_exec); in account_cfs_rq_runtime()
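The bandwidth path above meters runtime in slices: __assign_cfs_rq_runtime() (line 5418) tops runtime_remaining up from the group's global pool by target_runtime (sched_cfs_bandwidth_slice(), 5 ms by default) minus whatever is left (line 5425), and __account_cfs_rq_runtime() (line 5457) debits delta_exec until the local slice runs dry, at which point a failed refill forces a resched (lines 5471-5472) so the throttle can take effect at the next pick.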
5484 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
5486 return cfs_bandwidth_used() && cfs_rq->throttled; in cfs_rq_throttled()
5490 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
5492 return cfs_bandwidth_used() && cfs_rq->throttle_count; in throttled_hierarchy()
5503 struct cfs_rq *src_cfs_rq, *dest_cfs_rq; in throttled_lb_pair()
5505 src_cfs_rq = tg->cfs_rq[src_cpu]; in throttled_lb_pair()
5506 dest_cfs_rq = tg->cfs_rq[dest_cpu]; in throttled_lb_pair()
5515 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_unthrottle_up() local
5517 cfs_rq->throttle_count--; in tg_unthrottle_up()
5518 if (!cfs_rq->throttle_count) { in tg_unthrottle_up()
5519 cfs_rq->throttled_clock_pelt_time += rq_clock_pelt(rq) - in tg_unthrottle_up()
5520 cfs_rq->throttled_clock_pelt; in tg_unthrottle_up()
5523 if (!cfs_rq_is_decayed(cfs_rq)) in tg_unthrottle_up()
5524 list_add_leaf_cfs_rq(cfs_rq); in tg_unthrottle_up()
5526 if (cfs_rq->throttled_clock_self) { in tg_unthrottle_up()
5527 u64 delta = rq_clock(rq) - cfs_rq->throttled_clock_self; in tg_unthrottle_up()
5529 cfs_rq->throttled_clock_self = 0; in tg_unthrottle_up()
5534 cfs_rq->throttled_clock_self_time += delta; in tg_unthrottle_up()
5544 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in tg_throttle_down() local
5547 if (!cfs_rq->throttle_count) { in tg_throttle_down()
5548 cfs_rq->throttled_clock_pelt = rq_clock_pelt(rq); in tg_throttle_down()
5549 list_del_leaf_cfs_rq(cfs_rq); in tg_throttle_down()
5551 SCHED_WARN_ON(cfs_rq->throttled_clock_self); in tg_throttle_down()
5552 if (cfs_rq->nr_running) in tg_throttle_down()
5553 cfs_rq->throttled_clock_self = rq_clock(rq); in tg_throttle_down()
5555 cfs_rq->throttle_count++; in tg_throttle_down()
5560 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) in throttle_cfs_rq() argument
5562 struct rq *rq = rq_of(cfs_rq); in throttle_cfs_rq()
5563 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in throttle_cfs_rq()
5569 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { in throttle_cfs_rq()
5580 list_add_tail_rcu(&cfs_rq->throttled_list, in throttle_cfs_rq()
5588 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; in throttle_cfs_rq()
5592 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); in throttle_cfs_rq()
5595 task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
5596 idle_task_delta = cfs_rq->idle_h_nr_running; in throttle_cfs_rq()
5598 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5606 idle_task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
5619 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in throttle_cfs_rq()
5628 idle_task_delta = cfs_rq->h_nr_running; in throttle_cfs_rq()
5642 cfs_rq->throttled = 1; in throttle_cfs_rq()
5643 SCHED_WARN_ON(cfs_rq->throttled_clock); in throttle_cfs_rq()
5644 if (cfs_rq->nr_running) in throttle_cfs_rq()
5645 cfs_rq->throttled_clock = rq_clock(rq); in throttle_cfs_rq()
5649 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq() argument
5651 struct rq *rq = rq_of(cfs_rq); in unthrottle_cfs_rq()
5652 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in unthrottle_cfs_rq()
5656 se = cfs_rq->tg->se[cpu_of(rq)]; in unthrottle_cfs_rq()
5658 cfs_rq->throttled = 0; in unthrottle_cfs_rq()
5663 if (cfs_rq->throttled_clock) { in unthrottle_cfs_rq()
5664 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; in unthrottle_cfs_rq()
5665 cfs_rq->throttled_clock = 0; in unthrottle_cfs_rq()
5667 list_del_rcu(&cfs_rq->throttled_list); in unthrottle_cfs_rq()
5671 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); in unthrottle_cfs_rq()
5673 if (!cfs_rq->load.weight) { in unthrottle_cfs_rq()
5674 if (!cfs_rq->on_list) in unthrottle_cfs_rq()
5687 task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
5688 idle_task_delta = cfs_rq->idle_h_nr_running; in unthrottle_cfs_rq()
5690 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5697 idle_task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
5708 struct cfs_rq *qcfs_rq = cfs_rq_of(se); in unthrottle_cfs_rq()
5714 idle_task_delta = cfs_rq->h_nr_running; in unthrottle_cfs_rq()
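throttle_cfs_rq() and unthrottle_cfs_rq() each walk the se chain upward twice: once dequeuing (or enqueuing) entities that become empty (or newly populated), and once only fixing counters, subtracting or adding the throttled subtree's h_nr_running and idle_h_nr_running at every level (lines 5595-5628 and 5687-5714). The walk_tg_tree_from() calls (lines 5592 and 5671) bracket the operation so every descendant's throttle_count and PELT clocks are adjusted before the load changes propagate.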
5738 struct cfs_rq *cursor, *tmp; in __cfsb_csd_unthrottle()
5775 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) in __unthrottle_cfs_rq_async() argument
5777 struct rq *rq = rq_of(cfs_rq); in __unthrottle_cfs_rq_async()
5781 unthrottle_cfs_rq(cfs_rq); in __unthrottle_cfs_rq_async()
5786 if (SCHED_WARN_ON(!list_empty(&cfs_rq->throttled_csd_list))) in __unthrottle_cfs_rq_async()
5790 list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list); in __unthrottle_cfs_rq_async()
5795 static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) in __unthrottle_cfs_rq_async() argument
5797 unthrottle_cfs_rq(cfs_rq); in __unthrottle_cfs_rq_async()
5801 static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq) in unthrottle_cfs_rq_async() argument
5803 lockdep_assert_rq_held(rq_of(cfs_rq)); in unthrottle_cfs_rq_async()
5805 if (SCHED_WARN_ON(!cfs_rq_throttled(cfs_rq) || in unthrottle_cfs_rq_async()
5806 cfs_rq->runtime_remaining <= 0)) in unthrottle_cfs_rq_async()
5809 __unthrottle_cfs_rq_async(cfs_rq); in unthrottle_cfs_rq_async()
5814 struct cfs_rq *local_unthrottle = NULL; in distribute_cfs_runtime()
5818 struct cfs_rq *cfs_rq; in distribute_cfs_runtime() local
5823 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, in distribute_cfs_runtime()
5825 rq = rq_of(cfs_rq); in distribute_cfs_runtime()
5833 if (!cfs_rq_throttled(cfs_rq)) in distribute_cfs_runtime()
5838 if (!list_empty(&cfs_rq->throttled_csd_list)) in distribute_cfs_runtime()
5843 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); in distribute_cfs_runtime()
5846 runtime = -cfs_rq->runtime_remaining + 1; in distribute_cfs_runtime()
5853 cfs_rq->runtime_remaining += runtime; in distribute_cfs_runtime()
5856 if (cfs_rq->runtime_remaining > 0) { in distribute_cfs_runtime()
5859 unthrottle_cfs_rq_async(cfs_rq); in distribute_cfs_runtime()
5861 local_unthrottle = cfs_rq; in distribute_cfs_runtime()
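distribute_cfs_runtime() hands each throttled cfs_rq just enough to become runnable again: runtime = -runtime_remaining + 1 (line 5846) leaves it exactly 1 ns in the black, enough to unthrottle (lines 5856-5861) but forcing it to come back to the pool for a full slice before running long, which keeps the global quota fair under contention.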
5992 static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in __return_cfs_rq_runtime() argument
5994 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); in __return_cfs_rq_runtime()
5995 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; in __return_cfs_rq_runtime()
6012 cfs_rq->runtime_remaining -= slack_runtime; in __return_cfs_rq_runtime()
6015 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) in return_cfs_rq_runtime() argument
6020 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) in return_cfs_rq_runtime()
6023 __return_cfs_rq_runtime(cfs_rq); in return_cfs_rq_runtime()
6060 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) in check_enqueue_throttle() argument
6066 if (!cfs_rq->runtime_enabled || cfs_rq->curr) in check_enqueue_throttle()
6070 if (cfs_rq_throttled(cfs_rq)) in check_enqueue_throttle()
6074 account_cfs_rq_runtime(cfs_rq, 0); in check_enqueue_throttle()
6075 if (cfs_rq->runtime_remaining <= 0) in check_enqueue_throttle()
6076 throttle_cfs_rq(cfs_rq); in check_enqueue_throttle()
6081 struct cfs_rq *pcfs_rq, *cfs_rq; in sync_throttle() local
6089 cfs_rq = tg->cfs_rq[cpu]; in sync_throttle()
6090 pcfs_rq = tg->parent->cfs_rq[cpu]; in sync_throttle()
6092 cfs_rq->throttle_count = pcfs_rq->throttle_count; in sync_throttle()
6093 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu)); in sync_throttle()
6097 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) in check_cfs_rq_runtime() argument
6102 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) in check_cfs_rq_runtime()
6109 if (cfs_rq_throttled(cfs_rq)) in check_cfs_rq_runtime()
6112 return throttle_cfs_rq(cfs_rq); in check_cfs_rq_runtime()
6203 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) in init_cfs_rq_runtime() argument
6205 cfs_rq->runtime_enabled = 0; in init_cfs_rq_runtime()
6206 INIT_LIST_HEAD(&cfs_rq->throttled_list); in init_cfs_rq_runtime()
6208 INIT_LIST_HEAD(&cfs_rq->throttled_csd_list); in init_cfs_rq_runtime()
6277 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in update_runtime_enabled() local
6280 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; in update_runtime_enabled()
6302 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; in unthrottle_offline_cfs_rqs() local
6304 if (!cfs_rq->runtime_enabled) in unthrottle_offline_cfs_rqs()
6311 cfs_rq->runtime_remaining = 1; in unthrottle_offline_cfs_rqs()
6316 cfs_rq->runtime_enabled = 0; in unthrottle_offline_cfs_rqs()
6318 if (cfs_rq_throttled(cfs_rq)) in unthrottle_offline_cfs_rqs()
6319 unthrottle_cfs_rq(cfs_rq); in unthrottle_offline_cfs_rqs()
6328 struct cfs_rq *cfs_rq = task_cfs_rq(p); in cfs_task_bw_constrained() local
6333 if (cfs_rq->runtime_enabled || in cfs_task_bw_constrained()
6334 tg_cfs_bandwidth(cfs_rq->tg)->hierarchical_quota != RUNTIME_INF) in cfs_task_bw_constrained()
6373 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} in account_cfs_rq_runtime() argument
6374 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } in check_cfs_rq_runtime() argument
6375 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} in check_enqueue_throttle() argument
6377 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in return_cfs_rq_runtime() argument
6379 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) in cfs_rq_throttled() argument
6384 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) in throttled_hierarchy() argument
6397 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} in init_cfs_rq_runtime() argument
6512 struct cfs_rq *cfs_rq; in enqueue_task_fair() local
6536 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6537 enqueue_entity(cfs_rq, se, flags); in enqueue_task_fair()
6539 cfs_rq->h_nr_running++; in enqueue_task_fair()
6540 cfs_rq->idle_h_nr_running += idle_h_nr_running; in enqueue_task_fair()
6542 if (cfs_rq_is_idle(cfs_rq)) in enqueue_task_fair()
6546 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
6553 cfs_rq = cfs_rq_of(se); in enqueue_task_fair()
6555 update_load_avg(cfs_rq, se, UPDATE_TG); in enqueue_task_fair()
6559 cfs_rq->h_nr_running++; in enqueue_task_fair()
6560 cfs_rq->idle_h_nr_running += idle_h_nr_running; in enqueue_task_fair()
6562 if (cfs_rq_is_idle(cfs_rq)) in enqueue_task_fair()
6566 if (cfs_rq_throttled(cfs_rq)) in enqueue_task_fair()
6605 struct cfs_rq *cfs_rq; in dequeue_task_fair() local
6614 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
6615 dequeue_entity(cfs_rq, se, flags); in dequeue_task_fair()
6617 cfs_rq->h_nr_running--; in dequeue_task_fair()
6618 cfs_rq->idle_h_nr_running -= idle_h_nr_running; in dequeue_task_fair()
6620 if (cfs_rq_is_idle(cfs_rq)) in dequeue_task_fair()
6624 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
6628 if (cfs_rq->load.weight) { in dequeue_task_fair()
6635 if (task_sleep && se && !throttled_hierarchy(cfs_rq)) in dequeue_task_fair()
6643 cfs_rq = cfs_rq_of(se); in dequeue_task_fair()
6645 update_load_avg(cfs_rq, se, UPDATE_TG); in dequeue_task_fair()
6649 cfs_rq->h_nr_running--; in dequeue_task_fair()
6650 cfs_rq->idle_h_nr_running -= idle_h_nr_running; in dequeue_task_fair()
6652 if (cfs_rq_is_idle(cfs_rq)) in dequeue_task_fair()
6656 if (cfs_rq_throttled(cfs_rq)) in dequeue_task_fair()
6713 struct cfs_rq *cfs_rq; in cpu_load_without() local
6720 cfs_rq = &rq->cfs; in cpu_load_without()
6721 load = READ_ONCE(cfs_rq->avg.load_avg); in cpu_load_without()
6736 struct cfs_rq *cfs_rq; in cpu_runnable_without() local
6743 cfs_rq = &rq->cfs; in cpu_runnable_without()
6744 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_runnable_without()
7469 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util() local
7470 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
7474 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_util()
7492 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); in cpu_util()
8059 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in check_preempt_wakeup() local
8120 cfs_rq = cfs_rq_of(se); in check_preempt_wakeup()
8121 update_curr(cfs_rq); in check_preempt_wakeup()
8126 if (pick_eevdf(cfs_rq) == pse) in check_preempt_wakeup()
8139 struct cfs_rq *cfs_rq; in pick_task_fair() local
8142 cfs_rq = &rq->cfs; in pick_task_fair()
8143 if (!cfs_rq->nr_running) in pick_task_fair()
8147 struct sched_entity *curr = cfs_rq->curr; in pick_task_fair()
8152 update_curr(cfs_rq); in pick_task_fair()
8156 if (unlikely(check_cfs_rq_runtime(cfs_rq))) in pick_task_fair()
8160 se = pick_next_entity(cfs_rq, curr); in pick_task_fair()
8161 cfs_rq = group_cfs_rq(se); in pick_task_fair()
8162 } while (cfs_rq); in pick_task_fair()
8171 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair() local
8193 struct sched_entity *curr = cfs_rq->curr; in pick_next_task_fair()
8203 update_curr(cfs_rq); in pick_next_task_fair()
8213 if (unlikely(check_cfs_rq_runtime(cfs_rq))) { in pick_next_task_fair()
8214 cfs_rq = &rq->cfs; in pick_next_task_fair()
8216 if (!cfs_rq->nr_running) in pick_next_task_fair()
8223 se = pick_next_entity(cfs_rq, curr); in pick_next_task_fair()
8224 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
8225 } while (cfs_rq); in pick_next_task_fair()
8237 while (!(cfs_rq = is_same_group(se, pse))) { in pick_next_task_fair()
8251 put_prev_entity(cfs_rq, pse); in pick_next_task_fair()
8252 set_next_entity(cfs_rq, se); in pick_next_task_fair()
8262 se = pick_next_entity(cfs_rq, NULL); in pick_next_task_fair()
8263 set_next_entity(cfs_rq, se); in pick_next_task_fair()
8264 cfs_rq = group_cfs_rq(se); in pick_next_task_fair()
8265 } while (cfs_rq); in pick_next_task_fair()
8324 struct cfs_rq *cfs_rq; in put_prev_task_fair() local
8327 cfs_rq = cfs_rq_of(se); in put_prev_task_fair()
8328 put_prev_entity(cfs_rq, se); in put_prev_task_fair()
8338 struct cfs_rq *cfs_rq = task_cfs_rq(curr); in yield_task_fair() local
8347 clear_buddies(cfs_rq, se); in yield_task_fair()
8353 update_curr(cfs_rq); in yield_task_fair()
9016 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) in cfs_rq_has_blocked() argument
9018 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
9021 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
9057 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } in cfs_rq_has_blocked() argument
9093 struct cfs_rq *cfs_rq, *pos; in __update_blocked_fair() local
9101 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { in __update_blocked_fair()
9104 if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { in __update_blocked_fair()
9105 update_tg_load_avg(cfs_rq); in __update_blocked_fair()
9107 if (cfs_rq->nr_running == 0) in __update_blocked_fair()
9108 update_idle_cfs_rq_clock_pelt(cfs_rq); in __update_blocked_fair()
9110 if (cfs_rq == &rq->cfs) in __update_blocked_fair()
9115 se = cfs_rq->tg->se[cpu]; in __update_blocked_fair()
9123 if (cfs_rq_is_decayed(cfs_rq)) in __update_blocked_fair()
9124 list_del_leaf_cfs_rq(cfs_rq); in __update_blocked_fair()
9127 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
9139 static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) in update_cfs_rq_h_load() argument
9141 struct rq *rq = rq_of(cfs_rq); in update_cfs_rq_h_load()
9142 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
9146 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
9149 WRITE_ONCE(cfs_rq->h_load_next, NULL); in update_cfs_rq_h_load()
9151 cfs_rq = cfs_rq_of(se); in update_cfs_rq_h_load()
9152 WRITE_ONCE(cfs_rq->h_load_next, se); in update_cfs_rq_h_load()
9153 if (cfs_rq->last_h_load_update == now) in update_cfs_rq_h_load()
9158 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); in update_cfs_rq_h_load()
9159 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
9162 while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { in update_cfs_rq_h_load()
9163 load = cfs_rq->h_load; in update_cfs_rq_h_load()
9165 cfs_rq_load_avg(cfs_rq) + 1); in update_cfs_rq_h_load()
9166 cfs_rq = group_cfs_rq(se); in update_cfs_rq_h_load()
9167 cfs_rq->h_load = load; in update_cfs_rq_h_load()
9168 cfs_rq->last_h_load_update = now; in update_cfs_rq_h_load()
9174 struct cfs_rq *cfs_rq = task_cfs_rq(p); in task_h_load() local
9176 update_cfs_rq_h_load(cfs_rq); in task_h_load()
9177 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9178 cfs_rq_load_avg(cfs_rq) + 1); in task_h_load()
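task_h_load() (line 9174) flattens the group hierarchy: update_cfs_rq_h_load() first records the se chain top-down via h_load_next (lines 9149-9153), then telescopes h_load = parent_h_load * se->avg.load_avg / (parent_load_avg + 1) down the chain (lines 9162-9168), and the task's share is p->se.avg.load_avg * h_load / (cfs_rq_load_avg + 1) (lines 9177-9178). So a task holding half of a group that itself holds half of the root's load reports roughly a quarter of the root load to the load balancer.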
9183 struct cfs_rq *cfs_rq = &rq->cfs; in __update_blocked_fair() local
9186 decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); in __update_blocked_fair()
9187 if (cfs_rq_has_blocked(cfs_rq)) in __update_blocked_fair()
12293 struct cfs_rq *cfs_rq = cfs_rq_of(se); in se_fi_update() local
12296 if (cfs_rq->forceidle_seq == fi_seq) in se_fi_update()
12298 cfs_rq->forceidle_seq = fi_seq; in se_fi_update()
12301 cfs_rq->min_vruntime_fi = cfs_rq->min_vruntime; in se_fi_update()
12321 struct cfs_rq *cfs_rqa; in cfs_prio_less()
12322 struct cfs_rq *cfs_rqb; in cfs_prio_less()
12332 while (sea->cfs_rq->tg != seb->cfs_rq->tg) { in cfs_prio_less()
12345 cfs_rqa = sea->cfs_rq; in cfs_prio_less()
12346 cfs_rqb = seb->cfs_rq; in cfs_prio_less()
12365 struct cfs_rq *cfs_rq; in task_is_throttled_fair() local
12368 cfs_rq = task_group(p)->cfs_rq[cpu]; in task_is_throttled_fair()
12370 cfs_rq = &cpu_rq(cpu)->cfs; in task_is_throttled_fair()
12372 return throttled_hierarchy(cfs_rq); in task_is_throttled_fair()
12388 struct cfs_rq *cfs_rq; in task_tick_fair() local
12392 cfs_rq = cfs_rq_of(se); in task_tick_fair()
12393 entity_tick(cfs_rq, se, queued); in task_tick_fair()
12413 struct cfs_rq *cfs_rq; in task_fork_fair() local
12420 cfs_rq = task_cfs_rq(current); in task_fork_fair()
12421 curr = cfs_rq->curr; in task_fork_fair()
12423 update_curr(cfs_rq); in task_fork_fair()
12424 place_entity(cfs_rq, se, ENQUEUE_INITIAL); in task_fork_fair()
12460 struct cfs_rq *cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq() local
12462 if (cfs_rq_throttled(cfs_rq)) in propagate_entity_cfs_rq()
12465 if (!throttled_hierarchy(cfs_rq)) in propagate_entity_cfs_rq()
12466 list_add_leaf_cfs_rq(cfs_rq); in propagate_entity_cfs_rq()
12472 cfs_rq = cfs_rq_of(se); in propagate_entity_cfs_rq()
12474 update_load_avg(cfs_rq, se, UPDATE_TG); in propagate_entity_cfs_rq()
12476 if (cfs_rq_throttled(cfs_rq)) in propagate_entity_cfs_rq()
12479 if (!throttled_hierarchy(cfs_rq)) in propagate_entity_cfs_rq()
12480 list_add_leaf_cfs_rq(cfs_rq); in propagate_entity_cfs_rq()
12489 struct cfs_rq *cfs_rq = cfs_rq_of(se); in detach_entity_cfs_rq() local
12503 update_load_avg(cfs_rq, se, 0); in detach_entity_cfs_rq()
12504 detach_entity_load_avg(cfs_rq, se); in detach_entity_cfs_rq()
12505 update_tg_load_avg(cfs_rq); in detach_entity_cfs_rq()
12511 struct cfs_rq *cfs_rq = cfs_rq_of(se); in attach_entity_cfs_rq() local
12514 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); in attach_entity_cfs_rq()
12515 attach_entity_load_avg(cfs_rq, se); in attach_entity_cfs_rq()
12516 update_tg_load_avg(cfs_rq); in attach_entity_cfs_rq()
12576 struct cfs_rq *cfs_rq = cfs_rq_of(se); in set_next_task_fair() local
12578 set_next_entity(cfs_rq, se); in set_next_task_fair()
12580 account_cfs_rq_runtime(cfs_rq, 0); in set_next_task_fair()
12584 void init_cfs_rq(struct cfs_rq *cfs_rq) in init_cfs_rq() argument
12586 cfs_rq->tasks_timeline = RB_ROOT_CACHED; in init_cfs_rq()
12587 u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20))); in init_cfs_rq()
12589 raw_spin_lock_init(&cfs_rq->removed.lock); in init_cfs_rq()
12618 if (tg->cfs_rq) in free_fair_sched_group()
12619 kfree(tg->cfs_rq[i]); in free_fair_sched_group()
12624 kfree(tg->cfs_rq); in free_fair_sched_group()
12631 struct cfs_rq *cfs_rq; in alloc_fair_sched_group() local
12634 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); in alloc_fair_sched_group()
12635 if (!tg->cfs_rq) in alloc_fair_sched_group()
12646 cfs_rq = kzalloc_node(sizeof(struct cfs_rq), in alloc_fair_sched_group()
12648 if (!cfs_rq) in alloc_fair_sched_group()
12656 init_cfs_rq(cfs_rq); in alloc_fair_sched_group()
12657 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); in alloc_fair_sched_group()
12664 kfree(cfs_rq); in alloc_fair_sched_group()
12703 if (!tg->cfs_rq[cpu]->on_list) in unregister_fair_sched_group()
12709 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); in unregister_fair_sched_group()
12714 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, in init_tg_cfs_entry() argument
12720 cfs_rq->tg = tg; in init_tg_cfs_entry()
12721 cfs_rq->rq = rq; in init_tg_cfs_entry()
12722 init_cfs_rq_runtime(cfs_rq); in init_tg_cfs_entry()
12724 tg->cfs_rq[cpu] = cfs_rq; in init_tg_cfs_entry()
12732 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
12735 se->cfs_rq = parent->my_q; in init_tg_cfs_entry()
12739 se->my_q = cfs_rq; in init_tg_cfs_entry()
12819 struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i]; in sched_group_set_idle()
12844 struct cfs_rq *cfs_rq = cfs_rq_of(se); in sched_group_set_idle() local
12849 cfs_rq->idle_h_nr_running += idle_task_delta; in sched_group_set_idle()
12852 if (cfs_rq_is_idle(cfs_rq)) in sched_group_set_idle()
12957 struct cfs_rq *cfs_rq, *pos; in print_cfs_stats() local
12960 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) in print_cfs_stats()
12961 print_cfs_rq(m, cpu, cfs_rq); in print_cfs_stats()