Lines matching refs:avg (identifier cross-reference; the function names indicate these hits come from kernel/sched/fair.c, the CFS/EEVDF and PELT code, grouped below by the function named in each annotation)

674 s64 avg = cfs_rq->avg_vruntime; in avg_vruntime() local
680 avg += entity_key(cfs_rq, curr) * weight; in avg_vruntime()
686 if (avg < 0) in avg_vruntime()
687 avg -= (load - 1); in avg_vruntime()
688 avg = div_s64(avg, load); in avg_vruntime()
691 return cfs_rq->min_vruntime + avg; in avg_vruntime()
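
The avg_vruntime() hits above compute the load-weighted average vruntime that EEVDF uses as its zero-lag point V. Keys are stored relative to min_vruntime to keep the weighted sum small, and the "avg -= (load - 1)" step at line 687 turns C's truncating division into a floor, so negative sums round toward minus infinity. A minimal userspace sketch of the same arithmetic (entity_t and the sample weights are hypothetical; the kernel maintains avg_vruntime/avg_load incrementally in the rbtree rather than looping):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { int64_t vruntime; uint64_t weight; } entity_t;

    /* V = min_vruntime + (sum w_i * (v_i - min_vruntime)) / (sum w_i),
     * with the division floored so negative sums round toward -inf. */
    static int64_t avg_vruntime_of(const entity_t *e, int n, int64_t min_vruntime)
    {
        int64_t avg = 0, load = 0;

        for (int i = 0; i < n; i++) {
            avg  += (e[i].vruntime - min_vruntime) * (int64_t)e[i].weight;
            load += (int64_t)e[i].weight;
        }
        if (!load)
            return min_vruntime;
        if (avg < 0)                    /* same floor trick as line 687 */
            avg -= (load - 1);
        return min_vruntime + avg / load;
    }

    int main(void)
    {
        entity_t e[] = { { 100, 1024 }, { 90, 2048 } };
        /* (10*1024 + 0*2048) / 3072 = 3, so V = 93 */
        printf("V = %lld\n", (long long)avg_vruntime_of(e, 2, 90));
        return 0;
    }
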
741 s64 avg = cfs_rq->avg_vruntime; in entity_eligible() local
747 avg += entity_key(cfs_rq, curr) * weight; in entity_eligible()
751 return avg >= entity_key(cfs_rq, se) * load; in entity_eligible()
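
entity_eligible() (lines 741-751) reuses the same weighted sum but skips the division: an entity is eligible when its lag is non-negative, i.e. its vruntime is at or below V, and multiplying both sides of (v - min_vruntime) <= avg/load by load gives the divide-free comparison at line 751. A sketch of the predicate (names hypothetical):

    #include <stdint.h>

    /* Eligible iff lag >= 0, i.e. v_i <= V. Multiplying both sides of
     * (v_i - min_vruntime) <= avg / load by load avoids a division on
     * this hot path. */
    static inline int entity_eligible_key(int64_t avg, int64_t load, int64_t key)
    {
        return avg >= key * load;
    }
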
1055 struct sched_avg *sa = &se->avg; in init_entity_runnable_average()
1101 struct sched_avg *sa = &se->avg; in post_init_entity_util_avg()
1103 long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2; in post_init_entity_util_avg()
1116 se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq); in post_init_entity_util_avg()
1121 if (cfs_rq->avg.util_avg != 0) { in post_init_entity_util_avg()
1122 sa->util_avg = cfs_rq->avg.util_avg * se->load.weight; in post_init_entity_util_avg()
1123 sa->util_avg /= (cfs_rq->avg.load_avg + 1); in post_init_entity_util_avg()
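
post_init_entity_util_avg() (lines 1101-1123) seeds a forked task's utilization: the runqueue's util_avg is apportioned by weight and divided by load_avg + 1, then capped at half of the CPU's spare capacity ("cap" at line 1103). A hedged sketch of that rule; the helper name is mine, and the idle-runqueue branch (grant the cap itself) follows the structure visible above:

    /* A new task gets its weight-proportional slice of the rq's current
     * utilization, capped at half the CPU's spare capacity; an idle rq
     * grants the cap itself. */
    static unsigned long init_task_util(unsigned long rq_util_avg,
                                        unsigned long rq_load_avg,
                                        unsigned long task_weight,
                                        unsigned long cpu_scale)
    {
        long cap = (long)(cpu_scale - rq_util_avg) / 2;
        unsigned long util;

        if (cap <= 0)
            return 0;

        if (rq_util_avg) {
            util = rq_util_avg * task_weight / (rq_load_avg + 1);
            if (util > (unsigned long)cap)
                util = cap;
        } else {
            util = cap;
        }
        return util;
    }
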
2708 delta = p->se.avg.load_sum; in numa_get_avg_runtime()
3609 cfs_rq->avg.load_avg += se->avg.load_avg; in enqueue_load_avg()
3610 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; in enqueue_load_avg()
3616 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); in dequeue_load_avg()
3617 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); in dequeue_load_avg()
3619 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in dequeue_load_avg()
3620 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in dequeue_load_avg()
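
enqueue_load_avg()/dequeue_load_avg() (lines 3609-3620) add and remove an entity's contribution from the runqueue aggregates. Since _avg and _sum decay on different schedules, dequeue subtracts with underflow clamping (sub_positive()) and then re-establishes the invariant _sum >= _avg * PELT_MIN_DIVIDER, so a later _sum / divider cannot exceed the stored average. A sketch of the pattern; the PELT_MIN_DIVIDER definition is an assumption based on the usual kernel constants:

    #define LOAD_AVG_MAX     47742                  /* max of the PELT y^n series */
    #define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)  /* assumed definition */

    /* Clamp-at-zero subtraction, standing in for the kernel's sub_positive()
     * (the real one also prevents the compiler from reloading *ptr). */
    static inline void sub_positive(unsigned long *ptr, unsigned long val)
    {
        *ptr = (*ptr > val) ? *ptr - val : 0;
    }

    static void dequeue_load(unsigned long *rq_load_avg,
                             unsigned long *rq_load_sum,
                             unsigned long se_load_avg,
                             unsigned long se_weighted_load_sum)
    {
        sub_positive(rq_load_avg, se_load_avg);
        sub_positive(rq_load_sum, se_weighted_load_sum);

        /* Re-floor: _sum must stay >= _avg * PELT_MIN_DIVIDER, or a later
         * _sum / divider could yield an average above the stored one. */
        if (*rq_load_sum < *rq_load_avg * PELT_MIN_DIVIDER)
            *rq_load_sum = *rq_load_avg * PELT_MIN_DIVIDER;
    }
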
3666 u32 divider = get_pelt_divider(&se->avg); in reweight_entity()
3668 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); in reweight_entity()
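
reweight_entity() (lines 3666-3668) re-derives load_avg after a weight change: load_sum is kept weight-free, so the new average is weight * load_sum / divider, where the divider from get_pelt_divider() is the maximum the sum series can reach at the current point in the 1 ms period. A sketch, with the divider's shape stated as an assumption:

    #define LOAD_AVG_MAX 47742

    /* Assumed shape of get_pelt_divider(): the full series maximum less
     * the unfinished part of the current 1024 us period. */
    static inline unsigned int pelt_divider(unsigned int period_contrib)
    {
        return LOAD_AVG_MAX - 1024 + period_contrib;
    }

    /* After a weight change, re-derive load_avg from the weight-free
     * load_sum: load_avg = weight * load_sum / divider. */
    static void reweight(unsigned long *load_avg, unsigned long long load_sum,
                         unsigned long new_weight, unsigned int period_contrib)
    {
        *load_avg = (unsigned long)(new_weight * load_sum /
                                    pelt_divider(period_contrib));
    }
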
3775 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); in calc_group_shares()
3885 return u64_u32_load_copy(cfs_rq->avg.last_update_time, in cfs_rq_last_update_time()
3920 if (!load_avg_is_decayed(&cfs_rq->avg)) in cfs_rq_is_decayed()
3945 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; in update_tg_load_avg()
3955 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; in update_tg_load_avg()
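
update_tg_load_avg() (lines 3945-3955) flushes the runqueue's load into the shared, contended task_group sum only when the delta since the last flush is significant; the 1/64 hysteresis shown below matches mainline at the time of writing but should be treated as approximate (newer kernels also rate-limit by time). Sketch:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct tg_stub  { atomic_long load_avg; };
    struct crq_stub { long load_avg; long tg_load_avg_contrib; struct tg_stub *tg; };

    /* Flush this rq's load into the group-wide sum only when it has
     * drifted by more than 1/64 of the last flushed value. */
    static void tg_flush_load(struct crq_stub *crq)
    {
        long delta = crq->load_avg - crq->tg_load_avg_contrib;

        if (labs(delta) > crq->tg_load_avg_contrib / 64) {
            atomic_fetch_add(&crq->tg->load_avg, delta);
            crq->tg_load_avg_contrib = crq->load_avg;
        }
    }
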
3980 if (!(se->avg.last_update_time && prev)) in set_task_rq_fair()
3987 se->avg.last_update_time = n_last_update_time; in set_task_rq_fair()
4060 long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg; in update_tg_cfs_util()
4071 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_util()
4075 se->avg.util_avg = gcfs_rq->avg.util_avg; in update_tg_cfs_util()
4076 new_sum = se->avg.util_avg * divider; in update_tg_cfs_util()
4077 delta_sum = (long)new_sum - (long)se->avg.util_sum; in update_tg_cfs_util()
4078 se->avg.util_sum = new_sum; in update_tg_cfs_util()
4081 add_positive(&cfs_rq->avg.util_avg, delta_avg); in update_tg_cfs_util()
4082 add_positive(&cfs_rq->avg.util_sum, delta_sum); in update_tg_cfs_util()
4085 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in update_tg_cfs_util()
4086 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in update_tg_cfs_util()
4092 long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; in update_tg_cfs_runnable()
4103 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_runnable()
4106 se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; in update_tg_cfs_runnable()
4107 new_sum = se->avg.runnable_avg * divider; in update_tg_cfs_runnable()
4108 delta_sum = (long)new_sum - (long)se->avg.runnable_sum; in update_tg_cfs_runnable()
4109 se->avg.runnable_sum = new_sum; in update_tg_cfs_runnable()
4112 add_positive(&cfs_rq->avg.runnable_avg, delta_avg); in update_tg_cfs_runnable()
4113 add_positive(&cfs_rq->avg.runnable_sum, delta_sum); in update_tg_cfs_runnable()
4115 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in update_tg_cfs_runnable()
4116 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in update_tg_cfs_runnable()
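
update_tg_cfs_util() and update_tg_cfs_runnable() (lines 4060-4116) are two instances of one propagation pattern: copy the group runqueue's _avg into its owning entity, rebuild the entity's _sum from that average and the parent's divider, then apply both deltas to the parent with saturating adds and re-floor the sum. A condensed, signal-agnostic sketch:

    #define LOAD_AVG_MAX     47742
    #define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)

    /* Saturating add of a signed delta, standing in for add_positive(). */
    static inline void add_positive(unsigned long *ptr, long delta)
    {
        long res = (long)*ptr + delta;
        *ptr = res > 0 ? (unsigned long)res : 0;
    }

    struct sig { unsigned long avg, sum; };

    static void propagate_signal(struct sig *parent, struct sig *se,
                                 const struct sig *group, unsigned int divider)
    {
        long delta_avg = (long)group->avg - (long)se->avg;
        unsigned long new_sum;
        long delta_sum;

        if (!delta_avg)
            return;

        /* The owning se mirrors its group runqueue's average ... */
        se->avg   = group->avg;
        new_sum   = se->avg * divider;
        delta_sum = (long)new_sum - (long)se->sum;
        se->sum   = new_sum;

        /* ... and the parent runqueue absorbs both deltas. */
        add_positive(&parent->avg, delta_avg);
        add_positive(&parent->sum, delta_sum);
        if (parent->sum < parent->avg * PELT_MIN_DIVIDER)
            parent->sum = parent->avg * PELT_MIN_DIVIDER;
    }
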
4137 divider = get_pelt_divider(&cfs_rq->avg); in update_tg_cfs_load()
4144 runnable_sum += se->avg.load_sum; in update_tg_cfs_load()
4152 load_sum = div_u64(gcfs_rq->avg.load_sum, in update_tg_cfs_load()
4157 runnable_sum = min(se->avg.load_sum, load_sum); in update_tg_cfs_load()
4166 running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; in update_tg_cfs_load()
4172 delta_avg = load_avg - se->avg.load_avg; in update_tg_cfs_load()
4176 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum; in update_tg_cfs_load()
4178 se->avg.load_sum = runnable_sum; in update_tg_cfs_load()
4179 se->avg.load_avg = load_avg; in update_tg_cfs_load()
4180 add_positive(&cfs_rq->avg.load_avg, delta_avg); in update_tg_cfs_load()
4181 add_positive(&cfs_rq->avg.load_sum, delta_sum); in update_tg_cfs_load()
4183 cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum, in update_tg_cfs_load()
4184 cfs_rq->avg.load_avg * PELT_MIN_DIVIDER); in update_tg_cfs_load()
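
update_tg_cfs_load() (lines 4137-4184) cannot simply copy an average, because the group's runnable time is not the entity's; it estimates the entity's new load_sum from the propagated delta, clamped between what is physically possible (at most the full divider) and what demonstrably happened (at least the entity's running time, util_sum >> SCHED_CAPACITY_SHIFT at line 4166). A sketch of that clamp; parameter names are mine, with the intended mapping to kernel fields noted in the comments:

    /* Estimate the owning se's new load_sum from the group's propagated
     * runnable delta. Parameters map to the kernel fields roughly as:
     *   prop           -> gcfs_rq->prop_runnable_sum
     *   se_load_sum    -> se->avg.load_sum (weight-free)
     *   group_load_sum -> gcfs_rq->avg.load_sum / group weight
     *   running_sum    -> se->avg.util_sum >> SCHED_CAPACITY_SHIFT
     */
    static long blend_runnable_sum(long prop, long se_load_sum,
                                   long group_load_sum, long divider,
                                   long running_sum)
    {
        long runnable_sum;

        if (prop >= 0) {
            /* Load was added below: optimistic, but at most "always ran". */
            runnable_sum = se_load_sum + prop;
            if (runnable_sum > divider)
                runnable_sum = divider;
        } else {
            /* Load was removed: pessimistic, bounded by the group's own sum. */
            runnable_sum = group_load_sum < se_load_sum ?
                           group_load_sum : se_load_sum;
        }

        /* Never below the time the entity actually ran. */
        return runnable_sum > running_sum ? runnable_sum : running_sum;
    }
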
4233 if (se->avg.load_avg || se->avg.util_avg) in skip_blocked_update()
4272 if (load_avg_is_decayed(&se->avg)) in migrate_se_pelt_lag()
4366 struct sched_avg *sa = &cfs_rq->avg; in update_cfs_rq_load_avg()
4371 u32 divider = get_pelt_divider(&cfs_rq->avg); in update_cfs_rq_load_avg()
4440 u32 divider = get_pelt_divider(&cfs_rq->avg); in attach_entity_load_avg()
4449 se->avg.last_update_time = cfs_rq->avg.last_update_time; in attach_entity_load_avg()
4450 se->avg.period_contrib = cfs_rq->avg.period_contrib; in attach_entity_load_avg()
4458 se->avg.util_sum = se->avg.util_avg * divider; in attach_entity_load_avg()
4460 se->avg.runnable_sum = se->avg.runnable_avg * divider; in attach_entity_load_avg()
4462 se->avg.load_sum = se->avg.load_avg * divider; in attach_entity_load_avg()
4463 if (se_weight(se) < se->avg.load_sum) in attach_entity_load_avg()
4464 se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se)); in attach_entity_load_avg()
4466 se->avg.load_sum = 1; in attach_entity_load_avg()
4469 cfs_rq->avg.util_avg += se->avg.util_avg; in attach_entity_load_avg()
4470 cfs_rq->avg.util_sum += se->avg.util_sum; in attach_entity_load_avg()
4471 cfs_rq->avg.runnable_avg += se->avg.runnable_avg; in attach_entity_load_avg()
4472 cfs_rq->avg.runnable_sum += se->avg.runnable_sum; in attach_entity_load_avg()
4474 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); in attach_entity_load_avg()
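
attach_entity_load_avg() (lines 4440-4474) splices a detached entity into a runqueue's PELT accounting: it adopts the rq's clock and period phase, rebuilds each _sum from the matching _avg and the shared divider (load_sum weight-free, hence the divide at line 4464), and adds the results to the rq aggregates. A sketch with an abbreviated struct:

    #include <stdint.h>

    struct pelt_stub {
        uint64_t last_update_time;
        uint32_t period_contrib;
        unsigned long util_avg, runnable_avg, load_avg;
        uint64_t util_sum, runnable_sum, load_sum;
    };

    static void attach(struct pelt_stub *rq, struct pelt_stub *se,
                       unsigned long weight, uint32_t divider)
    {
        /* Adopt the rq's PELT clock and period phase so that future
         * decay steps treat rq and se identically. */
        se->last_update_time = rq->last_update_time;
        se->period_contrib   = rq->period_contrib;

        /* Rebuild each _sum so that avg == sum / divider holds exactly. */
        se->util_sum     = se->util_avg * divider;
        se->runnable_sum = se->runnable_avg * divider;

        /* load_sum is stored weight-free, hence the divide by weight. */
        se->load_sum = se->load_avg * divider;
        se->load_sum = (weight < se->load_sum) ? se->load_sum / weight : 1;

        rq->util_avg     += se->util_avg;
        rq->util_sum     += se->util_sum;
        rq->runnable_avg += se->runnable_avg;
        rq->runnable_sum += se->runnable_sum;
    }

detach_entity_load_avg() (lines 4492-4504) is the mirror image, subtracting with sub_positive() and re-applying the PELT_MIN_DIVIDER floors shown earlier; load itself goes through the propagation machinery via add_tg_cfs_propagate().
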
4492 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); in detach_entity_load_avg()
4493 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); in detach_entity_load_avg()
4495 cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum, in detach_entity_load_avg()
4496 cfs_rq->avg.util_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4498 sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); in detach_entity_load_avg()
4499 sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum); in detach_entity_load_avg()
4501 cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum, in detach_entity_load_avg()
4502 cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER); in detach_entity_load_avg()
4504 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); in detach_entity_load_avg()
4529 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) in update_load_avg()
4535 if (!se->avg.last_update_time && (flags & DO_ATTACH)) { in update_load_avg()
4594 cfs_rq->removed.util_avg += se->avg.util_avg; in remove_entity_load_avg()
4595 cfs_rq->removed.load_avg += se->avg.load_avg; in remove_entity_load_avg()
4596 cfs_rq->removed.runnable_avg += se->avg.runnable_avg; in remove_entity_load_avg()
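
remove_entity_load_avg() (lines 4594-4596) runs without the runqueue's lock, for example when a migrating task's old rq must shed its load remotely, so it only parks the averages in cfs_rq->removed; update_cfs_rq_load_avg() (line 4366) drains that buffer later under the rq lock. A sketch of the producer side, with a pthread spinlock standing in for the kernel's raw spinlock:

    #include <pthread.h>

    struct removed_stub {
        pthread_spinlock_t lock;
        unsigned long util_avg, load_avg, runnable_avg;
        unsigned int  nr;
    };

    /* Producer: called locklessly with respect to the rq, so only this
     * small buffer is synchronized; the rq aggregates are untouched. */
    static void remove_entity(struct removed_stub *removed,
                              unsigned long util, unsigned long load,
                              unsigned long runnable)
    {
        pthread_spin_lock(&removed->lock);
        removed->util_avg     += util;
        removed->load_avg     += load;
        removed->runnable_avg += runnable;
        removed->nr++;
        pthread_spin_unlock(&removed->lock);
    }
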
4602 return cfs_rq->avg.runnable_avg; in cfs_rq_runnable_avg()
4607 return cfs_rq->avg.load_avg; in cfs_rq_load_avg()
4614 return READ_ONCE(p->se.avg.util_avg); in task_util()
4619 struct util_est ue = READ_ONCE(p->se.avg.util_est); in _task_util_est()
4654 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_enqueue()
4656 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_enqueue()
4670 enqueued = cfs_rq->avg.util_est.enqueued; in util_est_dequeue()
4672 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); in util_est_dequeue()
4713 ue = p->se.avg.util_est; in util_est_update()
4773 WRITE_ONCE(p->se.avg.util_est, ue); in util_est_update()
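
The util_est hits (lines 4654-4773) maintain estimated utilization: enqueue/dequeue adjust the runqueue's "enqueued" total by the task's estimate, and util_est_update() folds the utilization observed at dequeue into a per-task EWMA so that tasks that sleep often keep a realistic figure. The weight is 1/4 in current kernels (UTIL_EST_WEIGHT_SHIFT == 2), though treat the exact shift as an assumption; the fixed-point step looks like:

    #define UTIL_EST_WEIGHT_SHIFT 2     /* w = 1/4, assumed */

    /* ewma <- ewma + w * (last - ewma), in fixed point: scale up,
     * add the raw difference, scale back down. */
    static unsigned int util_est_ewma(unsigned int ewma, unsigned int last)
    {
        int diff = (int)last - (int)ewma;

        ewma <<= UTIL_EST_WEIGHT_SHIFT;
        ewma  += diff;                 /* ewma*4 + (last - ewma) */
        ewma >>= UTIL_EST_WEIGHT_SHIFT;
        return ewma;                   /* == (3*ewma_old + last) / 4 */
    }
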
6717 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_load_without()
6721 load = READ_ONCE(cfs_rq->avg.load_avg); in cpu_load_without()
6740 if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_runnable_without()
6744 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_runnable_without()
6747 lsub_positive(&runnable, p->se.avg.runnable_avg); in cpu_runnable_without()
7470 unsigned long util = READ_ONCE(cfs_rq->avg.util_avg); in cpu_util()
7474 runnable = READ_ONCE(cfs_rq->avg.runnable_avg); in cpu_util()
7492 util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); in cpu_util()
7557 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in cpu_util_without()
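
cpu_util() and the *_without variants (lines 6717-7557) build the utilization the scheduler plans with: base util_avg, optionally boosted toward runnable_avg, max()ed against the runqueue's util_est.enqueued (a just-woken task's decayed util_avg understates imminent demand), and capped at capacity. The *_without variants first subtract the waking task's own contribution (lsub_positive() at line 6747). A simplified sketch, omitting the waking-task special cases:

    static unsigned long cpu_util_sketch(unsigned long util_avg,
                                         unsigned long runnable_avg,
                                         unsigned long util_est_enqueued,
                                         unsigned long capacity,
                                         int boost)
    {
        unsigned long util = util_avg;

        if (boost && runnable_avg > util)   /* runnable as upper estimate */
            util = runnable_avg;
        if (util_est_enqueued > util)       /* trust the recent estimate */
            util = util_est_enqueued;

        return util < capacity ? util : capacity;
    }
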
8021 se->avg.last_update_time = 0; in migrate_task_rq_fair()
9018 if (cfs_rq->avg.load_avg) in cfs_rq_has_blocked()
9021 if (cfs_rq->avg.util_avg) in cfs_rq_has_blocked()
9164 load = div64_ul(load * se->avg.load_avg, in update_cfs_rq_h_load()
9177 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, in task_h_load()
9195 return p->se.avg.load_avg; in task_h_load()
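
task_h_load() (lines 9164-9195) converts a task's load_avg into its share of the CPU-level load: update_cfs_rq_h_load() walks down the group hierarchy, scaling h_load at each level by the child entity's fraction of its runqueue's load, and the task's figure is then its own load_avg scaled by its cfs_rq's h_load. The +1 in the divisor guards against a zero load. One level of the recurrence:

    /* One level of the h_load recurrence; the task-level figure applies
     * the same scaling with its own load_avg. Example: a group entity
     * contributing 1024 of its rq's 4096 load passes a quarter of the
     * rq's h_load down to its children. */
    static unsigned long h_load_level(unsigned long parent_h_load,
                                      unsigned long se_load_avg,
                                      unsigned long rq_load_avg)
    {
        return parent_h_load * se_load_avg / (rq_load_avg + 1);
    }
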
9947 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) in task_running_on_cpu()
12498 if (!se->avg.last_update_time) in detach_entity_cfs_rq()
12607 p->se.avg.last_update_time = 0; in task_change_group_fair()
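
The closing hits show the convention that threads through this whole section: writing 0 to se->avg.last_update_time (migrate_task_rq_fair() at line 8021, task_change_group_fair() at line 12607) marks the entity as detached from any runqueue's PELT clock, and readers such as update_load_avg() (lines 4529/4535) and cpu_load_without() (line 6717) interpret that zero as "no history here: attach on next sight, and don't trust remote reads". A sketch of the round trip; the helper names are illustrative:

    #include <stdint.h>

    struct se_stub { uint64_t last_update_time; };

    /* Migration out: residual load is queued on cfs_rq->removed (see
     * lines 4594-4596) and the PELT clock is dropped. */
    static void migrate_out(struct se_stub *se)
    {
        se->last_update_time = 0;
    }

    /* Destination side, as in update_load_avg() with DO_ATTACH: a zero
     * stamp means "attach me", non-zero means "just age my signal". */
    static int needs_attach(const struct se_stub *se)
    {
        return se->last_update_time == 0;
    }
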