Lines matching refs:sgs — cross-reference hits for the struct sg_lb_stats pointer sgs in the CFS load balancer, kernel/sched/fair.c. Each entry gives the source line number, the matching line, and the enclosing function. The line numbers and the presence of group_smt_balance suggest a v6.6-era tree; the reconstruction sketches interspersed below assume that source, with lines that do not mention sgs (and therefore do not appear in this listing) filled in from context.
9454 group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs) in group_has_capacity() argument
9456 if (sgs->sum_nr_running < sgs->group_weight) in group_has_capacity()
9459 if ((sgs->group_capacity * imbalance_pct) < in group_has_capacity()
9460 (sgs->group_runnable * 100)) in group_has_capacity()
9463 if ((sgs->group_capacity * 100) > in group_has_capacity()
9464 (sgs->group_util * imbalance_pct)) in group_has_capacity()
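The three matched tests above are the whole decision in group_has_capacity(). A sketch with the return statements filled in (they do not mention sgs, so the listing omits them; the bodies follow the upstream fair.c this listing appears to come from):

        static inline bool
        group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
        {
                /* Fewer running tasks than CPUs: spare room by definition. */
                if (sgs->sum_nr_running < sgs->group_weight)
                        return true;

                /* Runnable pressure already exceeds capacity * imbalance_pct. */
                if ((sgs->group_capacity * imbalance_pct) <
                                (sgs->group_runnable * 100))
                        return false;

                /* Utilization still clearly below capacity: has room. */
                if ((sgs->group_capacity * 100) >
                                (sgs->group_util * imbalance_pct))
                        return true;

                return false;
        }

imbalance_pct (125 by default, smaller at lower topology levels) builds a margin into both comparisons, so a group is not declared full on a marginal difference.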
9479 group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs) in group_is_overloaded() argument
9481 if (sgs->sum_nr_running <= sgs->group_weight) in group_is_overloaded()
9484 if ((sgs->group_capacity * 100) < in group_is_overloaded()
9485 (sgs->group_util * imbalance_pct)) in group_is_overloaded()
9488 if ((sgs->group_capacity * imbalance_pct) < in group_is_overloaded()
9489 (sgs->group_runnable * 100)) in group_is_overloaded()
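group_is_overloaded() is the inverse gate, reconstructed the same way. Note the <= (rather than <) in the first test and the flipped comparisons: the two helpers are deliberately not exact complements, leaving a dead band between "has capacity" and "overloaded":

        static inline bool
        group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
        {
                /* No more tasks than CPUs: cannot be overloaded. */
                if (sgs->sum_nr_running <= sgs->group_weight)
                        return false;

                /* Utilization exceeds capacity scaled by imbalance_pct. */
                if ((sgs->group_capacity * 100) <
                                (sgs->group_util * imbalance_pct))
                        return true;

                /* Runnable pressure exceeds capacity scaled by imbalance_pct. */
                if ((sgs->group_capacity * imbalance_pct) <
                                (sgs->group_runnable * 100))
                        return true;

                return false;
        }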
9498 struct sg_lb_stats *sgs) in group_classify() argument
9500 if (group_is_overloaded(imbalance_pct, sgs)) in group_classify()
9506 if (sgs->group_asym_packing) in group_classify()
9509 if (sgs->group_smt_balance) in group_classify()
9512 if (sgs->group_misfit_task_load) in group_classify()
9515 if (!group_has_capacity(imbalance_pct, sgs)) in group_classify()
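The matches in group_classify() give the classification priority. One test, sg_imbalanced(group), takes only the group and therefore never shows up in an sgs listing; it is restored in this sketch. The return values come from the group_type enum, whose ascending declaration order (group_has_spare, group_fully_busy, group_misfit_task, group_smt_balance, group_asym_packing, group_imbalanced, group_overloaded) is what makes the plain > and < comparisons in update_sd_pick_busiest() further down meaningful:

        static inline enum group_type
        group_classify(unsigned int imbalance_pct, struct sched_group *group,
                       struct sg_lb_stats *sgs)
        {
                if (group_is_overloaded(imbalance_pct, sgs))
                        return group_overloaded;

                /* Not in the listing: consults the group, not sgs. */
                if (sg_imbalanced(group))
                        return group_imbalanced;

                if (sgs->group_asym_packing)
                        return group_asym_packing;

                if (sgs->group_smt_balance)
                        return group_smt_balance;

                if (sgs->group_misfit_task_load)
                        return group_misfit_task;

                if (!group_has_capacity(imbalance_pct, sgs))
                        return group_fully_busy;

                return group_has_spare;
        }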
9562 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, in sched_asym() argument
9574 if (sgs->group_weight - sgs->idle_cpus != 1) in sched_asym()
9592 static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs, in smt_balance() argument
9605 sgs->sum_h_nr_running > 1) in smt_balance()
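Each of these two helpers touches sgs exactly once, hence the single hit apiece. In sched_asym(), group_weight - idle_cpus != 1 rejects asym packing when an SMT group has more than one busy sibling, since CPU priorities are meaningless in that case. smt_balance() is short enough to reconstruct in full, per the upstream source:

        static inline bool smt_balance(struct lb_env *env, struct sg_lb_stats *sgs,
                                       struct sched_group *group)
        {
                if (env->idle == CPU_NOT_IDLE)
                        return false;

                /*
                 * Prefer to move a task off an SMT core whose siblings are
                 * also busy, onto a CPU that has its capacity to itself.
                 * A single-thread core does not set SD_SHARE_CPUCAPACITY.
                 */
                if (group->flags & SD_SHARE_CPUCAPACITY &&
                    sgs->sum_h_nr_running > 1)
                        return true;

                return false;
        }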
9670 struct sg_lb_stats *sgs, in update_sg_lb_stats() argument
9675 memset(sgs, 0, sizeof(*sgs)); in update_sg_lb_stats()
9683 sgs->group_load += load; in update_sg_lb_stats()
9684 sgs->group_util += cpu_util_cfs(i); in update_sg_lb_stats()
9685 sgs->group_runnable += cpu_runnable(rq); in update_sg_lb_stats()
9686 sgs->sum_h_nr_running += rq->cfs.h_nr_running; in update_sg_lb_stats()
9689 sgs->sum_nr_running += nr_running; in update_sg_lb_stats()
9698 sgs->nr_numa_running += rq->nr_numa_running; in update_sg_lb_stats()
9699 sgs->nr_preferred_running += rq->nr_preferred_running; in update_sg_lb_stats()
9705 sgs->idle_cpus++; in update_sg_lb_stats()
9715 if (sgs->group_misfit_task_load < rq->misfit_task_load) { in update_sg_lb_stats()
9716 sgs->group_misfit_task_load = rq->misfit_task_load; in update_sg_lb_stats()
9722 if (sgs->group_misfit_task_load < load) in update_sg_lb_stats()
9723 sgs->group_misfit_task_load = load; in update_sg_lb_stats()
9727 sgs->group_capacity = group->sgc->capacity; in update_sg_lb_stats()
9729 sgs->group_weight = group->group_weight; in update_sg_lb_stats()
9733 env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running && in update_sg_lb_stats()
9734 sched_asym(env, sds, sgs, group)) { in update_sg_lb_stats()
9735 sgs->group_asym_packing = 1; in update_sg_lb_stats()
9739 if (!local_group && smt_balance(env, sgs, group)) in update_sg_lb_stats()
9740 sgs->group_smt_balance = 1; in update_sg_lb_stats()
9742 sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); in update_sg_lb_stats()
9745 if (sgs->group_type == group_overloaded) in update_sg_lb_stats()
9746 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / in update_sg_lb_stats()
9747 sgs->group_capacity; in update_sg_lb_stats()
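Matched lines 9670-9747 are the heart of the statistics pass: zero the struct, accumulate per-CPU load, utilization, runnable pressure and task counts over the group's span, then derive capacity, weight, the asym-packing and SMT-balance flags, the group_type, and, only for overloaded groups, avg_load. A condensed sketch; the overload/overutilized status bits, the NUMA counters and the two misfit branches (lines 9698-9723: asymmetric capacity vs. reduced capacity) are elided:

        static inline void update_sg_lb_stats(struct lb_env *env,
                                              struct sd_lb_stats *sds,
                                              struct sched_group *group,
                                              struct sg_lb_stats *sgs,
                                              int *sg_status)
        {
                int i, nr_running, local_group;

                memset(sgs, 0, sizeof(*sgs));

                local_group = group == sds->local;

                for_each_cpu_and(i, sched_group_span(group), env->cpus) {
                        struct rq *rq = cpu_rq(i);
                        unsigned long load = cpu_load(rq);

                        sgs->group_load += load;
                        sgs->group_util += cpu_util_cfs(i);
                        sgs->group_runnable += cpu_runnable(rq);
                        sgs->sum_h_nr_running += rq->cfs.h_nr_running;

                        nr_running = rq->nr_running;
                        sgs->sum_nr_running += nr_running;

                        if (!nr_running && idle_cpu(i))
                                sgs->idle_cpus++;

                        /* ... overload status, NUMA and misfit accounting ... */
                }

                sgs->group_capacity = group->sgc->capacity;
                sgs->group_weight = group->group_weight;

                /* Is the dst CPU idle and preferred over this group? */
                if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
                    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
                    sched_asym(env, sds, sgs, group))
                        sgs->group_asym_packing = 1;

                /* Loaded SMT group to be balanced to the dst CPU? */
                if (!local_group && smt_balance(env, sgs, group))
                        sgs->group_smt_balance = 1;

                sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs);

                /* avg_load is only meaningful for an overloaded group. */
                if (sgs->group_type == group_overloaded)
                        sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
                                        sgs->group_capacity;
        }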
9766 struct sg_lb_stats *sgs) in update_sd_pick_busiest() argument
9771 if (!sgs->sum_h_nr_running) in update_sd_pick_busiest()
9781 (sgs->group_type == group_misfit_task) && in update_sd_pick_busiest()
9786 if (sgs->group_type > busiest->group_type) in update_sd_pick_busiest()
9789 if (sgs->group_type < busiest->group_type) in update_sd_pick_busiest()
9797 switch (sgs->group_type) { in update_sd_pick_busiest()
9800 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
9822 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) in update_sd_pick_busiest()
9831 if (sgs->idle_cpus != 0 || busiest->idle_cpus != 0) in update_sd_pick_busiest()
9849 if (sgs->avg_load < busiest->avg_load) in update_sd_pick_busiest()
9852 if (sgs->avg_load == busiest->avg_load) { in update_sd_pick_busiest()
9870 if (sg->flags & SD_SHARE_CPUCAPACITY && sgs->sum_h_nr_running <= 1) in update_sd_pick_busiest()
9884 if (sgs->idle_cpus > busiest->idle_cpus) in update_sd_pick_busiest()
9886 else if ((sgs->idle_cpus == busiest->idle_cpus) && in update_sd_pick_busiest()
9887 (sgs->sum_nr_running <= busiest->sum_nr_running)) in update_sd_pick_busiest()
9900 (sgs->group_type <= group_fully_busy) && in update_sd_pick_busiest()
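update_sd_pick_busiest() first compares group_type values directly — a larger enum value is a more urgent condition — and only on a tie falls through to a per-type switch. A simplified skeleton of the comparisons visible above; the real function has further SMT-vs-non-SMT tie-breaks and a fallthrough from group_smt_balance into group_fully_busy that are elided here:

        if (!sgs->sum_h_nr_running)
                return false;   /* a group with no CFS tasks is never busiest */

        if (sgs->group_type > busiest->group_type)
                return true;
        if (sgs->group_type < busiest->group_type)
                return false;

        switch (sgs->group_type) {
        case group_overloaded:
                if (sgs->avg_load <= busiest->avg_load) /* more load is busier */
                        return false;
                break;
        case group_misfit_task:
                if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)
                        return false;   /* bigger misfit task is busier */
                break;
        case group_has_spare:
                if (sgs->idle_cpus > busiest->idle_cpus)
                        return false;   /* fewer idle CPUs is busier ... */
                else if ((sgs->idle_cpus == busiest->idle_cpus) &&
                         (sgs->sum_nr_running <= busiest->sum_nr_running))
                        return false;   /* ... then more tasks is busier */
                break;
        /* ... remaining types elided ... */
        }

The final matched line (9900) belongs to a post-switch guard: on SD_ASYM_CPUCAPACITY domains, a candidate with no more than one task per CPU (group_type <= group_fully_busy) whose minimum per-CPU capacity exceeds the destination CPU's is rejected, since migrating its tasks to a less capable CPU could hurt throughput.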
9908 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) in fbq_classify_group() argument
9910 if (sgs->sum_h_nr_running > sgs->nr_numa_running) in fbq_classify_group()
9912 if (sgs->sum_h_nr_running > sgs->nr_preferred_running) in fbq_classify_group()
9926 static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) in fbq_classify_group() argument
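fbq_classify_group() appears twice because it has two definitions: the CONFIG_NUMA_BALANCING version at 9908 and a stub at 9926 whose body ignores sgs entirely and returns all. The real version ranks a group by how well its tasks sit on their preferred NUMA nodes:

        static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
        {
                /* Some tasks have no NUMA placement at all: regular. */
                if (sgs->sum_h_nr_running > sgs->nr_numa_running)
                        return regular;
                /* Some tasks sit off their preferred node: remote. */
                if (sgs->sum_h_nr_running > sgs->nr_preferred_running)
                        return remote;
                /* Everything already runs on its preferred node. */
                return all;
        }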
9993 struct sg_lb_stats *sgs, in update_sg_wakeup_stats() argument
9998 memset(sgs, 0, sizeof(*sgs)); in update_sg_wakeup_stats()
10002 sgs->group_misfit_task_load = 1; in update_sg_wakeup_stats()
10008 sgs->group_load += cpu_load_without(rq, p); in update_sg_wakeup_stats()
10009 sgs->group_util += cpu_util_without(i, p); in update_sg_wakeup_stats()
10010 sgs->group_runnable += cpu_runnable_without(rq, p); in update_sg_wakeup_stats()
10012 sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; in update_sg_wakeup_stats()
10015 sgs->sum_nr_running += nr_running; in update_sg_wakeup_stats()
10021 sgs->idle_cpus++; in update_sg_wakeup_stats()
10025 sgs->group_misfit_task_load && in update_sg_wakeup_stats()
10027 sgs->group_misfit_task_load = 0; in update_sg_wakeup_stats()
10031 sgs->group_capacity = group->sgc->capacity; in update_sg_wakeup_stats()
10033 sgs->group_weight = group->group_weight; in update_sg_wakeup_stats()
10035 sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); in update_sg_wakeup_stats()
10041 if (sgs->group_type == group_fully_busy || in update_sg_wakeup_stats()
10042 sgs->group_type == group_overloaded) in update_sg_wakeup_stats()
10043 sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / in update_sg_wakeup_stats()
10044 sgs->group_capacity; in update_sg_wakeup_stats()
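update_sg_wakeup_stats() mirrors update_sg_lb_stats() for the slow wakeup path: every per-CPU figure is taken "without" the waking task p, and on asymmetric-capacity domains group_misfit_task_load starts at 1 (assume p fits nowhere) and is cleared the first time some CPU can hold p. A condensed sketch along the same lines as above:

        static inline void update_sg_wakeup_stats(struct sched_domain *sd,
                                                  struct sched_group *group,
                                                  struct sg_lb_stats *sgs,
                                                  struct task_struct *p)
        {
                int i, nr_running;

                memset(sgs, 0, sizeof(*sgs));

                /* Assume the task fits no CPU of this group until proven otherwise. */
                if (sd->flags & SD_ASYM_CPUCAPACITY)
                        sgs->group_misfit_task_load = 1;

                for_each_cpu(i, sched_group_span(group)) {
                        struct rq *rq = cpu_rq(i);
                        unsigned int local;

                        sgs->group_load += cpu_load_without(rq, p);
                        sgs->group_util += cpu_util_without(i, p);
                        sgs->group_runnable += cpu_runnable_without(rq, p);
                        local = task_running_on_cpu(i, p);
                        sgs->sum_h_nr_running += rq->cfs.h_nr_running - local;

                        nr_running = rq->nr_running - local;
                        sgs->sum_nr_running += nr_running;

                        if (!nr_running && idle_cpu_without(i, p))
                                sgs->idle_cpus++;

                        /* First CPU that can hold p clears the misfit mark. */
                        if (sd->flags & SD_ASYM_CPUCAPACITY &&
                            sgs->group_misfit_task_load &&
                            task_fits_cpu(p, i))
                                sgs->group_misfit_task_load = 0;
                }

                sgs->group_capacity = group->sgc->capacity;
                sgs->group_weight = group->group_weight;
                sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);

                /* avg_load is meaningful only for busy groups. */
                if (sgs->group_type == group_fully_busy ||
                    sgs->group_type == group_overloaded)
                        sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
                                        sgs->group_capacity;
        }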
10050 struct sg_lb_stats *sgs) in update_pick_idlest() argument
10052 if (sgs->group_type < idlest_sgs->group_type) in update_pick_idlest()
10055 if (sgs->group_type > idlest_sgs->group_type) in update_pick_idlest()
10063 switch (sgs->group_type) { in update_pick_idlest()
10067 if (idlest_sgs->avg_load <= sgs->avg_load) in update_pick_idlest()
10085 if (idlest_sgs->idle_cpus > sgs->idle_cpus) in update_pick_idlest()
10089 if (idlest_sgs->idle_cpus == sgs->idle_cpus && in update_pick_idlest()
10090 idlest_sgs->group_util <= sgs->group_util) in update_pick_idlest()
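update_pick_idlest() is update_sd_pick_busiest() with the polarity reversed: a lower group_type wins outright, and the per-type tie-breaks prefer lower avg_load, then more idle CPUs, then lower group_util. Reconstructed from the matches plus the upstream source:

        static bool update_pick_idlest(struct sched_group *idlest,
                                       struct sg_lb_stats *idlest_sgs,
                                       struct sched_group *group,
                                       struct sg_lb_stats *sgs)
        {
                if (sgs->group_type < idlest_sgs->group_type)
                        return true;
                if (sgs->group_type > idlest_sgs->group_type)
                        return false;

                /* Same type: break the tie per type. */
                switch (sgs->group_type) {
                case group_overloaded:
                case group_fully_busy:
                        /* Select the group with the lowest avg_load. */
                        if (idlest_sgs->avg_load <= sgs->avg_load)
                                return false;
                        break;

                case group_imbalanced:
                case group_asym_packing:
                case group_smt_balance:
                        /* These types are not used in the slow wakeup path. */
                        return false;

                case group_misfit_task:
                        /* Select the group with the highest max capacity. */
                        if (idlest->sgc->max_capacity >= group->sgc->max_capacity)
                                return false;
                        break;

                case group_has_spare:
                        /* Most idle CPUs, then lowest utilization, wins. */
                        if (idlest_sgs->idle_cpus > sgs->idle_cpus)
                                return false;
                        if (idlest_sgs->idle_cpus == sgs->idle_cpus &&
                            idlest_sgs->group_util <= sgs->group_util)
                                return false;
                        break;
                }

                return true;
        }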
10110 struct sg_lb_stats *sgs; in find_idlest_group() local
10133 sgs = &local_sgs; in find_idlest_group()
10136 sgs = &tmp_sgs; in find_idlest_group()
10139 update_sg_wakeup_stats(sd, group, sgs, p); in find_idlest_group()
10141 if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { in find_idlest_group()
10143 idlest_sgs = *sgs; in find_idlest_group()
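Inside find_idlest_group() the sgs pointer merely selects which stack buffer receives the stats: local_sgs for the group containing this CPU, tmp_sgs for every other candidate, so tmp_sgs can be reused across iterations while the best candidate's stats survive via the copy. The loop, with the affinity/cookie skips and surrounding setup elided:

        do {
                bool local_group = cpumask_test_cpu(this_cpu,
                                                    sched_group_span(group));
                struct sg_lb_stats *sgs;

                if (local_group) {
                        sgs = &local_sgs;
                        local = group;
                } else {
                        sgs = &tmp_sgs;
                }

                update_sg_wakeup_stats(sd, group, sgs, p);

                /* Keep the idlest non-local group and snapshot its stats. */
                if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) {
                        idlest = group;
                        idlest_sgs = *sgs;
                }
        } while (group = group->next, group != sd->groups);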
10357 struct sg_lb_stats *sgs = &tmp_sgs; in update_sd_lb_stats() local
10363 sgs = local; in update_sd_lb_stats()
10370 update_sg_lb_stats(env, sds, sg, sgs, &sg_status); in update_sd_lb_stats()
10376 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
10378 sds->busiest_stat = *sgs; in update_sd_lb_stats()
10383 sds->total_load += sgs->group_load; in update_sd_lb_stats()
10384 sds->total_capacity += sgs->group_capacity; in update_sd_lb_stats()
10386 sum_util += sgs->group_util; in update_sd_lb_stats()
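update_sd_lb_stats() closes the picture with the same two-buffer pattern at load-balance time: the local group's stats land in sds->local_stat, every other group is offered to update_sd_pick_busiest(), and all groups, local included, feed the domain-wide totals used later to compute the imbalance. A condensed loop body; the group-capacity refresh, prefer-sibling handling and overload/overutilized propagation are elided:

        do {
                struct sg_lb_stats *sgs = &tmp_sgs;
                int local_group;

                local_group = cpumask_test_cpu(env->dst_cpu,
                                               sched_group_span(sg));
                if (local_group) {
                        sds->local = sg;
                        sgs = local;            /* points at sds->local_stat */
                }

                update_sg_lb_stats(env, sds, sg, sgs, &sg_status);

                if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
                        sds->busiest = sg;
                        sds->busiest_stat = *sgs;       /* snapshot: sgs is reused */
                }

                /* Every group contributes to the domain totals. */
                sds->total_load += sgs->group_load;
                sds->total_capacity += sgs->group_capacity;

                sum_util += sgs->group_util;
                sg = sg->next;
        } while (sg != env->sd->groups);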