Lines matching refs:avg_load in kernel/sched/fair.c

5707 unsigned long load, avg_load, runnable_load; in find_idlest_group() local
5724 avg_load = 0; in find_idlest_group()
5737 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs); in find_idlest_group()
5746 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / in find_idlest_group()
5753 this_avg_load = avg_load; in find_idlest_group()
5762 min_avg_load = avg_load; in find_idlest_group()
5765 (100*min_avg_load > imbalance_scale*avg_load)) { in find_idlest_group()
5770 min_avg_load = avg_load; in find_idlest_group()
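The find_idlest_group() hits above (lines 5707-5770) sum the per-CPU cfs_rq load averages, scale the sum by the group's capacity so that groups built from different-sized CPUs are comparable, and only prefer a new group when the current minimum is heavier by more than an imbalance_scale margin. Below is a minimal stand-alone sketch of that arithmetic; the CPU loads, capacities and imbalance_scale value are invented for illustration, and the runnable_load half of the real comparison is left out.

/*
 * Stand-alone model of the scaling at lines 5737-5746 and the tie-break
 * at line 5765.  All numbers are made up; the real code also tracks
 * runnable_load, which is omitted here.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static unsigned long group_avg_load(const unsigned long *cpu_load, int nr_cpus,
                                    unsigned long group_capacity)
{
        unsigned long avg_load = 0;
        int i;

        for (i = 0; i < nr_cpus; i++)
                avg_load += cpu_load[i];        /* cfs_rq_load_avg() stand-in */

        /* Normalize to SCHED_CAPACITY_SCALE units of capacity (line 5746) */
        return (avg_load * SCHED_CAPACITY_SCALE) / group_capacity;
}

int main(void)
{
        unsigned long big[]    = { 400, 350 };  /* two high-capacity CPUs */
        unsigned long little[] = { 300, 280 };  /* two low-capacity CPUs  */
        unsigned long imbalance_scale = 110;    /* assumed margin value   */

        unsigned long min_avg_load = group_avg_load(big, 2, 2048);
        unsigned long avg_load     = group_avg_load(little, 2, 1024);

        /* Prefer the new group only if the current best is clearly heavier */
        if (100 * min_avg_load > imbalance_scale * avg_load)
                min_avg_load = avg_load;

        printf("min_avg_load = %lu\n", min_avg_load);
        return 0;
}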
7503 unsigned long avg_load; /*Avg load across the CPUs of the group */ member
7530 unsigned long avg_load; /* Average load across all groups in sd */ member
7551 .avg_load = 0UL, in init_sd_lb_stats()
7884 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity; in update_sg_lb_stats()
7921 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
8129 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity, in check_asym_packing()
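Lines 7503-7921 above show avg_load both as a member of the per-group statistics (sg_lb_stats) and of the per-domain statistics (sd_lb_stats): update_sg_lb_stats() derives it from group_load and group_capacity, and update_sd_pick_busiest() refuses to pick a group whose avg_load does not exceed the current busiest's. The following is a simplified, self-contained model of those two steps, not the real kernel structures or functions.

/*
 * Each candidate group gets a capacity-scaled avg_load (line 7884), and a
 * group can only replace the current busiest if its avg_load is strictly
 * higher (line 7921).  Loads and capacities below are examples only.
 */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

struct sg_stats {
        unsigned long group_load;       /* total runnable load in the group */
        unsigned long group_capacity;   /* summed CPU capacity of the group */
        unsigned long avg_load;         /* load normalized to capacity      */
};

static void update_avg_load(struct sg_stats *sgs)
{
        /* Same arithmetic as line 7884 */
        sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) /
                        sgs->group_capacity;
}

static bool pick_busiest(const struct sg_stats *sgs,
                         const struct sg_stats *busiest)
{
        /* Mirrors the early-out at line 7921 */
        if (sgs->avg_load <= busiest->avg_load)
                return false;
        return true;
}

int main(void)
{
        struct sg_stats a = { .group_load = 900, .group_capacity = 2048 };
        struct sg_stats b = { .group_load = 600, .group_capacity = 1024 };

        update_avg_load(&a);    /* 900 * 1024 / 2048 = 450 */
        update_avg_load(&b);    /* 600 * 1024 / 1024 = 600 */

        printf("b busier than a: %d\n", pick_busiest(&b, &a));
        return 0;
}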
8162 if (busiest->avg_load + scaled_busy_load_per_task >= in fix_small_imbalance()
8163 local->avg_load + (scaled_busy_load_per_task * imbn)) { in fix_small_imbalance()
8175 min(busiest->load_per_task, busiest->avg_load); in fix_small_imbalance()
8177 min(local->load_per_task, local->avg_load); in fix_small_imbalance()
8181 if (busiest->avg_load > scaled_busy_load_per_task) { in fix_small_imbalance()
8184 busiest->avg_load - scaled_busy_load_per_task); in fix_small_imbalance()
8188 if (busiest->avg_load * busiest->group_capacity < in fix_small_imbalance()
8190 tmp = (busiest->avg_load * busiest->group_capacity) / in fix_small_imbalance()
8197 min(local->load_per_task, local->avg_load + tmp); in fix_small_imbalance()
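The fix_small_imbalance() references above handle the case where the computed imbalance is smaller than one task's load. A minimal sketch of the first decision (lines 8162-8163) follows: the busiest group's per-task load is scaled to its capacity, and a whole task is moved only if doing so still leaves the busiest group at least as loaded as the local one. All inputs and the imbn margin factor are assumptions for illustration, and the later capacity-gain comparison built from lines 8175-8197 is omitted.

/*
 * Simplified model of the single-task test at lines 8162-8163.
 * Values below are illustrative only.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
        unsigned long busiest_avg_load = 700, local_avg_load = 300;
        unsigned long busiest_capacity = 1024, load_per_task = 150;
        unsigned long imbn = 2;         /* assumed margin factor */
        unsigned long imbalance = 0;

        unsigned long scaled_busy_load_per_task =
                (load_per_task * SCHED_CAPACITY_SCALE) / busiest_capacity;

        if (busiest_avg_load + scaled_busy_load_per_task >=
            local_avg_load + scaled_busy_load_per_task * imbn)
                imbalance = load_per_task;      /* move one whole task */

        printf("imbalance = %lu\n", imbalance);
        return 0;
}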
8225 min(busiest->load_per_task, sds->avg_load); in calculate_imbalance()
8234 if (busiest->avg_load <= sds->avg_load || in calculate_imbalance()
8235 local->avg_load >= sds->avg_load) { in calculate_imbalance()
8261 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity); in calculate_imbalance()
8266 (sds->avg_load - local->avg_load) * local->group_capacity in calculate_imbalance()
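calculate_imbalance() (lines 8225-8266 above) gives up and falls back to fix_small_imbalance() when the busiest group is not above the domain average or the local group is not below it; otherwise it moves the smaller of what the busiest group can give and what the local group can take, each weighted by group capacity. A stand-alone sketch of that min() computation, with invented inputs:

/*
 * Sketch of the arithmetic at lines 8234-8266: the load to move is capped
 * by how far the busiest group sits above the domain average (max_pull)
 * and by how far the local group sits below it.  Inputs are examples.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long sds_avg_load = 500;            /* domain average      */
        unsigned long busiest_avg_load = 800, busiest_capacity = 1024;
        unsigned long local_avg_load = 200, local_capacity = 1024;
        unsigned long load_above_capacity = ~0UL;    /* no extra cap here   */
        unsigned long max_pull, imbalance;

        /* Lines 8234-8235: nothing to pull if busiest is not above, or
         * local not below, the domain average. */
        if (busiest_avg_load <= sds_avg_load || local_avg_load >= sds_avg_load) {
                printf("imbalance = 0 (small-imbalance path)\n");
                return 0;
        }

        max_pull = min_ul(busiest_avg_load - sds_avg_load, load_above_capacity);

        imbalance = min_ul(max_pull * busiest_capacity,
                           (sds_avg_load - local_avg_load) * local_capacity) /
                    SCHED_CAPACITY_SCALE;

        printf("imbalance = %lu\n", imbalance);
        return 0;
}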
8316 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load) in find_busiest_group()
8339 if (local->avg_load >= busiest->avg_load) in find_busiest_group()
8346 if (local->avg_load >= sds.avg_load) in find_busiest_group()
8365 if (100 * busiest->avg_load <= in find_busiest_group()
8366 env->sd->imbalance_pct * local->avg_load) in find_busiest_group()
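Finally, find_busiest_group() (lines 8316-8366 above) computes the domain-wide average load from total_load and total_capacity and then skips balancing unless the busiest group exceeds the local group by the domain's imbalance_pct margin. A minimal sketch of those comparisons follows; the imbalance_pct value of 125 is a typical default assumed here, not taken from the listing.

/*
 * Sketch of the checks at lines 8316-8366: the domain average is total
 * load scaled to SCHED_CAPACITY_SCALE units per unit of capacity, and
 * balancing proceeds only if busiest is above both the local group and
 * the average, by more than imbalance_pct/100 times the local load.
 */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

int main(void)
{
        unsigned long total_load = 1500, total_capacity = 2048;
        unsigned long local_avg_load = 400, busiest_avg_load = 1100;
        unsigned long imbalance_pct = 125;   /* assumed domain setting */

        /* Line 8316 */
        unsigned long sds_avg_load =
                (SCHED_CAPACITY_SCALE * total_load) / total_capacity;

        bool balanced =
                local_avg_load >= busiest_avg_load ||          /* line 8339      */
                local_avg_load >= sds_avg_load ||              /* line 8346      */
                100 * busiest_avg_load <=
                        imbalance_pct * local_avg_load;        /* lines 8365-8366 */

        printf("sds.avg_load = %lu, %s\n", sds_avg_load,
               balanced ? "out_balanced" : "need to balance");
        return 0;
}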