Lines matching refs: busiest — every use of the 'busiest' identifier along the CFS load-balancing path in kernel/sched/fair.c, from per-group statistics gathering down to the active-balance kick.
8334 struct sched_group *busiest; /* Busiest group in this sd */ member
8355 .busiest = NULL, in init_sd_lb_stats()
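Lines 8334 and 8355 are the anchor for everything below: each sched_domain scan keeps a struct sd_lb_stats whose busiest pointer and busiest_stat snapshot are reset before the group walk. A minimal standalone sketch of that bookkeeping follows, with the field set reduced to what the later excerpts actually use; the kernel's real structs carry considerably more state.

    #include <stddef.h>

    struct sched_group;                  /* opaque here; defined in sched.h */

    enum group_type {                    /* ordered: higher means worse off */
        group_has_spare,
        group_fully_busy,
        group_overloaded,
    };

    struct sg_lb_stats {
        unsigned long avg_load;          /* load per unit of capacity */
        unsigned long group_capacity;
        unsigned int  sum_nr_running;    /* all tasks on the group's rqs */
        unsigned int  sum_h_nr_running;  /* CFS tasks only */
        unsigned int  idle_cpus;
        unsigned int  group_weight;      /* number of CPUs in the group */
        enum group_type group_type;
    };

    struct sd_lb_stats {
        struct sched_group *busiest;     /* busiest group in this sd (8334) */
        unsigned long avg_load;          /* domain-wide average load */
        struct sg_lb_stats busiest_stat; /* snapshot of the busiest group */
        struct sg_lb_stats local_stat;   /* snapshot of the local group */
    };

    static void init_sd_lb_stats(struct sd_lb_stats *sds)
    {
        /* mirrors the reset at line 8355: no busiest group known yet */
        *sds = (struct sd_lb_stats){ .busiest = NULL };
    }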
8701 struct sg_lb_stats *busiest = &sds->busiest_stat; in update_sd_pick_busiest() local
8718 if (sgs->group_type > busiest->group_type) in update_sd_pick_busiest()
8721 if (sgs->group_type < busiest->group_type) in update_sd_pick_busiest()
8732 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
8745 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) in update_sd_pick_busiest()
8754 if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) in update_sd_pick_busiest()
8769 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
8781 if (sgs->idle_cpus > busiest->idle_cpus) in update_sd_pick_busiest()
8783 else if ((sgs->idle_cpus == busiest->idle_cpus) && in update_sd_pick_busiest()
8784 (sgs->sum_nr_running <= busiest->sum_nr_running)) in update_sd_pick_busiest()
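Lines 8701-8784 are the heart of update_sd_pick_busiest(): given the freshly computed stats sgs of a candidate group, decide whether it displaces the current busiest_stat. A sketch of that cascade using the types above; the asym-packing (8745) and misfit (8754) tie-breakers are elided, so this covers only the three group types sketched earlier.

    #include <stdbool.h>

    /* Does candidate 'sgs' displace the current 'busiest'?
     * group_type dominates (8718/8721); within a type the tie-breaker
     * depends on what kind of trouble the group is in. */
    static bool pick_busiest(const struct sg_lb_stats *sgs,
                             const struct sg_lb_stats *busiest)
    {
        if (sgs->group_type > busiest->group_type)
            return true;
        if (sgs->group_type < busiest->group_type)
            return false;

        switch (sgs->group_type) {
        case group_overloaded:
        case group_fully_busy:
            /* 8732 / 8769: only a strictly higher load wins */
            return sgs->avg_load > busiest->avg_load;
        case group_has_spare:
            /* 8781-8784: fewer idle CPUs wins; on a tie, strictly
             * more running tasks wins */
            if (sgs->idle_cpus > busiest->idle_cpus)
                return false;
            return sgs->idle_cpus < busiest->idle_cpus ||
                   sgs->sum_nr_running > busiest->sum_nr_running;
        }
        return false;
    }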
9190 sds->busiest = sg; in update_sd_lb_stats()
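Line 9190 is the one place the pointer is actually set: update_sd_lb_stats() walks the domain's circular list of groups and records the winner of each update_sd_pick_busiest() round. A sketch of that loop, reusing pick_busiest() from above; the stats computation is stubbed, the single-member sched_group is an illustrative stand-in, and the local-group, capacity and prefer_sibling handling of the real function are elided.

    struct sched_group { struct sched_group *next; };  /* circular list */

    /* Stub: the kernel's update_sg_lb_stats() fills this from the rqs. */
    static void compute_group_stats(struct sched_group *sg,
                                    struct sg_lb_stats *sgs);

    static void scan_groups(struct sd_lb_stats *sds, struct sched_group *first)
    {
        struct sched_group *sg = first;

        do {
            struct sg_lb_stats tmp;

            compute_group_stats(sg, &tmp);
            if (pick_busiest(&tmp, &sds->busiest_stat)) {
                sds->busiest = sg;             /* line 9190 */
                sds->busiest_stat = tmp;
            }
            sg = sg->next;
        } while (sg != first);
    }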
9252 struct sg_lb_stats *local, *busiest; in calculate_imbalance() local
9255 busiest = &sds->busiest_stat; in calculate_imbalance()
9257 if (busiest->group_type == group_misfit_task) { in calculate_imbalance()
9264 if (busiest->group_type == group_asym_packing) { in calculate_imbalance()
9270 env->imbalance = busiest->sum_h_nr_running; in calculate_imbalance()
9274 if (busiest->group_type == group_imbalanced) { in calculate_imbalance()
9291 if ((busiest->group_type > group_fully_busy) && in calculate_imbalance()
9320 if (busiest->group_weight == 1 || sds->prefer_sibling) { in calculate_imbalance()
9321 unsigned int nr_diff = busiest->sum_nr_running; in calculate_imbalance()
9337 busiest->idle_cpus) >> 1); in calculate_imbalance()
9343 busiest->sum_nr_running, busiest->group_weight); in calculate_imbalance()
9368 if (local->avg_load >= busiest->avg_load) { in calculate_imbalance()
9384 (busiest->avg_load - sds->avg_load) * busiest->group_capacity, in calculate_imbalance()
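Lines 9252-9384 show calculate_imbalance() choosing how much to move, and in what currency: whole tasks when the busiest group merely has more work than the local one, load when it is genuinely overloaded. The special cases visible above (misfit at 9257, asym packing at 9264-9270, group_imbalanced at 9274) are elided in this sketch of the two common paths; the prefer_sibling condition at 9320, lsub_positive() and the NUMA adjustment at 9343 are folded into plain clamping, and one extra guard is added for the sketch's unsigned arithmetic.

    #define SCHED_CAPACITY_SCALE 1024UL

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    static unsigned long calc_imbalance(const struct sg_lb_stats *local,
                                        const struct sg_lb_stats *busiest,
                                        unsigned long domain_avg_load)
    {
        if (busiest->group_type != group_overloaded) {
            if (busiest->group_weight == 1) {
                /* 9320-9321: spread tasks; halve the count difference
                 * so the two groups meet in the middle */
                long diff = (long)busiest->sum_nr_running
                          - (long)local->sum_nr_running;
                return diff > 0 ? diff >> 1 : 0;
            }
            /* 9337: otherwise just even out the idle-CPU counts */
            {
                long diff = (long)local->idle_cpus
                          - (long)busiest->idle_cpus;
                return diff > 0 ? diff >> 1 : 0;
            }
        }

        /* 9368 plus neighbouring guards: pulling load only makes sense
         * downhill, and only if busiest sits above the domain average
         * (the last clause is a guard added for this sketch) */
        if (local->avg_load >= busiest->avg_load ||
            local->avg_load >= domain_avg_load ||
            busiest->avg_load <= domain_avg_load)
            return 0;

        /* 9384: bring busiest down toward the domain average, capped so
         * the local group does not overshoot that same average */
        return min_ul((busiest->avg_load - domain_avg_load)
                          * busiest->group_capacity,
                      (domain_avg_load - local->avg_load)
                          * local->group_capacity) / SCHED_CAPACITY_SCALE;
    }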
9424 struct sg_lb_stats *local, *busiest; in find_busiest_group() local
9443 busiest = &sds.busiest_stat; in find_busiest_group()
9446 if (!sds.busiest) in find_busiest_group()
9450 if (busiest->group_type == group_misfit_task) in find_busiest_group()
9454 if (busiest->group_type == group_asym_packing) in find_busiest_group()
9462 if (busiest->group_type == group_imbalanced) in find_busiest_group()
9469 if (local->group_type > busiest->group_type) in find_busiest_group()
9481 if (local->avg_load >= busiest->avg_load) in find_busiest_group()
9499 if (100 * busiest->avg_load <= in find_busiest_group()
9506 busiest->sum_nr_running > local->sum_nr_running + 1) in find_busiest_group()
9509 if (busiest->group_type != group_overloaded) { in find_busiest_group()
9518 if (busiest->group_weight > 1 && in find_busiest_group()
9519 local->idle_cpus <= (busiest->idle_cpus + 1)) in find_busiest_group()
9531 if (busiest->sum_h_nr_running == 1) in find_busiest_group()
9541 return env->imbalance ? sds.busiest : NULL; in find_busiest_group()
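Lines 9424-9541 show find_busiest_group() turning those statistics into a yes/no decision. The force-balance cases (misfit at 9450, asym packing at 9454, group_imbalanced at 9462) are elided here; what remains is the funnel of "balanced, do nothing" bail-outs, sketched with the helpers above. imbalance_pct is the domain's tunable margin (e.g. 117 for a 17% hysteresis).

    /* Returns the busiest group if there is useful work, NULL if the
     * domain should be considered balanced (out_balanced in the kernel). */
    static struct sched_group *find_group(struct sd_lb_stats *sds,
                                          unsigned int imbalance_pct,
                                          unsigned long *imbalance)
    {
        const struct sg_lb_stats *local = &sds->local_stat;
        const struct sg_lb_stats *busiest = &sds->busiest_stat;

        if (!sds->busiest)                        /* 9446: nothing found */
            return NULL;

        if (local->group_type > busiest->group_type)
            return NULL;                          /* 9469: we are worse off */

        if (busiest->group_type == group_overloaded) {
            /* 9481/9499: demand a real margin, not mere inequality,
             * to avoid bouncing load between near-equal groups */
            if (local->avg_load >= busiest->avg_load)
                return NULL;
            if (100UL * busiest->avg_load <=
                (unsigned long)imbalance_pct * local->avg_load)
                return NULL;
        } else {
            /* 9518-9531: with spare capacity around, idle-CPU counts and
             * single-task groups decide instead of load figures */
            if (busiest->group_weight > 1 &&
                local->idle_cpus <= busiest->idle_cpus + 1)
                return NULL;
            if (busiest->sum_h_nr_running == 1)
                return NULL;
        }

        *imbalance = calc_imbalance(local, busiest, sds->avg_load);
        return *imbalance ? sds->busiest : NULL;  /* 9541 */
    }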
9554 struct rq *busiest = NULL, *rq; in find_busiest_queue() local
9634 busiest = rq; in find_busiest_queue()
9651 busiest = rq; in find_busiest_queue()
9658 busiest = rq; in find_busiest_queue()
9669 busiest = rq; in find_busiest_queue()
9677 return busiest; in find_busiest_queue()
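Lines 9554-9677 move from groups to runqueues: find_busiest_queue() scans the CPUs of the chosen group, and the four 'busiest = rq' assignments (9634, 9651, 9658, 9669) correspond to the different migration currencies (load, utilization, plain task count, misfit load). A standalone sketch of just the load flavour; the real function compares load scaled by per-CPU capacity, which is flattened into a single number here, and rq_sketch is an illustrative stand-in for the kernel's struct rq.

    struct rq_sketch {
        unsigned long load;
        unsigned int  nr_running;
    };

    static struct rq_sketch *pick_queue(struct rq_sketch *rqs, int nr_cpus)
    {
        struct rq_sketch *busiest = NULL;
        unsigned long max_load = 0;

        for (int i = 0; i < nr_cpus; i++) {
            /* queues with nothing movable are skipped in the kernel too */
            if (rqs[i].nr_running < 1)
                continue;
            if (rqs[i].load > max_load) {
                max_load = rqs[i].load;
                busiest = &rqs[i];    /* mirrors 'busiest = rq' at 9634 */
            }
        }
        return busiest;
    }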
9789 struct rq *busiest; in load_balance() local
9821 busiest = find_busiest_queue(&env, group); in load_balance()
9822 if (!busiest) { in load_balance()
9827 BUG_ON(busiest == env.dst_rq); in load_balance()
9831 env.src_cpu = busiest->cpu; in load_balance()
9832 env.src_rq = busiest; in load_balance()
9837 if (busiest->nr_running > 1) { in load_balance()
9844 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in load_balance()
9847 rq_lock_irqsave(busiest, &rf); in load_balance()
9848 update_rq_clock(busiest); in load_balance()
9864 rq_unlock(busiest, &rf); in load_balance()
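Lines 9789-9864 are the pull phase of load_balance() itself: only bother when the busiest queue has more than one task (a lone running task cannot be pulled while it runs), cap the batch at sysctl_sched_nr_migrate, and do all detaching under the busiest runqueue's lock with a freshly updated clock. A condensed rendering of that flow; the retry loops, LBF_* flags and error paths of the real function are elided.

    if (busiest->nr_running > 1) {
        /* 9844: never migrate more than nr_migrate tasks in one go */
        env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);

        rq_lock_irqsave(busiest, &rf);         /* 9847 */
        update_rq_clock(busiest);              /* 9848: stats read below
                                                  need a current clock */
        cur_ld_moved = detach_tasks(&env);     /* pull up to loop_max */
        rq_unlock(busiest, &rf);               /* 9864: drop the remote
                                                  lock before attaching */

        if (cur_ld_moved)
            attach_tasks(&env);                /* enqueue on the local rq */
    }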
9927 __cpumask_clear_cpu(cpu_of(busiest), cpus); in load_balance()
9959 raw_spin_rq_lock_irqsave(busiest, flags); in load_balance()
9966 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { in load_balance()
9967 raw_spin_rq_unlock_irqrestore(busiest, flags); in load_balance()
9979 if (!busiest->active_balance) { in load_balance()
9980 busiest->active_balance = 1; in load_balance()
9981 busiest->push_cpu = this_cpu; in load_balance()
9984 raw_spin_rq_unlock_irqrestore(busiest, flags); in load_balance()
9987 stop_one_cpu_nowait(cpu_of(busiest), in load_balance()
9988 active_load_balance_cpu_stop, busiest, in load_balance()
9989 &busiest->active_balance_work); in load_balance()
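Lines 9959-9989 are the fallback when nothing could be pulled, typically because every candidate task was pinned (9927 clears such CPUs from the mask) or the only task is the one currently running: mark the busiest queue for active balancing and kick its stopper thread, which will push the running task toward this_cpu. Condensed from the same flow; failure accounting and the all-pinned bookkeeping are elided.

    raw_spin_rq_lock_irqsave(busiest, flags);

    /* 9966: pointless unless the running task may run here at all */
    if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
        raw_spin_rq_unlock_irqrestore(busiest, flags);
        goto out_one_pinned;
    }

    if (!busiest->active_balance) {
        busiest->active_balance = 1;   /* 9980: one kick in flight, max */
        busiest->push_cpu = this_cpu;  /* 9981: where the task should go */
        active_balance = 1;
    }
    raw_spin_rq_unlock_irqrestore(busiest, flags);

    /* 9987-9989: run active_load_balance_cpu_stop() on the busiest CPU
     * via the stopper thread, which preempts whatever is running there */
    if (active_balance)
        stop_one_cpu_nowait(cpu_of(busiest),
                            active_load_balance_cpu_stop, busiest,
                            &busiest->active_balance_work);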