Lines matching refs:busiest — cross-reference hits from kernel/sched/fair.c (the CFS load balancer); each hit shows its line number in fair.c and the enclosing function.

7700 	struct sched_group *busiest;	/* Busiest group in this sd */  member
7720 .busiest = NULL, in init_sd_lb_stats()
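
The two hits above are the bookkeeping: sd_lb_stats caches a pointer to the busiest group of the sched_domain (line 7700), and init_sd_lb_stats() resets it to NULL at the start of each balance pass (line 7720). A minimal user-space sketch of that shape, with stand-in names (sd_stats, sg_stats, and sched_group_stub are not the kernel's types):

    struct sched_group_stub;

    struct sg_stats {
        unsigned long avg_load;
        int group_type;
    };

    struct sd_stats {
        struct sched_group_stub *busiest;   /* busiest group seen so far */
        struct sg_stats busiest_stat;       /* its cached statistics */
    };

    static void init_sd_stats(struct sd_stats *sds)
    {
        /* Every balance pass starts with no busiest candidate. */
        sds->busiest = NULL;
        sds->busiest_stat = (struct sg_stats){ 0 };
    }
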
8088 struct sg_lb_stats *busiest = &sds->busiest_stat; in update_sd_pick_busiest() local
8101 if (sgs->group_type > busiest->group_type) in update_sd_pick_busiest()
8104 if (sgs->group_type < busiest->group_type) in update_sd_pick_busiest()
8107 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
8127 sgs->group_misfit_task_load < busiest->group_misfit_task_load) in update_sd_pick_busiest()
8145 if (!sds->busiest) in update_sd_pick_busiest()
8149 if (sched_asym_prefer(sds->busiest->asym_prefer_cpu, in update_sd_pick_busiest()
8243 sds->busiest = sg; in update_sd_lb_stats()
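
update_sd_pick_busiest() (lines 8088-8149) decides whether a freshly scanned group should replace the current candidate: group_type is compared first, and only on a tie does avg_load break it; misfit load (line 8127) and asym-packing CPU priority (lines 8145-8149) act as further tie-breakers before update_sd_lb_stats() records the winner (line 8243). A simplified, self-contained sketch of the first two tests; the enum mirrors fair.c's ordering, but all names here are stand-ins:

    enum grp_type { grp_other, grp_misfit, grp_imbalanced, grp_overloaded };

    struct grp_stats {
        enum grp_type group_type;
        unsigned long avg_load;
    };

    /* Return nonzero if sgs should replace the current busiest. */
    static int pick_busier(const struct grp_stats *sgs,
                           const struct grp_stats *busiest)
    {
        /* A worse classification always wins ... */
        if (sgs->group_type > busiest->group_type)
            return 1;
        if (sgs->group_type < busiest->group_type)
            return 0;
        /* ... and only equal classifications compare average load. */
        return sgs->avg_load > busiest->avg_load;
    }

Ordering group_type first means a small misfit, imbalanced, or overloaded group beats a merely loaded one, which is why the avg_load test at line 8107 only runs on equal types.
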
8318 if (!sds->busiest) in check_asym_packing()
8321 busiest_cpu = sds->busiest->asym_prefer_cpu; in check_asym_packing()
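
check_asym_packing() (lines 8318-8321) only acts once a busiest group exists, then compares the destination CPU against the group's asym_prefer_cpu. A rough sketch under stated assumptions: asym_prefer_stub approximates sched_asym_prefer(), which really consults an arch-defined CPU priority rather than the CPU number used here.

    struct group_stub { int asym_prefer_cpu; };
    struct asym_stats { struct group_stub *busiest; };

    /* Stand-in for sched_asym_prefer(): lower CPU number wins here. */
    static int asym_prefer_stub(int a, int b)
    {
        return a < b;
    }

    static int check_asym(struct asym_stats *sds, int dst_cpu)
    {
        if (!sds->busiest)
            return 0;   /* no busiest group was picked this pass */

        return asym_prefer_stub(dst_cpu, sds->busiest->asym_prefer_cpu);
    }
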
8343 struct sg_lb_stats *local, *busiest; in fix_small_imbalance() local
8346 busiest = &sds->busiest_stat; in fix_small_imbalance()
8350 else if (busiest->load_per_task > local->load_per_task) in fix_small_imbalance()
8354 (busiest->load_per_task * SCHED_CAPACITY_SCALE) / in fix_small_imbalance()
8355 busiest->group_capacity; in fix_small_imbalance()
8357 if (busiest->avg_load + scaled_busy_load_per_task >= in fix_small_imbalance()
8359 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
8369 capa_now += busiest->group_capacity * in fix_small_imbalance()
8370 min(busiest->load_per_task, busiest->avg_load); in fix_small_imbalance()
8376 if (busiest->avg_load > scaled_busy_load_per_task) { in fix_small_imbalance()
8377 capa_move += busiest->group_capacity * in fix_small_imbalance()
8378 min(busiest->load_per_task, in fix_small_imbalance()
8379 busiest->avg_load - scaled_busy_load_per_task); in fix_small_imbalance()
8383 if (busiest->avg_load * busiest->group_capacity < in fix_small_imbalance()
8384 busiest->load_per_task * SCHED_CAPACITY_SCALE) { in fix_small_imbalance()
8385 tmp = (busiest->avg_load * busiest->group_capacity) / in fix_small_imbalance()
8388 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / in fix_small_imbalance()
8397 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
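
fix_small_imbalance() (lines 8343-8397) handles the case where the computed imbalance is smaller than one task's load: it rescales busiest->load_per_task into capacity units (lines 8354-8355), takes an early exit that migrates exactly one task's worth of load when the groups would stay lopsided anyway (lines 8357-8359), and otherwise estimates capacity before and after a move via capa_now/capa_move (lines 8369-8388). A hedged sketch of the scaling step and the early exit; the imbn factor follows fair.c, where it defaults to 2 and drops to 1 on the line 8350 branch:

    #define CAP_SCALE 1024UL   /* stands in for SCHED_CAPACITY_SCALE */

    /* Express one task's load in capacity units of the busiest group,
     * as lines 8354-8355 do. */
    static unsigned long scaled_busy_load(unsigned long load_per_task,
                                          unsigned long group_capacity)
    {
        return load_per_task * CAP_SCALE / group_capacity;
    }

    /* Early exit mirroring lines 8357-8359: when a move still leaves
     * the busiest side ahead, settle for one task's worth of load. */
    static unsigned long small_imbalance(unsigned long busiest_avg,
                                         unsigned long local_avg,
                                         unsigned long load_per_task,
                                         unsigned long busiest_cap,
                                         unsigned int imbn)
    {
        unsigned long scaled = scaled_busy_load(load_per_task, busiest_cap);

        if (busiest_avg + scaled >= local_avg + scaled * imbn)
            return load_per_task;
        return 0;   /* caller falls back to the capa_now/capa_move path */
    }
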
8409 struct sg_lb_stats *local, *busiest; in calculate_imbalance() local
8412 busiest = &sds->busiest_stat; in calculate_imbalance()
8414 if (busiest->group_type == group_imbalanced) { in calculate_imbalance()
8419 busiest->load_per_task = in calculate_imbalance()
8420 min(busiest->load_per_task, sds->avg_load); in calculate_imbalance()
8429 if (busiest->group_type != group_misfit_task && in calculate_imbalance()
8430 (busiest->avg_load <= sds->avg_load || in calculate_imbalance()
8439 if (busiest->group_type == group_overloaded && in calculate_imbalance()
8441 load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE; in calculate_imbalance()
8442 if (load_above_capacity > busiest->group_capacity) { in calculate_imbalance()
8443 load_above_capacity -= busiest->group_capacity; in calculate_imbalance()
8445 load_above_capacity /= busiest->group_capacity; in calculate_imbalance()
8457 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity); in calculate_imbalance()
8461 max_pull * busiest->group_capacity, in calculate_imbalance()
8466 if (busiest->group_type == group_misfit_task) { in calculate_imbalance()
8468 busiest->group_misfit_task_load); in calculate_imbalance()
8477 if (env->imbalance < busiest->load_per_task) in calculate_imbalance()
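
calculate_imbalance() (lines 8409-8477) first pins load_per_task to the domain average for imbalanced groups (lines 8419-8420), computes how far an overloaded group runs above its capacity (lines 8441-8445), then applies the core formula at lines 8457-8461: pull the minimum of the busiest group's excess over the domain average and its load above capacity, further capped by the local group's headroom. A self-contained sketch of that formula; local_headroom stands in for (sds->avg_load - local->avg_load) * local->group_capacity:

    #define CAP_SCALE 1024UL   /* stands in for SCHED_CAPACITY_SCALE */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static unsigned long calc_imbalance(unsigned long busiest_avg,
                                        unsigned long domain_avg,
                                        unsigned long above_capacity,
                                        unsigned long busiest_cap,
                                        unsigned long local_headroom)
    {
        /* Never pull more than what brings the busiest group down to
         * the domain average, nor more than its load above capacity. */
        unsigned long max_pull = MIN(busiest_avg - domain_avg,
                                     above_capacity);

        return MIN(max_pull * busiest_cap, local_headroom) / CAP_SCALE;
    }
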
8496 struct sg_lb_stats *local, *busiest; in find_busiest_group() local
8515 busiest = &sds.busiest_stat; in find_busiest_group()
8519 return sds.busiest; in find_busiest_group()
8522 if (!sds.busiest || busiest->sum_nr_running == 0) in find_busiest_group()
8534 if (busiest->group_type == group_imbalanced) in find_busiest_group()
8542 busiest->group_no_capacity) in find_busiest_group()
8546 if (busiest->group_type == group_misfit_task) in find_busiest_group()
8553 if (local->avg_load >= busiest->avg_load) in find_busiest_group()
8571 if ((busiest->group_type != group_overloaded) && in find_busiest_group()
8572 (local->idle_cpus <= (busiest->idle_cpus + 1))) in find_busiest_group()
8579 if (100 * busiest->avg_load <= in find_busiest_group()
8586 env->src_grp_type = busiest->group_type; in find_busiest_group()
8588 return env->imbalance ? sds.busiest : NULL; in find_busiest_group()
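
find_busiest_group() (lines 8496-8588) is the decision funnel: bail out when no candidate was found or it has no running tasks (line 8522), force a balance for group_imbalanced (line 8534), and otherwise fall through a series of load comparisons (lines 8553-8579) before returning the group only if a nonzero imbalance was computed (line 8588). A stripped-down sketch of the entry and exit gates, with the middle checks elided and the stats flattened into one stand-in struct for brevity:

    struct group_stub;

    struct fbg_stats {
        struct group_stub *busiest;     /* candidate group, may be NULL */
        unsigned int sum_nr_running;    /* tasks in the candidate */
    };

    static struct group_stub *fbg_decide(struct fbg_stats *sds,
                                         unsigned long imbalance)
    {
        /* No candidate, or one with no runnable tasks: nothing to pull. */
        if (!sds->busiest || sds->sum_nr_running == 0)
            return NULL;

        /* ... group-type checks and load comparisons elided ... */

        return imbalance ? sds->busiest : NULL;
    }
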
8601 struct rq *busiest = NULL, *rq; in find_busiest_queue() local
8641 busiest = rq; in find_busiest_queue()
8685 busiest = rq; in find_busiest_queue()
8689 return busiest; in find_busiest_queue()
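
find_busiest_queue() (lines 8601-8689) then narrows the chosen group to a single runqueue. A sketch of the scan, assuming a stand-in rq type; note that fair.c avoids the division below by cross-multiplying loads and capacities when comparing two runqueues:

    #define CAP_SCALE 1024UL   /* stands in for SCHED_CAPACITY_SCALE */

    struct rq_stub {
        unsigned long load;       /* weighted CPU load */
        unsigned long capacity;   /* CPU capacity */
    };

    static struct rq_stub *scan_queues(struct rq_stub *rqs, int nr)
    {
        struct rq_stub *busiest = NULL;
        unsigned long best = 0;
        int i;

        for (i = 0; i < nr; i++) {
            /* Keep the queue with the highest capacity-relative load. */
            unsigned long score = rqs[i].load * CAP_SCALE
                                  / rqs[i].capacity;

            if (score > best) {
                best = score;
                busiest = &rqs[i];
            }
        }
        return busiest;
    }
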
8798 struct rq *busiest; in load_balance() local
8830 busiest = find_busiest_queue(&env, group); in load_balance()
8831 if (!busiest) { in load_balance()
8836 BUG_ON(busiest == env.dst_rq); in load_balance()
8840 env.src_cpu = busiest->cpu; in load_balance()
8841 env.src_rq = busiest; in load_balance()
8844 if (busiest->nr_running > 1) { in load_balance()
8852 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in load_balance()
8855 rq_lock_irqsave(busiest, &rf); in load_balance()
8856 update_rq_clock(busiest); in load_balance()
8872 rq_unlock(busiest, &rf); in load_balance()
8935 __cpumask_clear_cpu(cpu_of(busiest), cpus); in load_balance()
8967 raw_spin_lock_irqsave(&busiest->lock, flags); in load_balance()
8974 if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { in load_balance()
8975 raw_spin_unlock_irqrestore(&busiest->lock, in load_balance()
8986 if (!busiest->active_balance) { in load_balance()
8987 busiest->active_balance = 1; in load_balance()
8988 busiest->push_cpu = this_cpu; in load_balance()
8991 raw_spin_unlock_irqrestore(&busiest->lock, flags); in load_balance()
8994 stop_one_cpu_nowait(cpu_of(busiest), in load_balance()
8995 active_load_balance_cpu_stop, busiest, in load_balance()
8996 &busiest->active_balance_work); in load_balance()
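
load_balance() (lines 8798-8996) ties it together: find the queue, take its lock with rq_lock_irqsave() (line 8855), detach tasks, and if nothing could be moved, fall back to active balancing by setting active_balance and push_cpu under busiest->lock (lines 8986-8988), dropping the lock, then kicking the CPU stopper with stop_one_cpu_nowait() (lines 8994-8996). A user-space analogy of that handoff pattern, with a pthread mutex standing in for the runqueue lock; the caller is assumed to have initialized the mutex:

    #include <pthread.h>
    #include <stdbool.h>

    struct rq_sketch {
        pthread_mutex_t lock;
        bool active_balance;
        int push_cpu;
    };

    static bool request_active_balance(struct rq_sketch *busiest,
                                       int this_cpu)
    {
        bool kicked = false;

        pthread_mutex_lock(&busiest->lock);
        if (!busiest->active_balance) {
            /* Record the request while holding the lock; the actual
             * kick happens after the lock is dropped. */
            busiest->active_balance = true;
            busiest->push_cpu = this_cpu;
            kicked = true;
        }
        pthread_mutex_unlock(&busiest->lock);
        return kicked;
    }

Marking the request under the lock but issuing the kick afterwards mirrors load_balance(), which drops busiest->lock before calling into the stopper machinery.
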