Searched refs:busiest (Results 1 – 3 of 3) sorted by relevance
7700  struct sched_group *busiest; /* Busiest group in this sd */   member
7720  .busiest = NULL,   in init_sd_lb_stats()
8088  struct sg_lb_stats *busiest = &sds->busiest_stat;   in update_sd_pick_busiest() local
8101  if (sgs->group_type > busiest->group_type)   in update_sd_pick_busiest()
8104  if (sgs->group_type < busiest->group_type)   in update_sd_pick_busiest()
8107  if (sgs->avg_load <= busiest->avg_load)   in update_sd_pick_busiest()
8127  sgs->group_misfit_task_load < busiest->group_misfit_task_load)   in update_sd_pick_busiest()
8145  if (!sds->busiest)   in update_sd_pick_busiest()
8149  if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,   in update_sd_pick_busiest()
8243  sds->busiest = sg;   in update_sd_lb_stats()
[all …]
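The update_sd_pick_busiest() hits above boil down to a two-level comparison: the candidate group's group_type is compared first, and avg_load is only used as the tiebreak when the classification is the same. A minimal, standalone sketch of that ordering, assuming cut-down struct and enum definitions that are not the kernel's, could look like this:

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed, simplified stand-ins for the kernel's group classification. */
    enum group_type { group_other = 0, group_misfit_task, group_imbalanced, group_overloaded };

    struct sg_stats {
        enum group_type group_type;
        unsigned long avg_load;
    };

    /* Should the candidate (sgs) replace the current busiest pick? */
    static bool candidate_is_busier(const struct sg_stats *sgs, const struct sg_stats *busiest)
    {
        /* A more severe classification always wins. */
        if (sgs->group_type > busiest->group_type)
            return true;
        if (sgs->group_type < busiest->group_type)
            return false;

        /* Same classification: fall back to average load, higher wins. */
        if (sgs->avg_load <= busiest->avg_load)
            return false;

        return true;
    }

    int main(void)
    {
        struct sg_stats current_pick = { group_other, 900 };
        struct sg_stats candidate    = { group_overloaded, 400 };

        printf("replace current pick: %s\n",
               candidate_is_busier(&candidate, &current_pick) ? "yes" : "no");
        return 0;
    }

Note how an overloaded group beats a merely busier one: load only matters between groups of the same type.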
2001  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)   in _double_lock_balance() argument
2003  __acquires(busiest->lock)   in _double_lock_balance()
2007  double_rq_lock(this_rq, busiest);   in _double_lock_balance()
2020  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)   in _double_lock_balance() argument
2022  __acquires(busiest->lock)   in _double_lock_balance()
2027  if (unlikely(!raw_spin_trylock(&busiest->lock))) {   in _double_lock_balance()
2028  if (busiest < this_rq) {   in _double_lock_balance()
2030  raw_spin_lock(&busiest->lock);   in _double_lock_balance()
2035  raw_spin_lock_nested(&busiest->lock,   in _double_lock_balance()
2046  static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)   in double_lock_balance() argument
[all …]
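The _double_lock_balance() hits show the deadlock-avoidance pattern for taking a second runqueue lock: try it opportunistically, and if that fails, drop our own lock and re-acquire both in a fixed (address) order, lowest address first, so two CPUs balancing toward each other can never hold the locks in opposite orders. Below is a userspace analogue of the same technique, using pthread mutexes as stand-ins for the kernel's raw spinlocks; the structure mirrors the hits, but it is not the kernel code, and the lockdep nesting annotation (raw_spin_lock_nested with SINGLE_DEPTH_NESTING) has no equivalent here.

    #include <pthread.h>
    #include <stdio.h>

    struct rq {
        pthread_mutex_t lock;
    };

    /* Returns 1 if this_rq->lock had to be dropped and re-taken, 0 otherwise. */
    static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
    {
        int dropped = 0;

        /* Fast path: the other runqueue's lock happens to be free. */
        if (pthread_mutex_trylock(&busiest->lock) != 0) {
            if (busiest < this_rq) {
                /* Wrong order: release ours, then lock lowest address first. */
                pthread_mutex_unlock(&this_rq->lock);
                pthread_mutex_lock(&busiest->lock);
                pthread_mutex_lock(&this_rq->lock);
                dropped = 1;
            } else {
                /* Already in address order: just wait for the second lock. */
                pthread_mutex_lock(&busiest->lock);
            }
        }
        return dropped;
    }

    int main(void)
    {
        struct rq a = { PTHREAD_MUTEX_INITIALIZER };
        struct rq b = { PTHREAD_MUTEX_INITIALIZER };

        pthread_mutex_lock(&a.lock);
        printf("dropped this_rq->lock: %d\n", double_lock_balance(&a, &b));
        pthread_mutex_unlock(&b.lock);
        pthread_mutex_unlock(&a.lock);
        return 0;
    }

The nonzero return value matters to callers: if our own lock was dropped, anything derived from the runqueue's state before the call may be stale and has to be revalidated.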
46  Initially, load_balance() finds the busiest group in the current sched domain.
47  If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
49  CPU's runqueue and the newly found busiest one and starts moving tasks from it
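Those documentation lines outline the load_balance() sequence: pick the busiest group in the sched domain, pick the busiest runqueue inside that group, lock both runqueues, and pull tasks toward the current CPU. The following compileable outline walks the same steps with hypothetical, heavily simplified helpers (the real fair.c functions take far more context and use detach/attach of actual tasks), purely to make the sequence concrete:

    #include <stdio.h>
    #include <stddef.h>

    struct rq { int cpu; int nr_running; };
    struct sched_group { struct rq *rqs; int nr; };

    /* Hypothetical helper mirroring the role named in the documentation:
     * pick the runqueue with the most runnable tasks inside the group. */
    static struct rq *find_busiest_queue(struct sched_group *sg)
    {
        struct rq *busiest = NULL;
        for (int i = 0; i < sg->nr; i++)
            if (!busiest || sg->rqs[i].nr_running > busiest->nr_running)
                busiest = &sg->rqs[i];
        return busiest;
    }

    static void load_balance_outline(struct rq *this_rq, struct sched_group *busiest_group)
    {
        /* Step 1 is assumed done by the caller: the busiest group is known. */
        struct rq *busiest = find_busiest_queue(busiest_group);   /* step 2 */

        if (!busiest || busiest == this_rq)
            return;

        /* Step 3: lock both runqueues (see double_lock_balance above),
         * then pull tasks from the busiest runqueue toward this CPU. */
        while (busiest->nr_running > this_rq->nr_running + 1) {
            busiest->nr_running--;
            this_rq->nr_running++;
        }
    }

    int main(void)
    {
        struct rq rqs[2] = { { 0, 1 }, { 1, 7 } };
        struct sched_group group = { rqs, 2 };

        load_balance_outline(&rqs[0], &group);
        printf("cpu0=%d cpu1=%d\n", rqs[0].nr_running, rqs[1].nr_running);
        return 0;
    }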