Searched refs:busiest (Results 1 – 3 of 3) sorted by relevance
/Linux-v5.15/kernel/sched/fair.c

    8334  struct sched_group *busiest; /* Busiest group in this sd */  member
    8355  .busiest = NULL,  in init_sd_lb_stats()
    8701  struct sg_lb_stats *busiest = &sds->busiest_stat;  in update_sd_pick_busiest() local
    8718  if (sgs->group_type > busiest->group_type)  in update_sd_pick_busiest()
    8721  if (sgs->group_type < busiest->group_type)  in update_sd_pick_busiest()
    8732  if (sgs->avg_load <= busiest->avg_load)  in update_sd_pick_busiest()
    8745  if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu))  in update_sd_pick_busiest()
    8754  if (sgs->group_misfit_task_load < busiest->group_misfit_task_load)  in update_sd_pick_busiest()
    8769  if (sgs->avg_load <= busiest->avg_load)  in update_sd_pick_busiest()
    8781  if (sgs->idle_cpus > busiest->idle_cpus)  in update_sd_pick_busiest()
    [all …]
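Most of these fair.c hits fall inside update_sd_pick_busiest(), which decides whether a freshly scanned group should replace the current busiest candidate: a more severe group_type always wins (hits 8718/8721), and groups of equal type fall through to tie-breaks on avg_load or idle_cpus (8732, 8769, 8781). Below is a minimal userspace sketch of that cascade, with a reduced group_type enum and only the avg_load/idle_cpus tie-breaks; the real v5.15 function also handles the asym-packing (8745) and misfit (8754) cases visible above.

    #include <stdbool.h>

    /* Reduced stand-ins for the kernel's group classification and stats;
     * the real v5.15 enum has more states (misfit, asym, imbalanced) and
     * sg_lb_stats carries many more fields. */
    enum group_type { group_has_spare, group_fully_busy, group_overloaded };

    struct sg_lb_stats {
        enum group_type group_type;
        unsigned long avg_load;   /* group load scaled by capacity */
        unsigned int idle_cpus;
    };

    /* Should @sgs replace the current busiest candidate? Mirrors the
     * cascade in the hits above: type severity first, then tie-breaks. */
    static bool pick_busier(const struct sg_lb_stats *sgs,
                            const struct sg_lb_stats *busiest)
    {
        if (sgs->group_type > busiest->group_type)
            return true;
        if (sgs->group_type < busiest->group_type)
            return false;

        if (sgs->group_type == group_has_spare)
            /* Groups with spare capacity: fewer idle CPUs is busier. */
            return sgs->idle_cpus < busiest->idle_cpus;

        /* Fully busy or overloaded: higher average load is busier. */
        return sgs->avg_load > busiest->avg_load;
    }

Ties keep the incumbent candidate, matching the kernel's <= comparisons at hits 8732 and 8769.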
/Linux-v5.15/kernel/sched/sched.h

    2532  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)  in _double_lock_balance() argument
    2534  __acquires(busiest->lock)  in _double_lock_balance()
    2538  double_rq_lock(this_rq, busiest);  in _double_lock_balance()
    2551  static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)  in _double_lock_balance() argument
    2553  __acquires(busiest->lock)  in _double_lock_balance()
    2556  if (__rq_lockp(this_rq) == __rq_lockp(busiest))  in _double_lock_balance()
    2559  if (likely(raw_spin_rq_trylock(busiest)))  in _double_lock_balance()
    2562  if (rq_order_less(this_rq, busiest)) {  in _double_lock_balance()
    2563  raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);  in _double_lock_balance()
    2568  double_rq_lock(this_rq, busiest);  in _double_lock_balance()
    [all …]
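The hits at 2532 and 2551 are the two build variants of _double_lock_balance(), both entered with this_rq->lock already held. The unoptimized variant drops that lock and retakes both in a fixed order via double_rq_lock(); the preemptible variant first handles the case where both runqueues share one lock (core scheduling, hit 2556), then trylocks the busiest runqueue (2559) and only falls back to the ordered slow path on contention. The point of rq_order_less() is deadlock avoidance: two CPUs balancing toward each other would otherwise each hold one lock while waiting for the other. A userspace sketch of that ordering discipline, assuming pthread mutexes and address order as a stand-in for rq_order_less() (the kernel orders by CPU number, adjusted for core scheduling):

    #include <pthread.h>
    #include <stdint.h>

    struct rq {
        pthread_mutex_t lock;
        /* ... per-CPU runqueue state ... */
    };

    /* Stand-in for the kernel's rq_order_less(): any fixed total
     * order over runqueues works; we use object addresses. */
    static int rq_order_less(struct rq *a, struct rq *b)
    {
        return (uintptr_t)a < (uintptr_t)b;
    }

    /* Called with this_rq->lock held; returns 1 if that lock was
     * dropped and retaken, so the caller must revalidate state. */
    static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
    {
        if (pthread_mutex_trylock(&busiest->lock) == 0)
            return 0;                 /* fast path: no contention */

        if (rq_order_less(this_rq, busiest)) {
            /* We hold the lower-ordered lock: safe to block. */
            pthread_mutex_lock(&busiest->lock);
            return 0;
        }

        /* Wrong order: release ours, then take both in order. */
        pthread_mutex_unlock(&this_rq->lock);
        pthread_mutex_lock(&busiest->lock);
        pthread_mutex_lock(&this_rq->lock);
        return 1;
    }

The return convention mirrors the kernel's: a nonzero result tells the caller that this_rq->lock was released along the way, so anything read under the earlier critical section may be stale.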
/Linux-v5.15/Documentation/scheduler/sched-domains.rst

    48  Initially, load_balance() finds the busiest group in the current sched domain.
    49  If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
    51  CPU's runqueue and the newly found busiest one and starts moving tasks from it
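These documentation lines describe the flow that ties the other two files together: load_balance() picks the busiest group in the domain (the update_sd_pick_busiest() comparisons from fair.c), then the busiest runqueue inside that group, then locks both runqueues (the _double_lock_balance() ordering from sched.h) and migrates tasks. A compile-clean skeleton of that three-step flow; find_busiest_group() and find_busiest_queue() are real fair.c functions, but the types, signatures, and the plain nr_running heuristic below are simplified stand-ins, and locking is elided:

    #include <stddef.h>

    /* Minimal stand-ins; the real definitions live in kernel/sched/sched.h. */
    struct rq { int nr_running; };
    struct sched_group { struct rq **rqs; int nr_cpus; };
    struct sched_domain { struct sched_group **groups; int nr_groups; };

    /* Step 1: busiest group = most runnable tasks in total (the kernel
     * instead runs the update_sd_pick_busiest() cascade shown above). */
    static struct sched_group *find_busiest_group(struct sched_domain *sd)
    {
        struct sched_group *busiest = NULL;
        int max = 0;

        for (int g = 0; g < sd->nr_groups; g++) {
            int sum = 0;

            for (int c = 0; c < sd->groups[g]->nr_cpus; c++)
                sum += sd->groups[g]->rqs[c]->nr_running;
            if (sum > max) {
                max = sum;
                busiest = sd->groups[g];
            }
        }
        return busiest;
    }

    /* Step 2: busiest runqueue within the chosen group. */
    static struct rq *find_busiest_queue(struct sched_group *group)
    {
        struct rq *busiest = NULL;
        int max = 0;

        for (int c = 0; c < group->nr_cpus; c++) {
            if (group->rqs[c]->nr_running > max) {
                max = group->rqs[c]->nr_running;
                busiest = group->rqs[c];
            }
        }
        return busiest;
    }

    static int load_balance(struct rq *this_rq, struct sched_domain *sd)
    {
        struct sched_group *group = find_busiest_group(sd);   /* step 1 */
        struct rq *busiest;
        int moved = 0;

        if (!group)
            return 0;

        busiest = find_busiest_queue(group);                  /* step 2 */
        if (!busiest || busiest == this_rq)
            return 0;

        /* Step 3: in the real kernel both rq locks are held here
         * (see _double_lock_balance() above) while tasks move. */
        while (busiest->nr_running > this_rq->nr_running + 1) {
            busiest->nr_running--;    /* detach_task() */
            this_rq->nr_running++;    /* attach_task() */
            moved++;
        }
        return moved;
    }

In the real kernel the imbalance is computed in units of load or utilization rather than task counts, and detach_task()/attach_task() move actual task_structs between the runqueues.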