Home
last modified time | relevance | path

Searched refs: busiest (Results 1 – 3 of 3) sorted by relevance

/Linux-v4.19/kernel/sched/
Dfair.c7525 struct sched_group *busiest; /* Busiest group in this sd */ member
7545 .busiest = NULL, in init_sd_lb_stats()
7913 struct sg_lb_stats *busiest = &sds->busiest_stat; in update_sd_pick_busiest() local
7915 if (sgs->group_type > busiest->group_type) in update_sd_pick_busiest()
7918 if (sgs->group_type < busiest->group_type) in update_sd_pick_busiest()
7921 if (sgs->avg_load <= busiest->avg_load) in update_sd_pick_busiest()
7952 if (!sds->busiest) in update_sd_pick_busiest()
7956 if (sched_asym_prefer(sds->busiest->asym_prefer_cpu, in update_sd_pick_busiest()
8056 sds->busiest = sg; in update_sd_lb_stats()
8121 if (!sds->busiest) in check_asym_packing()
[all …]
Dsched.h1873 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance() argument
1875 __acquires(busiest->lock) in _double_lock_balance()
1879 double_rq_lock(this_rq, busiest); in _double_lock_balance()
1892 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance() argument
1894 __acquires(busiest->lock) in _double_lock_balance()
1899 if (unlikely(!raw_spin_trylock(&busiest->lock))) { in _double_lock_balance()
1900 if (busiest < this_rq) { in _double_lock_balance()
1902 raw_spin_lock(&busiest->lock); in _double_lock_balance()
1907 raw_spin_lock_nested(&busiest->lock, in _double_lock_balance()
1918 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) in double_lock_balance() argument
[all …]
/Linux-v4.19/Documentation/scheduler/
Dsched-domains.txt42 Initially, load_balance() finds the busiest group in the current sched domain.
43 If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
45 CPU's runqueue and the newly found busiest one and starts moving tasks from it