Lines matching refs:sds (identifier cross-reference for sds in kernel/sched/fair.c; each entry gives the fair.c line number, the source line, the enclosing function, and whether sds is a local or an argument there)
6436 struct sched_domain_shared *sds; in set_idle_cores() local
6438 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); in set_idle_cores()
6439 if (sds) in set_idle_cores()
6440 WRITE_ONCE(sds->has_idle_cores, val); in set_idle_cores()
6445 struct sched_domain_shared *sds; in test_idle_cores() local
6447 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); in test_idle_cores()
6448 if (sds) in test_idle_cores()
6449 return READ_ONCE(sds->has_idle_cores); in test_idle_cores()
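
The two helpers above (fair.c 6436-6449) maintain the per-LLC "has idle cores" hint: set_idle_cores() publishes a flag into the RCU-protected sched_domain_shared of the CPU's last-level-cache domain, and test_idle_cores() reads it back so the wakeup path can skip a full idle-core scan when the hint says none exist. A minimal userspace sketch of the same publish/read pattern, using C11 relaxed atomics as a stand-in for the kernel's WRITE_ONCE()/READ_ONCE() and eliding the RCU lookup (struct llc_shared and both bodies below are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct sched_domain_shared: one hint per LLC domain. */
struct llc_shared {
	atomic_bool has_idle_cores;
};

static struct llc_shared llc;	/* a single LLC for this sketch */

/* Model of set_idle_cores(): publish the hint with a relaxed store,
 * the userspace analogue of WRITE_ONCE(). */
static void set_idle_cores(bool val)
{
	atomic_store_explicit(&llc.has_idle_cores, val, memory_order_relaxed);
}

/* Model of test_idle_cores(): read the hint with a relaxed load, the
 * analogue of READ_ONCE(). A stale answer is fine; the hint only gates
 * whether a full idle-core scan is attempted. */
static bool test_idle_cores(void)
{
	return atomic_load_explicit(&llc.has_idle_cores, memory_order_relaxed);
}

int main(void)
{
	set_idle_cores(true);
	if (test_idle_cores())
		printf("would scan for an idle core\n");
	return 0;
}

Relaxed ordering suffices because the hint is purely advisory: a stale read costs at most one wasted (or skipped) scan, never correctness.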
8615 static inline void init_sd_lb_stats(struct sd_lb_stats *sds) in init_sd_lb_stats() argument
8624 *sds = (struct sd_lb_stats){ in init_sd_lb_stats()
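
init_sd_lb_stats() (8615-8624) resets the per-domain statistics with a single compound-literal assignment: fields named in the initializer get explicit values and every other field is implicitly zeroed. A hedged standalone illustration of that C99 idiom, with struct sd_lb_stats reduced to a few representative fields:

#include <stdio.h>

/* Simplified stand-in for struct sd_lb_stats. */
struct sd_lb_stats {
	void *busiest;			/* busiest group, NULL until found */
	void *local;			/* the local group */
	unsigned long total_load;
	unsigned long total_capacity;
	unsigned long avg_load;
};

/* Compound-literal assignment: named fields take the given values,
 * all remaining fields become zero, as in init_sd_lb_stats(). */
static void init_sd_lb_stats(struct sd_lb_stats *sds)
{
	*sds = (struct sd_lb_stats){
		.busiest = NULL,
		.local = NULL,
	};
}

int main(void)
{
	struct sd_lb_stats sds;

	init_sd_lb_stats(&sds);
	printf("busiest=%p total_load=%lu\n", sds.busiest, sds.total_load);
	return 0;
}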
8898 static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds, in asym_smt_can_pull_tasks() argument
8906 local_is_smt = sds->local->flags & SD_SHARE_CPUCAPACITY; in asym_smt_can_pull_tasks()
8932 int local_busy_cpus = sds->local->group_weight - in asym_smt_can_pull_tasks()
8933 sds->local_stat.idle_cpus; in asym_smt_can_pull_tasks()
8947 if (!sds->local_stat.sum_nr_running) in asym_smt_can_pull_tasks()
8958 sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs, in sched_asym() argument
8962 if ((sds->local->flags & SD_SHARE_CPUCAPACITY) || in sched_asym()
8964 return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group); in sched_asym()
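
sched_asym() (8958-8964) hands the decision to asym_smt_can_pull_tasks() whenever the local or candidate group has SMT siblings (SD_SHARE_CPUCAPACITY). The excerpted lines show the inputs weighed there: busy-CPU counts derived as group_weight - idle_cpus, whether the local group runs anything at all, and CPU priority via sched_asym_prefer(). Below is a simplified standalone model of that decision, assuming a plain integer priority in place of asym_prefer_cpu and condensing the kernel's cases; it sketches the logic visible in the excerpts, not the full function:

#include <stdbool.h>
#include <stdio.h>

/* Simplified per-group stats, mirroring the fields the excerpts use. */
struct grp {
	int weight;		/* group_weight: CPUs in the group */
	int idle_cpus;
	int sum_nr_running;	/* runnable tasks in the group */
	bool is_smt;		/* SD_SHARE_CPUCAPACITY set */
	int prio;		/* stand-in for asym_prefer_cpu priority */
};

/* Stub for sched_asym_prefer(): higher number wins. */
static bool asym_prefer(int a, int b)
{
	return a > b;
}

/*
 * Sketch of asym_smt_can_pull_tasks(): may an idle dst CPU of priority
 * dst_prio pull from candidate group sg? Busy CPUs = weight - idle_cpus.
 */
static bool smt_can_pull(int dst_prio, struct grp *local, struct grp *sg)
{
	int sg_busy = sg->weight - sg->idle_cpus;

	if (!local->is_smt) {
		/* dst has no siblings: pull if sg has 2+ busy CPUs,
		 * otherwise only if dst has higher priority. */
		if (sg_busy >= 2)
			return true;
		return asym_prefer(dst_prio, sg->prio);
	}

	if (sg->is_smt) {
		/* Both SMT: pull only when it evens out busy siblings. */
		int local_busy = local->weight - local->idle_cpus;

		if (sg_busy - local_busy == 1)
			return asym_prefer(dst_prio, sg->prio);
		return false;
	}

	/* sg is non-SMT: pull only if the local core is fully idle. */
	if (!local->sum_nr_running)
		return asym_prefer(dst_prio, sg->prio);
	return false;
}

int main(void)
{
	struct grp local = { .weight = 2, .idle_cpus = 2, .is_smt = true, .prio = 2 };
	struct grp sg = { .weight = 1, .idle_cpus = 0, .sum_nr_running = 1, .prio = 1 };

	printf("can pull: %d\n", smt_can_pull(2, &local, &sg));
	return 0;
}

The intent: a high-priority idle core may drain an SMT core with multiple busy siblings, but between peers it should only pull when doing so evens out the sibling load.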
8991 struct sd_lb_stats *sds, in update_sg_lb_stats() argument
9000 local_group = group == sds->local; in update_sg_lb_stats()
9057 sched_asym(env, sds, sgs, group)) { in update_sg_lb_stats()
9083 struct sd_lb_stats *sds, in update_sd_pick_busiest() argument
9087 struct sg_lb_stats *busiest = &sds->busiest_stat; in update_sd_pick_busiest()
9102 sds->local_stat.group_type != group_has_spare)) in update_sd_pick_busiest()
9132 if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) in update_sd_pick_busiest()
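
update_sd_pick_busiest() (9083-9132) compares the group just scanned against the running busiest candidate, primarily by group_type severity; the sched_asym_prefer() check at 9132 rejects a candidate whose preferred CPU outranks the current busiest's, so the balancer drains low-priority CPUs first. A toy model of that ordering plus tie-break (group_type flattened to an int, priorities invented):

#include <stdbool.h>
#include <stdio.h>

struct cand {
	int group_type;		/* higher = more overloaded */
	int asym_prefer_prio;	/* stand-in for asym_prefer_cpu priority */
};

/* Stub for sched_asym_prefer(): true if a outranks b. */
static bool asym_prefer(int a, int b)
{
	return a > b;
}

/*
 * Toy update_sd_pick_busiest(): the more overloaded group wins; on a
 * tie between asym-packing candidates, keep the one whose preferred
 * CPU has lower priority, mirroring the check at 9132.
 */
static bool pick_busiest(struct cand *sg, struct cand *busiest)
{
	if (sg->group_type != busiest->group_type)
		return sg->group_type > busiest->group_type;
	/* Tie: do not pick sg if its preferred CPU outranks busiest's. */
	if (asym_prefer(sg->asym_prefer_prio, busiest->asym_prefer_prio))
		return false;
	return true;
}

int main(void)
{
	struct cand busiest = { .group_type = 2, .asym_prefer_prio = 1 };
	struct cand sg = { .group_type = 2, .asym_prefer_prio = 3 };

	printf("replace busiest: %d\n", pick_busiest(&sg, &busiest));
	return 0;
}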
9626 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) in update_sd_lb_stats() argument
9630 struct sg_lb_stats *local = &sds->local_stat; in update_sd_lb_stats()
9641 sds->local = sg; in update_sd_lb_stats()
9649 update_sg_lb_stats(env, sds, sg, sgs, &sg_status); in update_sd_lb_stats()
9655 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
9656 sds->busiest = sg; in update_sd_lb_stats()
9657 sds->busiest_stat = *sgs; in update_sd_lb_stats()
9662 sds->total_load += sgs->group_load; in update_sd_lb_stats()
9663 sds->total_capacity += sgs->group_capacity; in update_sd_lb_stats()
9670 sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING; in update_sd_lb_stats()
9674 env->fbq_type = fbq_classify_group(&sds->busiest_stat); in update_sd_lb_stats()
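
update_sd_lb_stats() (9626-9674) is the driver loop: for each sched_group in the domain it fills a sg_lb_stats, remembers which group is local, lets update_sd_pick_busiest() decide whether to replace the busiest candidate, and accumulates total_load and total_capacity for the later average. A condensed runnable model of that loop, with group stats reduced to a load/capacity pair and the busiest test reduced to a plain load comparison:

#include <stdio.h>

#define NGROUPS 3

struct sg_stats {
	unsigned long load, capacity;
};

struct sd_stats {
	int local, busiest;		/* group indices; -1 = none */
	struct sg_stats busiest_stat;
	unsigned long total_load, total_capacity;
};

/* Toy "pick busiest": a load comparison stands in for the group_type
 * ordering of update_sd_pick_busiest(). */
static int pick_busiest(struct sg_stats *sgs, struct sg_stats *busiest)
{
	return sgs->load > busiest->load;
}

static void update_sd_stats(struct sg_stats groups[], int local_idx,
			    struct sd_stats *sds)
{
	for (int i = 0; i < NGROUPS; i++) {
		struct sg_stats *sgs = &groups[i];

		if (i == local_idx) {
			sds->local = i;			/* sds->local = sg */
		} else if (pick_busiest(sgs, &sds->busiest_stat)) {
			sds->busiest = i;		/* sds->busiest = sg */
			sds->busiest_stat = *sgs;	/* sds->busiest_stat = *sgs */
		}
		sds->total_load += sgs->load;		/* totals include the local group */
		sds->total_capacity += sgs->capacity;
	}
}

int main(void)
{
	struct sg_stats groups[NGROUPS] = {
		{ .load = 512, .capacity = 1024 },
		{ .load = 2048, .capacity = 1024 },
		{ .load = 1024, .capacity = 1024 },
	};
	struct sd_stats sds = { .local = -1, .busiest = -1 };

	update_sd_stats(groups, 0, &sds);
	printf("busiest=%d total_load=%lu total_capacity=%lu\n",
	       sds.busiest, sds.total_load, sds.total_capacity);
	return 0;
}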
9701 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in calculate_imbalance() argument
9705 local = &sds->local_stat; in calculate_imbalance()
9706 busiest = &sds->busiest_stat; in calculate_imbalance()
9780 if (busiest->group_weight == 1 || sds->prefer_sibling) { in calculate_imbalance()
9837 sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) / in calculate_imbalance()
9838 sds->total_capacity; in calculate_imbalance()
9851 (busiest->avg_load - sds->avg_load) * busiest->group_capacity, in calculate_imbalance()
9852 (sds->avg_load - local->avg_load) * local->group_capacity in calculate_imbalance()
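
To make 9837-9852 concrete with made-up figures: with total_load = 3584 and total_capacity = 3072, avg_load = 3584 * 1024 / 3072 = 1194 in integer math (SCHED_CAPACITY_SCALE is 1024). If the busiest group has avg_load 2048 and capacity 1024 while the local group has avg_load 512 and capacity 1024, the two capacity-scaled deltas are (2048 - 1194) * 1024 = 874496 and (1194 - 512) * 1024 = 698368; the imbalance takes the minimum and divides back by 1024, giving 682, so here the pull is capped by the local group's headroom rather than by the busiest group's excess.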
9891 struct sd_lb_stats sds; in find_busiest_group() local
9893 init_sd_lb_stats(&sds); in find_busiest_group()
9899 update_sd_lb_stats(env, &sds); in find_busiest_group()
9908 local = &sds.local_stat; in find_busiest_group()
9909 busiest = &sds.busiest_stat; in find_busiest_group()
9912 if (!sds.busiest) in find_busiest_group()
9951 sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / in find_busiest_group()
9952 sds.total_capacity; in find_busiest_group()
9958 if (local->avg_load >= sds.avg_load) in find_busiest_group()
9971 if (sds.prefer_sibling && local->group_type == group_has_spare && in find_busiest_group()
10006 calculate_imbalance(env, &sds); in find_busiest_group()
10007 return env->imbalance ? sds.busiest : NULL; in find_busiest_group()
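
find_busiest_group() (9891-10007) strings the pieces together: initialize the stats, fill them, give up if no busiest group was found or if the local group already carries at least the domain-average load, and otherwise size the imbalance, returning the busiest group only when that imbalance is nonzero. A condensed runnable sketch of this control flow, reusing the made-up numbers from the example above (fields abbreviated, most of the real function's early-out checks omitted):

#include <stdio.h>

#define SCALE 1024UL	/* SCHED_CAPACITY_SCALE */

struct stats {
	unsigned long total_load, total_capacity, avg_load;
	unsigned long local_avg, local_cap;
	unsigned long busiest_avg, busiest_cap;
	int have_busiest;
};

/* Toy calculate_imbalance(): the mean-load branch worked above. */
static unsigned long calc_imbalance(struct stats *s)
{
	unsigned long over = (s->busiest_avg - s->avg_load) * s->busiest_cap;
	unsigned long under = (s->avg_load - s->local_avg) * s->local_cap;

	return (over < under ? over : under) / SCALE;
}

/*
 * Toy find_busiest_group() flow per the excerpts: no busiest group,
 * or a local group at/above the domain average, means no balancing.
 */
static int find_busiest(struct stats *s, unsigned long *imbalance)
{
	if (!s->have_busiest)
		return 0;		/* if (!sds.busiest) -> done */

	s->avg_load = s->total_load * SCALE / s->total_capacity;

	if (s->local_avg >= s->avg_load)
		return 0;		/* local is not underloaded */

	*imbalance = calc_imbalance(s);
	return *imbalance != 0;		/* imbalance ? busiest : NULL */
}

int main(void)
{
	struct stats s = {
		.total_load = 3584, .total_capacity = 3072,
		.local_avg = 512, .local_cap = 1024,
		.busiest_avg = 2048, .busiest_cap = 1024,
		.have_busiest = 1,
	};
	unsigned long imb = 0;

	if (find_busiest(&s, &imb))
		printf("balance: move about %lu load units\n", imb);
	return 0;
}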
10847 struct sched_domain_shared *sds; in nohz_balancer_kick() local
10931 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); in nohz_balancer_kick()
10932 if (sds) { in nohz_balancer_kick()
10942 nr_busy = atomic_read(&sds->nr_busy_cpus); in nohz_balancer_kick()
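
nohz_balancer_kick() (10847-10942) reuses the same RCU-protected per-LLC structure seen at the top of this listing, this time reading nr_busy_cpus, an atomic count of non-idle CPUs sharing the cache: more than one busy CPU in the LLC makes it worthwhile to kick an idle CPU into running the nohz idle balancer. A userspace model of that check, with a C11 atomic_int standing in for the kernel's atomic_t and the RCU lookup elided:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for struct sched_domain_shared. */
struct llc_shared {
	atomic_int nr_busy_cpus;	/* busy CPUs sharing this LLC */
};

static struct llc_shared llc;

/* Model of the nohz_balancer_kick() check: with more than one busy
 * CPU in the LLC, an idle CPU kicked awake to balance will likely
 * find work to pull. */
static int should_kick(void)
{
	return atomic_load(&llc.nr_busy_cpus) > 1;
}

int main(void)
{
	atomic_store(&llc.nr_busy_cpus, 2);
	printf("kick nohz balancer: %d\n", should_kick());
	return 0;
}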