Lines Matching refs:env

All matches below are drawn from the NUMA-balancing and CFS load-balancing code in kernel/sched/fair.c. The number at the start of each match is that file's source line, and the trailing "argument"/"local" markers note whether env is a function parameter or a local variable on that line.

1520 static void task_numa_assign(struct task_numa_env *env,  in task_numa_assign()  argument
1523 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1533 if (env->best_cpu != -1) { in task_numa_assign()
1534 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1538 if (env->best_task) in task_numa_assign()
1539 put_task_struct(env->best_task); in task_numa_assign()
1543 env->best_task = p; in task_numa_assign()
1544 env->best_imp = imp; in task_numa_assign()
1545 env->best_cpu = env->dst_cpu; in task_numa_assign()
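
The matches above and below only make sense against the fields of struct task_numa_env that they dereference. A sketch of that structure, reconstructed purely from the fields visible in this listing (field order, exact types, and any members not referenced here are assumptions, not the verbatim kernel definition):

/* Reconstructed sketch -- not the verbatim kernel definition. */
struct numa_stats {
	unsigned long load;			/* env->{src,dst}_stats.load (lines 1567-1568) */
	unsigned long compute_capacity;		/* lines 1562-1563 */
	/* ... further members not visible in this listing ... */
};

struct task_numa_env {
	struct task_struct *p;			/* task being placed */

	int src_cpu, src_nid;			/* where p runs today */
	int dst_cpu, dst_nid;			/* candidate destination */

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct;			/* tolerance, recomputed on line 1769 */
	int dist;				/* node_distance(src_nid, dst_nid), line 1784 */

	struct task_struct *best_task;		/* refcounted best swap candidate (lines 1538-1543) */
	long best_imp;				/* its improvement score */
	int best_cpu;				/* -1 until a candidate is found */
};
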
1549 struct task_numa_env *env) in load_too_imbalanced() argument
1562 src_capacity = env->src_stats.compute_capacity; in load_too_imbalanced()
1563 dst_capacity = env->dst_stats.compute_capacity; in load_too_imbalanced()
1567 orig_src_load = env->src_stats.load; in load_too_imbalanced()
1568 orig_dst_load = env->dst_stats.load; in load_too_imbalanced()
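
Lines 1562-1568 show load_too_imbalanced() pulling each node's load and compute capacity out of env. The question it answers is whether moving the task would leave the two nodes more capacity-weighted-imbalanced than they already are. A stand-alone model of that comparison, built only from the fields read above; it is a sketch of the idea, not a verbatim copy of the kernel function:

#include <stdbool.h>
#include <stdlib.h>	/* labs() */

/*
 * Scale each side's load by the other side's capacity so nodes of
 * different sizes are compared fairly, then refuse the move if the
 * post-move imbalance exceeds the pre-move imbalance.
 */
static bool load_too_imbalanced_model(long src_load, long dst_load,
				      long orig_src_load, long orig_dst_load,
				      long src_capacity, long dst_capacity)
{
	long imb     = labs(dst_load * src_capacity - src_load * dst_capacity);
	long old_imb = labs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);

	return imb > old_imb;	/* true: this move would make things worse */
}
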
1589 static void task_numa_compare(struct task_numa_env *env, in task_numa_compare() argument
1592 struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p); in task_numa_compare()
1593 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
1597 int dist = env->dist; in task_numa_compare()
1613 if (cur == env->p) in task_numa_compare()
1617 if (maymove && moveimp >= env->best_imp) in task_numa_compare()
1631 if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) in task_numa_compare()
1640 imp = taskimp + task_weight(cur, env->src_nid, dist) - in task_numa_compare()
1641 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
1654 imp += group_weight(cur, env->src_nid, dist) - in task_numa_compare()
1655 group_weight(cur, env->dst_nid, dist); in task_numa_compare()
1657 imp += task_weight(cur, env->src_nid, dist) - in task_numa_compare()
1658 task_weight(cur, env->dst_nid, dist); in task_numa_compare()
1661 if (maymove && moveimp > imp && moveimp > env->best_imp) { in task_numa_compare()
1673 if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2) in task_numa_compare()
1679 load = task_h_load(env->p) - task_h_load(cur); in task_numa_compare()
1683 dst_load = env->dst_stats.load + load; in task_numa_compare()
1684 src_load = env->src_stats.load - load; in task_numa_compare()
1686 if (load_too_imbalanced(src_load, dst_load, env)) in task_numa_compare()
1700 env->dst_cpu = select_idle_sibling(env->p, env->src_cpu, in task_numa_compare()
1701 env->dst_cpu); in task_numa_compare()
1705 task_numa_assign(env, cur, imp); in task_numa_compare()
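
task_numa_compare() (lines 1589-1705) scores a potential swap: p gains taskimp/groupimp by moving src->dst (computed by the caller), and the task cur currently on dst_cpu gains whatever the weight difference between src_nid and dst_nid gives it (lines 1640-1658). Line 1673 then rejects candidates whose score is tiny or only marginally better than the current best. A small stand-alone model of that scoring and acceptance test (the SMALLIMP value here is a placeholder, not necessarily the kernel's constant):

#include <stdbool.h>

#define SMALLIMP 30	/* placeholder threshold; the kernel defines its own */

/* Net benefit of swapping p and cur: p's gain plus cur's gain from the
 * reverse move (weight on src minus weight on dst, as on lines 1640-1641). */
static long swap_score(long taskimp, long cur_weight_src, long cur_weight_dst)
{
	return taskimp + cur_weight_src - cur_weight_dst;
}

/* Line 1673: ignore improvements that are absolutely small or that beat
 * the current best candidate by less than SMALLIMP/2. */
static bool swap_beats_best(long imp, long best_imp)
{
	return !(imp < SMALLIMP || imp <= best_imp + SMALLIMP / 2);
}
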
1710 static void task_numa_find_cpu(struct task_numa_env *env, in task_numa_find_cpu() argument
1717 load = task_h_load(env->p); in task_numa_find_cpu()
1718 dst_load = env->dst_stats.load + load; in task_numa_find_cpu()
1719 src_load = env->src_stats.load - load; in task_numa_find_cpu()
1725 maymove = !load_too_imbalanced(src_load, dst_load, env); in task_numa_find_cpu()
1727 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { in task_numa_find_cpu()
1729 if (!cpumask_test_cpu(cpu, env->p->cpus_ptr)) in task_numa_find_cpu()
1732 env->dst_cpu = cpu; in task_numa_find_cpu()
1733 task_numa_compare(env, taskimp, groupimp, maymove); in task_numa_find_cpu()
1739 struct task_numa_env env = { in task_numa_migrate() local
1767 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu)); in task_numa_migrate()
1769 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2; in task_numa_migrate()
1783 env.dst_nid = p->numa_preferred_nid; in task_numa_migrate()
1784 dist = env.dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
1785 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
1786 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
1787 update_numa_stats(&env.src_stats, env.src_nid); in task_numa_migrate()
1788 taskimp = task_weight(p, env.dst_nid, dist) - taskweight; in task_numa_migrate()
1789 groupimp = group_weight(p, env.dst_nid, dist) - groupweight; in task_numa_migrate()
1790 update_numa_stats(&env.dst_stats, env.dst_nid); in task_numa_migrate()
1793 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
1803 if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) { in task_numa_migrate()
1805 if (nid == env.src_nid || nid == p->numa_preferred_nid) in task_numa_migrate()
1808 dist = node_distance(env.src_nid, env.dst_nid); in task_numa_migrate()
1810 dist != env.dist) { in task_numa_migrate()
1811 taskweight = task_weight(p, env.src_nid, dist); in task_numa_migrate()
1812 groupweight = group_weight(p, env.src_nid, dist); in task_numa_migrate()
1821 env.dist = dist; in task_numa_migrate()
1822 env.dst_nid = nid; in task_numa_migrate()
1823 update_numa_stats(&env.dst_stats, env.dst_nid); in task_numa_migrate()
1824 task_numa_find_cpu(&env, taskimp, groupimp); in task_numa_migrate()
1837 if (env.best_cpu == -1) in task_numa_migrate()
1838 nid = env.src_nid; in task_numa_migrate()
1840 nid = cpu_to_node(env.best_cpu); in task_numa_migrate()
1847 if (env.best_cpu == -1) in task_numa_migrate()
1850 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
1851 if (env.best_task == NULL) { in task_numa_migrate()
1852 ret = migrate_task_to(p, env.best_cpu); in task_numa_migrate()
1855 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu); in task_numa_migrate()
1859 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu); in task_numa_migrate()
1863 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task)); in task_numa_migrate()
1864 put_task_struct(env.best_task); in task_numa_migrate()
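
Line 1739 declares env as a local in task_numa_migrate(), and the lines above show it being filled in stages: the source side and imbalance_pct first, then dst_nid/dist/dst_stats for each candidate node. A hedged sketch of how that local might start out; the concrete initial values are assumptions, while best_cpu = -1 matches the "no candidate yet" checks on lines 1803, 1837 and 1847:

	struct task_numa_env env = {
		.p         = p,
		.src_cpu   = task_cpu(p),	/* assumption: derived from p */
		.src_nid   = task_node(p),

		.best_task = NULL,
		.best_imp  = 0,
		.best_cpu  = -1,		/* sentinel: no move found yet */
	};
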
7126 static int task_hot(struct task_struct *p, struct lb_env *env) in task_hot() argument
7130 lockdep_assert_held(&env->src_rq->lock); in task_hot()
7141 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && in task_hot()
7151 delta = rq_clock_task(env->src_rq) - p->se.exec_start; in task_hot()
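
From line 7126 onward the matches switch from NUMA placement to the CFS load balancer, whose functions all take a struct lb_env. A sketch of that structure, again reconstructed only from the fields the remaining matches dereference (ordering, exact types, and any members not referenced here are assumptions):

/* Reconstructed sketch -- not the verbatim kernel definition. */
struct lb_env {
	struct sched_domain *sd;	/* domain being balanced */

	struct rq *src_rq;		/* busiest runqueue */
	int src_cpu;

	int dst_cpu;			/* CPU doing the pull */
	struct rq *dst_rq;

	struct cpumask *dst_grpmask;	/* CPUs of the local group */
	int new_dst_cpu;		/* fallback when dst_cpu is not allowed (line 7258) */
	enum cpu_idle_type idle;	/* CPU_IDLE / CPU_NOT_IDLE / CPU_NEWLY_IDLE */
	long imbalance;			/* load still to move, set by calculate_imbalance() */

	struct cpumask *cpus;		/* CPUs still eligible this pass */

	unsigned int flags;		/* LBF_ALL_PINNED, LBF_SOME_PINNED, LBF_DST_PINNED, ... */

	unsigned int loop;		/* detach_tasks() scan bookkeeping */
	unsigned int loop_break;
	unsigned int loop_max;

	enum fbq_type fbq_type;		/* line 8266 */
	enum group_type src_grp_type;	/* line 8586 */
	struct list_head tasks;		/* detached tasks, re-attached by attach_tasks() */
};
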
7162 static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) in migrate_degrades_locality() argument
7171 if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) in migrate_degrades_locality()
7174 src_nid = cpu_to_node(env->src_cpu); in migrate_degrades_locality()
7175 dst_nid = cpu_to_node(env->dst_cpu); in migrate_degrades_locality()
7182 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) in migrate_degrades_locality()
7193 if (env->idle == CPU_IDLE) in migrate_degrades_locality()
7210 struct lb_env *env) in migrate_degrades_locality() argument
7220 int can_migrate_task(struct task_struct *p, struct lb_env *env) in can_migrate_task() argument
7224 lockdep_assert_held(&env->src_rq->lock); in can_migrate_task()
7233 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
7236 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
7241 env->flags |= LBF_SOME_PINNED; in can_migrate_task()
7251 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) in can_migrate_task()
7255 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { in can_migrate_task()
7257 env->flags |= LBF_DST_PINNED; in can_migrate_task()
7258 env->new_dst_cpu = cpu; in can_migrate_task()
7267 env->flags &= ~LBF_ALL_PINNED; in can_migrate_task()
7269 if (task_running(env->src_rq, p)) { in can_migrate_task()
7280 tsk_cache_hot = migrate_degrades_locality(p, env); in can_migrate_task()
7282 tsk_cache_hot = task_hot(p, env); in can_migrate_task()
7285 env->sd->nr_balance_failed > env->sd->cache_nice_tries) { in can_migrate_task()
7287 schedstat_inc(env->sd->lb_hot_gained[env->idle]); in can_migrate_task()
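
Lines 7280-7287 show the cache-hotness decision inside can_migrate_task(): locality and hotness are consulted first (migrate_degrades_locality(), then task_hot()), but a hot task is migrated anyway once the domain has failed to balance more often than its cache_nice_tries allowance. A minimal model of that override:

#include <stdbool.h>

/*
 * A cache-hot task is normally left where it is; repeated balance
 * failures (nr_balance_failed > cache_nice_tries) force it to move,
 * which is what the lb_hot_gained schedstat on line 7287 counts.
 */
static bool may_migrate_hot_task(bool cache_hot,
				 unsigned int nr_balance_failed,
				 unsigned int cache_nice_tries)
{
	return !cache_hot || nr_balance_failed > cache_nice_tries;
}
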
7300 static void detach_task(struct task_struct *p, struct lb_env *env) in detach_task() argument
7302 lockdep_assert_held(&env->src_rq->lock); in detach_task()
7304 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); in detach_task()
7305 set_task_cpu(p, env->dst_cpu); in detach_task()
7314 static struct task_struct *detach_one_task(struct lb_env *env) in detach_one_task() argument
7318 lockdep_assert_held(&env->src_rq->lock); in detach_one_task()
7321 &env->src_rq->cfs_tasks, se.group_node) { in detach_one_task()
7322 if (!can_migrate_task(p, env)) in detach_one_task()
7325 detach_task(p, env); in detach_one_task()
7333 schedstat_inc(env->sd->lb_gained[env->idle]); in detach_one_task()
7347 static int detach_tasks(struct lb_env *env) in detach_tasks() argument
7349 struct list_head *tasks = &env->src_rq->cfs_tasks; in detach_tasks()
7354 lockdep_assert_held(&env->src_rq->lock); in detach_tasks()
7356 if (env->imbalance <= 0) in detach_tasks()
7364 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) in detach_tasks()
7369 env->loop++; in detach_tasks()
7371 if (env->loop > env->loop_max) in detach_tasks()
7375 if (env->loop > env->loop_break) { in detach_tasks()
7376 env->loop_break += sched_nr_migrate_break; in detach_tasks()
7377 env->flags |= LBF_NEED_BREAK; in detach_tasks()
7381 if (!can_migrate_task(p, env)) in detach_tasks()
7386 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) in detach_tasks()
7389 if ((load / 2) > env->imbalance) in detach_tasks()
7392 detach_task(p, env); in detach_tasks()
7393 list_add(&p->se.group_node, &env->tasks); in detach_tasks()
7396 env->imbalance -= load; in detach_tasks()
7404 if (env->idle == CPU_NEWLY_IDLE) in detach_tasks()
7412 if (env->imbalance <= 0) in detach_tasks()
7425 schedstat_add(env->sd->lb_gained[env->idle], detached); in detach_tasks()
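
Lines 7386-7389 are the per-task filters inside detach_tasks(): with the LB_MIN feature enabled, very small tasks are skipped until balancing has actually failed, and a task whose load would overshoot the remaining imbalance by more than a factor of two is skipped as well. A stand-alone restatement of those two rules:

#include <stdbool.h>

static bool skip_this_task(unsigned long load, long remaining_imbalance,
			   bool lb_min_enabled, unsigned int nr_balance_failed)
{
	/* Line 7386: tiny tasks are not worth the migration cost (yet). */
	if (lb_min_enabled && load < 16 && !nr_balance_failed)
		return true;

	/* Line 7389: don't pull a task that is too big a bite. */
	if ((long)(load / 2) > remaining_imbalance)
		return true;

	return false;
}
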
7460 static void attach_tasks(struct lb_env *env) in attach_tasks() argument
7462 struct list_head *tasks = &env->tasks; in attach_tasks()
7466 rq_lock(env->dst_rq, &rf); in attach_tasks()
7467 update_rq_clock(env->dst_rq); in attach_tasks()
7473 attach_task(env->dst_rq, p); in attach_tasks()
7476 rq_unlock(env->dst_rq, &rf); in attach_tasks()
7916 group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs) in group_has_capacity() argument
7922 (sgs->group_util * env->sd->imbalance_pct)) in group_has_capacity()
7937 group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs) in group_is_overloaded() argument
7943 (sgs->group_util * env->sd->imbalance_pct)) in group_is_overloaded()
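
group_has_capacity() and group_is_overloaded() (lines 7916-7943) use the same percentage trick: sd->imbalance_pct is a factor such as 125 expressed in percent, so comparing capacity * 100 against util * imbalance_pct asks whether the group's utilization fits under its capacity with that margin. The comparisons visible on lines 7922 and 7943, restated on their own (each kernel function also applies additional checks not shown in this listing):

#include <stdbool.h>

/* Line 7922: spare capacity remains once the imbalance_pct margin is applied. */
static bool has_capacity_margin(unsigned long group_capacity,
				unsigned long group_util,
				unsigned int imbalance_pct)
{
	return group_capacity * 100 > group_util * imbalance_pct;
}

/* Line 7943: the mirror-image check used to flag the group as overloaded. */
static bool over_capacity_margin(unsigned long group_capacity,
				 unsigned long group_util,
				 unsigned int imbalance_pct)
{
	return group_capacity * 100 < group_util * imbalance_pct;
}
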
8014 static inline void update_sg_lb_stats(struct lb_env *env, in update_sg_lb_stats() argument
8023 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in update_sg_lb_stats()
8026 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) in update_sg_lb_stats()
8027 env->flags |= LBF_NOHZ_AGAIN; in update_sg_lb_stats()
8050 if (env->sd->flags & SD_ASYM_CPUCAPACITY && in update_sg_lb_stats()
8066 sgs->group_no_capacity = group_is_overloaded(env, sgs); in update_sg_lb_stats()
8083 static bool update_sd_pick_busiest(struct lb_env *env, in update_sd_pick_busiest() argument
8098 !group_has_capacity(env, &sds->local_stat))) in update_sd_pick_busiest()
8110 if (!(env->sd->flags & SD_ASYM_CPUCAPACITY)) in update_sd_pick_busiest()
8132 if (!(env->sd->flags & SD_ASYM_PACKING)) in update_sd_pick_busiest()
8136 if (env->idle == CPU_NOT_IDLE) in update_sd_pick_busiest()
8144 sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) { in update_sd_pick_busiest()
8192 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) in update_sd_lb_stats() argument
8194 struct sched_domain *child = env->sd->child; in update_sd_lb_stats()
8195 struct sched_group *sg = env->sd->groups; in update_sd_lb_stats()
8202 if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) in update_sd_lb_stats()
8203 env->flags |= LBF_NOHZ_STATS; in update_sd_lb_stats()
8210 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); in update_sd_lb_stats()
8215 if (env->idle != CPU_NEWLY_IDLE || in update_sd_lb_stats()
8217 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
8220 update_sg_lb_stats(env, sg, sgs, &sg_status); in update_sd_lb_stats()
8236 group_has_capacity(env, local) && in update_sd_lb_stats()
8242 if (update_sd_pick_busiest(env, sds, sg, sgs)) { in update_sd_lb_stats()
8254 } while (sg != env->sd->groups); in update_sd_lb_stats()
8257 if ((env->flags & LBF_NOHZ_AGAIN) && in update_sd_lb_stats()
8258 cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) { in update_sd_lb_stats()
8265 if (env->sd->flags & SD_NUMA) in update_sd_lb_stats()
8266 env->fbq_type = fbq_classify_group(&sds->busiest_stat); in update_sd_lb_stats()
8268 if (!env->sd->parent) { in update_sd_lb_stats()
8269 struct root_domain *rd = env->dst_rq->rd; in update_sd_lb_stats()
8278 struct root_domain *rd = env->dst_rq->rd; in update_sd_lb_stats()
8308 static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) in check_asym_packing() argument
8312 if (!(env->sd->flags & SD_ASYM_PACKING)) in check_asym_packing()
8315 if (env->idle == CPU_NOT_IDLE) in check_asym_packing()
8322 if (sched_asym_prefer(busiest_cpu, env->dst_cpu)) in check_asym_packing()
8325 env->imbalance = sds->busiest_stat.group_load; in check_asym_packing()
8338 void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in fix_small_imbalance() argument
8349 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu); in fix_small_imbalance()
8359 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
8397 env->imbalance = busiest->load_per_task; in fix_small_imbalance()
8406 static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) in calculate_imbalance() argument
8432 env->imbalance = 0; in calculate_imbalance()
8433 return fix_small_imbalance(env, sds); in calculate_imbalance()
8460 env->imbalance = min( in calculate_imbalance()
8467 env->imbalance = max_t(long, env->imbalance, in calculate_imbalance()
8477 if (env->imbalance < busiest->load_per_task) in calculate_imbalance()
8478 return fix_small_imbalance(env, sds); in calculate_imbalance()
8494 static struct sched_group *find_busiest_group(struct lb_env *env) in find_busiest_group() argument
8505 update_sd_lb_stats(env, &sds); in find_busiest_group()
8508 struct root_domain *rd = env->dst_rq->rd; in find_busiest_group()
8518 if (check_asym_packing(env, &sds)) in find_busiest_group()
8541 if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) && in find_busiest_group()
8563 if (env->idle == CPU_IDLE) { in find_busiest_group()
8580 env->sd->imbalance_pct * local->avg_load) in find_busiest_group()
8586 env->src_grp_type = busiest->group_type; in find_busiest_group()
8587 calculate_imbalance(env, &sds); in find_busiest_group()
8588 return env->imbalance ? sds.busiest : NULL; in find_busiest_group()
8591 env->imbalance = 0; in find_busiest_group()
8598 static struct rq *find_busiest_queue(struct lb_env *env, in find_busiest_queue() argument
8605 for_each_cpu_and(i, sched_group_span(group), env->cpus) { in find_busiest_queue()
8631 if (rt > env->fbq_type) in find_busiest_queue()
8638 if (env->src_grp_type == group_misfit_task) { in find_busiest_queue()
8655 if (env->sd->flags & SD_ASYM_CPUCAPACITY && in find_busiest_queue()
8656 capacity_of(env->dst_cpu) < capacity && in find_busiest_queue()
8667 if (rq->nr_running == 1 && load > env->imbalance && in find_busiest_queue()
8668 !check_cpu_capacity(rq, env->sd)) in find_busiest_queue()
8699 asym_active_balance(struct lb_env *env) in asym_active_balance() argument
8706 return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && in asym_active_balance()
8707 sched_asym_prefer(env->dst_cpu, env->src_cpu); in asym_active_balance()
8711 voluntary_active_balance(struct lb_env *env) in voluntary_active_balance() argument
8713 struct sched_domain *sd = env->sd; in voluntary_active_balance()
8715 if (asym_active_balance(env)) in voluntary_active_balance()
8724 if ((env->idle != CPU_NOT_IDLE) && in voluntary_active_balance()
8725 (env->src_rq->cfs.h_nr_running == 1)) { in voluntary_active_balance()
8726 if ((check_cpu_capacity(env->src_rq, sd)) && in voluntary_active_balance()
8727 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in voluntary_active_balance()
8731 if (env->src_grp_type == group_misfit_task) in voluntary_active_balance()
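
Lines 8724-8727 are the asymmetric-capacity case in voluntary_active_balance(): if the source runqueue is running a single CFS task and the source CPU is meaningfully smaller than the destination (again judged via imbalance_pct), active migration is worthwhile. The capacity comparison on line 8727, restated on its own:

#include <stdbool.h>

/* True when dst has at least imbalance_pct/100 times the capacity of src. */
static bool dst_noticeably_bigger(unsigned long src_capacity,
				  unsigned long dst_capacity,
				  unsigned int imbalance_pct)
{
	return src_capacity * imbalance_pct < dst_capacity * 100;
}
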
8737 static int need_active_balance(struct lb_env *env) in need_active_balance() argument
8739 struct sched_domain *sd = env->sd; in need_active_balance()
8741 if (voluntary_active_balance(env)) in need_active_balance()
8749 static int should_we_balance(struct lb_env *env) in should_we_balance() argument
8751 struct sched_group *sg = env->sd->groups; in should_we_balance()
8758 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) in should_we_balance()
8765 if (env->idle == CPU_NEWLY_IDLE) in should_we_balance()
8769 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { in should_we_balance()
8784 return balance_cpu == env->dst_cpu; in should_we_balance()
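
should_we_balance() (lines 8749-8784) elects a single balancer per group: newly idle CPUs always proceed (line 8765); otherwise the first idle CPU in the group's balance mask, or failing that the group's designated first CPU, is the only one for which the function returns true. A simplified stand-alone model; the candidate ordering and fallback here are assumptions about intent, not the kernel's exact iteration:

#include <stdbool.h>

static bool should_we_balance_model(int dst_cpu, const int *balance_cpus, int n,
				    bool (*cpu_is_idle)(int cpu))
{
	int balance_cpu = -1;

	for (int i = 0; i < n; i++) {
		if (cpu_is_idle(balance_cpus[i])) {
			balance_cpu = balance_cpus[i];	/* first idle CPU wins */
			break;
		}
	}

	if (balance_cpu == -1 && n > 0)
		balance_cpu = balance_cpus[0];		/* fallback: group's first CPU */

	/* Only the elected CPU continues with the balance attempt (line 8784). */
	return balance_cpu == dst_cpu;
}
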
8802 struct lb_env env = { in load_balance() local
8811 .tasks = LIST_HEAD_INIT(env.tasks), in load_balance()
8819 if (!should_we_balance(&env)) { in load_balance()
8824 group = find_busiest_group(&env); in load_balance()
8830 busiest = find_busiest_queue(&env, group); in load_balance()
8836 BUG_ON(busiest == env.dst_rq); in load_balance()
8838 schedstat_add(sd->lb_imbalance[idle], env.imbalance); in load_balance()
8840 env.src_cpu = busiest->cpu; in load_balance()
8841 env.src_rq = busiest; in load_balance()
8851 env.flags |= LBF_ALL_PINNED; in load_balance()
8852 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); in load_balance()
8862 cur_ld_moved = detach_tasks(&env); in load_balance()
8875 attach_tasks(&env); in load_balance()
8881 if (env.flags & LBF_NEED_BREAK) { in load_balance()
8882 env.flags &= ~LBF_NEED_BREAK; in load_balance()
8905 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { in load_balance()
8908 __cpumask_clear_cpu(env.dst_cpu, env.cpus); in load_balance()
8910 env.dst_rq = cpu_rq(env.new_dst_cpu); in load_balance()
8911 env.dst_cpu = env.new_dst_cpu; in load_balance()
8912 env.flags &= ~LBF_DST_PINNED; in load_balance()
8913 env.loop = 0; in load_balance()
8914 env.loop_break = sched_nr_migrate_break; in load_balance()
8929 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) in load_balance()
8934 if (unlikely(env.flags & LBF_ALL_PINNED)) { in load_balance()
8944 if (!cpumask_subset(cpus, env.dst_grpmask)) { in load_balance()
8945 env.loop = 0; in load_balance()
8946 env.loop_break = sched_nr_migrate_break; in load_balance()
8964 if (need_active_balance(&env)) { in load_balance()
8977 env.flags |= LBF_ALL_PINNED; in load_balance()
9005 if (likely(!active_balance) || voluntary_active_balance(&env)) { in load_balance()
9027 if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { in load_balance()
9053 if (env.idle == CPU_NEWLY_IDLE) in load_balance()
9057 if ((env.flags & LBF_ALL_PINNED && in load_balance()
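
Line 8802 declares the lb_env local that drives an entire load_balance() pass, and the lines above show how its fields evolve: the source side is filled in once the busiest queue is known (lines 8840-8841), the destination is retargeted when the chosen dst_cpu turns out to be pinned (lines 8908-8912), and the loop/flags fields are reset across retries (lines 8913-8914, 8945-8946). A hedged sketch of how such a local might be initialized up front; apart from .tasks, which is visible on line 8811, the initializer below is an assumption:

	struct lb_env env = {
		.sd          = sd,
		.dst_cpu     = this_cpu,
		.dst_rq      = this_rq,
		.dst_grpmask = sched_group_span(sd->groups),	/* assumption */
		.idle        = idle,
		.loop_break  = sched_nr_migrate_break,		/* reset point, lines 8913-8914 */
		.cpus        = cpus,
		.fbq_type    = all,				/* assumption */
		.tasks       = LIST_HEAD_INIT(env.tasks),	/* line 8811 */
	};
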
9143 struct lb_env env = { in active_load_balance_cpu_stop() local
9162 p = detach_one_task(&env); in active_load_balance_cpu_stop()