Lines matching refs:dst_cpu (kernel/sched/fair.c)

1840 				int src_nid, int dst_cpu)  in should_numa_migrate_memory()  argument
1843 int dst_nid = cpu_to_node(dst_cpu); in should_numa_migrate_memory()
1877 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); in should_numa_migrate_memory()
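
The should_numa_migrate_memory() hits above only use dst_cpu to derive the destination node (cpu_to_node()) and to build a cpupid value that is compared against the page's last recorded accessor (cpu_pid_to_cpupid()). Below is a minimal standalone sketch of that packing; the sketch_* names, the four-CPUs-per-node mapping, and the bit widths are made up for illustration, not the kernel's real last-cpupid layout.

    #include <stdio.h>

    /* Illustrative bit widths only; not the kernel's real cpupid layout. */
    #define SKETCH_PID_BITS  8
    #define SKETCH_PID_MASK  ((1 << SKETCH_PID_BITS) - 1)

    /* Stand-in for cpu_to_node(): pretend each node holds four CPUs. */
    static int sketch_cpu_to_node(int cpu)
    {
            return cpu / 4;
    }

    /* Pack (cpu, low pid bits) the way cpu_pid_to_cpupid() does conceptually. */
    static int sketch_cpu_pid_to_cpupid(int cpu, int pid)
    {
            return (cpu << SKETCH_PID_BITS) | (pid & SKETCH_PID_MASK);
    }

    int main(void)
    {
            int dst_cpu = 6, pid = 1234;

            printf("dst_nid=%d cpupid=0x%x\n",
                   sketch_cpu_to_node(dst_cpu),
                   (unsigned int)sketch_cpu_pid_to_cpupid(dst_cpu, pid));
            return 0;
    }
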
1978 int dst_cpu, dst_nid; member
2085 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
2088 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) { in task_numa_assign()
2090 int start = env->dst_cpu; in task_numa_assign()
2099 env->dst_cpu = cpu; in task_numa_assign()
2100 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
2114 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) { in task_numa_assign()
2126 env->best_cpu = env->dst_cpu; in task_numa_assign()
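
task_numa_assign() (lines 2085-2126 above) claims the destination runqueue by atomically setting rq->numa_migrate_on with xchg(); if that runqueue is already claimed, it scans for another destination CPU starting at dst_cpu. A standalone sketch of that claim-or-fall-forward pattern using C11 atomics; the sketch_* names, the flat CPU array, and the simple wrap-around scan are stand-ins (the real scan is confined to the destination node's CPUs).

    #include <stdatomic.h>

    #define NR_SKETCH_CPUS 8

    /* Stand-in for rq->numa_migrate_on: one claim flag per CPU. */
    static atomic_int sketch_numa_migrate_on[NR_SKETCH_CPUS];

    /*
     * Try to mark dst_cpu's runqueue as the target of a NUMA migration;
     * if somebody else already holds it, fall forward to the next free
     * CPU.  Returns the CPU actually claimed, or -1 if none was free.
     */
    static int sketch_claim_dst_cpu(int dst_cpu)
    {
            int cpu = dst_cpu;

            for (int tries = 0; tries < NR_SKETCH_CPUS; tries++) {
                    /* xchg() in the kernel; atomic_exchange() here. */
                    if (!atomic_exchange(&sketch_numa_migrate_on[cpu], 1))
                            return cpu;                 /* claimed this rq */
                    cpu = (cpu + 1) % NR_SKETCH_CPUS;   /* try another destination */
            }
            return -1;
    }

    /* Counterpart of clearing numa_migrate_on when a better CPU is picked. */
    static void sketch_release_cpu(int cpu)
    {
            atomic_store(&sketch_numa_migrate_on[cpu], 0);
    }
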
2174 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2320 cpu = env->dst_cpu; in task_numa_compare()
2331 env->dst_cpu = cpu; in task_numa_compare()
2388 env->dst_cpu = env->dst_stats.idle_cpu; in task_numa_find_cpu()
2410 env->dst_cpu = cpu; in task_numa_find_cpu()
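
In task_numa_compare()/task_numa_find_cpu() (lines 2320-2410), dst_cpu is simply the candidate currently being evaluated: it is preset to the idle CPU recorded in dst_stats and then reassigned as the loop walks the destination node's CPUs. A toy sketch of that walk; the scoring here is a made-up stand-in for the improvement estimate the real code computes.

    /* Made-up per-CPU score; the real code estimates the NUMA improvement. */
    static long sketch_score(int cpu, const long *load)
    {
            return -load[cpu];              /* prefer the least-loaded CPU */
    }

    /*
     * Walk the candidate CPUs of the destination node, trying each one as
     * dst_cpu and remembering the best, roughly what task_numa_find_cpu()
     * does around the lines quoted above.
     */
    static int sketch_find_dst_cpu(const int *node_cpus, int nr, const long *load)
    {
            int best_cpu = -1;
            long best = 0;

            for (int i = 0; i < nr; i++) {
                    int dst_cpu = node_cpus[i];
                    long score = sketch_score(dst_cpu, load);

                    if (best_cpu == -1 || score > best) {
                            best = score;
                            best_cpu = dst_cpu;
                    }
            }
            return best_cpu;
    }
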
7467 cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost) in cpu_util() argument
7484 if (p && task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util()
7486 else if (p && task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util()
7520 if (dst_cpu == cpu) in cpu_util()
7645 struct task_struct *p, int dst_cpu) in eenv_pd_max_util() argument
7651 struct task_struct *tsk = (cpu == dst_cpu) ? p : NULL; in eenv_pd_max_util()
7652 unsigned long util = cpu_util(cpu, p, dst_cpu, 1); in eenv_pd_max_util()
7676 struct cpumask *pd_cpus, struct task_struct *p, int dst_cpu) in compute_energy() argument
7678 unsigned long max_util = eenv_pd_max_util(eenv, pd_cpus, p, dst_cpu); in compute_energy()
7681 if (dst_cpu >= 0) in compute_energy()
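
The cpu_util() hits (lines 7484-7520) show why it takes a dst_cpu argument: callers ask "what would this CPU's utilization be if p ran on dst_cpu?", discounting p when it would leave the CPU and adding it when it would arrive. A simplified standalone version of that adjustment, ignoring util_est, the boost argument, and capacity clamping; the sketch_* names are stand-ins for the kernel's types.

    struct sketch_task {
            int           cpu;   /* CPU the task is currently accounted on */
            unsigned long util;  /* its utilization contribution */
    };

    /*
     * Simplified cpu_util(cpu, p, dst_cpu): start from the CPU's current
     * utilization and pretend p has been moved to dst_cpu.  Ignores
     * util_est, the boost argument and capacity clamping.
     */
    static unsigned long sketch_cpu_util(int cpu, unsigned long cpu_util_now,
                                         const struct sketch_task *p, int dst_cpu)
    {
            unsigned long util = cpu_util_now;

            if (p && p->cpu == cpu && dst_cpu != cpu)
                    util -= (p->util < util ? p->util : util);  /* p would leave */
            else if (p && p->cpu != cpu && dst_cpu == cpu)
                    util += p->util;                            /* p would arrive */

            return util;
    }

eenv_pd_max_util() takes the maximum of these adjusted values across the performance domain's CPUs, and compute_energy() costs that operating point both for a candidate dst_cpu and for dst_cpu = -1 (the "task not placed here" baseline), which is what the dst_cpu >= 0 check on line 7681 distinguishes.
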
8565 int dst_cpu; member
8619 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
8649 dst_nid = cpu_to_node(env->dst_cpu); in migrate_degrades_locality()
8707 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
8714 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
8790 set_task_cpu(p, env->dst_cpu); in detach_task()
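
For load balancing, dst_cpu is a member of struct lb_env (line 8565): can_migrate_task() rejects tasks whose affinity mask does not include env->dst_cpu (and src/dst pairs that are throttled), and detach_task() then retargets the chosen task at env->dst_cpu via set_task_cpu(). A reduced sketch of those two steps; the sketch_* types stand in for the kernel's task_struct/lb_env, and the real checks are more numerous (throttling, cache hotness, NUMA locality, core cookies).

    #include <stdbool.h>

    #define NR_SKETCH_CPUS 8

    struct sketch_lb_task {
            int  cpu;                        /* CPU the task currently runs on */
            bool allowed[NR_SKETCH_CPUS];    /* stand-in for p->cpus_ptr */
    };

    struct sketch_lb_env {
            int src_cpu;
            int dst_cpu;                     /* CPU the balancer pulls toward */
    };

    /*
     * Roughly the affinity part of can_migrate_task(): the destination CPU
     * must be in the task's allowed mask.
     */
    static bool sketch_can_migrate(const struct sketch_lb_task *p,
                                   const struct sketch_lb_env *env)
    {
            return p->allowed[env->dst_cpu];
    }

    /* detach_task() boils down to retargeting the task at env->dst_cpu. */
    static void sketch_detach_task(struct sketch_lb_task *p,
                                   const struct sketch_lb_env *env)
    {
            p->cpu = env->dst_cpu;           /* set_task_cpu(p, env->dst_cpu) */
    }
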
9566 if (!sched_use_asym_prio(env->sd, env->dst_cpu)) in sched_asym()
9578 return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu); in sched_asym()
9782 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) || in update_sd_pick_busiest()
9901 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu)))) in update_sd_pick_busiest()
10288 llc_weight = per_cpu(sd_llc_size, env->dst_cpu); in update_idle_cpu_scan()
10292 sd_share = rcu_dereference(per_cpu(sd_llc_shared, env->dst_cpu)); in update_idle_cpu_scan()
10360 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); in update_sd_lb_stats()
10367 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
10821 !capacity_greater(capacity_of(env->dst_cpu), capacity) && in find_busiest_queue()
10834 sched_asym_prefer(i, env->dst_cpu) && in find_busiest_queue()
10932 sched_use_asym_prio(env->sd, env->dst_cpu) && in asym_active_balance()
10933 (sched_asym_prefer(env->dst_cpu, env->src_cpu) || in asym_active_balance()
10973 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in need_active_balance()
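
The asym-packing hits (sched_asym(), update_sd_pick_busiest(), find_busiest_queue(), asym_active_balance()) compare env->dst_cpu's priority against other CPUs with sched_asym_prefer(): pulling is only worthwhile toward a higher-priority CPU. A tiny sketch of that comparison; the priority table is invented here, while the kernel derives per-CPU priorities from arch_asym_cpu_priority().

    /* Invented priority table; the kernel uses arch_asym_cpu_priority(). */
    static const int sketch_cpu_priority[] = { 10, 10, 30, 30 };

    /* sched_asym_prefer(a, b): prefer CPU a over CPU b if it ranks higher. */
    static int sketch_asym_prefer(int a, int b)
    {
            return sketch_cpu_priority[a] > sketch_cpu_priority[b];
    }

    /*
     * The checks quoted from asym_active_balance() only allow an active
     * balance when the destination CPU outranks the source.
     */
    static int sketch_asym_active_balance(int dst_cpu, int src_cpu)
    {
            return sketch_asym_prefer(dst_cpu, src_cpu);
    }
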
10995 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) in should_we_balance()
11037 return cpu == env->dst_cpu; in should_we_balance()
11040 if (idle_smt == env->dst_cpu) in should_we_balance()
11044 return group_balance_cpu(sg) == env->dst_cpu; in should_we_balance()
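
should_we_balance() (lines 10995-11044) elects a single CPU of the balance group to run the balancing pass, preferring an idle one, and simply reports whether that elected CPU is env->dst_cpu. A self-contained sketch of the idea; the boolean arrays replace the kernel's cpumasks and the SMT-specific handling (idle_smt) is omitted.

    #include <stdbool.h>

    #define NR_SKETCH_CPUS 8

    /*
     * Sketch of the should_we_balance() idea: exactly one CPU of the
     * balance group runs the balancing pass, preferably an idle one;
     * every other CPU bails out.  dst_cpu is "this" CPU in load_balance().
     */
    static bool sketch_should_we_balance(int dst_cpu,
                                         const bool group_mask[NR_SKETCH_CPUS],
                                         const bool idle[NR_SKETCH_CPUS])
    {
            int first = -1;

            if (!group_mask[dst_cpu])
                    return false;               /* not part of this group at all */

            for (int cpu = 0; cpu < NR_SKETCH_CPUS; cpu++) {
                    if (!group_mask[cpu])
                            continue;
                    if (idle[cpu])
                            return cpu == dst_cpu;  /* first idle CPU balances */
                    if (first < 0)
                            first = cpu;
            }
            return first == dst_cpu;            /* otherwise the first CPU does */
    }
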
11063 .dst_cpu = this_cpu, in load_balance()
11170 __cpumask_clear_cpu(env.dst_cpu, env.cpus); in load_balance()
11173 env.dst_cpu = env.new_dst_cpu; in load_balance()
11405 .dst_cpu = target_cpu, in active_load_balance_cpu_stop()
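
Finally, load_balance() seeds env.dst_cpu with this_cpu and, when every candidate task turned out to be pinned away from it (lines 11170-11173), removes that CPU from env.cpus and retries with env.new_dst_cpu; active_load_balance_cpu_stop() instead fixes dst_cpu to the requested target_cpu. A sketch of the retry pattern; try_pull() and the sketch_* types are hypothetical stand-ins for the kernel's detach/attach pass and lb_env.

    #include <stdbool.h>

    #define NR_SKETCH_CPUS 8

    struct sketch_retry_env {
            int  dst_cpu;
            int  new_dst_cpu;              /* fallback suggested by a failed pass */
            bool cpus[NR_SKETCH_CPUS];     /* stand-in for env.cpus */
    };

    /*
     * Retry-with-another-destination pattern from load_balance(): if every
     * candidate task was pinned away from dst_cpu, drop that CPU from the
     * candidate mask and run another pass aimed at new_dst_cpu.  try_pull()
     * is a hypothetical stand-in for the whole detach/attach pass.
     */
    static int sketch_load_balance(struct sketch_retry_env *env,
                                   int (*try_pull)(struct sketch_retry_env *env))
    {
            int pulled;

            while (!(pulled = try_pull(env)) && env->new_dst_cpu >= 0) {
                    env->cpus[env->dst_cpu] = false;  /* __cpumask_clear_cpu() */
                    env->dst_cpu = env->new_dst_cpu;  /* retry toward the fallback */
                    env->new_dst_cpu = -1;
            }
            return pulled;
    }
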