Lines matching refs:dst_cpu. All hits below are in kernel/sched/fair.c; the leading number on each hit is its line in that file.
1413 int src_nid, int dst_cpu) in should_numa_migrate_memory() argument
1416 int dst_nid = cpu_to_node(dst_cpu); in should_numa_migrate_memory()
1419 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); in should_numa_migrate_memory()
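In should_numa_migrate_memory() the destination CPU is used in two derived forms: cpu_to_node(dst_cpu) gives dst_nid, and cpu_pid_to_cpupid(dst_cpu, current->pid) packs the CPU and PID into a cpupid used by the NUMA fault filter, which holds a page back until repeated faults come from the same place. A minimal user-space sketch of that packing idea; the 8-bit PID field and the two-faults rule as stated here are simplifications, not the real LAST_CPUPID layout:

#include <stdio.h>

/* Hypothetical layout: low 8 bits hold a truncated pid, the rest the cpu. */
#define PID_BITS	8
#define PID_MASK	((1u << PID_BITS) - 1)

static unsigned int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((unsigned int)cpu << PID_BITS) | ((unsigned int)pid & PID_MASK);
}

static int cpupid_to_cpu(unsigned int cpupid) { return cpupid >> PID_BITS; }
static int cpupid_to_pid(unsigned int cpupid) { return cpupid & PID_MASK; }

int main(void)
{
	unsigned int this_cpupid = cpu_pid_to_cpupid(5, 1234);

	/* Roughly: a page is only migrated once a later fault comes from the
	 * same packed (cpu, pid), i.e. last_cpupid == this_cpupid. */
	printf("cpu=%d pid(mod %d)=%d\n",
	       cpupid_to_cpu(this_cpupid), 1 << PID_BITS,
	       cpupid_to_pid(this_cpupid));
	return 0;
}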
1533 int dst_cpu, dst_nid; member
1642 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1645 if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) { in task_numa_assign()
1647 int start = env->dst_cpu; in task_numa_assign()
1656 env->dst_cpu = cpu; in task_numa_assign()
1657 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1671 if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) { in task_numa_assign()
1683 env->best_cpu = env->dst_cpu; in task_numa_assign()
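The task_numa_assign() hits show dst_cpu being claimed through an xchg() on rq->numa_migrate_on, with a fallback walk to another CPU when the destination runqueue was already claimed by a concurrent NUMA migration. A minimal user-space model of that claim-and-fallback pattern using C11 atomics; the CPU count and the linear scan order are invented for the example:

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for rq->numa_migrate_on: one claim flag per destination CPU. */
static atomic_int numa_migrate_on[NR_CPUS];

/* Try to claim dst_cpu; if it is taken, walk the other CPUs of the node. */
static int claim_dst_cpu(int dst_cpu)
{
	int cpu = dst_cpu;

	do {
		/* xchg() returning 0 means we are the first claimant. */
		if (atomic_exchange(&numa_migrate_on[cpu], 1) == 0)
			return cpu;
		cpu = (cpu + 1) % NR_CPUS;
	} while (cpu != dst_cpu);

	return -1;		/* every candidate is already claimed */
}

static void release_dst_cpu(int cpu)
{
	atomic_store(&numa_migrate_on[cpu], 0);
}

int main(void)
{
	int a = claim_dst_cpu(2);
	int b = claim_dst_cpu(2);	/* collides, falls back to another CPU */

	printf("first claim -> %d, second claim -> %d\n", a, b);
	release_dst_cpu(a);
	release_dst_cpu(b);
	return 0;
}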
1731 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
1868 cpu = env->dst_cpu; in task_numa_compare()
1879 env->dst_cpu = cpu; in task_numa_compare()
1936 env->dst_cpu = env->dst_stats.idle_cpu; in task_numa_find_cpu()
1958 env->dst_cpu = cpu; in task_numa_find_cpu()
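In task_numa_find_cpu() and task_numa_compare(), env->dst_cpu walks the candidate CPUs of the destination node (or jumps straight to dst_stats.idle_cpu) while the best candidate seen so far is remembered. A rough sketch of that selection loop; the scoring function, node layout and idle-CPU shortcut condition are made up:

#include <stdio.h>

#define NODE_CPUS 4

struct dst_stats { int idle_cpu; };	/* tiny subset of the real stats */

static long score_move(int cpu) { return 10 * (cpu % 3); }	/* invented metric */

/* Spirit of task_numa_find_cpu(): take a known idle CPU of the destination
 * node if there is one, otherwise score every CPU and keep the best. */
static int pick_dst_cpu(const int *node_cpus, const struct dst_stats *st)
{
	int best_cpu = -1;
	long best_imp = -1;

	if (st->idle_cpu >= 0)
		return st->idle_cpu;	/* env->dst_cpu = dst_stats.idle_cpu */

	for (int i = 0; i < NODE_CPUS; i++) {
		int dst_cpu = node_cpus[i];	/* env->dst_cpu = cpu */
		long imp = score_move(dst_cpu);

		if (imp > best_imp) {
			best_imp = imp;
			best_cpu = dst_cpu;
		}
	}
	return best_cpu;
}

int main(void)
{
	int node_cpus[NODE_CPUS] = { 4, 5, 6, 7 };
	struct dst_stats st = { .idle_cpu = -1 };

	printf("chosen dst_cpu = %d\n", pick_dst_cpu(node_cpus, &st));
	st.idle_cpu = 6;
	printf("with cpu6 idle -> %d\n", pick_dst_cpu(node_cpus, &st));
	return 0;
}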
6640 static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) in cpu_util_next() argument
6651 if (task_cpu(p) == cpu && dst_cpu != cpu) in cpu_util_next()
6653 else if (task_cpu(p) != cpu && dst_cpu == cpu) in cpu_util_next()
6665 if (dst_cpu == cpu) in cpu_util_next()
6682 compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) in compute_energy() argument
6702 unsigned long util_freq = cpu_util_next(cpu, p, dst_cpu); in compute_energy()
6715 if (cpu == dst_cpu) { in compute_energy()
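cpu_util_next() and compute_energy() belong to the energy-aware placement path: the first estimates what each CPU's utilization would be if p ran on dst_cpu (removing p's contribution from its current CPU, adding it on the candidate), and the second charges the whole performance domain at a level set by its busiest CPU. A self-contained toy model of both steps; the utilization numbers and the cost() curve are invented and stand in for the kernel's PELT signals and energy model:

#include <stdio.h>

#define NR_CPUS 4
static unsigned long base_util[NR_CPUS] = { 300, 150, 700, 50 };

/* Utilization of @cpu if a task of @task_util (currently on @src) ran on @dst. */
static unsigned long util_if_placed(int cpu, int src, int dst, unsigned long task_util)
{
	unsigned long u = base_util[cpu];

	if (cpu == src && dst != cpu)
		u -= (u < task_util) ? u : task_util;	/* task leaves this CPU */
	else if (cpu == dst && src != cpu)
		u += task_util;				/* task lands on this CPU */
	return u;
}

/* Toy energy model: the whole domain runs at a level set by its busiest CPU
 * (max_util), and each CPU is charged that cost scaled by its own share of
 * the utilization. cost() is an invented convex curve, not the kernel EM. */
static unsigned long cost(unsigned long max_util) { return max_util * max_util / 64; }

static unsigned long domain_energy(int src, int dst, unsigned long task_util)
{
	unsigned long sum_util = 0, max_util = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned long u = util_if_placed(cpu, src, dst, task_util);

		sum_util += u;
		if (u > max_util)
			max_util = u;
	}
	return cost(max_util) * sum_util / 1024;
}

int main(void)
{
	for (int dst = 0; dst < NR_CPUS; dst++)
		printf("place on cpu%d -> energy %lu\n",
		       dst, domain_energy(0, dst, 120));
	return 0;
}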
7656 int dst_cpu; member
7711 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
7741 dst_nid = cpu_to_node(env->dst_cpu); in migrate_degrades_locality()
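task_hot() and migrate_degrades_locality() use env->dst_cpu only to find the destination node (and, at 7711, to check the core-scheduling cookie) and then ask whether moving there would leave more of the task's memory behind. A small sketch of that comparison against per-node fault counters; the topology and fault numbers are made up, and the real function also weighs NUMA-group faults and settle time:

#include <stdio.h>
#include <stdbool.h>

#define NR_NODES 2
/* Hypothetical per-task NUMA fault counters, indexed by node. */
static unsigned long numa_faults[NR_NODES] = { 900, 120 };

static int cpu_to_node(int cpu) { return cpu / 4; }	/* invented topology */

/* Rough spirit of migrate_degrades_locality(): moving from src_cpu's node to
 * dst_cpu's node is bad if the task has recorded fewer NUMA faults (i.e. has
 * less of its memory) on the destination node. */
static bool migrate_degrades_locality(int src_cpu, int dst_cpu)
{
	int src_nid = cpu_to_node(src_cpu);
	int dst_nid = cpu_to_node(dst_cpu);

	if (src_nid == dst_nid)
		return false;
	return numa_faults[dst_nid] < numa_faults[src_nid];
}

int main(void)
{
	printf("move cpu1 -> cpu5 degrades locality: %d\n",
	       migrate_degrades_locality(1, 5));
	return 0;
}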
7799 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) in can_migrate_task()
7806 if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { in can_migrate_task()
7882 set_task_cpu(p, env->dst_cpu); in detach_task()
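can_migrate_task() rejects a destination that is throttled for the task's cgroup pair or missing from p->cpus_ptr, and detach_task() then commits the move with set_task_cpu(p, env->dst_cpu). A toy version of just the affinity check and the commit, with the cpumask reduced to a plain bitmask:

#include <stdio.h>
#include <stdbool.h>

/* Toy task: allowed-CPU mask as a plain bitmask, plus the CPU it sits on. */
struct task {
	unsigned long	cpus_allowed;	/* stands in for p->cpus_ptr */
	int		cpu;
};

static bool cpumask_test_cpu(int cpu, unsigned long mask)
{
	return mask & (1UL << cpu);
}

/* Abbreviated can_migrate_task(): only the affinity check from the listing;
 * the real function also tests throttling, cache hotness and NUMA locality. */
static bool can_migrate_task(const struct task *p, int dst_cpu)
{
	return cpumask_test_cpu(dst_cpu, p->cpus_allowed);
}

static void detach_task(struct task *p, int dst_cpu)
{
	p->cpu = dst_cpu;		/* set_task_cpu(p, env->dst_cpu) */
}

int main(void)
{
	struct task p = { .cpus_allowed = 0x3, .cpu = 0 };	/* CPUs 0-1 only */

	if (can_migrate_task(&p, 1))
		detach_task(&p, 1);
	printf("task now on cpu%d\n", p.cpu);
	if (!can_migrate_task(&p, 3))
		printf("cpu3 not in the task's affinity mask, skipped\n");
	return 0;
}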
8620 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group)); in update_sg_lb_stats()
8667 sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) { in update_sg_lb_stats()
8714 (!capacity_greater(capacity_of(env->dst_cpu), sg->sgc->max_capacity) || in update_sd_pick_busiest()
8798 (capacity_greater(sg->sgc->min_capacity, capacity_of(env->dst_cpu)))) in update_sd_pick_busiest()
9173 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); in update_sd_lb_stats()
9180 update_group_capacity(env->sd, env->dst_cpu); in update_sd_lb_stats()
9602 !capacity_greater(capacity_of(env->dst_cpu), capacity) && in find_busiest_queue()
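In the group-statistics code, dst_cpu tells update_sg_lb_stats() and update_sd_lb_stats() which group is local, and its capacity is compared with a margin (capacity_greater()) so that a small destination CPU does not volunteer to pull work only a bigger CPU can hold. A sketch of that margined comparison; the 1078/1024 ratio and the capacity values are assumptions:

#include <stdio.h>
#include <stdbool.h>

/* Margined capacity comparison in the spirit of capacity_greater(); the
 * exact margin used by the kernel macro is assumed here. */
static bool capacity_greater(unsigned long cap1, unsigned long cap2)
{
	return cap1 * 1024 > cap2 * 1078;
}

int main(void)
{
	unsigned long dst_cap = 446;	/* e.g. a little core (made up) */
	unsigned long src_max = 1024;	/* a big core in the busiest group */

	/* update_sd_pick_busiest()/find_busiest_queue() skip candidates whose
	 * capacity clearly exceeds dst_cpu's: pulling there would hurt. */
	if (!capacity_greater(dst_cap, src_max))
		printf("dst_cpu (cap %lu) is not clearly bigger than %lu: "
		       "don't pull big-core-only load here\n", dst_cap, src_max);
	return 0;
}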
9695 sched_asym_prefer(env->dst_cpu, env->src_cpu); in asym_active_balance()
9734 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) in need_active_balance()
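asym_active_balance() and need_active_balance() treat a migration to dst_cpu as worthwhile when the destination outranks the source, either by architecture-defined priority or by a clear capacity gap. A sketch of the priority half, in the spirit of sched_asym_prefer(); the priority table is invented:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical per-CPU priorities, in the style of what
 * arch_asym_cpu_priority() reports on an ITMT-like system. */
static int cpu_priority[4] = { 10, 10, 30, 30 };

/* CPU a is preferred over CPU b when the architecture ranks it higher;
 * asym_active_balance() uses this to justify pulling a running task from a
 * lower-priority src_cpu to a higher-priority dst_cpu. */
static bool sched_asym_prefer(int a, int b)
{
	return cpu_priority[a] > cpu_priority[b];
}

int main(void)
{
	int dst_cpu = 2, src_cpu = 0;

	printf("active-balance %d -> %d worthwhile: %d\n",
	       src_cpu, dst_cpu, sched_asym_prefer(dst_cpu, src_cpu));
	return 0;
}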
9755 if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) in should_we_balance()
9771 return cpu == env->dst_cpu; in should_we_balance()
9775 return group_balance_cpu(sg) == env->dst_cpu; in should_we_balance()
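should_we_balance() makes exactly one CPU of the group, ideally an idle one, run the balance pass: the function simply asks whether that designated CPU is env->dst_cpu. A rough model of that election; the idle pattern and the fallback to CPU 0 stand in for group_balance_cpu():

#include <stdio.h>
#include <stdbool.h>

#define GROUP_CPUS 4

static bool idle[GROUP_CPUS] = { false, true, false, true };

/* Spirit of should_we_balance(): only one CPU of the group, the first idle
 * one or a designated fallback, actually runs the balance, so the work is
 * not duplicated on every CPU. */
static bool should_we_balance(int dst_cpu)
{
	for (int cpu = 0; cpu < GROUP_CPUS; cpu++) {
		if (idle[cpu])
			return cpu == dst_cpu;	/* first idle CPU wins */
	}
	return dst_cpu == 0;	/* stand-in for group_balance_cpu() */
}

int main(void)
{
	for (int cpu = 0; cpu < GROUP_CPUS; cpu++)
		printf("cpu%d balances: %d\n", cpu, should_we_balance(cpu));
	return 0;
}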
9795 .dst_cpu = this_cpu, in load_balance()
9900 __cpumask_clear_cpu(env.dst_cpu, env.cpus); in load_balance()
9903 env.dst_cpu = env.new_dst_cpu; in load_balance()
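load_balance() starts with env.dst_cpu = this_cpu; when every movable task turns out to be pinned away from it, the CPU is cleared from env.cpus and the pass retries with env.new_dst_cpu. A toy version of that retry loop; the pinning rule and CPU count are invented:

#include <stdio.h>

#define NR_CPUS 4

/* Toy "can any task move to dst?" check: tasks are pinned to CPUs 2-3. */
static int some_task_allows(int dst_cpu)
{
	return dst_cpu >= 2;
}

int main(void)
{
	unsigned long cpus = (1UL << NR_CPUS) - 1;	/* env.cpus */
	int dst_cpu = 0;				/* env.dst_cpu = this_cpu */

	/* Spirit of the dst-pinned path in load_balance(): if nothing can move
	 * to dst_cpu, drop it from the candidate mask and retry with another
	 * destination CPU from the same group (new_dst_cpu). */
	while (!some_task_allows(dst_cpu)) {
		cpus &= ~(1UL << dst_cpu);		/* __cpumask_clear_cpu() */
		int next = -1;
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpus & (1UL << cpu)) {
				next = cpu;
				break;
			}
		}
		if (next < 0)
			break;
		dst_cpu = next;			/* env.dst_cpu = env.new_dst_cpu */
	}
	printf("final dst_cpu = %d\n", dst_cpu);
	return 0;
}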
10135 .dst_cpu = target_cpu, in active_load_balance_cpu_stop()