Lines Matching refs:dest_cpu

struct member:
  1754    int dest_cpu;

__migrate_task():
  1767    struct task_struct *p, int dest_cpu)          (argument)
  1770    if (!is_cpu_allowed(p, dest_cpu))
  1774    rq = move_queued_task(rq, rf, p, dest_cpu);

migration_cpu_stop():
  1812    rq = __migrate_task(rq, &rf, p, arg->dest_cpu);
  1814    p->wake_cpu = arg->dest_cpu;
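
Taken together, these hits show the basic hand-off: a destination CPU travels next to the task pointer in a migration_arg, the stopper callback migration_cpu_stop() unpacks it, and __migrate_task() refuses the move unless is_cpu_allowed() accepts the target. The following is a minimal userspace sketch of that shape, not the kernel code itself; struct task, the bitmask affinity field and the queued flag are simplified stand-ins.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for task_struct, cpumask and the runqueue. */
    struct task {
        unsigned long cpus_mask;   /* one bit per allowed CPU */
        int cpu;                   /* CPU the task currently occupies */
        int wake_cpu;
        bool queued;               /* models task_on_rq_queued() */
    };

    struct migration_arg {
        struct task *task;
        int dest_cpu;
    };

    static bool is_cpu_allowed(struct task *p, int cpu)
    {
        return p->cpus_mask & (1UL << cpu);
    }

    /* Shape of __migrate_task(): bail out on a disallowed CPU, otherwise move. */
    static void __migrate_task(struct task *p, int dest_cpu)
    {
        if (!is_cpu_allowed(p, dest_cpu))
            return;
        p->cpu = dest_cpu;         /* stands in for move_queued_task() */
    }

    /* Shape of migration_cpu_stop(): a queued task is moved right away; one
     * that is off the runqueue only has its wake-up CPU redirected. */
    static void migration_cpu_stop(struct migration_arg *arg)
    {
        struct task *p = arg->task;

        if (p->queued)
            __migrate_task(p, arg->dest_cpu);
        else
            p->wake_cpu = arg->dest_cpu;
    }

    int main(void)
    {
        struct task p = { .cpus_mask = 0xA, .cpu = 1, .wake_cpu = 1, .queued = true };
        struct migration_arg arg = { &p, 3 };   /* ask for CPU 3 (allowed by 0xA) */

        migration_cpu_stop(&arg);
        printf("task now on CPU %d, wake_cpu %d\n", p.cpu, p.wake_cpu);
        return 0;
    }

The same migration_arg construction reappears in the affinity and exec paths below, which is what lets them share migration_cpu_stop().
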
__set_cpus_allowed_ptr():
  1875    unsigned int dest_cpu;                        (local)
  1907    dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
  1908    if (dest_cpu >= nr_cpu_ids) {
  1930    struct migration_arg arg = { p, dest_cpu };
  1940    rq = move_queued_task(rq, &rf, p, dest_cpu);
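
The affinity path picks dest_cpu by intersecting the set of usable CPUs with the caller's new mask; cpumask_any_and_distribute() spreads successive picks around rather than always returning the lowest set bit, and a result of nr_cpu_ids or more means the intersection was empty. A small userspace model of that selection follows, with a word-sized bitmask and one static rotor standing in for the kernel's cpumask type and per-CPU rotor.

    #include <stdio.h>

    #define NR_CPUS 8   /* stand-in for nr_cpu_ids */

    static int rotor;   /* models the distribution state kept by the kernel helper */

    /* Model of cpumask_any_and_distribute(): pick a CPU from the intersection,
     * starting the search one past the previous pick so repeated calls spread
     * work across the eligible CPUs. Returns NR_CPUS when nothing matches. */
    static int cpumask_any_and_distribute(unsigned valid, unsigned requested)
    {
        unsigned both = valid & requested;
        int i;

        for (i = 0; i < NR_CPUS; i++) {
            int cpu = (rotor + i) % NR_CPUS;

            if (both & (1u << cpu)) {
                rotor = cpu + 1;
                return cpu;
            }
        }
        return NR_CPUS;
    }

    int main(void)
    {
        unsigned cpu_valid_mask = 0xFF;   /* CPUs 0-7 usable */
        unsigned new_mask = 0x0C;         /* caller asked for CPUs 2 and 3 */
        int dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);

        if (dest_cpu >= NR_CPUS) {
            fprintf(stderr, "no usable CPU in the requested mask\n");
            return 1;
        }
        printf("first pick: CPU %d\n", dest_cpu);
        printf("second pick: CPU %d\n", cpumask_any_and_distribute(cpu_valid_mask, new_mask));
        return 0;
    }

Once a CPU is chosen, the two later hits show the two possible outcomes in this function: a migration_arg handed to the stopper for a task that is currently running, or a direct move_queued_task() for one that is merely queued.
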
select_fallback_rq():
  2283    int dest_cpu;                                 (local)
  2294    for_each_cpu(dest_cpu, nodemask) {
  2295    if (!cpu_active(dest_cpu))
  2297    if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
  2298    return dest_cpu;
  2304    for_each_cpu(dest_cpu, p->cpus_ptr) {
  2305    if (!is_cpu_allowed(p, dest_cpu))
  2344    return dest_cpu;
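
select_fallback_rq() is the "the CPU I wanted is unavailable" path: the first loop prefers an active, still-allowed CPU on the task's own NUMA node, and the second settles for any CPU that passes is_cpu_allowed(); if both passes come up empty, the real function widens what the task is allowed to use and tries again. Below is a compact userspace model of the two visible passes, with toy node masks and a plain active-mask test in place of is_cpu_allowed()'s fuller checks.

    #include <stdio.h>

    #define NR_CPUS 8

    struct task {
        unsigned cpus_ptr;   /* allowed CPUs, one bit each */
    };

    static unsigned cpu_active_mask = 0x3E;          /* CPUs 1-5 active */
    static unsigned node_mask[2] = { 0x0F, 0xF0 };   /* two toy NUMA nodes */

    static int select_fallback_cpu(struct task *p, int node)
    {
        int cpu;

        /* Pass 1: stay on the task's node if an active, allowed CPU exists there. */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (!(node_mask[node] & (1u << cpu)))
                continue;
            if (!(cpu_active_mask & (1u << cpu)))
                continue;
            if (p->cpus_ptr & (1u << cpu))
                return cpu;
        }

        /* Pass 2: any allowed CPU that is usable at all. */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (!(p->cpus_ptr & (1u << cpu)))
                continue;
            if (cpu_active_mask & (1u << cpu))   /* stands in for is_cpu_allowed() */
                return cpu;
        }

        return -1;   /* the kernel would widen the allowed mask and retry instead */
    }

    int main(void)
    {
        struct task p = { .cpus_ptr = 0xF0 };   /* only node-1 CPUs allowed */

        /* Node 0 has no allowed CPU, so the second pass picks CPU 4. */
        printf("fallback CPU: %d\n", select_fallback_cpu(&p, 0));
        return 0;
    }
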
sched_exec():
  3893    int dest_cpu;                                 (local)
  3896    dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
  3897    if (dest_cpu == smp_processor_id())
  3900    if (likely(cpu_active(dest_cpu))) {
  3901    struct migration_arg arg = { p, dest_cpu };
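
sched_exec() treats exec() as a cheap balancing opportunity: the task is about to replace its address space, so its cache and memory footprint is at a minimum, and the scheduling class is asked for a preferred CPU with SD_BALANCE_EXEC. If that CPU is the one already executing the task, nothing happens; if it is active, a migration_arg is built exactly as in the affinity path. A rough userspace sketch of that decision follows; the load-based placement policy is invented for the example and is not the kernel's select_task_rq().

    #include <stdio.h>

    #define NR_CPUS 4

    static unsigned cpu_active_mask = 0xF;

    static int smp_processor_id(void) { return 1; }   /* pretend we run on CPU 1 */

    /* Invented placement policy: put the exec'ing task on the least-loaded CPU. */
    static int select_task_rq_exec(const int *load)
    {
        int cpu, best = 0;

        for (cpu = 1; cpu < NR_CPUS; cpu++)
            if (load[cpu] < load[best])
                best = cpu;
        return best;
    }

    int main(void)
    {
        int load[NR_CPUS] = { 3, 5, 1, 4 };
        int dest_cpu = select_task_rq_exec(load);

        if (dest_cpu == smp_processor_id()) {
            puts("already on the preferred CPU, nothing to do");
            return 0;
        }
        if (cpu_active_mask & (1u << dest_cpu))
            printf("would build a migration_arg and migrate to CPU %d\n", dest_cpu);
        return 0;
    }
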
migrate_tasks():
  6743    int dest_cpu;                                 (local)
  6797    dest_cpu = select_fallback_rq(dead_rq->cpu, next);
  6798    rq = __migrate_task(rq, rf, next, dest_cpu);
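
migrate_tasks() drains a runqueue whose CPU is being taken offline: it repeatedly picks the next runnable task, asks select_fallback_rq() where that task may still run, and pushes it there with __migrate_task(). A toy userspace version of the loop; the array-backed runqueue and the shift-by-one fallback policy are simplifications made for the example.

    #include <stdio.h>

    #define NR_TASKS 3

    struct task { int id; int cpu; };

    /* Toy fallback policy; the real select_fallback_rq() is modelled above. */
    static int select_fallback_rq(int dead_cpu, struct task *p)
    {
        (void)p;
        return (dead_cpu + 1) % 4;
    }

    int main(void)
    {
        struct task dead_rq[NR_TASKS] = { { 10, 2 }, { 11, 2 }, { 12, 2 } };
        int dead_cpu = 2, i;

        /* "Until the dead runqueue is empty": pick next, find a home, move it. */
        for (i = 0; i < NR_TASKS; i++) {
            int dest_cpu = select_fallback_rq(dead_cpu, &dead_rq[i]);

            dead_rq[i].cpu = dest_cpu;   /* stands in for __migrate_task() */
            printf("task %d moved off CPU %d to CPU %d\n",
                   dead_rq[i].id, dead_cpu, dest_cpu);
        }
        return 0;
    }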