/Linux-v5.15/arch/x86/um/ |
D | ptrace_32.c |
    198  int err, n, cpu = task_cpu(child);  in get_fpregs()
    215  int n, cpu = task_cpu(child);  in set_fpregs()
    228  int err, n, cpu = task_cpu(child);  in get_fpxregs()
    244  int n, cpu = task_cpu(child);  in set_fpxregs()
|
/Linux-v5.15/arch/ia64/include/asm/ |
D | switch_to.h |
     62  (task_cpu(current) != \
     64  task_thread_info(current)->last_cpu = task_cpu(current); \
|
/Linux-v5.15/kernel/rcu/ |
D | tasks.h |
     487  cpu = task_cpu(t);  in check_holdout_task()
     933  int cpu = task_cpu(t);  in trc_inspect_reader()
    1012  cpu = task_cpu(t);  in trc_wait_for_one_reader()
    1099  cpu = task_cpu(t);  in show_stalled_task_trace()
|
D | tree_stall.h |
    464  cpu = gpk ? task_cpu(gpk) : -1;  in rcu_check_gp_kthread_starvation()
    508  cpu = task_cpu(gpk);  in rcu_check_gp_kthread_expired_fqs_timer()
|
D | tree_nocb.h |
    1346  rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,  in show_rcu_nocb_gp_state()
    1385  rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,  in show_rcu_nocb_state()
|
/Linux-v5.15/kernel/sched/ |
D | stop_task.c | 16 return task_cpu(p); /* stop tasks as never migrate */ in select_task_rq_stop()
|
D | deadline.c |
     352  struct dl_bw *dl_b = dl_bw_of(task_cpu(p));  in task_non_contending()
     357  __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in task_non_contending()
    1359  struct dl_bw *dl_b = dl_bw_of(task_cpu(p));  in inactive_task_timer()
    1368  __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in inactive_task_timer()
    1969  int cpu = task_cpu(task);  in find_later_rq()
    2119  BUG_ON(rq->cpu != task_cpu(p));  in pick_next_pushable_dl_task()
    2360  __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));  in set_cpus_allowed_dl()
    2678  int cpus, err = -1, cpu = task_cpu(p);  in sched_dl_overflow()
|
D | core.c |
    2067  return cpu_curr(task_cpu(p)) == p;  in task_curr()
    2238  BUG_ON(task_cpu(p) != new_cpu);  in move_queued_task()
    2333  if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask))  in migration_cpu_stop()
    2364  if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) {  in migration_cpu_stop()
    2377  stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,  in migration_cpu_stop()
    2611  if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {  in affine_move_task()
    2798  !cpumask_test_cpu(task_cpu(p), new_mask))) {  in __set_cpus_allowed_ptr_locked()
    3033  if (task_cpu(p) != new_cpu) {  in set_task_cpu()
    3096  if (task_cpu(arg->dst_task) != arg->dst_cpu)  in migrate_swap_stop()
    3099  if (task_cpu(arg->src_task) != arg->src_cpu)  in migrate_swap_stop()
    [all …]
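The hit at line 2067 is the entire body of task_curr(), which reports whether p is what its CPU is running right now. A minimal sketch of the surrounding definition, assuming cpu_curr(cpu) is the usual cpu_rq(cpu)->curr accessor:

    /* Sketch of kernel/sched/core.c:task_curr(); cpu_curr(cpu) is
     * assumed to expand to cpu_rq(cpu)->curr as in mainline. */
    inline int task_curr(const struct task_struct *p)
    {
            return cpu_curr(task_cpu(p)) == p;
    }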
|
D | idle.c | 414 return task_cpu(p); /* IDLE tasks as never migrated */ in select_task_rq_idle()
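The stop and idle scheduling classes both pin their tasks: their select_task_rq() hooks (the one-line hits in stop_task.c above and idle.c here) simply hand back task_cpu(p). A sketch of the two callbacks in context, assuming the v5.15-era (p, cpu, flags) signature:

    static int
    select_task_rq_stop(struct task_struct *p, int cpu, int flags)
    {
            return task_cpu(p); /* stop tasks as never migrate */
    }

    static int
    select_task_rq_idle(struct task_struct *p, int cpu, int flags)
    {
            return task_cpu(p); /* IDLE tasks as never migrated */
    }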
|
D | cpudeadline.c | 138 (cpu == task_cpu(p) && cap == max_cap)) { in cpudl_find()
|
D | psi.c |
    783  task->pid, task->comm, task_cpu(task),  in psi_flags_change()
    794  int cpu = task_cpu(task);  in psi_task_change()
    825  int cpu = task_cpu(prev);  in psi_task_switch()
|
D | fair.c |
     1969  .src_cpu = task_cpu(p),  in task_numa_migrate()
     2934  int src_nid = cpu_to_node(task_cpu(p));  in update_scan_period()
     5820  if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in cpu_load_without()
     5843  if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in cpu_runnable_without()
     6566  if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in cpu_util_without()
     6651  if (task_cpu(p) == cpu && dst_cpu != cpu)  in cpu_util_next()
     6653  else if (task_cpu(p) != cpu && dst_cpu == cpu)  in cpu_util_next()
     8844  if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))  in task_running_on_cpu()
    11328  set_task_rq(p, task_cpu(p));  in task_set_group_fair()
    11335  set_task_rq(p, task_cpu(p));  in task_move_group_fair()
|
/Linux-v5.15/kernel/trace/ |
D | trace_sched_wakeup.c |
    393  entry->next_cpu = task_cpu(next);  in tracing_sched_switch_trace()
    421  entry->next_cpu = task_cpu(wakee);  in tracing_sched_wakeup_trace()
    566  wakeup_cpu = task_cpu(p);  in probe_wakeup()
|
/Linux-v5.15/include/linux/ |
D | kdb.h | 193 unsigned int cpu = task_cpu(p); in kdb_process_cpu()
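kdb uses this hit to decide which CPU's state to display for a process. A sketch, from memory, of the helper the hit sits in; the clamp guards against a garbage CPU number read out of a crashed image:

    static inline
    unsigned int kdb_process_cpu(const struct task_struct *p)
    {
            unsigned int cpu = task_cpu(p);    /* the hit at line 193 */

            if (cpu > num_possible_cpus())     /* implausible value? */
                    cpu = 0;                   /* fall back to CPU 0 */
            return cpu;
    }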
|
D | sched.h |
    2115  static inline unsigned int task_cpu(const struct task_struct *p)  in task_cpu() function
    2128  static inline unsigned int task_cpu(const struct task_struct *p)  in task_cpu() function
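These two hits are the definition itself, once for CONFIG_SMP and once for the UP build. A sketch of their likely v5.15 shape (from memory; the SMP variant reads the cached per-task CPU field, and on UP there is only CPU 0):

    #ifdef CONFIG_SMP
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
    #ifdef CONFIG_THREAD_INFO_IN_TASK
            return READ_ONCE(p->cpu);
    #else
            return READ_ONCE(task_thread_info(p)->cpu);
    #endif
    }
    #else
    static inline unsigned int task_cpu(const struct task_struct *p)
    {
            return 0;
    }
    #endif /* CONFIG_SMP */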
|
/Linux-v5.15/include/linux/sched/ |
D | topology.h | 271 return cpu_to_node(task_cpu(p)); in task_node()
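task_node() composes the two per-task locality helpers: the NUMA node a task is on is simply the node of the CPU it is on. The full helper, reconstructed around the body shown in the hit above:

    static inline int task_node(const struct task_struct *p)
    {
            return cpu_to_node(task_cpu(p));
    }

NUMA balancing code compares task_node(p) against a page's node to decide whether memory or the task should move.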
|
/Linux-v5.15/Documentation/scheduler/ |
D | sched-capacity.rst |
    341  task_util(p) < capacity(task_cpu(p))
    404  then it might become CPU-bound, IOW ``task_util(p) > capacity(task_cpu(p))``;
    423  task_uclamp_min(p) <= capacity(task_cpu(cpu))
    437  task_bandwidth(p) < capacity(task_cpu(p))
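sched-capacity.rst states the fitness criterion task_util(p) < capacity(task_cpu(p)). The fair class implements roughly this test as sketched below, with a built-in headroom margin; names are per v5.15 mainline but reproduced from memory:

    /* ~20% headroom: the utilization must fit within 80% of max. */
    #define fits_capacity(cap, max) ((cap) * 1280 < (max) * 1024)

    static inline int task_fits_capacity(struct task_struct *p,
                                         long capacity)
    {
            return fits_capacity(uclamp_task_util(p), capacity);
    }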
|
/Linux-v5.15/kernel/locking/ |
D | mutex.c |
    365  vcpu_is_preempted(task_cpu(owner))) {  in mutex_spin_on_owner()
    401  retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));  in mutex_can_spin_on_owner()
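Both mutex.c hits implement the same heuristic: optimistic spinning only pays off while the lock owner is actually executing, so the spinner bails out if the owner goes off-CPU or, under paravirt, if the vCPU it sits on has been preempted. A condensed sketch of the mutex_spin_on_owner() loop (details elided):

    bool ret = true;

    rcu_read_lock();
    while (__mutex_owner(lock) == owner) {
            /* Stop spinning once the owner cannot make progress. */
            if (!owner->on_cpu || need_resched() ||
                vcpu_is_preempted(task_cpu(owner))) {
                    ret = false;
                    break;
            }
            cpu_relax();
    }
    rcu_read_unlock();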
|
D | rtmutex.c | 1377 vcpu_is_preempted(task_cpu(owner))) { in rtmutex_spin_on_owner()
|
/Linux-v5.15/include/trace/events/ |
D | sched.h |
    158  __entry->target_cpu = task_cpu(p);
    289  __entry->orig_cpu = task_cpu(p);
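These hits live inside tracepoint definitions: the TP_fast_assign() block of the wakeup event class samples the woken task's CPU at trace time. A sketch with the surrounding TRACE_EVENT boilerplate elided; field names follow the hits above:

    TP_fast_assign(
            memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
            __entry->pid            = p->pid;
            __entry->prio           = p->prio;
            __entry->target_cpu     = task_cpu(p);
    ),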
|
/Linux-v5.15/kernel/ |
D | stop_machine.c | 58 struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task)); in print_stop_info()
|
/Linux-v5.15/arch/mips/kernel/ |
D | process.c | 850 cpumask_set_cpu(task_cpu(t), &process_cpus); in mips_set_process_fp_mode()
|
/Linux-v5.15/arch/powerpc/kernel/ |
D | process.c |
    2053  unsigned long cpu = task_cpu(p);  in valid_irq_stack()
    2071  unsigned long cpu = task_cpu(p);  in valid_emergency_stack()
|
/Linux-v5.15/fs/proc/ |
D | array.c | 617 seq_put_decimal_ll(m, " ", task_cpu(task)); in do_task_stat()
|
/Linux-v5.15/kernel/time/ |
D | tick-sched.c | 358 cpu = task_cpu(tsk); in tick_nohz_kick_task()
|