
Searched for refs:cpu_rq (results 1 – 12 of 12), sorted by relevance.

/Linux-v5.4/kernel/sched/
cpuacct.c
114 raw_spin_lock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_read()
128 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_read()
143 raw_spin_lock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_write()
150 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); in cpuacct_cpuusage_write()
255 raw_spin_lock_irq(&cpu_rq(cpu)->lock); in cpuacct_all_seq_show()
261 raw_spin_unlock_irq(&cpu_rq(cpu)->lock); in cpuacct_all_seq_show()
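The cpuacct.c hits above all follow one pattern: on 32-bit kernels a 64-bit usage counter cannot be read or written atomically, so the code takes the owning CPU's runqueue lock to get a consistent snapshot. A minimal sketch of that pattern, assuming scheduler-internal context (kernel/sched/sched.h in scope); read_remote_usage() is a hypothetical helper, not the kernel's cpuacct_cpuusage_read():

/*
 * Sketch only: on !CONFIG_64BIT a 64-bit load can tear, so the remote
 * CPU's rq->lock serializes against the counter's updater.
 */
static u64 read_remote_usage(u64 __percpu *usage, int cpu)
{
	u64 val;

#ifndef CONFIG_64BIT
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	val = *per_cpu_ptr(usage, cpu);
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	val = *per_cpu_ptr(usage, cpu);
#endif

	return val;
}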
membarrier.c
99 if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) & in membarrier_global_expedited()
108 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_global_expedited()
177 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_private_expedited()
240 struct rq *rq = cpu_rq(cpu); in sync_runqueues_membarrier_state()
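membarrier.c never takes the remote rq->lock at all: in this tree rq->curr is RCU-managed, so the expedited paths peek at another CPU's current task under rcu_read_lock(). A sketch of that access pattern; cpu_runs_other_mm() is a hypothetical name:

/*
 * Sketch: does @cpu currently run a task from a different mm than
 * @skip_mm? rq->curr is published with RCU in this tree, so no
 * runqueue lock is needed for a racy-but-safe peek.
 */
static bool cpu_runs_other_mm(int cpu, struct mm_struct *skip_mm)
{
	struct task_struct *p;
	bool ret;

	rcu_read_lock();
	p = rcu_dereference(cpu_rq(cpu)->curr);
	ret = p && p->mm && p->mm != skip_mm;
	rcu_read_unlock();

	return ret;
}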
core.c
533 struct rq *rq = cpu_rq(cpu); in resched_cpu()
592 struct rq *rq = cpu_rq(cpu); in wake_up_idle_cpu()
1255 memset(&cpu_rq(cpu)->uclamp, 0, sizeof(struct uclamp_rq)); in init_uclamp()
1256 cpu_rq(cpu)->uclamp_flags = 0; in init_uclamp()
1498 rq = cpu_rq(new_cpu); in move_queued_task()
1766 dst_rq = cpu_rq(cpu); in __migrate_swap_task()
1803 src_rq = cpu_rq(arg->src_cpu); in migrate_swap_stop()
1804 dst_rq = cpu_rq(arg->dst_cpu); in migrate_swap_stop()
2137 struct task_struct *old_stop = cpu_rq(cpu)->stop; in sched_set_stop_task()
2153 cpu_rq(cpu)->stop = stop; in sched_set_stop_task()
[all …]
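The core.c matches show the canonical write-side pattern: resolve the CPU to its rq with cpu_rq(), take rq->lock with interrupts disabled, mutate, unlock. A condensed sketch modeled on resched_cpu(); kick_cpu() is an illustrative wrapper, while resched_curr() is the real scheduler helper:

/* Sketch modeled on resched_cpu(): poke a remote CPU's scheduler. */
static void kick_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);	/* sets TIF_NEED_RESCHED, may IPI */
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}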
fair.c
1496 struct rq *rq = cpu_rq(cpu); in update_numa_stats()
1523 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1534 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1593 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
1850 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
2280 tsk = READ_ONCE(cpu_rq(cpu)->curr); in task_numa_group()
4876 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); in sync_throttle()
5367 struct rq *rq = cpu_rq(cpu); in sched_idle_cpu()
5380 return cpu_rq(cpu)->cpu_capacity; in capacity_of()
5385 struct rq *rq = cpu_rq(cpu); in cpu_avg_load_per_task()
[all …]
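In fair.c, cpu_rq() makes per-CPU load, utilization, and capacity fields directly addressable, which keeps load-balancing heuristics short. An illustrative check in that spirit; the 80% threshold is invented for the sketch (it is not the kernel's imbalance_pct logic), and the fields involved are SMP-only:

/* Sketch: is @cpu's CFS utilization above ~80% of its capacity? */
static bool cpu_nearly_full(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);

	return util * 5 > rq->cpu_capacity * 4;
}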
deadline.c
51 return &cpu_rq(i)->rd->dl_bw; in dl_bw_of()
56 struct root_domain *rd = cpu_rq(i)->rd; in dl_bw_cpus()
69 return &cpu_rq(i)->dl.dl_bw; in dl_bw_of()
557 later_rq = cpu_rq(cpu); in dl_task_offline_migration()
1610 rq = cpu_rq(cpu); in select_task_rq_dl()
1632 cpu_rq(target)->dl.earliest_dl.curr) || in select_task_rq_dl()
1633 (cpu_rq(target)->dl.dl_nr_running == 0))) in select_task_rq_dl()
1973 later_rq = cpu_rq(cpu); in find_lock_later_rq()
2152 src_rq = cpu_rq(cpu); in pull_dl_task()
2537 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl); in sched_dl_do_global()
[all …]
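deadline.c mostly uses cpu_rq() as a stepping stone to the root domain: rq->rd holds per-domain state such as the deadline bandwidth accounting (rd->dl_bw). A sketch of the dl_bw_cpus()-style walk over a domain's active CPUs, assuming the caller keeps rq->rd stable (e.g. via RCU):

/* Sketch after dl_bw_cpus(): count active CPUs in @cpu's root domain. */
static int active_cpus_in_domain(int cpu)
{
	struct root_domain *rd = cpu_rq(cpu)->rd;
	int i, cpus = 0;

	for_each_cpu_and(i, rd->span, cpu_active_mask)
		cpus++;

	return cpus;
}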
stats.c
29 rq = cpu_rq(cpu); in show_schedstat()
sched.h
1045 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) (macro definition)
1047 #define task_rq(p) cpu_rq(task_cpu(p))
1048 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1337 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
2209 #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2227 struct rq *rq = cpu_rq(i); in __dl_update()
2364 return cpu_rq(cpu)->cpu_capacity_orig; in capacity_orig_of()
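sched.h line 1045 is the definition every other hit expands to: runqueues is a per-CPU array of struct rq, and cpu_rq() is simply &per_cpu(runqueues, cpu), with task_rq() and cpu_curr() layered on top. A hypothetical illustration of the macro in use; total_nr_running() is not a kernel function, and the read is lockless and therefore approximate:

/* Sketch: sum nr_running across all runqueues, locklessly. */
static unsigned int total_nr_running(void)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += cpu_rq(cpu)->nr_running;

	return sum;
}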
cpufreq_schedutil.c
213 struct rq *rq = cpu_rq(cpu); in schedutil_cpu_util()
296 struct rq *rq = cpu_rq(sg_cpu->cpu); in sugov_get_util()
448 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) in ignore_dl_rate_limit()
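cpufreq_schedutil.c resolves the rq once per update and pulls utilization signals from it to size the next frequency request. A sketch under the assumption that the v5.4 helpers cpu_util_cfs() and arch_scale_cpu_capacity() are in scope; sample_cpu_util() is an illustrative wrapper:

/* Sketch: sample @cpu's CFS utilization, clamped to its capacity. */
static unsigned long sample_cpu_util(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long util = cpu_util_cfs(rq);
	unsigned long max = arch_scale_cpu_capacity(cpu);

	return min(util, max);
}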
debug.c
487 struct rq *rq = cpu_rq(cpu); in print_cfs_rq()
508 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; in print_cfs_rq()
604 dl_bw = &cpu_rq(cpu)->rd->dl_bw; in print_dl_rq()
616 struct rq *rq = cpu_rq(cpu); in print_cpu()
rt.c
160 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry()
617 return &cpu_rq(cpu)->rt; in sched_rt_period_rt_rq()
1399 rq = cpu_rq(cpu); in select_task_rq_rt()
1436 p->prio < cpu_rq(target)->rt.highest_prio.curr) in select_task_rq_rt()
1720 lowest_rq = cpu_rq(cpu); in find_lock_lowest_rq()
2079 src_rq = cpu_rq(cpu); in pull_rt_task()
2616 struct rt_rq *rt_rq = &cpu_rq(i)->rt; in sched_rt_global_constraints()
2716 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) in print_rt_stats()
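rt.c's wakeup path (select_task_rq_rt(), lines 1399–1436 above) relies on each rq caching the best runnable RT priority in rt.highest_prio.curr, so a waking task can cheaply test whether it would preempt on a candidate CPU. A sketch of that comparison; rt_task_would_preempt() is a hypothetical name, and the field is SMP-only:

/*
 * Sketch: would @p preempt whatever RT work is queued on @cpu?
 * Lower ->prio value means higher RT priority.
 */
static bool rt_task_would_preempt(struct task_struct *p, int cpu)
{
	return p->prio < READ_ONCE(cpu_rq(cpu)->rt.highest_prio.curr);
}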
topology.c
347 struct root_domain *rd = cpu_rq(cpu)->rd; in build_perf_domains()
663 struct rq *rq = cpu_rq(cpu); in cpu_attach_domain()
2020 rq = cpu_rq(i); in build_sched_domains()
2223 rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; in partition_sched_domains_locked()
2260 cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) { in partition_sched_domains_locked()
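topology.c uses cpu_rq() when rewiring the domain hierarchy: after a rebuild, each CPU's rq is reattached to its root_domain and its base sched_domain. A condensed sketch of the attach step inside cpu_attach_domain(); rq_attach_root() is the real helper, while domain-tree degeneration and cleanup of the old tree are omitted:

/* Sketch of the attach step: hook @cpu's rq to @rd and @sd. */
static void attach_cpu_to_domain(int cpu, struct sched_domain *sd,
				 struct root_domain *rd)
{
	struct rq *rq = cpu_rq(cpu);

	rq_attach_root(rq, rd);
	rcu_assign_pointer(rq->sd, sd);
}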
/Linux-v5.4/tools/perf/Documentation/
perf-probe.txt
234 … be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist …