Searched refs:cpu_rq (Results 1 – 15 of 15) sorted by relevance
/Linux-v6.1/kernel/sched/
D | membarrier.c
  277  if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &  in membarrier_global_expedited()
  285  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_global_expedited()
  357  p = rcu_dereference(cpu_rq(cpu_id)->curr);  in membarrier_private_expedited()
  370  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_private_expedited()
  465  struct rq *rq = cpu_rq(cpu);  in sync_runqueues_membarrier_state()

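Every membarrier.c hit above follows the same pattern: another CPU's current task is only reachable through that CPU's runqueue, and rq->curr is an __rcu pointer, so it has to be read inside an RCU read-side critical section because the remote CPU may reschedule at any moment. A minimal sketch of that pattern, written as a hypothetical helper rather than code lifted from membarrier.c:

/*
 * Hypothetical helper (not from membarrier.c): does the task currently
 * running on @cpu use the given mm?  The remote rq->curr pointer is
 * read under RCU, mirroring the hits at lines 285/357/370 above.
 */
static bool cpu_is_running_mm(int cpu, struct mm_struct *mm)
{
        struct task_struct *p;
        bool match;

        rcu_read_lock();
        p = rcu_dereference(cpu_rq(cpu)->curr);
        match = p && p->mm == mm;
        rcu_read_unlock();

        return match;
}

The answer is inherently a racy snapshot (the remote CPU can switch tasks right after the read), which is why membarrier only uses it to decide which CPUs are worth sending an IPI to.
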
D | cpuacct.c
  112  raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_read()
  129  raw_spin_rq_unlock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_read()
  148  raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_write()
  156  raw_spin_rq_unlock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_write()
  339  lockdep_assert_rq_held(cpu_rq(cpu));  in cpuacct_charge()

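The cpuacct.c hits show the locking side of the same access: when another CPU's usage counters have to be read or rewritten consistently, the code takes that CPU's runqueue lock with raw_spin_rq_lock_irq(cpu_rq(cpu)) and drops it with the matching unlock, while cpuacct_charge() (line 339) merely asserts the lock is already held on the hot path. A hedged sketch of that shape, using a made-up counter instead of cpuacct's real bookkeeping:

/*
 * Illustrative only: @usage stands in for cpuacct's per-CPU counters.
 * Holding cpu_rq(cpu)'s lock keeps the value stable against the owning
 * CPU charging time to it from its scheduling path.
 */
static u64 cpu_usage_read_and_clear(int cpu, u64 *usage)
{
        u64 val;

        raw_spin_rq_lock_irq(cpu_rq(cpu));
        val = *usage;
        *usage = 0;
        raw_spin_rq_unlock_irq(cpu_rq(cpu));

        return val;
}
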
D | core.c
  318  raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);  in sched_core_lock()
  327  raw_spin_unlock(&cpu_rq(t)->__lock);  in sched_core_unlock()
  348  cpu_rq(t)->core_enabled = enabled;  in __sched_core_flip()
  350  cpu_rq(cpu)->core->core_forceidle_start = 0;  in __sched_core_flip()
  361  cpu_rq(cpu)->core_enabled = enabled;  in __sched_core_flip()
  371  WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));  in sched_core_assert_empty()
  1045  struct rq *rq = cpu_rq(cpu);  in resched_cpu()
  1111  struct rq *rq = cpu_rq(cpu);  in wake_up_idle_cpu()
  1992  init_uclamp_rq(cpu_rq(cpu));  in init_uclamp()
  2322  rq = cpu_rq(new_cpu);  in move_queued_task()
  [all …]

D | cpufreq_schedutil.c
  159  struct rq *rq = cpu_rq(sg_cpu->cpu);  in sugov_get_util()
  284  boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);  in sugov_iowait_apply()
  308  if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)  in ignore_dl_rate_limit()
  347  if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&  in sugov_update_single_freq()
  397  if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&  in sugov_update_single_perf()

D | deadline.c
  105  return &cpu_rq(i)->rd->dl_bw;  in dl_bw_of()
  110  struct root_domain *rd = cpu_rq(i)->rd;  in dl_bw_cpus()
  151  return __dl_bw_capacity(cpu_rq(i)->rd->span);  in dl_bw_capacity()
  157  struct root_domain *rd = cpu_rq(cpu)->rd;  in dl_bw_visited()
  175  struct rq *rq = cpu_rq(i);  in __dl_update()
  183  return &cpu_rq(i)->dl.dl_bw;  in dl_bw_of()
  696  later_rq = cpu_rq(cpu);  in dl_task_offline_migration()
  1836  rq = cpu_rq(cpu);  in select_task_rq_dl()
  1866  dl_task_is_earliest_deadline(p, cpu_rq(target)))  in select_task_rq_dl()
  2231  later_rq = cpu_rq(cpu);  in find_lock_later_rq()
  [all …]

D | fair.c
  1787  struct rq *rq = cpu_rq(cpu);  in update_numa_stats()
  1819  struct rq *rq = cpu_rq(env->dst_cpu);  in task_numa_assign()
  1834  rq = cpu_rq(env->dst_cpu);  in task_numa_assign()
  1849  rq = cpu_rq(env->best_cpu);  in task_numa_assign()
  1908  struct rq *dst_rq = cpu_rq(env->dst_cpu);  in task_numa_compare()
  2267  best_rq = cpu_rq(env.best_cpu);  in task_numa_migrate()
  2710  tsk = READ_ONCE(cpu_rq(cpu)->curr);  in task_numa_group()
  5575  cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu));  in sync_throttle()
  5900  return sched_idle_rq(cpu_rq(cpu));  in sched_idle_cpu()
  6153  return cpu_rq(cpu)->cpu_capacity;  in capacity_of()
  [all …]

D | debug.c
  585  struct rq *rq = cpu_rq(cpu);  in print_cfs_rq()
  606  rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;  in print_cfs_rq()
  706  dl_bw = &cpu_rq(cpu)->rd->dl_bw;  in print_dl_rq()
  718  struct rq *rq = cpu_rq(cpu);  in print_cpu()
  1079  cpu, latency, cpu_rq(cpu)->ticks_without_resched);  in resched_latency_warn()

D | sched.h
  1193  #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))  macro
  1195  #define task_rq(p) cpu_rq(task_cpu(p))
  1196  #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
  1286  if (sched_core_cookie_match(cpu_rq(cpu), p))  in sched_group_cookie_match()
  1763  for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
  2786  #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
  2878  return cpu_rq(cpu)->cpu_capacity_orig;  in capacity_orig_of()
  2962  cfs_rq = &cpu_rq(cpu)->cfs;  in cpu_util_cfs()

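The sched.h hits are the definition site: cpu_rq(cpu) is nothing more than the address of the per-CPU runqueues instance for that CPU, and task_rq()/cpu_curr() are thin wrappers layered on top (lines 1195 and 1196). A hedged illustration of how the macros compose; the dump helper is hypothetical, not something in sched.h:

/*
 * Hypothetical debug helper: walk every possible CPU and print how many
 * tasks its runqueue holds.  Taking the address via cpu_rq() needs no
 * locking; reading the fields is only a racy snapshot.
 */
static void dump_nr_running(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct rq *rq = cpu_rq(cpu);    /* &per_cpu(runqueues, cpu) */

                pr_info("cpu%d: nr_running=%u\n", cpu, rq->nr_running);
        }
}
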
D | rt.c
  218  struct rq *rq = cpu_rq(cpu);  in init_tg_rt_entry()
  706  return &cpu_rq(cpu)->rt;  in sched_rt_period_rt_rq()
  1609  rq = cpu_rq(cpu);  in select_task_rq_rt()
  1659  p->prio < cpu_rq(target)->rt.highest_prio.curr)  in select_task_rq_rt()
  1981  lowest_rq = cpu_rq(cpu);  in find_lock_lowest_rq()
  2379  src_rq = cpu_rq(cpu);  in pull_rt_task()
  2946  struct rt_rq *rt_rq = &cpu_rq(i)->rt;  in sched_rt_global_constraints()
  3053  for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))  in print_rt_stats()

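The rt.c hit at line 1659 is a good example of why these cross-CPU lookups exist at all: select_task_rq_rt() peeks at a candidate runqueue's cached highest queued RT priority to judge whether the waking task would preempt it (for RT tasks, a numerically lower prio means higher priority). Restated as a hypothetical predicate, not a function that exists in rt.c:

/*
 * Hypothetical predicate: would RT task @p preempt whatever is queued
 * on @cpu right now?  rt.highest_prio.curr caches the best (lowest)
 * priority value on that CPU's rt_rq, as used at line 1659 above.
 */
static bool rt_task_would_preempt(struct task_struct *p, int cpu)
{
        return p->prio < cpu_rq(cpu)->rt.highest_prio.curr;
}
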
D | core_sched.c
  275  rq_i = cpu_rq(i);  in __sched_core_account_forceidle()

D | stats.c
  132  rq = cpu_rq(cpu);  in show_schedstat()

D | topology.c
  377  struct root_domain *rd = cpu_rq(cpu)->rd;  in build_perf_domains()
  709  struct rq *rq = cpu_rq(cpu);  in cpu_attach_domain()
  2373  rq = cpu_rq(i);  in build_sched_domains()
  2576  rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;  in partition_sched_domains_locked()
  2613  cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {  in partition_sched_domains_locked()

D | cputime.c
  987  rq = cpu_rq(cpu);  in kcpustat_field()
  1074  rq = cpu_rq(cpu);  in kcpustat_cpu_fetch()

D | psi.c
  1131  struct rq *rq = cpu_rq(cpu);  in psi_cgroup_restart()

/Linux-v6.1/tools/perf/Documentation/
D | perf-probe.txt
  234  … be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist …