Searched refs:cpu_rq (Results 1 – 13 of 13) sorted by relevance
kernel/sched/cpuacct.c:
  115  raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_read()
  129  raw_spin_rq_unlock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_read()
  144  raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_write()
  151  raw_spin_rq_unlock_irq(cpu_rq(cpu));  in cpuacct_cpuusage_write()
  256  raw_spin_rq_lock_irq(cpu_rq(cpu));  in cpuacct_all_seq_show()
  262  raw_spin_rq_unlock_irq(cpu_rq(cpu));  in cpuacct_all_seq_show()
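
Every cpuacct hit has the same shape: take the target CPU's runqueue lock with IRQs disabled, touch the usage counter, release the lock. A kernel-context sketch of that pattern, with a hypothetical helper name and counter pointer (the real counters live in kernel/sched/cpuacct.c):

    /* Sketch only: read a remote CPU's usage counter under its rq lock. */
    static u64 cpuusage_read_locked(int cpu, u64 *percpu_usage)
    {
            u64 val;

            /* Serialize against the remote CPU updating the counter. */
            raw_spin_rq_lock_irq(cpu_rq(cpu));
            val = *percpu_usage;
            raw_spin_rq_unlock_irq(cpu_rq(cpu));

            return val;
    }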

kernel/sched/membarrier.c:
  277  if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &  in membarrier_global_expedited()
  285  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_global_expedited()
  357  p = rcu_dereference(cpu_rq(cpu_id)->curr);  in membarrier_private_expedited()
  370  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_private_expedited()
  465  struct rq *rq = cpu_rq(cpu);  in sync_runqueues_membarrier_state()
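
The membarrier hits read cpu_rq(cpu)->curr locklessly: the remote CPU can switch tasks at any moment, so the pointer is fetched with rcu_dereference() inside an RCU read-side section, which keeps the task_struct from being freed underneath the reader. A kernel-context sketch (cpu_runs_mm() is a hypothetical helper):

    /* Sketch only: check whether @mm is current on @cpu, without its rq lock. */
    static bool cpu_runs_mm(int cpu, struct mm_struct *mm)
    {
            struct task_struct *p;
            bool ret;

            rcu_read_lock();
            /* ->curr may change under us; RCU keeps @p valid while we look. */
            p = rcu_dereference(cpu_rq(cpu)->curr);
            ret = p && p->mm == mm;
            rcu_read_unlock();

            return ret;
    }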

kernel/sched/core.c:
  247  raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);  in sched_core_lock()
  256  raw_spin_unlock(&cpu_rq(t)->__lock);  in sched_core_unlock()
  277  cpu_rq(t)->core_enabled = enabled;  in __sched_core_flip()
  291  cpu_rq(cpu)->core_enabled = enabled;  in __sched_core_flip()
  301  WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));  in sched_core_assert_empty()
  987  struct rq *rq = cpu_rq(cpu);  in resched_cpu()
  1053  struct rq *rq = cpu_rq(cpu);  in wake_up_idle_cpu()
  1927  init_uclamp_rq(cpu_rq(cpu));  in init_uclamp()
  2235  rq = cpu_rq(new_cpu);  in move_queued_task()
  3052  dst_rq = cpu_rq(cpu);  in __migrate_swap_task()
  [all …]

kernel/sched/deadline.c:
  73  return &cpu_rq(i)->rd->dl_bw;  in dl_bw_of()
  78  struct root_domain *rd = cpu_rq(i)->rd;  in dl_bw_cpus()
  97  struct root_domain *rd = cpu_rq(i)->rd;  in __dl_bw_capacity()
  125  struct root_domain *rd = cpu_rq(cpu)->rd;  in dl_bw_visited()
  136  return &cpu_rq(i)->dl.dl_bw;  in dl_bw_of()
  622  later_rq = cpu_rq(cpu);  in dl_task_offline_migration()
  1680  rq = cpu_rq(cpu);  in select_task_rq_dl()
  1711  cpu_rq(target)->dl.earliest_dl.curr) ||  in select_task_rq_dl()
  1712  (cpu_rq(target)->dl.dl_nr_running == 0)))  in select_task_rq_dl()
  2064  later_rq = cpu_rq(cpu);  in find_lock_later_rq()
  [all …]
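
Lines 73 and 136 are the two flavours of dl_bw_of(): with CONFIG_SMP, deadline bandwidth is accounted per root domain, while on UP it sits on the single runqueue's embedded dl_rq. Reconstructed from the hits above (a sketch of the split, not the full kernel context):

    #ifdef CONFIG_SMP
    static inline struct dl_bw *dl_bw_of(int i)
    {
            /* SMP: bandwidth is shared by every CPU in the root domain. */
            return &cpu_rq(i)->rd->dl_bw;
    }
    #else
    static inline struct dl_bw *dl_bw_of(int i)
    {
            /* UP: bandwidth lives on the lone runqueue's dl_rq. */
            return &cpu_rq(i)->dl.dl_bw;
    }
    #endif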

kernel/sched/fair.c:
  1610  struct rq *rq = cpu_rq(cpu);  in update_numa_stats()
  1642  struct rq *rq = cpu_rq(env->dst_cpu);  in task_numa_assign()
  1657  rq = cpu_rq(env->dst_cpu);  in task_numa_assign()
  1672  rq = cpu_rq(env->best_cpu);  in task_numa_assign()
  1731  struct rq *dst_rq = cpu_rq(env->dst_cpu);  in task_numa_compare()
  2079  best_rq = cpu_rq(env.best_cpu);  in task_numa_migrate()
  2509  tsk = READ_ONCE(cpu_rq(cpu)->curr);  in task_numa_group()
  5267  cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));  in sync_throttle()
  5583  return sched_idle_rq(cpu_rq(cpu));  in sched_idle_cpu()
  5857  return cpu_rq(cpu)->cpu_capacity;  in capacity_of()
  [all …]

kernel/sched/stats.c:
  29  rq = cpu_rq(cpu);  in show_schedstat()

kernel/sched/debug.c:
  578  struct rq *rq = cpu_rq(cpu);  in print_cfs_rq()
  599  rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;  in print_cfs_rq()
  697  dl_bw = &cpu_rq(cpu)->rd->dl_bw;  in print_dl_rq()
  709  struct rq *rq = cpu_rq(cpu);  in print_cpu()
  1074  cpu, latency, cpu_rq(cpu)->ticks_without_resched);  in resched_latency_warn()

kernel/sched/sched.h:
  1366  #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))  macro
  1368  #define task_rq(p) cpu_rq(task_cpu(p))
  1369  #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
  1740  for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
  2719  #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
  2742  struct rq *rq = cpu_rq(i);  in __dl_update()
  2919  return cpu_rq(cpu)->cpu_capacity_orig;  in capacity_orig_of()
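
The sched.h hits are the definition site: cpu_rq() is nothing more than a per_cpu() lookup into the static runqueues array, and task_rq() and cpu_curr() are thin wrappers over it. A minimal, runnable user-space analogue of that per-CPU-array idiom (all names hypothetical; plain array indexing stands in for per_cpu()):

    #include <stdio.h>

    #define NR_CPUS 8

    struct rq {
            int nr_running;
            int curr_pid;
    };

    /* One runqueue per CPU, like DEFINE_PER_CPU(struct rq, runqueues). */
    static struct rq runqueues[NR_CPUS];

    #define cpu_rq(cpu)     (&runqueues[(cpu)])
    #define cpu_curr(cpu)   (cpu_rq(cpu)->curr_pid)

    int main(void)
    {
            cpu_rq(3)->nr_running = 2;
            cpu_rq(3)->curr_pid = 1234;
            printf("cpu3: %d running, curr pid %d\n",
                   cpu_rq(3)->nr_running, cpu_curr(3));
            return 0;
    }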

kernel/sched/rt.c:
  162  struct rq *rq = cpu_rq(cpu);  in init_tg_rt_entry()
  659  return &cpu_rq(cpu)->rt;  in sched_rt_period_rt_rq()
  1443  rq = cpu_rq(cpu);  in select_task_rq_rt()
  1493  p->prio < cpu_rq(target)->rt.highest_prio.curr)  in select_task_rq_rt()
  1805  lowest_rq = cpu_rq(cpu);  in find_lock_lowest_rq()
  2191  src_rq = cpu_rq(cpu);  in pull_rt_task()
  2754  struct rt_rq *rt_rq = &cpu_rq(i)->rt;  in sched_rt_global_constraints()
  2854  for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))  in print_rt_stats()

kernel/sched/cpufreq_schedutil.c:
  166  struct rq *rq = cpu_rq(sg_cpu->cpu);  in sugov_get_util()
  315  if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)  in ignore_dl_rate_limit()

kernel/sched/topology.c:
  357  struct root_domain *rd = cpu_rq(cpu)->rd;  in build_perf_domains()
  689  struct rq *rq = cpu_rq(cpu);  in cpu_attach_domain()
  2244  rq = cpu_rq(i);  in build_sched_domains()
  2447  rd = cpu_rq(cpumask_any(doms_cur[i]))->rd;  in partition_sched_domains_locked()
  2484  cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {  in partition_sched_domains_locked()

kernel/sched/cputime.c:
  967  rq = cpu_rq(cpu);  in kcpustat_field()
  1054  rq = cpu_rq(cpu);  in kcpustat_cpu_fetch()

tools/perf/Documentation/perf-probe.txt:
  234  … be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist …