Searched refs:cpu_rq (Results 1 – 12 of 12) sorted by relevance
kernel/sched/cpuacct.c:
  114  raw_spin_lock_irq(&cpu_rq(cpu)->lock);      in cpuacct_cpuusage_read()
  128  raw_spin_unlock_irq(&cpu_rq(cpu)->lock);    in cpuacct_cpuusage_read()
  143  raw_spin_lock_irq(&cpu_rq(cpu)->lock);      in cpuacct_cpuusage_write()
  150  raw_spin_unlock_irq(&cpu_rq(cpu)->lock);    in cpuacct_cpuusage_write()
  255  raw_spin_lock_irq(&cpu_rq(cpu)->lock);      in cpuacct_all_seq_show()
  261  raw_spin_unlock_irq(&cpu_rq(cpu)->lock);    in cpuacct_all_seq_show()
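All of the cpuacct hits are lock/unlock pairs: the remote CPU's runqueue lock is held so that a 64-bit usage counter can be read or written atomically even on 32-bit architectures. A minimal sketch of that pattern, assuming a hypothetical per-CPU counter my_usage and placement in kernel/sched/ so that sched.h (which defines cpu_rq()) is in scope:

    #include "sched.h"	/* kernel/sched/sched.h: struct rq, cpu_rq() */

    /* Hypothetical counter, not a real kernel symbol. */
    static DEFINE_PER_CPU(u64, my_usage);

    static u64 my_usage_read(int cpu)
    {
    	u64 val;

    	/*
    	 * A u64 load can tear on 32-bit. Holding the remote CPU's
    	 * runqueue lock with IRQs off makes the read atomic, which is
    	 * why cpuacct_cpuusage_read() locks cpu_rq(cpu)->lock above.
    	 */
    	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
    	val = per_cpu(my_usage, cpu);
    	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);

    	return val;
    }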
kernel/sched/membarrier.c:
   83  p = task_rcu_dereference(&cpu_rq(cpu)->curr);    in membarrier_global_expedited()
  162  p = task_rcu_dereference(&cpu_rq(cpu)->curr);    in membarrier_private_expedited()
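Both membarrier hits peek at the task currently running on another CPU without taking that CPU's runqueue lock; task_rcu_dereference() protects against racing with the task exiting and its task_struct being freed. A sketch of the same pattern, with cpu_running_mm() as a hypothetical helper (membarrier_private_expedited() performs a comparable mm check inline):

    #include <linux/rcupdate.h>
    #include <linux/sched/task.h>	/* task_rcu_dereference() */
    #include "sched.h"			/* cpu_rq() */

    /* Hypothetical helper, not a real kernel function. */
    static bool cpu_running_mm(int cpu, struct mm_struct *mm)
    {
    	struct task_struct *p;
    	bool ret;

    	rcu_read_lock();
    	/*
    	 * rq->curr may be mid-exit; task_rcu_dereference() returns
    	 * NULL instead of a pointer whose task_struct could be freed
    	 * out from under us.
    	 */
    	p = task_rcu_dereference(&cpu_rq(cpu)->curr);
    	ret = p && p->mm == mm;
    	rcu_read_unlock();

    	return ret;
    }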
kernel/sched/core.c:
   479  struct rq *rq = cpu_rq(cpu);    in resched_cpu()
   538  struct rq *rq = cpu_rq(cpu);    in wake_up_idle_cpu()
   918  rq = cpu_rq(new_cpu);    in move_queued_task()
  1187  dst_rq = cpu_rq(cpu);    in __migrate_swap_task()
  1226  src_rq = cpu_rq(arg->src_cpu);    in migrate_swap_stop()
  1227  dst_rq = cpu_rq(arg->dst_cpu);    in migrate_swap_stop()
  1560  struct task_struct *old_stop = cpu_rq(cpu)->stop;    in sched_set_stop_task()
  1576  cpu_rq(cpu)->stop = stop;    in sched_set_stop_task()
  1785  struct rq *rq = cpu_rq(cpu);    in ttwu_queue_remote()
  1789  if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {    in ttwu_queue_remote()
  [all …]
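The core.c hits show the canonical way to mutate another CPU's runqueue: look it up with cpu_rq() and take rq->lock before touching it. A sketch of that lock-then-modify shape, condensed from resched_cpu() above (kick_remote_cpu is a hypothetical name):

    #include "sched.h"

    static void kick_remote_cpu(int cpu)
    {
    	struct rq *rq = cpu_rq(cpu);
    	unsigned long flags;

    	/* Remote rq state is only modified under its rq->lock. */
    	raw_spin_lock_irqsave(&rq->lock, flags);
    	if (cpu_online(cpu))
    		resched_curr(rq);
    	raw_spin_unlock_irqrestore(&rq->lock, flags);
    }

(ttwu_queue_remote() at lines 1785/1789 is the lock-free exception: it pushes the task onto the remote CPU's wake_list with llist_add() and lets that CPU drain the list itself.)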
kernel/sched/fair.c:
  1481  struct rq *rq = cpu_rq(cpu);    in update_numa_stats()
  1527  struct rq *rq = cpu_rq(env->dst_cpu);    in task_numa_assign()
  1538  rq = cpu_rq(env->best_cpu);    in task_numa_assign()
  1596  struct rq *dst_rq = cpu_rq(env->dst_cpu);    in task_numa_compare()
  1850  best_rq = cpu_rq(env.best_cpu);    in task_numa_migrate()
  2274  tsk = READ_ONCE(cpu_rq(cpu)->curr);    in task_numa_group()
  4818  cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));    in sync_throttle()
  5483  struct rq *rq = cpu_rq(cpu);    in source_load()
  5498  struct rq *rq = cpu_rq(cpu);    in target_load()
  5509  return cpu_rq(cpu)->cpu_capacity;    in capacity_of()
  [all …]
kernel/sched/deadline.c:
    51  return &cpu_rq(i)->rd->dl_bw;    in dl_bw_of()
    56  struct root_domain *rd = cpu_rq(i)->rd;    in dl_bw_cpus()
    69  return &cpu_rq(i)->dl.dl_bw;    in dl_bw_of()
   557  later_rq = cpu_rq(cpu);    in dl_task_offline_migration()
  1578  rq = cpu_rq(cpu);    in select_task_rq_dl()
  1600  cpu_rq(target)->dl.earliest_dl.curr) ||    in select_task_rq_dl()
  1601  (cpu_rq(target)->dl.dl_nr_running == 0)))    in select_task_rq_dl()
  1960  later_rq = cpu_rq(cpu);    in find_lock_later_rq()
  2145  src_rq = cpu_rq(cpu);    in pull_dl_task()
  2503  init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);    in sched_dl_do_global()
  [all …]
kernel/sched/stats.c:
    29  rq = cpu_rq(cpu);    in show_schedstat()
kernel/sched/sched.h:
   950  #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))    (macro definition)
   952  #define task_rq(p) cpu_rq(task_cpu(p))
   953  #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
  1146  for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
  2081  #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
  2099  struct rq *rq = cpu_rq(i);    in __dl_update()
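These are the definition site. cpu_rq() is nothing more than the per_cpu() accessor for the static per-CPU runqueue array, and task_rq()/cpu_curr() are thin wrappers over it. The surrounding lines, recalled from kernel/sched/sched.h of this kernel vintage (so treat exact placement as approximate), look like:

    DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

    #define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))
    #define this_rq()	this_cpu_ptr(&runqueues)
    #define task_rq(p)	cpu_rq(task_cpu(p))
    #define cpu_curr(cpu)	(cpu_rq(cpu)->curr)

Every other hit in this result list is built on these accessors.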
kernel/sched/rt.c:
   159  struct rq *rq = cpu_rq(cpu);    in init_tg_rt_entry()
   616  return &cpu_rq(cpu)->rt;    in sched_rt_period_rt_rq()
  1398  rq = cpu_rq(cpu);    in select_task_rq_rt()
  1435  p->prio < cpu_rq(target)->rt.highest_prio.curr)    in select_task_rq_rt()
  1730  lowest_rq = cpu_rq(cpu);    in find_lock_lowest_rq()
  2091  src_rq = cpu_rq(cpu);    in pull_rt_task()
  2627  struct rt_rq *rt_rq = &cpu_rq(i)->rt;    in sched_rt_global_constraints()
  2727  for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))    in print_rt_stats()
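The hit at line 1435 (like the select_task_rq_dl() hits at deadline.c lines 1600 to 1601) is the opposite of the core.c pattern: an unlocked peek at a remote runqueue's state, used only as a placement heuristic. A sketch condensing that check (rt_target_preferable is a hypothetical name):

    #include "sched.h"

    static bool rt_target_preferable(struct task_struct *p, int target)
    {
    	/*
    	 * Lower ->prio means higher priority. The remote rq is not
    	 * locked, so the value may be stale; losing the race costs
    	 * only a suboptimal CPU choice, never correctness, which is
    	 * why select_task_rq_rt() skips the lock here.
    	 */
    	return p->prio < cpu_rq(target)->rt.highest_prio.curr;
    }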
kernel/sched/debug.c:
   500  struct rq *rq = cpu_rq(cpu);    in print_cfs_rq()
   521  rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;    in print_cfs_rq()
   617  dl_bw = &cpu_rq(cpu)->rd->dl_bw;    in print_dl_rq()
   629  struct rq *rq = cpu_rq(cpu);    in print_cpu()
kernel/sched/cpufreq_schedutil.c:
   202  struct rq *rq = cpu_rq(sg_cpu->cpu);    in sugov_get_util()
   418  if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)    in ignore_dl_rate_limit()
kernel/sched/topology.c:
   435  struct rq *rq = cpu_rq(cpu);    in cpu_attach_domain()
  1696  rq = cpu_rq(i);    in build_sched_domains()
tools/perf/Documentation/perf-probe.txt:
   233  … be moved easily by modifying schedule(), but the same line matching 'rq=cpu_rq*' may still exist …

(This documentation hit describes perf probe's lazy line matching: a probe point defined by the source pattern 'rq=cpu_rq*' keeps working when edits to schedule() shift line numbers, as long as a matching line still exists in the function.)