Lines Matching refs:cpu_rq

1787 struct rq *rq = cpu_rq(cpu); in update_numa_stats()
1819 struct rq *rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1834 rq = cpu_rq(env->dst_cpu); in task_numa_assign()
1849 rq = cpu_rq(env->best_cpu); in task_numa_assign()
1908 struct rq *dst_rq = cpu_rq(env->dst_cpu); in task_numa_compare()
2267 best_rq = cpu_rq(env.best_cpu); in task_numa_migrate()
2710 tsk = READ_ONCE(cpu_rq(cpu)->curr); in task_numa_group()
5575 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu)); in sync_throttle()
5900 return sched_idle_rq(cpu_rq(cpu)); in sched_idle_cpu()
6153 return cpu_rq(cpu)->cpu_capacity; in capacity_of()
6233 if (sync && cpu_rq(this_cpu)->nr_running == 1) in wake_affine_idle()
6249 this_eff_load = cpu_load(cpu_rq(this_cpu)); in wake_affine_weight()
6267 prev_eff_load = cpu_load(cpu_rq(prev_cpu)); in wake_affine_weight()
6327 struct rq *rq = cpu_rq(i); in find_idlest_group_cpu()
6357 load = cpu_load(cpu_rq(i)); in find_idlest_group_cpu()
6424 sched_cpu_cookie_match(cpu_rq(cpu), p)) in __select_idle_cpu()
6802 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_next()
6906 unsigned long irq = cpu_util_irq(cpu_rq(prev_cpu)); in eenv_task_busy_time()
7115 util = uclamp_rq_util_with(cpu_rq(cpu), util, p); in find_energy_efficient_cpu()
7976 if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p)) in task_hot()
8559 struct rq *rq = cpu_rq(cpu); in update_blocked_averages()
8638 struct rq *rq = cpu_rq(cpu); in scale_rt_capacity()
8671 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu); in update_cpu_capacity()
8676 cpu_rq(cpu)->cpu_capacity = capacity; in update_cpu_capacity()
8677 trace_sched_cpu_capacity_tp(cpu_rq(cpu)); in update_cpu_capacity()
9003 struct rq *rq = cpu_rq(i); in update_sg_lb_stats()
9249 struct rq *rq = cpu_rq(cpu); in idle_cpu_without()
9285 struct rq *rq = cpu_rq(i); in update_sg_wakeup_stats()
9405 if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group)) in find_idlest_group()
10030 rq = cpu_rq(i); in find_busiest_queue()
10381 env.dst_rq = cpu_rq(env.new_dst_cpu); in load_balance()
10574 struct rq *target_rq = cpu_rq(target_cpu); in active_load_balance_cpu_stop()
10837 smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd); in kick_ilb()
11010 struct rq *rq = cpu_rq(cpu); in nohz_balance_enter_idle()
11143 rq = cpu_rq(balance_cpu); in _nohz_idle_balance()
11223 _nohz_idle_balance(cpu_rq(cpu), NOHZ_STATS_KICK); in nohz_run_idle_balance()
11923 rq = cpu_rq(i); in online_fair_sched_group()
11952 rq = cpu_rq(cpu); in unregister_fair_sched_group()
11964 struct rq *rq = cpu_rq(cpu); in init_tg_cfs_entry()
12012 struct rq *rq = cpu_rq(i); in __sched_group_set_shares()
12063 struct rq *rq = cpu_rq(i); in sched_group_set_idle()
12202 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) in print_cfs_stats()
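Every hit above follows the same pattern: cpu_rq(cpu) maps a CPU id to that CPU's per-CPU runqueue (struct rq), whose fields the caller then reads or updates. The functions named in this listing all live in kernel/sched/fair.c. As a minimal sketch of the pattern, here is the macro as paraphrased from kernel/sched/sched.h together with the one-line reader listed at line 6153 above; exact definitions vary across kernel versions:

	/* Paraphrased from kernel/sched/sched.h: runqueues is the
	 * per-CPU array of struct rq, and cpu_rq() resolves the
	 * runqueue for a given CPU id. */
	DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
	#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))

	/* Representative reader, as listed at line 6153 above:
	 * return the runqueue's current compute capacity. */
	static unsigned long capacity_of(int cpu)
	{
		return cpu_rq(cpu)->cpu_capacity;
	}

Note that cpu_rq() only locates the runqueue; each call site supplies its own synchronization, e.g. the READ_ONCE() of rq->curr at line 2710.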