Searched refs:cpu_of (Results 1 – 8 of 8) sorted by relevance
/Linux-v6.1/kernel/sched/
pelt.h
  118   delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));  in update_rq_clock_pelt()
  119   delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));  in update_rq_clock_pelt()
pelt.c
  439   running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));  in update_irq_load_avg()
  440   running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));  in update_irq_load_avg()
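Both PELT hits apply the same trick: elapsed wall-clock time is scaled twice, once by the CPU's microarchitectural capacity and once by its current frequency, so that time spent on a slow or down-clocked CPU counts for less in the load sums. A minimal userspace sketch of that fixed-point math (the shift mirrors the kernel's SCHED_CAPACITY_SHIFT; the capacity and frequency values below are made up):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirrors the kernel: full capacity is 1 << 10 == 1024. */
    #define SCHED_CAPACITY_SHIFT 10
    /* Mirrors cap_scale(): fixed-point multiply by cap/1024. */
    #define cap_scale(v, cap) (((v) * (cap)) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        uint64_t delta    = 4000; /* elapsed ns (made-up value)          */
        uint64_t cpu_cap  = 512;  /* e.g. a little core: half of 1024    */
        uint64_t freq_cap = 768;  /* e.g. running at 75% of max frequency */

        /* Same double scaling as update_rq_clock_pelt() and
         * update_irq_load_avg(): 4000 ns of wall time counts as
         * 4000 * (512/1024) * (768/1024) = 1500 ns of full-capacity time. */
        delta = cap_scale(delta, cpu_cap);
        delta = cap_scale(delta, freq_cap);
        printf("scaled delta = %llu ns\n", (unsigned long long)delta);
        return 0;
    }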
sched.h
  1171  static inline int cpu_of(struct rq *rq)  in cpu_of() (function definition)
  1261  for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {  in sched_core_cookie_match()
  2382  int cpu = cpu_of(rq);  in sched_update_tick_dependency()
  2467  if (!cpu_active(cpu_of(rq)))  in hrtick_enabled()
  2859  cpu_of(rq)));  in cpufreq_update_util()
  3044  rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq);  in uclamp_rq_is_capped()
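The 1171 hit is the definition itself. A stripped-down sketch of the v6.1 body, with struct rq reduced to the one member cpu_of() reads (the real struct in kernel/sched/sched.h has dozens of fields):

    #include <stdio.h>

    struct rq {
        int cpu;    /* set once when the runqueue is initialized */
    };

    static inline int cpu_of(struct rq *rq)
    {
    #ifdef CONFIG_SMP
        return rq->cpu;
    #else
        return 0;   /* UP build: the only CPU is CPU 0 */
    #endif
    }

    int main(void)
    {
        struct rq rq3 = { .cpu = 3 };

        /* Compile with -DCONFIG_SMP to exercise the SMP branch. */
        printf("cpu_of(rq3) = %d\n", cpu_of(&rq3));
        return 0;
    }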
core_sched.c
  242   const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq));  in __sched_core_account_forceidle()
fair.c
  351   int cpu = cpu_of(rq);  in list_add_leaf_cfs_rq()
  834   long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));  in post_init_entity_util_avg()
  3992  now += sched_clock_cpu(cpu_of(rq)) - u64_u32_load(rq->clock_idle);  in migrate_se_pelt_lag()
  4399  if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq))))  in util_est_update()
  4445  if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {  in update_misfit_status()
  5121  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in tg_unthrottle_up()
  5139  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in tg_throttle_down()
  5179  se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];  in throttle_cfs_rq()
  5245  se = cfs_rq->tg->se[cpu_of(rq)];  in unthrottle_cfs_rq()
  5725  struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];  in update_runtime_enabled()
  [all …]
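The throttling hits (tg_throttle_down(), tg_unthrottle_up(), throttle_cfs_rq(), unthrottle_cfs_rq()) share one pattern: a task group keeps per-CPU arrays of group runqueues and scheduling entities, and cpu_of() supplies the array index for the CPU being operated on. A self-contained toy model of that indexing (all type and function names here are simplified stand-ins, not kernel code):

    #include <stdio.h>
    #include <stdbool.h>

    struct rq { int cpu; };
    struct cfs_rq { bool throttled; };
    struct task_group {
        struct cfs_rq **cfs_rq;    /* one cfs_rq per possible CPU */
    };

    static inline int cpu_of(struct rq *rq) { return rq->cpu; }

    /* Same shape as the fair.c hits: pick this CPU's instance of the
     * group's runqueue, then operate on it. */
    static void toy_throttle(struct task_group *tg, struct rq *rq)
    {
        struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

        cfs_rq->throttled = true;
    }

    int main(void)
    {
        enum { NCPUS = 4 };
        struct cfs_rq storage[NCPUS] = { { false } };
        struct cfs_rq *slots[NCPUS];
        struct task_group tg = { .cfs_rq = slots };
        struct rq rq2 = { .cpu = 2 };

        for (int i = 0; i < NCPUS; i++)
            slots[i] = &storage[i];

        toy_throttle(&tg, &rq2);
        printf("cpu2 throttled: %d\n", storage[2].throttled);
        return 0;
    }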
core.c
  682   irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;  in update_rq_clock_task()
  708   steal = paravirt_steal_clock(cpu_of(rq));  in update_rq_clock_task()
  743   delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;  in update_rq_clock()
  770   WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());  in hrtick()
  823   smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);  in hrtick_start()
  1029  cpu = cpu_of(rq);  in resched_curr()
  1156  int cpu = cpu_of(rq);  in nohz_csd_func()
  2791  stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,  in affine_move_task()
  3736  if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))  in sched_ttwu_pending()
  3737  set_task_cpu(p, cpu_of(rq));  in sched_ttwu_pending()
  [all …]
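Most core.c hits route a per-CPU question (the clock, IRQ time, an IPI target) through cpu_of() so the same code works on any runqueue, not just the local one. A toy version of the update_rq_clock() hit at line 743, with a fake per-CPU counter standing in for the kernel's sched_clock_cpu():

    #include <stdio.h>
    #include <stdint.h>

    struct rq { int cpu; uint64_t clock; };

    static inline int cpu_of(struct rq *rq) { return rq->cpu; }

    /* Fake per-CPU clock; in the kernel, sched_clock_cpu() reads a
     * monotonic per-CPU nanosecond counter. */
    static uint64_t fake_ns[4];
    static uint64_t sched_clock_cpu(int cpu) { return fake_ns[cpu]; }

    /* Shape of the core.c hit: advance rq->clock by however much *its
     * own* CPU's clock moved, which is why the argument is cpu_of(rq)
     * rather than the calling CPU. */
    static void toy_update_rq_clock(struct rq *rq)
    {
        int64_t delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;

        if (delta < 0)   /* never let the runqueue clock go backwards */
            return;
        rq->clock += delta;
    }

    int main(void)
    {
        struct rq rq1 = { .cpu = 1, .clock = 0 };

        fake_ns[1] = 1000;
        toy_update_rq_clock(&rq1);
        printf("rq1.clock = %llu\n", (unsigned long long)rq1.clock);
        return 0;
    }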
rt.c
  562   (rt_rq = iter->rt_rq[cpu_of(rq)]);)
  581   int cpu = cpu_of(rq);  in sched_rt_rq_enqueue()
  599   int cpu = cpu_of(rq_of_rt_rq(rt_rq));  in sched_rt_rq_dequeue()
  2550  if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))  in switched_to_rt()
deadline.c
  1312  int cpu = cpu_of(rq);  in update_curr_dl()
  2507  src_dl_b = dl_bw_of(cpu_of(rq));  in set_cpus_allowed_dl()