Searched refs: task_rq (Results 1 - 5 of 5) sorted by relevance

/Linux-v5.15/kernel/sched/
core.c
143 if (prio_less(b, a, task_rq(a)->core->core_forceidle)) in __sched_core_less()
551 rq = task_rq(p); in __task_rq_lock()
553 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
575 rq = task_rq(p); in task_rq_lock()
594 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
2325 if (task_rq(p) == rq) { in migration_cpu_stop()
2400 if (task_rq(p) != rq) in push_cpu_stop()
2417 if (task_rq(p) == rq) { in push_cpu_stop()
2453 struct rq *rq = task_rq(p); in __do_set_cpus_allowed()
3021 lockdep_is_held(__rq_lockp(task_rq(p))))); in set_task_cpu()
[all …]
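
The core.c hits at 551-594 all share one shape: read task_rq(p), lock that runqueue, then confirm the task has not moved before trusting the result. Below is a simplified sketch of that retry loop, reconstructed from the hit lines above rather than copied from v5.15; raw_spin_rq_lock()/raw_spin_rq_unlock(), task_on_rq_migrating() and cpu_relax() are the existing kernel helpers, everything else is illustrative.

/*
 * Simplified sketch of the __task_rq_lock() retry pattern suggested by
 * core.c:551-594; details beyond the listed lines are assumed, not quoted.
 */
static struct rq *sketch_task_rq_lock(struct task_struct *p)
{
	struct rq *rq;

	for (;;) {
		rq = task_rq(p);		/* rq of the CPU p is currently on */
		raw_spin_rq_lock(rq);
		/* Re-check: p may have been migrated before we got the lock. */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
			return rq;		/* still valid, rq is locked */
		raw_spin_rq_unlock(rq);

		while (task_on_rq_migrating(p))
			cpu_relax();		/* wait out the in-flight migration */
	}
}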
deadline.c
36 struct rq *rq = task_rq(p); in dl_rq_of_se()
242 rq = task_rq(p); in dl_change_utilization()
986 struct rq *rq = task_rq(p); in start_dl_timer()
1728 rq = task_rq(p); in migrate_task_rq_dl()
1982 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask)) in find_later_rq()
2080 if (unlikely(task_rq(task) != rq || in find_lock_later_rq()
2342 rq = task_rq(p); in set_cpus_allowed_dl()
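
deadline.c:36 (and, analogously, rt.c:243 below) uses task_rq() to get from a scheduling entity back to the per-CPU runqueue that holds its class-specific sub-queue. The sketch below is reconstructed from that single hit, so everything beyond the task_rq(p) line is an assumption.

/* Sketch of dl_rq_of_se() based on deadline.c:36; dl_task_of() is the
 * usual container lookup from the entity to its task_struct. */
static inline struct dl_rq *sketch_dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);	/* owning task */
	struct rq *rq = task_rq(p);			/* that task's rq */

	return &rq->dl;					/* per-rq deadline queue */
}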
rt.c
243 return task_rq(p); in rq_of_rt_se()
1722 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, in find_lowest_rq()
1727 ret = cpupri_find(&task_rq(task)->rd->cpupri, in find_lowest_rq()
1825 if (unlikely(task_rq(task) != rq || in find_lock_lowest_rq()
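
deadline.c:2080 and rt.c:1825 are the push-path variant of the re-check: after taking a candidate target runqueue's lock, the caller verifies task_rq(task) is still the source rq before moving the task. Here is a hedged sketch of that revalidation step only, with the candidate-selection loops of find_lock_later_rq()/find_lock_lowest_rq() left out and the staleness checks simplified.

/*
 * Sketch of the revalidation in find_lock_later_rq()/find_lock_lowest_rq()
 * (deadline.c:2080, rt.c:1825); the real code checks more conditions.
 */
static struct rq *sketch_lock_target_rq(struct task_struct *task,
					struct rq *rq, struct rq *target)
{
	if (double_lock_balance(rq, target)) {
		/*
		 * rq->lock was dropped while acquiring target->lock, so the
		 * decision may be based on stale state; bail out if the task
		 * has moved or been dequeued in the meantime.
		 */
		if (unlikely(task_rq(task) != rq || !task_on_rq_queued(task))) {
			double_unlock_balance(rq, target);
			return NULL;
		}
	}
	return target;
}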
sched.h
1368 #define task_rq(p) cpu_rq(task_cpu(p)) macro
1405 return &task_rq(p)->cfs; in task_cfs_rq()
1411 struct rq *rq = task_rq(p); in cfs_rq_of()
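
sched.h:1368 is the definition the other hits depend on: task_rq(p) is simply the runqueue of the CPU the task is currently placed on, and the CFS helpers at 1405-1411 derive the cfs_rq from it. A minimal restatement, assuming cpu_rq()/task_cpu() are the usual per-CPU helpers (they do not appear in the results above):

/* Restated from sched.h:1368 and 1405; cpu_rq()/task_cpu() are assumed
 * to be the standard helpers from kernel/sched/sched.h. */
#define task_rq(p)	cpu_rq(task_cpu(p))	/* rq of the CPU p sits on */

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;		/* top-level CFS runqueue */
}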
fair.c
1099 (lockdep_is_held(__rq_lockp(task_rq(p))) && !READ_ONCE(p->on_cpu))); in deref_task_numa_group()
5512 SCHED_WARN_ON(task_rq(p) != rq); in hrtick_start_fair()
7012 lockdep_assert_rq_held(task_rq(p)); in migrate_task_rq_fair()
8063 BUG_ON(task_rq(p) != rq); in attach_task()
11008 struct rq *rq = task_rq(a); in cfs_prio_less()
11015 SCHED_WARN_ON(task_rq(b)->core != rq->core); in cfs_prio_less()
11038 cfs_rqa = &task_rq(a)->cfs; in cfs_prio_less()
11039 cfs_rqb = &task_rq(b)->cfs; in cfs_prio_less()
11078 update_overutilized_status(task_rq(curr)); in task_tick_fair()
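
The fair.c hits split into assertion sites (1099, 5512, 7012, 8063) that check the caller really owns the runqueue task_rq() returns, and core-scheduling/overutilization sites (11008-11078) that read per-rq state through it. A sketch of the assertion idiom only, using lockdep_assert_rq_held() and SCHED_WARN_ON() as they appear in the hits; the surrounding function is illustrative, not fair.c code.

/* Illustrative assertion pattern around task_rq(), cf. fair.c:5512 and 8063. */
static void sketch_touch_task_on_rq(struct rq *rq, struct task_struct *p)
{
	lockdep_assert_rq_held(rq);		/* caller must hold rq->lock */
	SCHED_WARN_ON(task_rq(p) != rq);	/* p must still belong to this rq */

	/* ... safe to update p's scheduler state on this runqueue ... */
}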