Lines Matching refs:rq
100 struct rq;
114 extern void calc_global_load_tick(struct rq *this_rq);
115 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
117 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
614 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
695 struct rq *rq; member
896 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
949 struct rq;
952 void (*func)(struct rq *rq);
962 struct rq { struct
1146 struct rq *core; argument
1174 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1176 return cfs_rq->rq; in rq_of()
1181 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
1183 return container_of(cfs_rq, struct rq, cfs); in rq_of()
1187 static inline int cpu_of(struct rq *rq) in cpu_of() argument
1190 return rq->cpu; in cpu_of()
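The two rq_of() variants above track CONFIG_FAIR_GROUP_SCHED: with group scheduling every cfs_rq carries an explicit back-pointer (the struct rq *rq member listed earlier), while without it the root cfs_rq is embedded by value in struct rq, so container_of() recovers the owner with no stored pointer. A minimal sketch of the embedded layout that makes the second variant work:

	struct rq {
		/* ... */
		struct cfs_rq cfs;	/* root CFS runqueue, embedded by value */
		/* ... */
	};

	/* hence rq_of(&rq->cfs) == container_of(&rq->cfs, struct rq, cfs) == rq */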
1207 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1221 static inline bool sched_core_enabled(struct rq *rq) in sched_core_enabled() argument
1223 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; in sched_core_enabled()
1235 static inline raw_spinlock_t *rq_lockp(struct rq *rq) in rq_lockp() argument
1237 if (sched_core_enabled(rq)) in rq_lockp()
1238 return &rq->core->__lock; in rq_lockp()
1240 return &rq->__lock; in rq_lockp()
1243 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) in __rq_lockp() argument
1245 if (rq->core_enabled) in __rq_lockp()
1246 return &rq->core->__lock; in __rq_lockp()
1248 return &rq->__lock; in __rq_lockp()
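Under core scheduling, rq_lockp() on any SMT sibling resolves to the core leader's lock, so taking "the rq lock" on one sibling serializes the whole core; __rq_lockp() is the variant that skips the static-branch test for paths where rq->core_enabled is known stable. A minimal sketch of the invariant (assuming a CONFIG_SCHED_CORE kernel with CPUs 0 and 1 as siblings of one core):

	struct rq *rq0 = cpu_rq(0);
	struct rq *rq1 = cpu_rq(1);

	if (sched_core_enabled(rq0))
		WARN_ON(rq_lockp(rq0) != rq_lockp(rq1));	/* both are &rq0->core->__lock */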
1253 void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
1261 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1264 if (!sched_core_enabled(rq)) in sched_cpu_cookie_match()
1267 return rq->core->core_cookie == p->core_cookie; in sched_cpu_cookie_match()
1270 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1276 if (!sched_core_enabled(rq)) in sched_core_cookie_match()
1279 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { in sched_core_cookie_match()
1290 return idle_core || rq->core->core_cookie == p->core_cookie; in sched_core_cookie_match()
1293 static inline bool sched_group_cookie_match(struct rq *rq, in sched_group_cookie_match() argument
1300 if (!sched_core_enabled(rq)) in sched_group_cookie_match()
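These predicates gate task placement under core scheduling: a CPU is acceptable when the task's cookie matches what its core currently runs, and a whole core is acceptable when some sibling is idle or the cookies match. A hedged sketch of using the per-CPU test to filter candidates; pick_compatible_cpu() is an illustrative helper, not kernel code:

	static int pick_compatible_cpu(struct task_struct *p, const struct cpumask *cands)
	{
		int cpu;

		for_each_cpu(cpu, cands) {
			if (sched_cpu_cookie_match(cpu_rq(cpu), p))
				return cpu;	/* compatible, or core sched off */
		}
		return -1;
	}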
1315 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
1316 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
1323 static inline bool sched_core_enabled(struct rq *rq) in sched_core_enabled() argument
1333 static inline raw_spinlock_t *rq_lockp(struct rq *rq) in rq_lockp() argument
1335 return &rq->__lock; in rq_lockp()
1338 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) in __rq_lockp() argument
1340 return &rq->__lock; in __rq_lockp()
1343 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1348 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1353 static inline bool sched_group_cookie_match(struct rq *rq, in sched_group_cookie_match() argument
1361 static inline void lockdep_assert_rq_held(struct rq *rq) in lockdep_assert_rq_held() argument
1363 lockdep_assert_held(__rq_lockp(rq)); in lockdep_assert_rq_held()
1366 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
1367 extern bool raw_spin_rq_trylock(struct rq *rq);
1368 extern void raw_spin_rq_unlock(struct rq *rq);
1370 static inline void raw_spin_rq_lock(struct rq *rq) in raw_spin_rq_lock() argument
1372 raw_spin_rq_lock_nested(rq, 0); in raw_spin_rq_lock()
1375 static inline void raw_spin_rq_lock_irq(struct rq *rq) in raw_spin_rq_lock_irq() argument
1378 raw_spin_rq_lock(rq); in raw_spin_rq_lock_irq()
1381 static inline void raw_spin_rq_unlock_irq(struct rq *rq) in raw_spin_rq_unlock_irq() argument
1383 raw_spin_rq_unlock(rq); in raw_spin_rq_unlock_irq()
1387 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) in _raw_spin_rq_lock_irqsave() argument
1391 raw_spin_rq_lock(rq); in _raw_spin_rq_lock_irqsave()
1395 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) in raw_spin_rq_unlock_irqrestore() argument
1397 raw_spin_rq_unlock(rq); in raw_spin_rq_unlock_irqrestore()
1401 #define raw_spin_rq_lock_irqsave(rq, flags) \ argument
1403 flags = _raw_spin_rq_lock_irqsave(rq); \
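raw_spin_rq_lock_irqsave() has to be a macro rather than an inline so the flags word is assigned in the caller's scope. Typical usage, as a sketch:

	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	/* ... touch rq state with interrupts disabled ... */
	raw_spin_rq_unlock_irqrestore(rq, flags);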
1407 extern void __update_idle_core(struct rq *rq);
1409 static inline void update_idle_core(struct rq *rq) in update_idle_core() argument
1412 __update_idle_core(rq); in update_idle_core()
1416 static inline void update_idle_core(struct rq *rq) { } in update_idle_core() argument
1455 struct rq *rq = task_rq(p); in cfs_rq_of() local
1457 return &rq->cfs; in cfs_rq_of()
1467 extern void update_rq_clock(struct rq *rq);
1496 static inline void assert_clock_updated(struct rq *rq) in assert_clock_updated() argument
1502 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); in assert_clock_updated()
1505 static inline u64 rq_clock(struct rq *rq) in rq_clock() argument
1507 lockdep_assert_rq_held(rq); in rq_clock()
1508 assert_clock_updated(rq); in rq_clock()
1510 return rq->clock; in rq_clock()
1513 static inline u64 rq_clock_task(struct rq *rq) in rq_clock_task() argument
1515 lockdep_assert_rq_held(rq); in rq_clock_task()
1516 assert_clock_updated(rq); in rq_clock_task()
1518 return rq->clock_task; in rq_clock_task()
1534 static inline u64 rq_clock_thermal(struct rq *rq) in rq_clock_thermal() argument
1536 return rq_clock_task(rq) >> sched_thermal_decay_shift; in rq_clock_thermal()
1539 static inline void rq_clock_skip_update(struct rq *rq) in rq_clock_skip_update() argument
1541 lockdep_assert_rq_held(rq); in rq_clock_skip_update()
1542 rq->clock_update_flags |= RQCF_REQ_SKIP; in rq_clock_skip_update()
1549 static inline void rq_clock_cancel_skipupdate(struct rq *rq) in rq_clock_cancel_skipupdate() argument
1551 lockdep_assert_rq_held(rq); in rq_clock_cancel_skipupdate()
1552 rq->clock_update_flags &= ~RQCF_REQ_SKIP; in rq_clock_cancel_skipupdate()
1564 static inline void rq_clock_start_loop_update(struct rq *rq) in rq_clock_start_loop_update() argument
1566 lockdep_assert_rq_held(rq); in rq_clock_start_loop_update()
1567 SCHED_WARN_ON(rq->clock_update_flags & RQCF_ACT_SKIP); in rq_clock_start_loop_update()
1568 rq->clock_update_flags |= RQCF_ACT_SKIP; in rq_clock_start_loop_update()
1571 static inline void rq_clock_stop_loop_update(struct rq *rq) in rq_clock_stop_loop_update() argument
1573 lockdep_assert_rq_held(rq); in rq_clock_stop_loop_update()
1574 rq->clock_update_flags &= ~RQCF_ACT_SKIP; in rq_clock_stop_loop_update()
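The skip machinery lets a path that has just paid for a clock update tell the imminent schedule() not to pay again, while the loop variants suppress the stale-clock warning across an iteration that deliberately updates once up front. A sketch of the request-skip pattern (rq lock assumed held):

	update_rq_clock(rq);		/* one authoritative update */
	now = rq_clock_task(rq);	/* consume it */
	rq_clock_skip_update(rq);	/* the upcoming schedule() may skip */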
1602 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock() argument
1604 rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); in rq_pin_lock()
1607 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in rq_pin_lock()
1610 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); in rq_pin_lock()
1615 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock() argument
1618 if (rq->clock_update_flags > RQCF_ACT_SKIP) in rq_unpin_lock()
1622 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); in rq_unpin_lock()
1625 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock() argument
1627 lockdep_repin_lock(__rq_lockp(rq), rf->cookie); in rq_repin_lock()
1633 rq->clock_update_flags |= rf->clock_update_flags; in rq_repin_lock()
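The lockdep cookie taken at pin time catches helpers that drop the rq lock behind a caller's back; when the lock must legitimately be released mid-section, the unpin/repin pair brackets the window and clock_update_flags are preserved across it. A sketch of such a window:

	rq_unpin_lock(rq, &rf);
	raw_spin_rq_unlock(rq);
	/* ... take other locks in a safe order ... */
	raw_spin_rq_lock(rq);
	rq_repin_lock(rq, &rf);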
1637 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1638 __acquires(rq->lock);
1640 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1642 __acquires(rq->lock);
1644 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock() argument
1645 __releases(rq->lock) in __task_rq_unlock()
1647 rq_unpin_lock(rq, rf); in __task_rq_unlock()
1648 raw_spin_rq_unlock(rq); in __task_rq_unlock()
1652 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock() argument
1653 __releases(rq->lock) in task_rq_unlock()
1656 rq_unpin_lock(rq, rf); in task_rq_unlock()
1657 raw_spin_rq_unlock(rq); in task_rq_unlock()
1662 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave() argument
1663 __acquires(rq->lock) in rq_lock_irqsave()
1665 raw_spin_rq_lock_irqsave(rq, rf->flags); in rq_lock_irqsave()
1666 rq_pin_lock(rq, rf); in rq_lock_irqsave()
1670 rq_lock_irq(struct rq *rq, struct rq_flags *rf) in rq_lock_irq() argument
1671 __acquires(rq->lock) in rq_lock_irq()
1673 raw_spin_rq_lock_irq(rq); in rq_lock_irq()
1674 rq_pin_lock(rq, rf); in rq_lock_irq()
1678 rq_lock(struct rq *rq, struct rq_flags *rf) in rq_lock() argument
1679 __acquires(rq->lock) in rq_lock()
1681 raw_spin_rq_lock(rq); in rq_lock()
1682 rq_pin_lock(rq, rf); in rq_lock()
1686 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) in rq_unlock_irqrestore() argument
1687 __releases(rq->lock) in rq_unlock_irqrestore()
1689 rq_unpin_lock(rq, rf); in rq_unlock_irqrestore()
1690 raw_spin_rq_unlock_irqrestore(rq, rf->flags); in rq_unlock_irqrestore()
1694 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) in rq_unlock_irq() argument
1695 __releases(rq->lock) in rq_unlock_irq()
1697 rq_unpin_lock(rq, rf); in rq_unlock_irq()
1698 raw_spin_rq_unlock_irq(rq); in rq_unlock_irq()
1702 rq_unlock(struct rq *rq, struct rq_flags *rf) in rq_unlock() argument
1703 __releases(rq->lock) in rq_unlock()
1705 rq_unpin_lock(rq, rf); in rq_unlock()
1706 raw_spin_rq_unlock(rq); in rq_unlock()
1709 DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
1714 DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
1719 DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
1724 static inline struct rq *
1726 __acquires(rq->lock) in this_rq_lock_irq()
1728 struct rq *rq; in this_rq_lock_irq() local
1731 rq = this_rq(); in this_rq_lock_irq()
1732 rq_lock(rq, rf); in this_rq_lock_irq()
1733 return rq; in this_rq_lock_irq()
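The canonical way to operate on another task's runqueue is the task_rq_lock() pair: the task can migrate between reading task_rq(p) and acquiring the lock, so the helper loops internally and returns with the task pinned and interrupts off. A sketch:

	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* p can no longer change runqueue */
	update_rq_clock(rq);
	/* ... inspect or requeue p ... */
	task_rq_unlock(rq, p, &rf);

With the DEFINE_LOCK_GUARD_1() wrappers above, the rq-local variants can also be scoped, e.g. scoped_guard (rq_lock_irq, rq) { ... } unlocks on scope exit.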
1784 queue_balance_callback(struct rq *rq, in queue_balance_callback() argument
1786 void (*func)(struct rq *rq)) in queue_balance_callback() argument
1788 lockdep_assert_rq_held(rq); in queue_balance_callback()
1795 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) in queue_balance_callback()
1799 head->next = rq->balance_callback; in queue_balance_callback()
1800 rq->balance_callback = head; in queue_balance_callback()
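Balance callbacks let a scheduling class defer work, such as pushing tasks to other CPUs, until the rq lock is about to be released, sidestepping lock-ordering problems. A hedged sketch of the registration pattern, modeled on the RT class; my_push_tasks and my_push_head are illustrative names:

	static void my_push_tasks(struct rq *rq);		/* hypothetical worker */
	static DEFINE_PER_CPU(struct balance_callback, my_push_head);

	static void queue_my_push(struct rq *rq)
	{
		queue_balance_callback(rq, &per_cpu(my_push_head, rq->cpu),
				       my_push_tasks);
	}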
1962 extern void __sched_core_account_forceidle(struct rq *rq);
1964 static inline void sched_core_account_forceidle(struct rq *rq) in sched_core_account_forceidle() argument
1967 __sched_core_account_forceidle(rq); in sched_core_account_forceidle()
1970 extern void __sched_core_tick(struct rq *rq);
1972 static inline void sched_core_tick(struct rq *rq) in sched_core_tick() argument
1974 if (sched_core_enabled(rq) && schedstat_enabled()) in sched_core_tick()
1975 __sched_core_tick(rq); in sched_core_tick()
1980 static inline void sched_core_account_forceidle(struct rq *rq) {} in sched_core_account_forceidle() argument
1982 static inline void sched_core_tick(struct rq *rq) {} in sched_core_tick() argument
2131 static inline int task_current(struct rq *rq, struct task_struct *p) in task_current() argument
2133 return rq->curr == p; in task_current()
2136 static inline int task_on_cpu(struct rq *rq, struct task_struct *p) in task_on_cpu() argument
2141 return task_current(rq, p); in task_on_cpu()
2237 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
2238 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
2239 void (*yield_task) (struct rq *rq);
2240 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
2242 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
2244 struct task_struct *(*pick_next_task)(struct rq *rq);
2246 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
2247 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
2250 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2253 struct task_struct * (*pick_task)(struct rq *rq);
2257 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2261 void (*rq_online)(struct rq *rq);
2262 void (*rq_offline)(struct rq *rq);
2264 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2267 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
2276 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2277 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2278 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
2281 unsigned int (*get_rr_interval)(struct rq *rq,
2284 void (*update_curr)(struct rq *rq);
2295 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) in put_prev_task() argument
2297 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
2298 prev->sched_class->put_prev_task(rq, prev); in put_prev_task()
2301 static inline void set_next_task(struct rq *rq, struct task_struct *next) in set_next_task() argument
2303 next->sched_class->set_next_task(rq, next, false); in set_next_task()
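These wrappers encode the hand-off invariant: the outgoing task's class must finish its bookkeeping before the incoming task's class begins, so put_prev_task() always precedes set_next_task(). A sketch of the ordering inside a pick path (rq lock held):

	put_prev_task(rq, prev);	/* prev's class releases it */
	set_next_task(rq, next);	/* only then does next's class take over */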
2340 static inline bool sched_stop_runnable(struct rq *rq) in sched_stop_runnable() argument
2342 return rq->stop && task_on_rq_queued(rq->stop); in sched_stop_runnable()
2345 static inline bool sched_dl_runnable(struct rq *rq) in sched_dl_runnable() argument
2347 return rq->dl.dl_nr_running > 0; in sched_dl_runnable()
2350 static inline bool sched_rt_runnable(struct rq *rq) in sched_rt_runnable() argument
2352 return rq->rt.rt_queued > 0; in sched_rt_runnable()
2355 static inline bool sched_fair_runnable(struct rq *rq) in sched_fair_runnable() argument
2357 return rq->cfs.nr_running > 0; in sched_fair_runnable()
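The four tests mirror the fixed class priority order, stop > deadline > RT > fair > idle, letting pick paths bail out cheaply without walking every class. A sketch of that ordering (illustrative, not the exact core.c fast path):

	if (sched_stop_runnable(rq))
		/* a stopper task is pending and always wins */;
	else if (sched_dl_runnable(rq))
		/* pick from SCHED_DEADLINE */;
	else if (sched_rt_runnable(rq))
		/* pick from SCHED_FIFO / SCHED_RR */;
	else if (sched_fair_runnable(rq))
		/* pick from CFS */;
	/* otherwise the idle task runs */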
2360 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_f…
2361 extern struct task_struct *pick_next_task_idle(struct rq *rq);
2372 extern void trigger_load_balance(struct rq *rq);
2376 static inline struct task_struct *get_push_task(struct rq *rq) in get_push_task() argument
2378 struct task_struct *p = rq->curr; in get_push_task()
2380 lockdep_assert_rq_held(rq); in get_push_task()
2382 if (rq->push_busy) in get_push_task()
2391 rq->push_busy = true; in get_push_task()
2400 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
2403 rq->idle_state = idle_state; in idle_set_state()
2406 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
2410 return rq->idle_state; in idle_get_state()
2413 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
2418 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
2437 extern void resched_curr(struct rq *rq);
2458 extern bool sched_can_stop_tick(struct rq *rq);
2466 static inline void sched_update_tick_dependency(struct rq *rq) in sched_update_tick_dependency() argument
2468 int cpu = cpu_of(rq); in sched_update_tick_dependency()
2473 if (sched_can_stop_tick(rq)) in sched_update_tick_dependency()
2480 static inline void sched_update_tick_dependency(struct rq *rq) { } in sched_update_tick_dependency() argument
2483 static inline void add_nr_running(struct rq *rq, unsigned count) in add_nr_running() argument
2485 unsigned prev_nr = rq->nr_running; in add_nr_running()
2487 rq->nr_running = prev_nr + count; in add_nr_running()
2489 call_trace_sched_update_nr_running(rq, count); in add_nr_running()
2493 if (prev_nr < 2 && rq->nr_running >= 2) { in add_nr_running()
2494 if (!READ_ONCE(rq->rd->overload)) in add_nr_running()
2495 WRITE_ONCE(rq->rd->overload, 1); in add_nr_running()
2499 sched_update_tick_dependency(rq); in add_nr_running()
2502 static inline void sub_nr_running(struct rq *rq, unsigned count) in sub_nr_running() argument
2504 rq->nr_running -= count; in sub_nr_running()
2506 call_trace_sched_update_nr_running(rq, -count); in sub_nr_running()
2510 sched_update_tick_dependency(rq); in sub_nr_running()
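Class enqueue and dequeue paths funnel through these helpers so the side effects stay centralized: crossing from one to two runnable tasks marks the root domain overloaded for the load balancer, and the NOHZ-full tick dependency is re-evaluated on every change. A sketch of a caller; enqueue_one() is an illustrative name:

	static void enqueue_one(struct rq *rq, struct task_struct *p)
	{
		lockdep_assert_rq_held(rq);
		/* ... place p on a class-specific queue ... */
		add_nr_running(rq, 1);
	}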
2513 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2514 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2516 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
2549 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2551 if (!cpu_active(cpu_of(rq))) in hrtick_enabled()
2553 return hrtimer_is_hres_active(&rq->hrtick_timer); in hrtick_enabled()
2556 static inline int hrtick_enabled_fair(struct rq *rq) in hrtick_enabled_fair() argument
2560 return hrtick_enabled(rq); in hrtick_enabled_fair()
2563 static inline int hrtick_enabled_dl(struct rq *rq) in hrtick_enabled_dl() argument
2567 return hrtick_enabled(rq); in hrtick_enabled_dl()
2570 void hrtick_start(struct rq *rq, u64 delay);
2574 static inline int hrtick_enabled_fair(struct rq *rq) in hrtick_enabled_fair() argument
2579 static inline int hrtick_enabled_dl(struct rq *rq) in hrtick_enabled_dl() argument
2584 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2623 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) in double_rq_clock_clear_update()
2632 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {} in double_rq_clock_clear_update()
2643 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) in rq_order_less()
2669 extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
2681 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2700 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2728 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) in double_lock_balance()
2735 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) in double_unlock_balance()
2786 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
2797 extern void set_rq_online (struct rq *rq);
2798 extern void set_rq_offline(struct rq *rq);
2809 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
2826 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
2837 DEFINE_LOCK_GUARD_2(double_rq_lock, struct rq,
2892 extern void nohz_balance_exit_idle(struct rq *rq);
2894 static inline void nohz_balance_exit_idle(struct rq *rq) { } in nohz_balance_exit_idle() argument
2958 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) in cpufreq_update_util() argument
2963 cpu_of(rq))); in cpufreq_update_util()
2965 data->func(data, rq_clock(rq), flags); in cpufreq_update_util()
2968 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} in cpufreq_update_util() argument
3019 static inline unsigned long cpu_bw_dl(struct rq *rq) in cpu_bw_dl() argument
3021 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; in cpu_bw_dl()
3024 static inline unsigned long cpu_util_dl(struct rq *rq) in cpu_util_dl() argument
3026 return READ_ONCE(rq->avg_dl.util_avg); in cpu_util_dl()
3033 static inline unsigned long cpu_util_rt(struct rq *rq) in cpu_util_rt() argument
3035 return READ_ONCE(rq->avg_rt.util_avg); in cpu_util_rt()
3042 static inline unsigned long uclamp_rq_get(struct rq *rq, in uclamp_rq_get() argument
3045 return READ_ONCE(rq->uclamp[clamp_id].value); in uclamp_rq_get()
3048 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, in uclamp_rq_set() argument
3051 WRITE_ONCE(rq->uclamp[clamp_id].value, value); in uclamp_rq_set()
3054 static inline bool uclamp_rq_is_idle(struct rq *rq) in uclamp_rq_is_idle() argument
3056 return rq->uclamp_flags & UCLAMP_FLAG_IDLE; in uclamp_rq_is_idle()
3077 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
3094 if (uclamp_rq_is_idle(rq)) in uclamp_rq_util_with()
3098 min_util = max_t(unsigned long, min_util, uclamp_rq_get(rq, UCLAMP_MIN)); in uclamp_rq_util_with()
3099 max_util = max_t(unsigned long, max_util, uclamp_rq_get(rq, UCLAMP_MAX)); in uclamp_rq_util_with()
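A worked example of the aggregation with hypothetical numbers: raw utilization 700 with rq-wide clamps UCLAMP_MIN = 200 and UCLAMP_MAX = 512 yields clamp(700, 200, 512) = 512, i.e. the CPU is made to appear at most half of SCHED_CAPACITY_SCALE busy no matter the real load:

	/* p == NULL: only the rq-wide clamps apply (sketch) */
	unsigned long eff = uclamp_rq_util_with(rq, 700, NULL);	/* -> 512 */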
3113 static inline bool uclamp_rq_is_capped(struct rq *rq) in uclamp_rq_is_capped() argument
3121 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); in uclamp_rq_is_capped()
3122 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); in uclamp_rq_is_capped()
3150 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
3156 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } in uclamp_rq_is_capped() argument
3163 static inline unsigned long uclamp_rq_get(struct rq *rq, in uclamp_rq_get() argument
3172 static inline void uclamp_rq_set(struct rq *rq, enum uclamp_id clamp_id, in uclamp_rq_set() argument
3177 static inline bool uclamp_rq_is_idle(struct rq *rq) in uclamp_rq_is_idle() argument
3184 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
3186 return rq->avg_irq.util_avg; in cpu_util_irq()
3199 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
3236 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument
3246 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_switch_mm()
3249 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_switch_mm()
3252 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument
3302 extern void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t);
3303 extern void task_tick_mm_cid(struct rq *rq, struct task_struct *curr);
3393 static inline void mm_cid_snapshot_time(struct rq *rq, struct mm_struct *mm) in mm_cid_snapshot_time() argument
3395 struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq)); in mm_cid_snapshot_time()
3397 lockdep_assert_rq_held(rq); in mm_cid_snapshot_time()
3398 WRITE_ONCE(pcpu_cid->time, rq->clock); in mm_cid_snapshot_time()
3401 static inline int __mm_cid_get(struct rq *rq, struct mm_struct *mm) in __mm_cid_get() argument
3449 mm_cid_snapshot_time(rq, mm); in __mm_cid_get()
3453 static inline int mm_cid_get(struct rq *rq, struct mm_struct *mm) in mm_cid_get() argument
3459 lockdep_assert_rq_held(rq); in mm_cid_get()
3463 mm_cid_snapshot_time(rq, mm); in mm_cid_get()
3470 cid = __mm_cid_get(rq, mm); in mm_cid_get()
3475 static inline void switch_mm_cid(struct rq *rq, in switch_mm_cid() argument
3512 mm_cid_snapshot_time(rq, prev->mm); in switch_mm_cid()
3517 next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm); in switch_mm_cid()
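At context switch, switch_mm_cid() snapshots the outgoing mm's per-CPU timestamp, so idle compact IDs can later age out, and assigns the incoming task a concurrency ID for its mm; rq->clock doubles as the cheap timestamp, hence the lockdep assertions. A sketch of where this sits (assuming the context_switch() call site, rq lock held):

	/* inside context_switch(), before the mm switch (sketch): */
	switch_mm_cid(rq, prev, next);	/* next->mm_cid now valid for rseq users */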
3521 static inline void switch_mm_cid(struct rq *rq, struct task_struct *prev, struct task_struct *next)… in switch_mm_cid() argument
3523 static inline void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) { } in sched_mm_cid_migrate_to()
3524 static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { } in task_tick_mm_cid() argument