Lines Matching refs:rq
100 struct rq;
114 extern void calc_global_load_tick(struct rq *this_rq);
115 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
117 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
617 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
693 struct rq *rq; member
888 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
941 struct rq;
944 void (*func)(struct rq *rq);
954 struct rq { struct
1138 struct rq *core; argument
1158 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1160 return cfs_rq->rq; in rq_of()
1165 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
1167 return container_of(cfs_rq, struct rq, cfs); in rq_of()
1171 static inline int cpu_of(struct rq *rq) in cpu_of() argument
1174 return rq->cpu; in cpu_of()
1191 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1205 static inline bool sched_core_enabled(struct rq *rq) in sched_core_enabled() argument
1207 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; in sched_core_enabled()
1219 static inline raw_spinlock_t *rq_lockp(struct rq *rq) in rq_lockp() argument
1221 if (sched_core_enabled(rq)) in rq_lockp()
1222 return &rq->core->__lock; in rq_lockp()
1224 return &rq->__lock; in rq_lockp()
1227 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) in __rq_lockp() argument
1229 if (rq->core_enabled) in __rq_lockp()
1230 return &rq->core->__lock; in __rq_lockp()
1232 return &rq->__lock; in __rq_lockp()
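Taken together, rq_lockp() and __rq_lockp() carry the central locking trick of core scheduling: when it is enabled, every SMT sibling's runqueue resolves to the core leader's lock, so locking any one sibling serializes the whole core. Below is a minimal self-contained sketch of that selection logic; the types are simplified stand-ins, and the kernel additionally gates sched_core_enabled() behind a static branch while __rq_lockp() tests rq->core_enabled directly.

#include <stdbool.h>
#include <pthread.h>

struct rq {
        pthread_spinlock_t __lock;      /* stand-in for raw_spinlock_t */
        struct rq *core;                /* leader rq of this SMT core */
        bool core_enabled;
};

static bool sched_core_enabled(const struct rq *rq)
{
        return rq->core_enabled;
}

static pthread_spinlock_t *rq_lockp(struct rq *rq)
{
        /* all siblings share the leader's lock when core sched is on */
        if (sched_core_enabled(rq))
                return &rq->core->__lock;
        return &rq->__lock;
}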
1243 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1246 if (!sched_core_enabled(rq)) in sched_cpu_cookie_match()
1249 return rq->core->core_cookie == p->core_cookie; in sched_cpu_cookie_match()
1252 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1258 if (!sched_core_enabled(rq)) in sched_core_cookie_match()
1261 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { in sched_core_cookie_match()
1272 return idle_core || rq->core->core_cookie == p->core_cookie; in sched_core_cookie_match()
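The loop body of sched_core_cookie_match() is elided in this listing: it walks the SMT siblings of cpu_of(rq) and clears idle_core as soon as one sibling is busy, because an idle core may take any task while a busy core only accepts tasks with a matching cookie. A hedged, self-contained sketch of that decision, with a caller-supplied idle array standing in for cpu_smt_mask()/available_idle_cpu():

#include <stdbool.h>

struct task_struct { unsigned long core_cookie; };
struct core_state { unsigned long core_cookie; };

static bool core_cookie_match(const struct core_state *core,
                              const struct task_struct *p,
                              const bool *sibling_idle, int nr_siblings)
{
        bool idle_core = true;
        int i;

        for (i = 0; i < nr_siblings; i++) {
                if (!sibling_idle[i]) {
                        idle_core = false;
                        break;
                }
        }

        /* a fully idle core can take any task; otherwise cookies must match */
        return idle_core || core->core_cookie == p->core_cookie;
}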
1275 static inline bool sched_group_cookie_match(struct rq *rq, in sched_group_cookie_match() argument
1282 if (!sched_core_enabled(rq)) in sched_group_cookie_match()
1297 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
1298 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags);
1305 static inline bool sched_core_enabled(struct rq *rq) in sched_core_enabled() argument
1315 static inline raw_spinlock_t *rq_lockp(struct rq *rq) in rq_lockp() argument
1317 return &rq->__lock; in rq_lockp()
1320 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) in __rq_lockp() argument
1322 return &rq->__lock; in __rq_lockp()
1325 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1330 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1335 static inline bool sched_group_cookie_match(struct rq *rq, in sched_group_cookie_match() argument
1343 static inline void lockdep_assert_rq_held(struct rq *rq) in lockdep_assert_rq_held() argument
1345 lockdep_assert_held(__rq_lockp(rq)); in lockdep_assert_rq_held()
1348 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
1349 extern bool raw_spin_rq_trylock(struct rq *rq);
1350 extern void raw_spin_rq_unlock(struct rq *rq);
1352 static inline void raw_spin_rq_lock(struct rq *rq) in raw_spin_rq_lock() argument
1354 raw_spin_rq_lock_nested(rq, 0); in raw_spin_rq_lock()
1357 static inline void raw_spin_rq_lock_irq(struct rq *rq) in raw_spin_rq_lock_irq() argument
1360 raw_spin_rq_lock(rq); in raw_spin_rq_lock_irq()
1363 static inline void raw_spin_rq_unlock_irq(struct rq *rq) in raw_spin_rq_unlock_irq() argument
1365 raw_spin_rq_unlock(rq); in raw_spin_rq_unlock_irq()
1369 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) in _raw_spin_rq_lock_irqsave() argument
1373 raw_spin_rq_lock(rq); in _raw_spin_rq_lock_irqsave()
1377 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) in raw_spin_rq_unlock_irqrestore() argument
1379 raw_spin_rq_unlock(rq); in raw_spin_rq_unlock_irqrestore()
1383 #define raw_spin_rq_lock_irqsave(rq, flags) \ argument
1385 flags = _raw_spin_rq_lock_irqsave(rq); \
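The function half, _raw_spin_rq_lock_irqsave(), returns the saved IRQ flags, and the macro at lines 1383-1385 assigns them to a caller-provided variable so the pair reads like the stock raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore(). A minimal usage sketch, assuming kernel context:

unsigned long flags;

raw_spin_rq_lock_irqsave(rq, flags);    /* lock rq, IRQs off, state saved */
/* ... rq lock held with interrupts disabled ... */
raw_spin_rq_unlock_irqrestore(rq, flags);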
1389 extern void __update_idle_core(struct rq *rq);
1391 static inline void update_idle_core(struct rq *rq) in update_idle_core() argument
1394 __update_idle_core(rq); in update_idle_core()
1398 static inline void update_idle_core(struct rq *rq) { } in update_idle_core() argument
1440 struct rq *rq = task_rq(p); in cfs_rq_of() local
1442 return &rq->cfs; in cfs_rq_of()
1452 extern void update_rq_clock(struct rq *rq);
1481 static inline void assert_clock_updated(struct rq *rq) in assert_clock_updated() argument
1487 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); in assert_clock_updated()
1490 static inline u64 rq_clock(struct rq *rq) in rq_clock() argument
1492 lockdep_assert_rq_held(rq); in rq_clock()
1493 assert_clock_updated(rq); in rq_clock()
1495 return rq->clock; in rq_clock()
1498 static inline u64 rq_clock_task(struct rq *rq) in rq_clock_task() argument
1500 lockdep_assert_rq_held(rq); in rq_clock_task()
1501 assert_clock_updated(rq); in rq_clock_task()
1503 return rq->clock_task; in rq_clock_task()
1519 static inline u64 rq_clock_thermal(struct rq *rq) in rq_clock_thermal() argument
1521 return rq_clock_task(rq) >> sched_thermal_decay_shift; in rq_clock_thermal()
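rq_clock() and rq_clock_task() are only valid with the rq lock held and after update_rq_clock() has run, which is exactly what the lockdep and clock-updated assertions enforce; rq_clock_thermal() then right-shifts the task clock by sched_thermal_decay_shift to slow the decay of thermal pressure. A minimal read pattern, sketched for kernel context:

struct rq_flags rf;
u64 now;

rq_lock(rq, &rf);
update_rq_clock(rq);            /* refresh rq->clock and rq->clock_task */
now = rq_clock_task(rq);        /* safe: lock held, clock just updated */
rq_unlock(rq, &rf);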
1524 static inline void rq_clock_skip_update(struct rq *rq) in rq_clock_skip_update() argument
1526 lockdep_assert_rq_held(rq); in rq_clock_skip_update()
1527 rq->clock_update_flags |= RQCF_REQ_SKIP; in rq_clock_skip_update()
1534 static inline void rq_clock_cancel_skipupdate(struct rq *rq) in rq_clock_cancel_skipupdate() argument
1536 lockdep_assert_rq_held(rq); in rq_clock_cancel_skipupdate()
1537 rq->clock_update_flags &= ~RQCF_REQ_SKIP; in rq_clock_cancel_skipupdate()
1565 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock() argument
1567 rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); in rq_pin_lock()
1570 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in rq_pin_lock()
1573 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); in rq_pin_lock()
1578 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock() argument
1581 if (rq->clock_update_flags > RQCF_ACT_SKIP) in rq_unpin_lock()
1585 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); in rq_unpin_lock()
1588 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock() argument
1590 lockdep_repin_lock(__rq_lockp(rq), rf->cookie); in rq_repin_lock()
1596 rq->clock_update_flags |= rf->clock_update_flags; in rq_repin_lock()
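The pin/unpin/repin trio exists for code that must temporarily drop a pinned rq lock (load balancing does this constantly): rq_unpin_lock() releases the lockdep pin and remembers whether the clock was updated, and rq_repin_lock() restores both once the lock is held again. The usage shape, sketched for kernel context:

struct rq_flags rf;

rq_lock(rq, &rf);               /* acquires and pins the rq lock */
rq_unpin_lock(rq, &rf);         /* about to drop/retake the lock */
/* ... e.g. double_lock_balance() may release and reacquire rq ... */
rq_repin_lock(rq, &rf);         /* pinned again; clock flags restored */
rq_unlock(rq, &rf);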
1600 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1601 __acquires(rq->lock);
1603 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1605 __acquires(rq->lock);
1607 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock() argument
1608 __releases(rq->lock) in __task_rq_unlock()
1610 rq_unpin_lock(rq, rf); in __task_rq_unlock()
1611 raw_spin_rq_unlock(rq); in __task_rq_unlock()
1615 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock() argument
1616 __releases(rq->lock) in task_rq_unlock()
1619 rq_unpin_lock(rq, rf); in task_rq_unlock()
1620 raw_spin_rq_unlock(rq); in task_rq_unlock()
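task_rq_lock() is the heavyweight variant: it takes p->pi_lock and then the runqueue lock, looping until the task is stable on one rq, so the caller can operate on the task without it migrating underneath. The canonical pairing, sketched for kernel context:

struct rq_flags rf;
struct rq *rq;

rq = task_rq_lock(p, &rf);      /* p->pi_lock + rq lock; rq is pinned */
update_rq_clock(rq);
/* ... p cannot change runqueue while both locks are held ... */
task_rq_unlock(rq, p, &rf);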
1625 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave() argument
1626 __acquires(rq->lock) in rq_lock_irqsave()
1628 raw_spin_rq_lock_irqsave(rq, rf->flags); in rq_lock_irqsave()
1629 rq_pin_lock(rq, rf); in rq_lock_irqsave()
1633 rq_lock_irq(struct rq *rq, struct rq_flags *rf) in rq_lock_irq() argument
1634 __acquires(rq->lock) in rq_lock_irq()
1636 raw_spin_rq_lock_irq(rq); in rq_lock_irq()
1637 rq_pin_lock(rq, rf); in rq_lock_irq()
1641 rq_lock(struct rq *rq, struct rq_flags *rf) in rq_lock() argument
1642 __acquires(rq->lock) in rq_lock()
1644 raw_spin_rq_lock(rq); in rq_lock()
1645 rq_pin_lock(rq, rf); in rq_lock()
1649 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) in rq_unlock_irqrestore() argument
1650 __releases(rq->lock) in rq_unlock_irqrestore()
1652 rq_unpin_lock(rq, rf); in rq_unlock_irqrestore()
1653 raw_spin_rq_unlock_irqrestore(rq, rf->flags); in rq_unlock_irqrestore()
1657 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) in rq_unlock_irq() argument
1658 __releases(rq->lock) in rq_unlock_irq()
1660 rq_unpin_lock(rq, rf); in rq_unlock_irq()
1661 raw_spin_rq_unlock_irq(rq); in rq_unlock_irq()
1665 rq_unlock(struct rq *rq, struct rq_flags *rf) in rq_unlock() argument
1666 __releases(rq->lock) in rq_unlock()
1668 rq_unpin_lock(rq, rf); in rq_unlock()
1669 raw_spin_rq_unlock(rq); in rq_unlock()
1672 static inline struct rq *
1674 __acquires(rq->lock) in this_rq_lock_irq()
1676 struct rq *rq; in this_rq_lock_irq() local
1679 rq = this_rq(); in this_rq_lock_irq()
1680 rq_lock(rq, rf); in this_rq_lock_irq()
1681 return rq; in this_rq_lock_irq()
1732 queue_balance_callback(struct rq *rq, in queue_balance_callback() argument
1734 void (*func)(struct rq *rq)) in queue_balance_callback() argument
1736 lockdep_assert_rq_held(rq); in queue_balance_callback()
1743 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) in queue_balance_callback()
1747 head->next = rq->balance_callback; in queue_balance_callback()
1748 rq->balance_callback = head; in queue_balance_callback()
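queue_balance_callback() chains deferred work onto rq->balance_callback to run once the rq lock is released, and refuses to queue a head that is already pending or to displace the special balance_push_callback marker. A hedged registration sketch for kernel context; the callback body and the per-CPU head are hypothetical names, and the head layout is assumed to be the next/func pair shown at lines 941-944:

/* hypothetical callback: runs after the rq lock has been dropped */
static void my_post_unlock_work(struct rq *rq)
{
        /* ... push/pull tasks, kick other CPUs, etc. ... */
}

static DEFINE_PER_CPU(struct balance_callback, my_cb);

static void queue_my_work(struct rq *rq)
{
        lockdep_assert_rq_held(rq);
        queue_balance_callback(rq, &per_cpu(my_cb, cpu_of(rq)),
                               my_post_unlock_work);
}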
1886 extern void __sched_core_account_forceidle(struct rq *rq);
1888 static inline void sched_core_account_forceidle(struct rq *rq) in sched_core_account_forceidle() argument
1891 __sched_core_account_forceidle(rq); in sched_core_account_forceidle()
1894 extern void __sched_core_tick(struct rq *rq);
1896 static inline void sched_core_tick(struct rq *rq) in sched_core_tick() argument
1898 if (sched_core_enabled(rq) && schedstat_enabled()) in sched_core_tick()
1899 __sched_core_tick(rq); in sched_core_tick()
1904 static inline void sched_core_account_forceidle(struct rq *rq) {} in sched_core_account_forceidle() argument
1906 static inline void sched_core_tick(struct rq *rq) {} in sched_core_tick() argument
2055 static inline int task_current(struct rq *rq, struct task_struct *p) in task_current() argument
2057 return rq->curr == p; in task_current()
2060 static inline int task_on_cpu(struct rq *rq, struct task_struct *p) in task_on_cpu() argument
2065 return task_current(rq, p); in task_on_cpu()
2153 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
2154 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
2155 void (*yield_task) (struct rq *rq);
2156 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
2158 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
2160 struct task_struct *(*pick_next_task)(struct rq *rq);
2162 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
2163 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
2166 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2169 struct task_struct * (*pick_task)(struct rq *rq);
2173 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2179 void (*rq_online)(struct rq *rq);
2180 void (*rq_offline)(struct rq *rq);
2182 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2185 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
2194 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2195 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2196 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
2199 unsigned int (*get_rr_interval)(struct rq *rq,
2202 void (*update_curr)(struct rq *rq);
2209 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) in put_prev_task() argument
2211 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
2212 prev->sched_class->put_prev_task(rq, prev); in put_prev_task()
2215 static inline void set_next_task(struct rq *rq, struct task_struct *next) in set_next_task() argument
2217 next->sched_class->set_next_task(rq, next, false); in set_next_task()
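sched_class is a table of function pointers, and the put_prev_task()/set_next_task() wrappers show the dispatch idiom: the core scheduler never calls a policy directly, it always indirects through the task's class. A self-contained sketch of that idiom with simplified stand-in types:

#include <assert.h>

struct rq;
struct task_struct;

struct sched_class {
        void (*put_prev_task)(struct rq *rq, struct task_struct *p);
        void (*set_next_task)(struct rq *rq, struct task_struct *p,
                              int first);
};

struct task_struct {
        const struct sched_class *sched_class;
};

struct rq {
        struct task_struct *curr;
};

static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
        assert(rq->curr == prev);       /* mirrors the WARN_ON_ONCE() */
        prev->sched_class->put_prev_task(rq, prev);
}

static void set_next_task(struct rq *rq, struct task_struct *next)
{
        next->sched_class->set_next_task(rq, next, 0);
}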
2254 static inline bool sched_stop_runnable(struct rq *rq) in sched_stop_runnable() argument
2256 return rq->stop && task_on_rq_queued(rq->stop); in sched_stop_runnable()
2259 static inline bool sched_dl_runnable(struct rq *rq) in sched_dl_runnable() argument
2261 return rq->dl.dl_nr_running > 0; in sched_dl_runnable()
2264 static inline bool sched_rt_runnable(struct rq *rq) in sched_rt_runnable() argument
2266 return rq->rt.rt_queued > 0; in sched_rt_runnable()
2269 static inline bool sched_fair_runnable(struct rq *rq) in sched_fair_runnable() argument
2271 return rq->cfs.nr_running > 0; in sched_fair_runnable()
2274 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2275 extern struct task_struct *pick_next_task_idle(struct rq *rq);
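The sched_*_runnable() helpers let the pick path walk the fixed class hierarchy (stop, deadline, rt, fair, idle) and stop at the first class with runnable work; note the real sched_stop_runnable() also checks that rq->stop is actually queued. A self-contained sketch of the priority walk over simplified counters:

struct task_struct;

struct rq {
        struct task_struct *stop;       /* stopper task, if queued */
        unsigned int dl_nr_running;
        unsigned int rt_queued;
        unsigned int cfs_nr_running;
};

enum sched_pick { PICK_STOP, PICK_DL, PICK_RT, PICK_FAIR, PICK_IDLE };

static enum sched_pick pick_class(const struct rq *rq)
{
        if (rq->stop)
                return PICK_STOP;       /* highest: migration/stop work */
        if (rq->dl_nr_running)
                return PICK_DL;
        if (rq->rt_queued)
                return PICK_RT;
        if (rq->cfs_nr_running)
                return PICK_FAIR;
        return PICK_IDLE;               /* the idle class always has a task */
}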
2286 extern void trigger_load_balance(struct rq *rq);
2290 static inline struct task_struct *get_push_task(struct rq *rq) in get_push_task() argument
2292 struct task_struct *p = rq->curr; in get_push_task()
2294 lockdep_assert_rq_held(rq); in get_push_task()
2296 if (rq->push_busy) in get_push_task()
2305 rq->push_busy = true; in get_push_task()
2314 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
2317 rq->idle_state = idle_state; in idle_set_state()
2320 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
2324 return rq->idle_state; in idle_get_state()
2327 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
2332 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
2350 extern void resched_curr(struct rq *rq);
2372 extern bool sched_can_stop_tick(struct rq *rq);
2380 static inline void sched_update_tick_dependency(struct rq *rq) in sched_update_tick_dependency() argument
2382 int cpu = cpu_of(rq); in sched_update_tick_dependency()
2387 if (sched_can_stop_tick(rq)) in sched_update_tick_dependency()
2394 static inline void sched_update_tick_dependency(struct rq *rq) { } in sched_update_tick_dependency() argument
2397 static inline void add_nr_running(struct rq *rq, unsigned count) in add_nr_running() argument
2399 unsigned prev_nr = rq->nr_running; in add_nr_running()
2401 rq->nr_running = prev_nr + count; in add_nr_running()
2403 call_trace_sched_update_nr_running(rq, count); in add_nr_running()
2407 if (prev_nr < 2 && rq->nr_running >= 2) { in add_nr_running()
2408 if (!READ_ONCE(rq->rd->overload)) in add_nr_running()
2409 WRITE_ONCE(rq->rd->overload, 1); in add_nr_running()
2413 sched_update_tick_dependency(rq); in add_nr_running()
2416 static inline void sub_nr_running(struct rq *rq, unsigned count) in sub_nr_running() argument
2418 rq->nr_running -= count; in sub_nr_running()
2420 call_trace_sched_update_nr_running(rq, -count); in sub_nr_running()
2424 sched_update_tick_dependency(rq); in sub_nr_running()
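add_nr_running() does more than bump a counter: crossing from one to two runnable tasks marks the root domain overloaded so idle CPUs know there is work to pull, and both helpers re-evaluate whether the nohz-full tick can be stopped. A self-contained sketch of the overload transition:

struct root_domain {
        int overload;
};

struct rq {
        unsigned int nr_running;
        struct root_domain *rd;
};

static void add_nr_running(struct rq *rq, unsigned int count)
{
        unsigned int prev_nr = rq->nr_running;

        rq->nr_running = prev_nr + count;

        /* 1 -> 2+ runnable tasks: the kernel sets rd->overload with
         * READ_ONCE()/WRITE_ONCE() since the flag is read locklessly */
        if (prev_nr < 2 && rq->nr_running >= 2 && !rq->rd->overload)
                rq->rd->overload = 1;
}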
2427 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2428 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2430 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
2465 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2467 if (!cpu_active(cpu_of(rq))) in hrtick_enabled()
2469 return hrtimer_is_hres_active(&rq->hrtick_timer); in hrtick_enabled()
2472 static inline int hrtick_enabled_fair(struct rq *rq) in hrtick_enabled_fair() argument
2476 return hrtick_enabled(rq); in hrtick_enabled_fair()
2479 static inline int hrtick_enabled_dl(struct rq *rq) in hrtick_enabled_dl() argument
2483 return hrtick_enabled(rq); in hrtick_enabled_dl()
2486 void hrtick_start(struct rq *rq, u64 delay);
2490 static inline int hrtick_enabled_fair(struct rq *rq) in hrtick_enabled_fair() argument
2495 static inline int hrtick_enabled_dl(struct rq *rq) in hrtick_enabled_dl() argument
2500 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2539 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) in double_rq_clock_clear_update()
2548 static inline void double_rq_clock_clear_update(struct rq *rq1, struct rq *rq2) {} in double_rq_clock_clear_update()
2553 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) in rq_order_less()
2579 extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
2591 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2610 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2638 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) in double_lock_balance()
2645 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) in double_unlock_balance()
2686 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
2697 extern void set_rq_online (struct rq *rq);
2698 extern void set_rq_offline(struct rq *rq);
2709 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
2726 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
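rq_order_less() and double_rq_lock() encode the standard deadlock-avoidance rule for taking two runqueue locks: acquire them in a global order, and take only one lock when both runqueues resolve to the same lock (as SMT siblings do under core scheduling). A self-contained sketch of the rule, ordering by CPU id and using a mutex as a stand-in:

#include <pthread.h>

struct rq {
        pthread_mutex_t lock;   /* stand-in for the raw rq spinlock */
        int cpu;
};

static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
        if (rq1 == rq2) {                       /* one lock: take it once */
                pthread_mutex_lock(&rq1->lock);
                return;
        }
        if (rq2->cpu < rq1->cpu) {              /* enforce a global order */
                struct rq *tmp = rq1;
                rq1 = rq2;
                rq2 = tmp;
        }
        pthread_mutex_lock(&rq1->lock);         /* lower-ordered lock first */
        pthread_mutex_lock(&rq2->lock);
}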
2788 extern void nohz_balance_exit_idle(struct rq *rq);
2790 static inline void nohz_balance_exit_idle(struct rq *rq) { } in nohz_balance_exit_idle() argument
2854 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) in cpufreq_update_util() argument
2859 cpu_of(rq))); in cpufreq_update_util()
2861 data->func(data, rq_clock(rq), flags); in cpufreq_update_util()
2864 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} in cpufreq_update_util() argument
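cpufreq_update_util() fetches an RCU-protected per-CPU hook and, when a cpufreq governor has registered one, invokes it with the current rq clock; the stub variant compiles the call away. A self-contained sketch of the guarded-hook idiom, with a plain pointer standing in for the rcu_dereference_sched() the kernel uses:

struct update_util_data {
        void (*func)(struct update_util_data *data,
                     unsigned long long time, unsigned int flags);
};

/* stand-in for the per-CPU hook pointer the governor registers */
static struct update_util_data *cpu_hook;

static void cpufreq_update_util(unsigned long long now, unsigned int flags)
{
        struct update_util_data *data = cpu_hook;

        if (data)                       /* no governor hook: do nothing */
                data->func(data, now, flags);
}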
2915 static inline unsigned long cpu_bw_dl(struct rq *rq) in cpu_bw_dl() argument
2917 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; in cpu_bw_dl()
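cpu_bw_dl() converts the deadline class's reserved bandwidth from fixed-point BW units into the 0..SCHED_CAPACITY_SCALE range used by the rest of the scheduler. A worked, self-contained sketch assuming the usual constants (BW_SHIFT = 20, SCHED_CAPACITY_SCALE = 1024):

#include <stdio.h>

#define BW_SHIFT                20      /* fractional bits of dl bandwidth */
#define SCHED_CAPACITY_SCALE    1024

static unsigned long cpu_bw_dl(unsigned long running_bw)
{
        return (running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
}

int main(void)
{
        unsigned long half = (1UL << BW_SHIFT) / 2;     /* a 50% reservation */

        printf("%lu\n", cpu_bw_dl(half));               /* prints 512 */
        return 0;
}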
2920 static inline unsigned long cpu_util_dl(struct rq *rq) in cpu_util_dl() argument
2922 return READ_ONCE(rq->avg_dl.util_avg); in cpu_util_dl()
2973 static inline unsigned long cpu_util_rt(struct rq *rq) in cpu_util_rt() argument
2975 return READ_ONCE(rq->avg_rt.util_avg); in cpu_util_rt()
3000 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
3017 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_util_with()
3021 min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value)); in uclamp_rq_util_with()
3022 max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value)); in uclamp_rq_util_with()
3036 static inline bool uclamp_rq_is_capped(struct rq *rq) in uclamp_rq_is_capped() argument
3044 rq_util = cpu_util_cfs(cpu_of(rq)) + cpu_util_rt(rq); in uclamp_rq_is_capped()
3045 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); in uclamp_rq_is_capped()
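uclamp_rq_util_with() folds the per-rq clamp values into a utilisation estimate: util is clamped into [UCLAMP_MIN, UCLAMP_MAX], and an inverted range (min >= max, possible when a boost exceeds the cap) degenerates to min. A self-contained sketch of just the clamp step:

static unsigned long uclamp_util(unsigned long util,
                                 unsigned long min_util,
                                 unsigned long max_util)
{
        if (min_util >= max_util)       /* inverted range: boost wins */
                return min_util;
        if (util < min_util)
                return min_util;
        if (util > max_util)
                return max_util;
        return util;
}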
3064 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
3070 static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } in uclamp_rq_is_capped() argument
3079 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
3081 return rq->avg_irq.util_avg; in cpu_util_irq()
3094 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
3131 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument
3141 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_switch_mm()
3144 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_switch_mm()
3147 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument