Lines Matching refs:rq (symbol references to struct rq in the Linux scheduler's kernel/sched/sched.h)
87 struct rq;
99 extern void calc_global_load_tick(struct rq *this_rq);
100 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
102 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
580 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
649 struct rq *rq; member
835 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
895 struct rq { struct
1056 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1058 return cfs_rq->rq; in rq_of()
1063 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
1065 return container_of(cfs_rq, struct rq, cfs); in rq_of()
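The two rq_of() variants reflect a config split: with CONFIG_FAIR_GROUP_SCHED every cfs_rq carries an explicit back-pointer (the struct rq *rq member at line 580 above), while without group scheduling the lone cfs_rq is embedded in its rq, so container_of() can recover the enclosing runqueue. A minimal user-space sketch of the container_of() variant, with stand-in struct layouts:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the kernel types; the real layouts live in sched.h. */
    struct cfs_rq { unsigned int nr_running; };
    struct rq { int cpu; struct cfs_rq cfs; };

    /* container_of(): recover the enclosing struct from a member pointer. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct rq *rq_of(struct cfs_rq *cfs_rq)
    {
            return container_of(cfs_rq, struct rq, cfs);
    }

    int main(void)
    {
            struct rq rq = { .cpu = 3 };

            printf("cpu=%d\n", rq_of(&rq.cfs)->cpu); /* prints cpu=3 */
            return 0;
    }

The same pattern is used throughout the kernel; list_entry() is the canonical instance.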
1069 static inline int cpu_of(struct rq *rq) in cpu_of() argument
1072 return rq->cpu; in cpu_of()
1080 extern void __update_idle_core(struct rq *rq);
1082 static inline void update_idle_core(struct rq *rq) in update_idle_core() argument
1085 __update_idle_core(rq); in update_idle_core()
1089 static inline void update_idle_core(struct rq *rq) { } in update_idle_core() argument
1092 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
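runqueues is the per-CPU array behind the cpu_rq(cpu), this_rq() and task_rq(p) accessors used throughout this file (see e.g. line 1335 below).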
1100 extern void update_rq_clock(struct rq *rq);
1102 static inline u64 __rq_clock_broken(struct rq *rq) in __rq_clock_broken() argument
1104 return READ_ONCE(rq->clock); in __rq_clock_broken()
1134 static inline void assert_clock_updated(struct rq *rq) in assert_clock_updated() argument
1140 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); in assert_clock_updated()
1143 static inline u64 rq_clock(struct rq *rq) in rq_clock() argument
1145 lockdep_assert_held(&rq->lock); in rq_clock()
1146 assert_clock_updated(rq); in rq_clock()
1148 return rq->clock; in rq_clock()
1151 static inline u64 rq_clock_task(struct rq *rq) in rq_clock_task() argument
1153 lockdep_assert_held(&rq->lock); in rq_clock_task()
1154 assert_clock_updated(rq); in rq_clock_task()
1156 return rq->clock_task; in rq_clock_task()
1172 static inline u64 rq_clock_thermal(struct rq *rq) in rq_clock_thermal() argument
1174 return rq_clock_task(rq) >> sched_thermal_decay_shift; in rq_clock_thermal()
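Right-shifting the task clock by sched_thermal_decay_shift makes the thermal clock advance 2^shift times slower than rq_clock_task(), which correspondingly stretches the PELT decay of the thermal pressure signal.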
1177 static inline void rq_clock_skip_update(struct rq *rq) in rq_clock_skip_update() argument
1179 lockdep_assert_held(&rq->lock); in rq_clock_skip_update()
1180 rq->clock_update_flags |= RQCF_REQ_SKIP; in rq_clock_skip_update()
1187 static inline void rq_clock_cancel_skipupdate(struct rq *rq) in rq_clock_cancel_skipupdate() argument
1189 lockdep_assert_held(&rq->lock); in rq_clock_cancel_skipupdate()
1190 rq->clock_update_flags &= ~RQCF_REQ_SKIP; in rq_clock_cancel_skipupdate()
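The clock_update_flags bits form a small state machine: rq_clock_skip_update() (line 1177) requests a skip with RQCF_REQ_SKIP, the scheduler core promotes that to RQCF_ACT_SKIP around __schedule(), and update_rq_clock() marks the clock fresh with RQCF_UPDATED so that assert_clock_updated() (line 1134) can warn when rq_clock() or rq_clock_task() would read a stale value. A toy user-space model of that discipline; the flag values match the kernel's, everything else is a stand-in:

    #include <assert.h>
    #include <stdio.h>

    #define RQCF_REQ_SKIP 0x01 /* caller asked to skip the next update */
    #define RQCF_ACT_SKIP 0x02 /* skip is in effect inside __schedule() */
    #define RQCF_UPDATED  0x04 /* clock was refreshed under rq->lock */

    struct rq { unsigned int clock_update_flags; unsigned long long clock; };

    static void update_rq_clock(struct rq *rq)
    {
            rq->clock_update_flags |= RQCF_UPDATED;
            rq->clock += 1000; /* stand-in for a sched_clock_cpu() delta */
    }

    static unsigned long long rq_clock(struct rq *rq)
    {
            /* assert_clock_updated(): neither ACT_SKIP nor UPDATED => stale */
            assert(rq->clock_update_flags >= RQCF_ACT_SKIP);
            return rq->clock;
    }

    int main(void)
    {
            struct rq rq = { 0, 0 };

            update_rq_clock(&rq); /* normally done under rq->lock */
            printf("%llu\n", rq_clock(&rq));
            return 0;
    }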
1216 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock() argument
1218 rf->cookie = lockdep_pin_lock(&rq->lock); in rq_pin_lock()
1221 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in rq_pin_lock()
1226 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock() argument
1229 if (rq->clock_update_flags > RQCF_ACT_SKIP) in rq_unpin_lock()
1233 lockdep_unpin_lock(&rq->lock, rf->cookie); in rq_unpin_lock()
1236 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock() argument
1238 lockdep_repin_lock(&rq->lock, rf->cookie); in rq_repin_lock()
1244 rq->clock_update_flags |= rf->clock_update_flags; in rq_repin_lock()
1248 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1249 __acquires(rq->lock);
1251 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1253 __acquires(rq->lock);
1255 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock() argument
1256 __releases(rq->lock) in __task_rq_unlock()
1258 rq_unpin_lock(rq, rf); in __task_rq_unlock()
1259 raw_spin_unlock(&rq->lock); in __task_rq_unlock()
1263 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock() argument
1264 __releases(rq->lock) in task_rq_unlock()
1267 rq_unpin_lock(rq, rf); in task_rq_unlock()
1268 raw_spin_unlock(&rq->lock); in task_rq_unlock()
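task_rq_lock() must tolerate the task migrating between looking up task_rq(p) and actually acquiring the lock, so the real helper loops: take the candidate runqueue's lock, recheck that it is still the task's runqueue, and unlock and retry otherwise (it additionally holds p->pi_lock to stabilize the task). A toy pthread model of just the retry-on-migration pattern, with stand-in types:

    #include <pthread.h>
    #include <stdio.h>

    struct rq { pthread_mutex_t lock; };
    struct task_struct { int cpu; };

    static struct rq runqueues[2] = {
            { PTHREAD_MUTEX_INITIALIZER },
            { PTHREAD_MUTEX_INITIALIZER },
    };

    static struct rq *task_rq_lock(struct task_struct *p)
    {
            for (;;) {
                    struct rq *rq = &runqueues[p->cpu];

                    pthread_mutex_lock(&rq->lock);
                    if (rq == &runqueues[p->cpu])
                            return rq; /* p did not migrate while we waited */
                    pthread_mutex_unlock(&rq->lock); /* lost the race: retry */
            }
    }

    int main(void)
    {
            struct task_struct p = { .cpu = 1 };
            struct rq *rq = task_rq_lock(&p);

            printf("locked rq %d\n", (int)(rq - runqueues)); /* prints 1 */
            pthread_mutex_unlock(&rq->lock);
            return 0;
    }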
1273 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave() argument
1274 __acquires(rq->lock) in rq_lock_irqsave()
1276 raw_spin_lock_irqsave(&rq->lock, rf->flags); in rq_lock_irqsave()
1277 rq_pin_lock(rq, rf); in rq_lock_irqsave()
1281 rq_lock_irq(struct rq *rq, struct rq_flags *rf) in rq_lock_irq() argument
1282 __acquires(rq->lock) in rq_lock_irq()
1284 raw_spin_lock_irq(&rq->lock); in rq_lock_irq()
1285 rq_pin_lock(rq, rf); in rq_lock_irq()
1289 rq_lock(struct rq *rq, struct rq_flags *rf) in rq_lock() argument
1290 __acquires(rq->lock) in rq_lock()
1292 raw_spin_lock(&rq->lock); in rq_lock()
1293 rq_pin_lock(rq, rf); in rq_lock()
1297 rq_relock(struct rq *rq, struct rq_flags *rf) in rq_relock() argument
1298 __acquires(rq->lock) in rq_relock()
1300 raw_spin_lock(&rq->lock); in rq_relock()
1301 rq_repin_lock(rq, rf); in rq_relock()
1305 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) in rq_unlock_irqrestore() argument
1306 __releases(rq->lock) in rq_unlock_irqrestore()
1308 rq_unpin_lock(rq, rf); in rq_unlock_irqrestore()
1309 raw_spin_unlock_irqrestore(&rq->lock, rf->flags); in rq_unlock_irqrestore()
1313 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) in rq_unlock_irq() argument
1314 __releases(rq->lock) in rq_unlock_irq()
1316 rq_unpin_lock(rq, rf); in rq_unlock_irq()
1317 raw_spin_unlock_irq(&rq->lock); in rq_unlock_irq()
1321 rq_unlock(struct rq *rq, struct rq_flags *rf) in rq_unlock() argument
1322 __releases(rq->lock) in rq_unlock()
1324 rq_unpin_lock(rq, rf); in rq_unlock()
1325 raw_spin_unlock(&rq->lock); in rq_unlock()
1328 static inline struct rq *
1330 __acquires(rq->lock) in this_rq_lock_irq()
1332 struct rq *rq; in this_rq_lock_irq() local
1335 rq = this_rq(); in this_rq_lock_irq()
1336 rq_lock(rq, rf); in this_rq_lock_irq()
1337 return rq; in this_rq_lock_irq()
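Taken together, every acquire helper above pairs a raw spinlock primitive with rq_pin_lock(), and the matching release helper unpins before unlocking:

    rq_lock()         / rq_unlock()             plain raw_spin_lock/raw_spin_unlock
    rq_lock_irq()     / rq_unlock_irq()         also disables/re-enables local IRQs
    rq_lock_irqsave() / rq_unlock_irqrestore()  IRQ state saved in rf->flags
    rq_relock()       re-acquires the lock and rq_repin_lock()s with the cookie kept in rf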
1386 queue_balance_callback(struct rq *rq, in queue_balance_callback() argument
1388 void (*func)(struct rq *rq)) in queue_balance_callback() argument
1390 lockdep_assert_held(&rq->lock); in queue_balance_callback()
1396 head->next = rq->balance_callback; in queue_balance_callback()
1397 rq->balance_callback = head; in queue_balance_callback()
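queue_balance_callback() defers balancing work: entries are pushed onto rq->balance_callback under rq->lock (lines 1390-1397) and the scheduler core later splices the list out and runs it in LIFO order once it is safe to do so. A toy user-space model of the push list, with stand-in types:

    #include <stdio.h>

    struct rq;
    struct callback_head {
            struct callback_head *next;
            void (*func)(struct rq *rq);
    };
    struct rq { struct callback_head *balance_callback; };

    static void queue_balance_callback(struct rq *rq, struct callback_head *head,
                                       void (*func)(struct rq *rq))
    {
            if (head->next) /* already linked: do not queue twice */
                    return;
            head->func = func;
            head->next = rq->balance_callback;
            rq->balance_callback = head;
    }

    static void run_balance_callbacks(struct rq *rq)
    {
            struct callback_head *head = rq->balance_callback;

            rq->balance_callback = NULL;
            while (head) {
                    struct callback_head *next = head->next;

                    head->next = NULL;
                    head->func(rq);
                    head = next;
            }
    }

    static void push_work(struct rq *rq) { (void)rq; puts("push"); }
    static void pull_work(struct rq *rq) { (void)rq; puts("pull"); }

    int main(void)
    {
            struct rq rq = { 0 };
            struct callback_head cb1 = { 0, 0 }, cb2 = { 0, 0 };

            queue_balance_callback(&rq, &cb1, push_work);
            queue_balance_callback(&rq, &cb2, pull_work);
            run_balance_callbacks(&rq); /* LIFO: prints "pull", then "push" */
            return 0;
    }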
1693 static inline int task_current(struct rq *rq, struct task_struct *p) in task_current() argument
1695 return rq->curr == p; in task_current()
1698 static inline int task_running(struct rq *rq, struct task_struct *p) in task_running() argument
1703 return task_current(rq, p); in task_running()
1785 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1786 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1787 void (*yield_task) (struct rq *rq);
1788 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
1790 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
1792 struct task_struct *(*pick_next_task)(struct rq *rq);
1794 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
1795 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
1798 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
1802 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1807 void (*rq_online)(struct rq *rq);
1808 void (*rq_offline)(struct rq *rq);
1811 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
1820 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1821 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1822 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1825 unsigned int (*get_rr_interval)(struct rq *rq,
1828 void (*update_curr)(struct rq *rq);
1838 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) in put_prev_task() argument
1840 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
1841 prev->sched_class->put_prev_task(rq, prev); in put_prev_task()
1844 static inline void set_next_task(struct rq *rq, struct task_struct *next) in set_next_task() argument
1846 WARN_ON_ONCE(rq->curr != next); in set_next_task()
1847 next->sched_class->set_next_task(rq, next, false); in set_next_task()
1869 static inline bool sched_stop_runnable(struct rq *rq) in sched_stop_runnable() argument
1871 return rq->stop && task_on_rq_queued(rq->stop); in sched_stop_runnable()
1874 static inline bool sched_dl_runnable(struct rq *rq) in sched_dl_runnable() argument
1876 return rq->dl.dl_nr_running > 0; in sched_dl_runnable()
1879 static inline bool sched_rt_runnable(struct rq *rq) in sched_rt_runnable() argument
1881 return rq->rt.rt_queued > 0; in sched_rt_runnable()
1884 static inline bool sched_fair_runnable(struct rq *rq) in sched_fair_runnable() argument
1886 return rq->cfs.nr_running > 0; in sched_fair_runnable()
1889 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_f…
1890 extern struct task_struct *pick_next_task_idle(struct rq *rq);
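The sched_*_runnable() helpers feed pick_next_task()'s fast path, which tries the scheduling classes in fixed priority order: stop, deadline, rt, fair, idle. A toy model of that ordered walk; the rq fields and class table here are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    struct rq { int stop_runnable, dl_nr_running, rt_queued, cfs_nr_running; };

    struct class_model {
            const char *name;
            bool (*runnable)(struct rq *rq);
    };

    static bool stop_ok(struct rq *rq) { return rq->stop_runnable; }
    static bool dl_ok(struct rq *rq)   { return rq->dl_nr_running > 0; }
    static bool rt_ok(struct rq *rq)   { return rq->rt_queued > 0; }
    static bool fair_ok(struct rq *rq) { return rq->cfs_nr_running > 0; }
    static bool idle_ok(struct rq *rq) { (void)rq; return true; }

    static const struct class_model classes[] = {
            { "stop", stop_ok }, { "dl", dl_ok }, { "rt", rt_ok },
            { "fair", fair_ok }, { "idle", idle_ok },
    };

    int main(void)
    {
            struct rq rq = { .rt_queued = 1, .cfs_nr_running = 4 };

            for (int i = 0; i < 5; i++) {
                    if (classes[i].runnable(&rq)) {
                            printf("pick from %s\n", classes[i].name); /* "rt" */
                            break;
                    }
            }
            return 0;
    }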
1896 extern void trigger_load_balance(struct rq *rq);
1903 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
1906 rq->idle_state = idle_state; in idle_set_state()
1909 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
1913 return rq->idle_state; in idle_get_state()
1916 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
1921 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
1939 extern void resched_curr(struct rq *rq);
1961 extern bool sched_can_stop_tick(struct rq *rq);
1969 static inline void sched_update_tick_dependency(struct rq *rq) in sched_update_tick_dependency() argument
1971 int cpu = cpu_of(rq); in sched_update_tick_dependency()
1976 if (sched_can_stop_tick(rq)) in sched_update_tick_dependency()
1983 static inline void sched_update_tick_dependency(struct rq *rq) { } in sched_update_tick_dependency() argument
1986 static inline void add_nr_running(struct rq *rq, unsigned count) in add_nr_running() argument
1988 unsigned prev_nr = rq->nr_running; in add_nr_running()
1990 rq->nr_running = prev_nr + count; in add_nr_running()
1992 call_trace_sched_update_nr_running(rq, count); in add_nr_running()
1996 if (prev_nr < 2 && rq->nr_running >= 2) { in add_nr_running()
1997 if (!READ_ONCE(rq->rd->overload)) in add_nr_running()
1998 WRITE_ONCE(rq->rd->overload, 1); in add_nr_running()
2002 sched_update_tick_dependency(rq); in add_nr_running()
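add_nr_running() flags the root domain as overloaded only when nr_running crosses from below two to two or more, and the READ_ONCE() test before the WRITE_ONCE() avoids redundantly dirtying the shared root_domain cacheline once the flag is already set.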
2005 static inline void sub_nr_running(struct rq *rq, unsigned count) in sub_nr_running() argument
2007 rq->nr_running -= count; in sub_nr_running()
2009 call_trace_sched_update_nr_running(rq, -count); in sub_nr_running()
2013 sched_update_tick_dependency(rq); in sub_nr_running()
2016 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2017 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2019 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
2031 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2035 if (!cpu_active(cpu_of(rq))) in hrtick_enabled()
2037 return hrtimer_is_hres_active(&rq->hrtick_timer); in hrtick_enabled()
2040 void hrtick_start(struct rq *rq, u64 delay);
2044 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2079 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
2089 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2108 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2134 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) in double_lock_balance()
2145 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) in double_unlock_balance()
2185 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
2210 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
2221 extern void set_rq_online (struct rq *rq);
2222 extern void set_rq_offline(struct rq *rq);
2233 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
2249 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
2299 extern void nohz_balance_exit_idle(struct rq *rq);
2301 static inline void nohz_balance_exit_idle(struct rq *rq) { } in nohz_balance_exit_idle() argument
2315 struct rq *rq = cpu_rq(i); in __dl_update() local
2317 rq->dl.extra_bw += bw; in __dl_update()
2386 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) in cpufreq_update_util() argument
2391 cpu_of(rq))); in cpufreq_update_util()
2393 data->func(data, rq_clock(rq), flags); in cpufreq_update_util()
2396 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} in cpufreq_update_util() argument
2420 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
2429 min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value); in uclamp_rq_util_with()
2430 max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value); in uclamp_rq_util_with()
2462 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
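Both uclamp_rq_util_with() variants reduce to clamping a utilization into the rq-wide window read at lines 2429-2430; the stub at line 2462 returns util unchanged when CONFIG_UCLAMP_TASK is off. A minimal sketch of the clamp step alone (clamp_util is a hypothetical name, and the real helper also folds in the clamps of a task being enqueued):

    #include <stdio.h>

    static unsigned long clamp_util(unsigned long util,
                                    unsigned long min_util, unsigned long max_util)
    {
            if (util < min_util)
                    return min_util;
            if (util > max_util)
                    return max_util;
            return util;
    }

    int main(void)
    {
            /* A util of 700 capped by a max clamp of 512. */
            printf("%lu\n", clamp_util(700, 0, 512)); /* prints 512 */
            return 0;
    }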
2510 static inline unsigned long cpu_bw_dl(struct rq *rq) in cpu_bw_dl() argument
2512 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; in cpu_bw_dl()
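running_bw is fixed point with BW_SHIFT (20) fractional bits, so the expression rescales bandwidth to capacity units: a running_bw of 1 << 19 (half the bandwidth unit) yields ((1 << 19) * 1024) >> 20 = 512, half of SCHED_CAPACITY_SCALE.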
2515 static inline unsigned long cpu_util_dl(struct rq *rq) in cpu_util_dl() argument
2517 return READ_ONCE(rq->avg_dl.util_avg); in cpu_util_dl()
2520 static inline unsigned long cpu_util_cfs(struct rq *rq) in cpu_util_cfs() argument
2522 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); in cpu_util_cfs()
2526 READ_ONCE(rq->cfs.avg.util_est.enqueued)); in cpu_util_cfs()
2532 static inline unsigned long cpu_util_rt(struct rq *rq) in cpu_util_rt() argument
2534 return READ_ONCE(rq->avg_rt.util_avg); in cpu_util_rt()
2546 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
2548 return rq->avg_irq.util_avg; in cpu_util_irq()
2561 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
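For frequency selection these per-class signals are combined in schedutil_cpu_util(): CFS and RT utilization are summed, deadline contributes its reserved bandwidth via cpu_bw_dl() rather than its PELT average, and IRQ time first scales the sum by (max - irq) / max before being added on top.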
2598 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument
2608 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_switch_mm()
2611 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_switch_mm()
2614 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument