Lines matching refs: rt_rq
76 void init_rt_rq(struct rt_rq *rt_rq) in init_rt_rq() argument
81 array = &rt_rq->active; in init_rt_rq()
90 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_rt_rq()
91 rt_rq->highest_prio.next = MAX_RT_PRIO; in init_rt_rq()
92 rt_rq->rt_nr_migratory = 0; in init_rt_rq()
93 rt_rq->overloaded = 0; in init_rt_rq()
94 plist_head_init(&rt_rq->pushable_tasks); in init_rt_rq()
97 rt_rq->rt_queued = 0; in init_rt_rq()
99 rt_rq->rt_time = 0; in init_rt_rq()
100 rt_rq->rt_throttled = 0; in init_rt_rq()
101 rt_rq->rt_runtime = 0; in init_rt_rq()
102 raw_spin_lock_init(&rt_rq->rt_runtime_lock); in init_rt_rq()
121 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
123 return rt_rq->rq; in rq_of_rt_rq()
126 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
128 return rt_se->rt_rq; in rt_rq_of_se()
133 struct rt_rq *rt_rq = rt_se->rt_rq; in rq_of_rt_se() local
135 return rt_rq->rq; in rq_of_rt_se()
146 if (tg->rt_rq) in free_rt_sched_group()
147 kfree(tg->rt_rq[i]); in free_rt_sched_group()
152 kfree(tg->rt_rq); in free_rt_sched_group()
156 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, in init_tg_rt_entry() argument
162 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_tg_rt_entry()
163 rt_rq->rt_nr_boosted = 0; in init_tg_rt_entry()
164 rt_rq->rq = rq; in init_tg_rt_entry()
165 rt_rq->tg = tg; in init_tg_rt_entry()
167 tg->rt_rq[cpu] = rt_rq; in init_tg_rt_entry()
174 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
176 rt_se->rt_rq = parent->my_q; in init_tg_rt_entry()
178 rt_se->my_q = rt_rq; in init_tg_rt_entry()
185 struct rt_rq *rt_rq; in alloc_rt_sched_group() local
189 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); in alloc_rt_sched_group()
190 if (!tg->rt_rq) in alloc_rt_sched_group()
200 rt_rq = kzalloc_node(sizeof(struct rt_rq), in alloc_rt_sched_group()
202 if (!rt_rq) in alloc_rt_sched_group()
210 init_rt_rq(rt_rq); in alloc_rt_sched_group()
211 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; in alloc_rt_sched_group()
212 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); in alloc_rt_sched_group()
218 kfree(rt_rq); in alloc_rt_sched_group()
232 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
234 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
244 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
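Without CONFIG_RT_GROUP_SCHED there is a single rt_rq embedded in each struct rq, so rq_of_rt_rq() can recover the enclosing runqueue with container_of(). A minimal standalone sketch of that pattern, using simplified stand-in structs and a stripped-down container_of (not the real kernel definitions):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-ins for struct rt_rq / struct rq. */
    struct rt_rq { int rt_nr_running; };
    struct rq   { int cpu; struct rt_rq rt; };

    /* Stripped-down container_of: subtract the member offset to get back
     * to the enclosing object, as the !CONFIG_RT_GROUP_SCHED rq_of_rt_rq()
     * above does with the embedded 'rt' member. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
    {
            return container_of(rt_rq, struct rq, rt);
    }

    int main(void)
    {
            struct rq rq = { .cpu = 3 };
            struct rt_rq *rt_rq = &rq.rt;

            printf("cpu=%d\n", rq_of_rt_rq(rt_rq)->cpu); /* prints cpu=3 */
            return 0;
    }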
303 static void update_rt_migration(struct rt_rq *rt_rq) in update_rt_migration() argument
305 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { in update_rt_migration()
306 if (!rt_rq->overloaded) { in update_rt_migration()
307 rt_set_overload(rq_of_rt_rq(rt_rq)); in update_rt_migration()
308 rt_rq->overloaded = 1; in update_rt_migration()
310 } else if (rt_rq->overloaded) { in update_rt_migration()
311 rt_clear_overload(rq_of_rt_rq(rt_rq)); in update_rt_migration()
312 rt_rq->overloaded = 0; in update_rt_migration()
316 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_migration() argument
324 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in inc_rt_migration()
326 rt_rq->rt_nr_total++; in inc_rt_migration()
328 rt_rq->rt_nr_migratory++; in inc_rt_migration()
330 update_rt_migration(rt_rq); in inc_rt_migration()
333 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_migration() argument
341 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in dec_rt_migration()
343 rt_rq->rt_nr_total--; in dec_rt_migration()
345 rt_rq->rt_nr_migratory--; in dec_rt_migration()
347 update_rt_migration(rt_rq); in dec_rt_migration()
409 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_migration() argument
414 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_migration() argument
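The migration counters above feed a simple overload test: a runqueue counts as overloaded only while it holds more than one runnable RT task and at least one of them may migrate. A hedged, self-contained restatement of that condition (field names taken from the listing; the rt_set_overload()/rt_clear_overload() calls, which update the root domain mask in the real code, are stubbed out):

    /* Only the fields the overload test reads, per the listing above. */
    struct rt_rq {
            unsigned long rt_nr_total;      /* all queued RT tasks */
            unsigned long rt_nr_migratory;  /* those allowed on more than one CPU */
            int overloaded;
    };

    /* Stand-ins for rt_set_overload()/rt_clear_overload(). */
    static void rt_set_overload(struct rt_rq *rt_rq)   { (void)rt_rq; }
    static void rt_clear_overload(struct rt_rq *rt_rq) { (void)rt_rq; }

    /* Mirrors update_rt_migration(): overloaded means "more than one RT task
     * queued here and at least one of them could run elsewhere". */
    static void update_rt_migration(struct rt_rq *rt_rq)
    {
            if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                    if (!rt_rq->overloaded) {
                            rt_set_overload(rt_rq);
                            rt_rq->overloaded = 1;
                    }
            } else if (rt_rq->overloaded) {
                    rt_clear_overload(rt_rq);
                    rt_rq->overloaded = 0;
            }
    }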
432 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
433 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
442 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
444 if (!rt_rq->tg) in sched_rt_runtime()
447 return rt_rq->rt_runtime; in sched_rt_runtime()
450 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
452 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); in sched_rt_period()
470 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
473 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
478 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
486 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
488 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue()
489 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
494 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_enqueue()
496 if (rt_rq->rt_nr_running) { in sched_rt_rq_enqueue()
498 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
502 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
507 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
510 int cpu = cpu_of(rq_of_rt_rq(rt_rq)); in sched_rt_rq_dequeue()
512 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_dequeue()
515 dequeue_top_rt_rq(rt_rq); in sched_rt_rq_dequeue()
517 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); in sched_rt_rq_dequeue()
523 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
525 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; in rt_rq_throttled()
530 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_boosted() local
533 if (rt_rq) in rt_se_boosted()
534 return !!rt_rq->rt_nr_boosted; in rt_se_boosted()
553 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
555 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; in sched_rt_period_rt_rq()
558 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
560 return &rt_rq->tg->rt_bandwidth; in sched_rt_bandwidth()
565 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
567 return rt_rq->rt_runtime; in sched_rt_runtime()
570 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
575 typedef struct rt_rq *rt_rq_iter_t;
577 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
578 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
583 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
588 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
590 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
592 if (!rt_rq->rt_nr_running) in sched_rt_rq_enqueue()
595 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
599 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
601 dequeue_top_rt_rq(rt_rq); in sched_rt_rq_dequeue()
604 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
606 return rt_rq->rt_throttled; in rt_rq_throttled()
615 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
620 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
627 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) in sched_rt_bandwidth_account() argument
629 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_bandwidth_account()
632 rt_rq->rt_time < rt_b->rt_runtime); in sched_rt_bandwidth_account()
639 static void do_balance_runtime(struct rt_rq *rt_rq) in do_balance_runtime() argument
641 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in do_balance_runtime()
642 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; in do_balance_runtime()
651 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in do_balance_runtime()
654 if (iter == rt_rq) in do_balance_runtime()
673 if (rt_rq->rt_runtime + diff > rt_period) in do_balance_runtime()
674 diff = rt_period - rt_rq->rt_runtime; in do_balance_runtime()
676 rt_rq->rt_runtime += diff; in do_balance_runtime()
677 if (rt_rq->rt_runtime == rt_period) { in do_balance_runtime()
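do_balance_runtime() borrows spare runtime from the other rt_rqs in the same bandwidth group, capping each borrow so the local rt_runtime never grows past the period. A standalone sketch of just that capping arithmetic, assuming plain u64 nanosecond values and omitting the real locking and per-CPU iteration:

    #include <stdint.h>

    typedef uint64_t u64;

    /*
     * Clamp a proposed borrow so the local runtime never exceeds the period,
     * mirroring the 'diff' capping in do_balance_runtime() above.
     * Returns the amount actually borrowed.
     */
    static u64 borrow_runtime(u64 *rt_runtime, u64 diff, u64 rt_period)
    {
            if (*rt_runtime + diff > rt_period)
                    diff = rt_period - *rt_runtime;

            *rt_runtime += diff;
            return diff;
    }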
695 struct rt_rq *rt_rq; in __disable_runtime() local
700 for_each_rt_rq(rt_rq, iter, rq) { in __disable_runtime()
701 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __disable_runtime()
706 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
712 if (rt_rq->rt_runtime == RUNTIME_INF || in __disable_runtime()
713 rt_rq->rt_runtime == rt_b->rt_runtime) in __disable_runtime()
715 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
722 want = rt_b->rt_runtime - rt_rq->rt_runtime; in __disable_runtime()
728 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in __disable_runtime()
734 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) in __disable_runtime()
752 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
763 rt_rq->rt_runtime = RUNTIME_INF; in __disable_runtime()
764 rt_rq->rt_throttled = 0; in __disable_runtime()
765 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
769 sched_rt_rq_enqueue(rt_rq); in __disable_runtime()
776 struct rt_rq *rt_rq; in __enable_runtime() local
784 for_each_rt_rq(rt_rq, iter, rq) { in __enable_runtime()
785 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __enable_runtime()
788 raw_spin_lock(&rt_rq->rt_runtime_lock); in __enable_runtime()
789 rt_rq->rt_runtime = rt_b->rt_runtime; in __enable_runtime()
790 rt_rq->rt_time = 0; in __enable_runtime()
791 rt_rq->rt_throttled = 0; in __enable_runtime()
792 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __enable_runtime()
797 static void balance_runtime(struct rt_rq *rt_rq) in balance_runtime() argument
802 if (rt_rq->rt_time > rt_rq->rt_runtime) { in balance_runtime()
803 raw_spin_unlock(&rt_rq->rt_runtime_lock); in balance_runtime()
804 do_balance_runtime(rt_rq); in balance_runtime()
805 raw_spin_lock(&rt_rq->rt_runtime_lock); in balance_runtime()
809 static inline void balance_runtime(struct rt_rq *rt_rq) {} in balance_runtime() argument
833 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); in do_sched_rt_period_timer() local
834 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer()
841 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
842 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) in do_sched_rt_period_timer()
843 rt_rq->rt_runtime = rt_b->rt_runtime; in do_sched_rt_period_timer()
844 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; in do_sched_rt_period_timer()
845 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
852 if (rt_rq->rt_time) { in do_sched_rt_period_timer()
855 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
856 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
857 balance_runtime(rt_rq); in do_sched_rt_period_timer()
858 runtime = rt_rq->rt_runtime; in do_sched_rt_period_timer()
859 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); in do_sched_rt_period_timer()
860 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { in do_sched_rt_period_timer()
861 rt_rq->rt_throttled = 0; in do_sched_rt_period_timer()
871 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
874 if (rt_rq->rt_time || rt_rq->rt_nr_running) in do_sched_rt_period_timer()
876 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
877 } else if (rt_rq->rt_nr_running) { in do_sched_rt_period_timer()
879 if (!rt_rq_throttled(rt_rq)) in do_sched_rt_period_timer()
882 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
886 sched_rt_rq_enqueue(rt_rq); in do_sched_rt_period_timer()
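Each period-timer expiry, do_sched_rt_period_timer() pays back accumulated rt_time and clears the throttle once the remaining debt fits inside the budget again. A minimal sketch of that replenishment step, assuming simplified types and leaving out the locking and requeueing:

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t u64;

    struct rt_rq_budget {
            u64  rt_time;       /* RT execution time accrued this period */
            u64  rt_runtime;    /* allowed runtime per period */
            bool rt_throttled;
    };

    /*
     * One replenishment step, mirroring do_sched_rt_period_timer():
     * 'overrun' is how many whole periods elapsed since the last expiry.
     */
    static void replenish(struct rt_rq_budget *b, u64 overrun)
    {
            u64 credit = overrun * b->rt_runtime;

            b->rt_time -= (b->rt_time < credit) ? b->rt_time : credit;

            if (b->rt_throttled && b->rt_time < b->rt_runtime)
                    b->rt_throttled = false;  /* debt fits the budget: unthrottle */
    }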
899 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_prio() local
901 if (rt_rq) in rt_se_prio()
902 return rt_rq->highest_prio.curr; in rt_se_prio()
908 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) in sched_rt_runtime_exceeded() argument
910 u64 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
912 if (rt_rq->rt_throttled) in sched_rt_runtime_exceeded()
913 return rt_rq_throttled(rt_rq); in sched_rt_runtime_exceeded()
915 if (runtime >= sched_rt_period(rt_rq)) in sched_rt_runtime_exceeded()
918 balance_runtime(rt_rq); in sched_rt_runtime_exceeded()
919 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
923 if (rt_rq->rt_time > runtime) { in sched_rt_runtime_exceeded()
924 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_runtime_exceeded()
931 rt_rq->rt_throttled = 1; in sched_rt_runtime_exceeded()
939 rt_rq->rt_time = 0; in sched_rt_runtime_exceeded()
942 if (rt_rq_throttled(rt_rq)) { in sched_rt_runtime_exceeded()
943 sched_rt_rq_dequeue(rt_rq); in sched_rt_runtime_exceeded()
983 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in update_curr_rt() local
985 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { in update_curr_rt()
986 raw_spin_lock(&rt_rq->rt_runtime_lock); in update_curr_rt()
987 rt_rq->rt_time += delta_exec; in update_curr_rt()
988 if (sched_rt_runtime_exceeded(rt_rq)) in update_curr_rt()
990 raw_spin_unlock(&rt_rq->rt_runtime_lock); in update_curr_rt()
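Taken together, update_curr_rt() and sched_rt_runtime_exceeded() implement the throttle: execution time is accrued into rt_time, and once that exceeds rt_runtime within the period the rt_rq is throttled. A condensed sketch of the flow, with group boosting, runtime balancing and locking left out:

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t u64;
    #define RUNTIME_INF ((u64)~0ULL)

    struct rt_budget {
            u64  rt_time;      /* consumed this period */
            u64  rt_runtime;   /* budget per period */
            u64  rt_period;    /* period length */
            bool rt_throttled;
    };

    /* Throttle check, in the spirit of sched_rt_runtime_exceeded(). */
    static bool rt_runtime_exceeded(struct rt_budget *b)
    {
            if (b->rt_throttled)
                    return true;
            if (b->rt_runtime >= b->rt_period)  /* full-period budget: never throttle */
                    return false;
            if (b->rt_time > b->rt_runtime) {
                    b->rt_throttled = true;     /* real code also dequeues the rt_rq */
                    return true;
            }
            return false;
    }

    /* Accrual step, in the spirit of update_curr_rt(). */
    static bool rt_account(struct rt_budget *b, u64 delta_exec)
    {
            if (b->rt_runtime == RUNTIME_INF)
                    return false;
            b->rt_time += delta_exec;
            return rt_runtime_exceeded(b);      /* true => caller should resched */
    }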
996 dequeue_top_rt_rq(struct rt_rq *rt_rq) in dequeue_top_rt_rq() argument
998 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq()
1000 BUG_ON(&rq->rt != rt_rq); in dequeue_top_rt_rq()
1002 if (!rt_rq->rt_queued) in dequeue_top_rt_rq()
1007 sub_nr_running(rq, rt_rq->rt_nr_running); in dequeue_top_rt_rq()
1008 rt_rq->rt_queued = 0; in dequeue_top_rt_rq()
1013 enqueue_top_rt_rq(struct rt_rq *rt_rq) in enqueue_top_rt_rq() argument
1015 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq()
1017 BUG_ON(&rq->rt != rt_rq); in enqueue_top_rt_rq()
1019 if (rt_rq->rt_queued) in enqueue_top_rt_rq()
1022 if (rt_rq_throttled(rt_rq)) in enqueue_top_rt_rq()
1025 if (rt_rq->rt_nr_running) { in enqueue_top_rt_rq()
1026 add_nr_running(rq, rt_rq->rt_nr_running); in enqueue_top_rt_rq()
1027 rt_rq->rt_queued = 1; in enqueue_top_rt_rq()
1037 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in inc_rt_prio_smp() argument
1039 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_prio_smp()
1045 if (&rq->rt != rt_rq) in inc_rt_prio_smp()
1053 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in dec_rt_prio_smp() argument
1055 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_prio_smp()
1061 if (&rq->rt != rt_rq) in dec_rt_prio_smp()
1064 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1065 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1071 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in inc_rt_prio_smp() argument
1073 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in dec_rt_prio_smp() argument
1079 inc_rt_prio(struct rt_rq *rt_rq, int prio) in inc_rt_prio() argument
1081 int prev_prio = rt_rq->highest_prio.curr; in inc_rt_prio()
1084 rt_rq->highest_prio.curr = prio; in inc_rt_prio()
1086 inc_rt_prio_smp(rt_rq, prio, prev_prio); in inc_rt_prio()
1090 dec_rt_prio(struct rt_rq *rt_rq, int prio) in dec_rt_prio() argument
1092 int prev_prio = rt_rq->highest_prio.curr; in dec_rt_prio()
1094 if (rt_rq->rt_nr_running) { in dec_rt_prio()
1103 struct rt_prio_array *array = &rt_rq->active; in dec_rt_prio()
1105 rt_rq->highest_prio.curr = in dec_rt_prio()
1110 rt_rq->highest_prio.curr = MAX_RT_PRIO; in dec_rt_prio()
1112 dec_rt_prio_smp(rt_rq, prio, prev_prio); in dec_rt_prio()
1117 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} in inc_rt_prio() argument
1118 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} in dec_rt_prio() argument
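inc_rt_prio()/dec_rt_prio() keep rt_rq->highest_prio.curr as a cache: raising it is a single compare, while lowering it means rescanning the active array's priority bitmap for the next queued priority. A simplified sketch of that idea, using one 64-bit word and a compiler builtin in place of the kernel's MAX_RT_PRIO-sized bitmap and sched_find_first_bit():

    #include <stdint.h>

    #define NR_PRIOS  64   /* stand-in for MAX_RT_PRIO; the real bitmap is larger */

    struct prio_cache {
            uint64_t bitmap;   /* bit p set => at least one task queued at prio p */
            int      highest;  /* cached lowest set bit (0 = highest RT priority) */
    };

    /* A task of priority 'prio' was enqueued: raising the cache is a compare. */
    static void inc_prio(struct prio_cache *pc, int prio)
    {
            pc->bitmap |= 1ULL << prio;
            if (prio < pc->highest)
                    pc->highest = prio;
    }

    /* The last task at 'prio' was dequeued: rescan for the next set bit,
     * much as dec_rt_prio() rescans the active array above. */
    static void dec_prio(struct prio_cache *pc, int prio)
    {
            pc->bitmap &= ~(1ULL << prio);
            pc->highest = pc->bitmap ? __builtin_ctzll(pc->bitmap) : NR_PRIOS;
    }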
1125 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1128 rt_rq->rt_nr_boosted++; in inc_rt_group()
1130 if (rt_rq->tg) in inc_rt_group()
1131 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); in inc_rt_group()
1135 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_group() argument
1138 rt_rq->rt_nr_boosted--; in dec_rt_group()
1140 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); in dec_rt_group()
1146 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1152 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} in dec_rt_group() argument
1159 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_nr_running()
1170 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_rr_nr_running()
1182 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_tasks() argument
1187 rt_rq->rt_nr_running += rt_se_nr_running(rt_se); in inc_rt_tasks()
1188 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); in inc_rt_tasks()
1190 inc_rt_prio(rt_rq, prio); in inc_rt_tasks()
1191 inc_rt_migration(rt_se, rt_rq); in inc_rt_tasks()
1192 inc_rt_group(rt_se, rt_rq); in inc_rt_tasks()
1196 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_tasks() argument
1199 WARN_ON(!rt_rq->rt_nr_running); in dec_rt_tasks()
1200 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); in dec_rt_tasks()
1201 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); in dec_rt_tasks()
1203 dec_rt_prio(rt_rq, rt_se_prio(rt_se)); in dec_rt_tasks()
1204 dec_rt_migration(rt_se, rt_rq); in dec_rt_tasks()
1205 dec_rt_group(rt_se, rt_rq); in dec_rt_tasks()
1233 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __enqueue_rt_entity() local
1234 struct rt_prio_array *array = &rt_rq->active; in __enqueue_rt_entity()
1235 struct rt_rq *group_rq = group_rt_rq(rt_se); in __enqueue_rt_entity()
1262 inc_rt_tasks(rt_se, rt_rq); in __enqueue_rt_entity()
1267 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __dequeue_rt_entity() local
1268 struct rt_prio_array *array = &rt_rq->active; in __dequeue_rt_entity()
1276 dec_rt_tasks(rt_se, rt_rq); in __dequeue_rt_entity()
1317 struct rt_rq *rt_rq = group_rt_rq(rt_se); in dequeue_rt_entity() local
1319 if (rt_rq && rt_rq->rt_nr_running) in dequeue_rt_entity()
1357 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) in requeue_rt_entity() argument
1360 struct rt_prio_array *array = &rt_rq->active; in requeue_rt_entity()
1373 struct rt_rq *rt_rq; in requeue_task_rt() local
1376 rt_rq = rt_rq_of_se(rt_se); in requeue_task_rt()
1377 requeue_rt_entity(rt_rq, rt_se, head); in requeue_task_rt()
1537 struct rt_rq *rt_rq) in pick_next_rt_entity() argument
1539 struct rt_prio_array *array = &rt_rq->active; in pick_next_rt_entity()
1556 struct rt_rq *rt_rq = &rq->rt; in _pick_next_task_rt() local
1559 rt_se = pick_next_rt_entity(rq, rt_rq); in _pick_next_task_rt()
1561 rt_rq = group_rt_rq(rt_se); in _pick_next_task_rt()
1562 } while (rt_rq); in _pick_next_task_rt()
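_pick_next_task_rt() walks the group hierarchy: it picks the highest-priority entity on the current rt_rq and, if that entity is itself a group, descends into the group's queue until a task-level entity is reached. A hedged sketch of the descent loop with simplified stand-in types (the real code resolves the winning entity with pick_next_rt_entity() and rt_task_of()):

    #include <stddef.h>

    /* Simplified stand-ins: an entity is either a task or a group owning a queue. */
    struct rt_rq;
    struct task { int pid; };
    struct rt_entity {
            struct rt_rq *my_q;   /* non-NULL if this entity is a group */
            struct task  *task;   /* valid when my_q == NULL */
    };
    struct rt_rq {
            struct rt_entity *highest;  /* highest-priority queued entity */
    };

    /* Descend from the top-level rt_rq to the winning task, mirroring the
     * do { pick; descend } while (rt_rq) loop in _pick_next_task_rt(). */
    static struct task *pick_next_rt_task(struct rt_rq *rt_rq)
    {
            struct rt_entity *se;

            do {
                    se = rt_rq->highest;  /* pick_next_rt_entity() in the real code */
                    if (!se)
                            return NULL;
                    rt_rq = se->my_q;     /* group_rt_rq(): NULL for a task entity */
            } while (rt_rq);

            return se->task;
    }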
2524 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_rt_bandwidth() local
2526 raw_spin_lock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2527 rt_rq->rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
2528 raw_spin_unlock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2616 struct rt_rq *rt_rq = &cpu_rq(i)->rt; in sched_rt_global_constraints() local
2618 raw_spin_lock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
2619 rt_rq->rt_runtime = global_rt_runtime(); in sched_rt_global_constraints()
2620 raw_spin_unlock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
2713 struct rt_rq *rt_rq; in print_rt_stats() local
2716 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) in print_rt_stats()
2717 print_rt_rq(m, cpu, rt_rq); in print_rt_stats()