Lines Matching refs:curr

89 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_rt_rq()
161 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_tg_rt_entry()
265 return rq->rt.highest_prio.curr > prev->prio; in need_pull_rt_task()
487 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue() local
501 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
870 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
901 return rt_rq->highest_prio.curr; in rt_se_prio()
956 struct task_struct *curr = rq->curr; in update_curr_rt() local
957 struct sched_rt_entity *rt_se = &curr->rt; in update_curr_rt()
961 if (curr->sched_class != &rt_sched_class) in update_curr_rt()
965 delta_exec = now - curr->se.exec_start; in update_curr_rt()
969 schedstat_set(curr->se.statistics.exec_max, in update_curr_rt()
970 max(curr->se.statistics.exec_max, delta_exec)); in update_curr_rt()
972 curr->se.sum_exec_runtime += delta_exec; in update_curr_rt()
973 account_group_exec_runtime(curr, delta_exec); in update_curr_rt()
975 curr->se.exec_start = now; in update_curr_rt()
976 cgroup_account_cputime(curr, delta_exec); in update_curr_rt()
1063 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1064 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1080 int prev_prio = rt_rq->highest_prio.curr; in inc_rt_prio()
1083 rt_rq->highest_prio.curr = prio; in inc_rt_prio()
1091 int prev_prio = rt_rq->highest_prio.curr; in dec_rt_prio()
1104 rt_rq->highest_prio.curr = in dec_rt_prio()
1109 rt_rq->highest_prio.curr = MAX_RT_PRIO; in dec_rt_prio()
1382 requeue_task_rt(rq, rq->curr, 0); in yield_task_rt()
1391 struct task_struct *curr; in select_task_rq_rt() local
1401 curr = READ_ONCE(rq->curr); /* unlocked access */ in select_task_rq_rt()
1425 if (curr && unlikely(rt_task(curr)) && in select_task_rq_rt()
1426 (curr->nr_cpus_allowed < 2 || in select_task_rq_rt()
1427 curr->prio <= p->prio)) { in select_task_rq_rt()
1435 p->prio < cpu_rq(target)->rt.highest_prio.curr) in select_task_rq_rt()
1450 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_prio()
1451 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) in check_preempt_equal_prio()
1478 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
1496 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) in check_preempt_curr_rt()
1586 if (rq->curr->sched_class != &rt_sched_class) in pick_next_task_rt()
1732 if (lowest_rq->rt.highest_prio.curr <= task->prio) { in find_lock_lowest_rq()
1763 if (lowest_rq->rt.highest_prio.curr > task->prio) in find_lock_lowest_rq()
1813 if (unlikely(next_task == rq->curr)) { in push_rt_task()
1823 if (unlikely(next_task->prio < rq->curr->prio)) { in push_rt_task()
2101 this_rq->rt.highest_prio.curr) in pull_rt_task()
2121 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { in pull_rt_task()
2122 WARN_ON(p == src_rq->curr); in pull_rt_task()
2133 if (p->prio < src_rq->curr->prio) in pull_rt_task()
2163 !test_tsk_need_resched(rq->curr) && in task_woken_rt()
2165 (dl_task(rq->curr) || rt_task(rq->curr)) && in task_woken_rt()
2166 (rq->curr->nr_cpus_allowed < 2 || in task_woken_rt()
2167 rq->curr->prio <= p->prio)) in task_woken_rt()
2179 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); in rq_online_rt()
2237 if (task_on_rq_queued(p) && rq->curr != p) { in switched_to_rt()
2242 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) in switched_to_rt()
2257 if (rq->curr == p) { in prio_changed_rt()
2270 if (p->prio > rq->rt.highest_prio.curr) in prio_changed_rt()
2283 if (p->prio < rq->curr->prio) in prio_changed_rt()
2358 struct task_struct *p = rq->curr; in set_curr_task_rt()
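
The update_curr_rt() matches above (source lines 956-976) show the usual runtime-accounting sequence: compute delta_exec from the task's exec_start, fold it into sum_exec_runtime and the exec_max schedstat, then restart the slice by resetting exec_start. The following is a minimal standalone sketch of that sequence only; the struct layouts, the helper update_curr_rt_sketch(), and the main() driver are illustrative stand-ins, not the kernel's actual definitions.

    /*
     * Standalone sketch of the accounting pattern visible in the
     * update_curr_rt() matches above.  Simplified types; only the
     * sequence of operations mirrors the matched lines.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    struct sched_entity {
    	u64 exec_start;        /* timestamp when the current slice began */
    	u64 sum_exec_runtime;  /* total runtime accumulated so far */
    	u64 exec_max;          /* longest single slice seen (schedstat) */
    };

    struct task_struct {
    	struct sched_entity se;
    };

    struct rq {
    	struct task_struct *curr;  /* task currently running on this runqueue */
    };

    /* Charge the running task for the time elapsed since exec_start. */
    static void update_curr_rt_sketch(struct rq *rq, u64 now)
    {
    	struct task_struct *curr = rq->curr;
    	u64 delta_exec;

    	if (now <= curr->se.exec_start)
    		return;

    	delta_exec = now - curr->se.exec_start;

    	/* Track the longest observed slice, as schedstat_set()/max() do. */
    	if (delta_exec > curr->se.exec_max)
    		curr->se.exec_max = delta_exec;

    	/* Accumulate total runtime and start the next slice from 'now'. */
    	curr->se.sum_exec_runtime += delta_exec;
    	curr->se.exec_start = now;
    }

    int main(void)
    {
    	struct task_struct task = { .se = { .exec_start = 100 } };
    	struct rq rq = { .curr = &task };

    	update_curr_rt_sketch(&rq, 250);   /* charges 150 units */
    	update_curr_rt_sketch(&rq, 400);   /* charges another 150 units */

    	printf("sum_exec_runtime=%llu exec_max=%llu\n",
    	       (unsigned long long)task.se.sum_exec_runtime,
    	       (unsigned long long)task.se.exec_max);
    	return 0;
    }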