Lines matching refs: sched_class (all hits are in kernel/sched/core.c; each entry is <source line> <code> in <function>())

233 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
711 if (update_load && p->sched_class == &fair_sched_class) { in set_load_weight()
727 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
738 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
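These call sites show the core dispatch pattern: kernel/sched/core.c never hard-codes a scheduling policy; it calls through the function-pointer table each task carries in p->sched_class. A minimal, abridged sketch of that table (the real struct sched_class in kernel/sched/sched.h has many more hooks):

    struct sched_class {
            void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
            void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);
            void (*yield_task)(struct rq *rq);
            void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
            void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
            /* ... pick_next_task, select_task_rq, task_fork, task_dead, ... */
    };

Each policy (stop, dl, rt, fair, idle) provides one instance of this table, so assigning p->sched_class is all it takes to move a task between policies.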
824 const struct sched_class *prev_class, in check_class_changed()
827 if (prev_class != p->sched_class) { in check_class_changed()
831 p->sched_class->switched_to(rq, p); in check_class_changed()
833 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
838 const struct sched_class *class; in check_preempt_curr()
840 if (p->sched_class == rq->curr->sched_class) { in check_preempt_curr()
841 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
844 if (class == rq->curr->sched_class) in check_preempt_curr()
846 if (class == p->sched_class) { in check_preempt_curr()
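In the mixed-class case, check_preempt_curr() walks the classes from highest to lowest priority (stop, dl, rt, fair, idle); whichever class is reached first wins. Roughly, per lines 838-846 above:

    for_each_class(class) {
            if (class == rq->curr->sched_class)
                    break;                  /* curr's class ranks higher: no preemption */
            if (class == p->sched_class) {
                    resched_curr(rq);       /* p's class ranks higher: preempt curr */
                    break;
            }
    }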
1031 p->sched_class->set_cpus_allowed(p, new_mask); in do_set_cpus_allowed()
1143 p->sched_class == &fair_sched_class && in set_task_cpu()
1169 if (p->sched_class->migrate_task_rq) in set_task_cpu()
1170 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
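Not every class implements every hook, so optional hooks are NULL-checked before the indirect call. Only fair and dl provide .migrate_task_rq, for example, which is why set_task_cpu() tests the pointer first:

    if (p->sched_class->migrate_task_rq)
            p->sched_class->migrate_task_rq(p, new_cpu);    /* e.g. migrate_task_rq_fair() */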
1531 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); in select_task_rq()
1573 stop->sched_class = &stop_sched_class; in sched_set_stop_task()
1583 old_stop->sched_class = &rt_sched_class; in sched_set_stop_task()
1657 if (p->sched_class->task_woken) { in ttwu_do_wakeup()
1663 p->sched_class->task_woken(rq, p); in ttwu_do_wakeup()
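.task_woken is another optional hook: rt and dl use it (task_woken_rt()/task_woken_dl()) to push a just-woken task to another CPU when the local one is busy with something of equal or higher priority. A sketch of the call site, which unpins the rq lock around the hook because the hook may drop it while pushing:

    if (p->sched_class->task_woken) {
            rq_unpin_lock(rq, rf);
            p->sched_class->task_woken(rq, p);
            rq_repin_lock(rq, rf);
    }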
2333 p->sched_class = &rt_sched_class; in sched_fork()
2335 p->sched_class = &fair_sched_class; in sched_fork()
2352 if (p->sched_class->task_fork) in sched_fork()
2353 p->sched_class->task_fork(p); in sched_fork()
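At fork, the child's class is derived from its priority; deadline children are refused outright because DL bandwidth is not inheritable. A sketch of the selection at lines 2333-2335:

    if (dl_prio(p->prio))
            return -EAGAIN;                 /* a DL task may not fork a DL child */
    else if (rt_prio(p->prio))
            p->sched_class = &rt_sched_class;
    else
            p->sched_class = &fair_sched_class;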
2422 if (p->sched_class->task_woken) { in wake_up_new_task()
2428 p->sched_class->task_woken(rq, p); in wake_up_new_task()
2697 if (prev->sched_class->task_dead) in finish_task_switch()
2698 prev->sched_class->task_dead(prev); in finish_task_switch()
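.task_dead runs from finish_task_switch(), i.e. on the CPU that just switched away from the dead task, and is likewise optional:

    if (prev->sched_class->task_dead)
            prev->sched_class->task_dead(prev);     /* e.g. task_dead_dl() returns the task's
                                                       reserved DL bandwidth */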
2950 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); in sched_exec()
3026 p->sched_class->update_curr(rq); in task_sched_runtime()
3050 curr->sched_class->task_tick(rq, curr, 0); in scheduler_tick()
3106 curr->sched_class->task_tick(rq, curr, 0); in sched_tick_remote()
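task_tick() is the per-class periodic driver. scheduler_tick() and its NOHZ-remote variant pass queued == 0, while the hrtick() path at line 233 passes queued == 1 to flag a high-resolution tick:

    curr->sched_class->task_tick(rq, curr, 0);              /* regular tick */
    rq->curr->sched_class->task_tick(rq, rq->curr, 1);      /* hrtick */

For fair tasks this lands in task_tick_fair(), which updates vruntime and checks whether the current task should be preempted.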
3306 const struct sched_class *class; in pick_next_task()
3315 if (likely((prev->sched_class == &idle_sched_class || in pick_next_task()
3316 prev->sched_class == &fair_sched_class) && in pick_next_task()
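pick_next_task() has a fast path: if the task being switched out was fair or idle and every runnable task on this rq belongs to the fair class (rq->nr_running == rq->cfs.h_nr_running), the stop/dl/rt classes cannot have anything queued, so the full walk over all classes is skipped and fair is called directly. A sketch of the shape of that test:

    if (likely((prev->sched_class == &idle_sched_class ||
                prev->sched_class == &fair_sched_class) &&
               rq->nr_running == rq->cfs.h_nr_running)) {
            p = fair_sched_class.pick_next_task(rq, prev, rf);
            /* falls back to the full class walk if fair cannot deliver a task */
    }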
3747 const struct sched_class *prev_class; in rt_mutex_setprio()
3804 prev_class = p->sched_class; in rt_mutex_setprio()
3828 p->sched_class = &dl_sched_class; in rt_mutex_setprio()
3834 p->sched_class = &rt_sched_class; in rt_mutex_setprio()
3840 p->sched_class = &fair_sched_class; in rt_mutex_setprio()
4092 p->sched_class = &dl_sched_class; in __setscheduler()
4094 p->sched_class = &rt_sched_class; in __setscheduler()
4096 p->sched_class = &fair_sched_class; in __setscheduler()
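Both rt_mutex_setprio() (priority inheritance) and __setscheduler() end at the same three-way mapping from effective priority to class, which is what actually moves a task between policies:

    if (dl_prio(prio))
            p->sched_class = &dl_sched_class;
    else if (rt_prio(prio))
            p->sched_class = &rt_sched_class;
    else
            p->sched_class = &fair_sched_class;

check_class_changed() (lines 824-833 above) then fires .switched_to or .prio_changed so the new class can fix up its bookkeeping.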
4123 const struct sched_class *prev_class; in __sched_setscheduler()
4326 prev_class = p->sched_class; in __sched_setscheduler()
4940 current->sched_class->yield_task(rq); in do_sched_yield()
5071 if (!curr->sched_class->yield_to_task) in yield_to()
5074 if (curr->sched_class != p->sched_class) in yield_to()
5080 yielded = curr->sched_class->yield_to_task(rq, p, preempt); in yield_to()
5221 if (p->sched_class->get_rr_interval) in sched_rr_get_interval()
5222 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
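yield_to() bails out early when the yielder's class has no .yield_to_task hook and refuses cross-class yields; in practice only fair implements the hook (yield_to_task_fair(), used by KVM to boost a lock-holding vCPU):

    if (!curr->sched_class->yield_to_task)
            goto out;                       /* class has no directed-yield support */
    if (curr->sched_class != p->sched_class)
            goto out;                       /* cross-class yield is meaningless */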
5421 idle->sched_class = &idle_sched_class; in init_idle()
5560 static const struct sched_class fake_sched_class = {
5569 .sched_class = &fake_sched_class,
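fake_sched_class exists for CPU hotplug: migrate_tasks() drains a dying runqueue by repeatedly calling pick_next_task(), handing it a dummy fake_task whose class supplies a no-op .put_prev_task so the pick machinery has something harmless to switch away from. Roughly:

    static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) { }

    static const struct sched_class fake_sched_class = {
            .put_prev_task = put_prev_task_fake,
    };

    static struct task_struct fake_task = {
            .prio        = MAX_PRIO + 1,    /* avoid pull_{rt,dl}_task() */
            .sched_class = &fake_sched_class,
    };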
5662 const struct sched_class *class; in set_rq_online()
5677 const struct sched_class *class; in set_rq_offline()
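set_rq_online()/set_rq_offline() notify every class that a runqueue joined or left its root domain; the hooks are optional, so each is NULL-checked inside the class walk:

    for_each_class(class) {
            if (class->rq_online)
                    class->rq_online(rq);   /* e.g. rq_online_rt() */
    }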
6329 if (tsk->sched_class->task_change_group) in sched_change_group()
6330 tsk->sched_class->task_change_group(tsk, type); in sched_change_group()
6451 if (task->sched_class != &fair_sched_class) in cpu_cgroup_can_attach()
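Finally, the cgroup side: without CONFIG_RT_GROUP_SCHED there is no bandwidth accounting for RT tasks in child groups, so cpu_cgroup_can_attach() only admits tasks in the fair class:

    /* no RT group scheduling: only CFS tasks may move between cpu cgroups */
    if (task->sched_class != &fair_sched_class)
            return -EINVAL;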