Lines matching refs:sched_entity (illustrative sketches of the vruntime ordering and weight scaling used by these functions follow the listing)
258 static inline struct task_struct *task_of(struct sched_entity *se) in task_of()
274 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
280 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
362 is_same_group(struct sched_entity *se, struct sched_entity *pse) in is_same_group()
370 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity()
376 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se()
409 static inline struct task_struct *task_of(struct sched_entity *se) in task_of()
428 static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) in cfs_rq_of()
437 static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) in group_cfs_rq()
453 static inline struct sched_entity *parent_entity(struct sched_entity *se) in parent_entity()
459 find_matching_se(struct sched_entity **se, struct sched_entity **pse) in find_matching_se()
490 static inline int entity_before(struct sched_entity *a, in entity_before()
491 struct sched_entity *b) in entity_before()
498 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime()
511 struct sched_entity *se; in update_min_vruntime()
512 se = rb_entry(leftmost, struct sched_entity, run_node); in update_min_vruntime()
531 static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __enqueue_entity()
535 struct sched_entity *entry; in __enqueue_entity()
543 entry = rb_entry(parent, struct sched_entity, run_node); in __enqueue_entity()
561 static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in __dequeue_entity()
566 struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq) in __pick_first_entity()
573 return rb_entry(left, struct sched_entity, run_node); in __pick_first_entity()
576 static struct sched_entity *__pick_next_entity(struct sched_entity *se) in __pick_next_entity()
583 return rb_entry(next, struct sched_entity, run_node); in __pick_next_entity()
587 struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq) in __pick_last_entity()
594 return rb_entry(last, struct sched_entity, run_node); in __pick_last_entity()
628 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) in calc_delta_fair()
658 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_slice()
685 static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se) in sched_vslice()
698 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average()
719 static void attach_entity_cfs_rq(struct sched_entity *se);
747 void post_init_entity_util_avg(struct sched_entity *se) in post_init_entity_util_avg()
788 void init_entity_runnable_average(struct sched_entity *se) in init_entity_runnable_average()
791 void post_init_entity_util_avg(struct sched_entity *se) in post_init_entity_util_avg()
804 struct sched_entity *curr = cfs_rq->curr; in update_curr()
843 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_start()
861 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_wait_end()
893 update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_enqueue_sleeper()
964 update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_enqueue()
981 update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_stats_dequeue()
1010 update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) in update_stats_curr_start()
2691 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_enqueue()
2708 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) in account_entity_dequeue()
2761 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_runnable_load_avg()
2770 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_runnable_load_avg()
2780 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in enqueue_load_avg()
2787 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in dequeue_load_avg()
2794 enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_runnable_load_avg()
2796 dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_runnable_load_avg()
2798 enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in enqueue_load_avg()
2800 dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } in dequeue_load_avg()
2803 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, in reweight_entity()
2837 struct sched_entity *se = &p->se; in reweight_task()
3006 static void update_cfs_group(struct sched_entity *se) in update_cfs_group()
3031 static inline void update_cfs_group(struct sched_entity *se) in update_cfs_group()
3097 void set_task_rq_fair(struct sched_entity *se, in set_task_rq_fair()
3211 update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_util()
3237 update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) in update_tg_cfs_runnable()
3311 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg()
3338 static inline bool skip_blocked_update(struct sched_entity *se) in skip_blocked_update()
3368 static inline int propagate_entity_load_avg(struct sched_entity *se) in propagate_entity_load_avg()
3446 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in attach_entity_load_avg()
3493 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) in detach_entity_load_avg()
3512 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in update_load_avg()
3570 void sync_entity_load_avg(struct sched_entity *se) in sync_entity_load_avg()
3583 void remove_entity_load_avg(struct sched_entity *se) in remove_entity_load_avg()
3732 static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) in update_load_avg()
3737 static inline void remove_entity_load_avg(struct sched_entity *se) {} in remove_entity_load_avg()
3740 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {} in attach_entity_load_avg()
3742 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} in detach_entity_load_avg()
3758 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) in check_spread()
3772 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) in place_entity()
3857 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in enqueue_entity()
3909 static void __clear_buddies_last(struct sched_entity *se) in __clear_buddies_last()
3920 static void __clear_buddies_next(struct sched_entity *se) in __clear_buddies_next()
3931 static void __clear_buddies_skip(struct sched_entity *se) in __clear_buddies_skip()
3942 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) in clear_buddies()
3957 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) in dequeue_entity()
4012 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) in check_preempt_tick()
4015 struct sched_entity *se; in check_preempt_tick()
4049 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) in set_next_entity()
4081 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4090 static struct sched_entity *
4091 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) in pick_next_entity()
4093 struct sched_entity *left = __pick_first_entity(cfs_rq); in pick_next_entity()
4094 struct sched_entity *se; in pick_next_entity()
4110 struct sched_entity *second; in pick_next_entity()
4143 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) in put_prev_entity()
4168 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick()
4443 struct sched_entity *se; in throttle_cfs_rq()
4501 struct sched_entity *se; in unthrottle_cfs_rq()
5028 struct sched_entity *se = &p->se; in hrtick_start_fair()
5082 struct sched_entity *se = &p->se; in enqueue_task_fair()
5136 static void set_next_buddy(struct sched_entity *se);
5146 struct sched_entity *se = &p->se; in dequeue_task_fair()
6363 static void detach_entity_cfs_rq(struct sched_entity *se);
6379 struct sched_entity *se = &p->se; in migrate_task_rq_fair()
6433 static unsigned long wakeup_gran(struct sched_entity *se) in wakeup_gran()
6468 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) in wakeup_preempt_entity()
6482 static void set_last_buddy(struct sched_entity *se) in set_last_buddy()
6494 static void set_next_buddy(struct sched_entity *se) in set_next_buddy()
6506 static void set_skip_buddy(struct sched_entity *se) in set_skip_buddy()
6518 struct sched_entity *se = &curr->se, *pse = &p->se; in check_preempt_wakeup()
6602 struct sched_entity *se; in pick_next_task_fair()
6623 struct sched_entity *curr = cfs_rq->curr; in pick_next_task_fair()
6665 struct sched_entity *pse = &prev->se; in pick_next_task_fair()
6736 struct sched_entity *se = &prev->se; in put_prev_task_fair()
6754 struct sched_entity *se = &curr->se; in yield_task_fair()
6783 struct sched_entity *se = &p->se; in yield_to_task_fair()
7373 struct sched_entity *se; in update_blocked_averages()
7423 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; in update_cfs_rq_h_load()
9652 struct sched_entity *se = &curr->se; in task_tick_fair()
9671 struct sched_entity *se = &p->se, *curr; in task_fork_fair()
9723 struct sched_entity *se = &p->se; in vruntime_normalized()
9754 static void propagate_entity_cfs_rq(struct sched_entity *se) in propagate_entity_cfs_rq()
9771 static void propagate_entity_cfs_rq(struct sched_entity *se) { } in propagate_entity_cfs_rq()
9774 static void detach_entity_cfs_rq(struct sched_entity *se) in detach_entity_cfs_rq()
9785 static void attach_entity_cfs_rq(struct sched_entity *se) in attach_entity_cfs_rq()
9806 struct sched_entity *se = &p->se; in detach_task_cfs_rq()
9823 struct sched_entity *se = &p->se; in attach_task_cfs_rq()
9861 struct sched_entity *se = &rq->curr->se; in set_curr_task_fair()
9887 struct sched_entity *se = &p->se; in task_set_group_fair()
9937 struct sched_entity *se; in alloc_fair_sched_group()
9958 se = kzalloc_node(sizeof(struct sched_entity), in alloc_fair_sched_group()
9978 struct sched_entity *se; in online_fair_sched_group()
10020 struct sched_entity *se, int cpu, in init_tg_cfs_entry()
10021 struct sched_entity *parent) in init_tg_cfs_entry()
10071 struct sched_entity *se = tg->se[i]; in sched_group_set_shares()
10106 struct sched_entity *se = &task->se; in get_rr_interval_fair()
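
The entries above all come from the fair-class (CFS) scheduler. The cluster around entity_before(), update_min_vruntime(), __enqueue_entity(), __dequeue_entity() and __pick_first_entity() maintains a runqueue ordered by each entity's vruntime, so the entity that has accrued the least virtual runtime is picked next. Below is a minimal user-space sketch of that ordering idea only: sched_entity_sketch, enqueue_sketch() and pick_first_sketch() are hypothetical names, and a sorted singly linked list stands in for the kernel's red-black tree of run_node fields with its cached leftmost node.

/*
 * Hedged, user-space sketch of the ordering behind entity_before(),
 * __enqueue_entity() and __pick_first_entity(). Not kernel code: a
 * sorted singly linked list replaces the rbtree keyed by vruntime.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct sched_entity_sketch {
        uint64_t vruntime;                 /* virtual runtime, the sort key  */
        struct sched_entity_sketch *next;  /* stand-in for the run_node link */
};

/* Wraparound-safe "a runs before b" test, mirroring entity_before(). */
static int entity_before_sketch(const struct sched_entity_sketch *a,
                                const struct sched_entity_sketch *b)
{
        return (int64_t)(a->vruntime - b->vruntime) < 0;
}

/* Insert in vruntime order; __enqueue_entity() makes the same
 * comparison while walking the rbtree to find the insertion point. */
static void enqueue_sketch(struct sched_entity_sketch **head,
                           struct sched_entity_sketch *se)
{
        while (*head && !entity_before_sketch(se, *head))
                head = &(*head)->next;
        se->next = *head;
        *head = se;
}

/* Leftmost entity == smallest vruntime, as in __pick_first_entity(). */
static struct sched_entity_sketch *
pick_first_sketch(struct sched_entity_sketch *head)
{
        return head;
}

int main(void)
{
        struct sched_entity_sketch a = { .vruntime = 300 };
        struct sched_entity_sketch b = { .vruntime = 100 };
        struct sched_entity_sketch c = { .vruntime = 200 };
        struct sched_entity_sketch *rq = NULL;

        enqueue_sketch(&rq, &a);
        enqueue_sketch(&rq, &b);
        enqueue_sketch(&rq, &c);

        /* Prints 100: the entity with the smallest vruntime runs next. */
        printf("next vruntime: %llu\n",
               (unsigned long long)pick_first_sketch(rq)->vruntime);
        return 0;
}

The signed-difference test in entity_before_sketch() mirrors the wraparound-tolerant comparison entity_before() uses, so the ordering stays consistent even if vruntime values wrap around the 64-bit range.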
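
calc_delta_fair(), sched_slice() and sched_vslice() in the listing convert wall-clock run time into weight-scaled (virtual) time: an entity whose load weight differs from the nice-0 weight has its delta scaled by NICE_0_LOAD / weight. The sketch below models only that proportionality; NICE_0_LOAD_SKETCH and calc_delta_fair_sketch() are illustrative stand-ins, and plain 64-bit division replaces the kernel's overflow-aware fixed-point __calc_delta().

#include <stdio.h>
#include <stdint.h>

#define NICE_0_LOAD_SKETCH 1024ULL  /* nice-0 weight; the kernel scales this value up */

/* Rough model of calc_delta_fair(): heavier entities accrue vruntime
 * more slowly, lighter ones faster. */
static uint64_t calc_delta_fair_sketch(uint64_t delta_exec, uint64_t weight)
{
        if (weight == NICE_0_LOAD_SKETCH)
                return delta_exec;
        return delta_exec * NICE_0_LOAD_SKETCH / weight;
}

int main(void)
{
        /* 1 ms of CPU time for a nice-0 entity vs. one with double the weight. */
        printf("%llu %llu\n",
               (unsigned long long)calc_delta_fair_sketch(1000000, 1024),
               (unsigned long long)calc_delta_fair_sketch(1000000, 2048));
        return 0;
}

Running it shows that 1 ms of CPU time advances a nice-0 entity's vruntime by the full 1 ms, while an entity with twice the weight advances by only half as much, which is why heavier (higher-priority) tasks receive proportionally more CPU time under the vruntime ordering sketched above.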