Searched refs:cfs_rq (Results 1 – 7 of 7) sorted by relevance
/Linux-v4.19/kernel/sched/
fair.c
  253  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)  in rq_of() argument
  255      return cfs_rq->rq;  in rq_of()
  268  static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)  in task_cfs_rq()
  270      return p->se.cfs_rq;  in task_cfs_rq()
  274  static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)  in cfs_rq_of()
  276      return se->cfs_rq;  in cfs_rq_of()
  280  static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)  in group_cfs_rq()
  285  static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)  in list_add_leaf_cfs_rq() argument
  287      if (!cfs_rq->on_list) {  in list_add_leaf_cfs_rq()
  288          struct rq *rq = rq_of(cfs_rq);  in list_add_leaf_cfs_rq()
  [all …]
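The fair.c matches are the CONFIG_FAIR_GROUP_SCHED accessors that tie a scheduling entity to its queue and the queue back to its runqueue. A minimal sketch of how the accessors shown above compose (the helper name task_rq_via_cfs is invented here):

/*
 * Sketch only, assuming CONFIG_FAIR_GROUP_SCHED as in the excerpt:
 * task_cfs_rq() reads p->se.cfs_rq and rq_of() reads cfs_rq->rq, so a
 * task can be mapped to the struct rq of the CPU it is queued on.
 */
static inline struct rq *task_rq_via_cfs(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);	/* p->se.cfs_rq */

	return rq_of(cfs_rq);			/* cfs_rq->rq */
}
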
pelt.h
    4  int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
    5  int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
   48  update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)  in update_cfs_rq_load_avg() argument

pelt.c
  283  int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se)  in __update_load_avg_se() argument
  289          cfs_rq->curr == se)) {  in __update_load_avg_se()
  299  int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)  in __update_load_avg_cfs_rq() argument
  301      if (___update_load_sum(now, cpu, &cfs_rq->avg,  in __update_load_avg_cfs_rq()
  302              scale_load_down(cfs_rq->load.weight),  in __update_load_avg_cfs_rq()
  303              scale_load_down(cfs_rq->runnable_weight),  in __update_load_avg_cfs_rq()
  304              cfs_rq->curr != NULL)) {  in __update_load_avg_cfs_rq()
  306          ___update_load_avg(&cfs_rq->avg, 1, 1);  in __update_load_avg_cfs_rq()
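Read together, the pelt.h prototypes and the pelt.c bodies give the expected call pattern: refresh the per-entity PELT signal first, then the cfs_rq aggregate; both helpers return non-zero when the running averages were actually updated. A hedged sketch of a caller (the wrapper name refresh_pelt is made up, modelled loosely on update_load_avg() in fair.c):

/*
 * Sketch only: illustrative wrapper, not kernel code. The return
 * values signal that the running averages decayed/changed, so the
 * caller can defer follow-up work until something actually moved.
 */
static void refresh_pelt(u64 now, int cpu, struct cfs_rq *cfs_rq,
			 struct sched_entity *se)
{
	int decayed = 0;

	decayed |= __update_load_avg_se(now, cpu, cfs_rq, se);
	decayed |= __update_load_avg_cfs_rq(now, cpu, cfs_rq);

	if (decayed) {
		/* e.g. propagate the updated cfs_rq->avg to its consumers */
	}
}
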
debug.c
  496  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)  in print_cfs_rq() argument
  506      SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));  in print_cfs_rq()
  512              SPLIT_NS(cfs_rq->exec_clock));  in print_cfs_rq()
  515      if (rb_first_cached(&cfs_rq->tasks_timeline))  in print_cfs_rq()
  516          MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;  in print_cfs_rq()
  517      last = __pick_last_entity(cfs_rq);  in print_cfs_rq()
  520      min_vruntime = cfs_rq->min_vruntime;  in print_cfs_rq()
  536              cfs_rq->nr_spread_over);  in print_cfs_rq()
  537      SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);  in print_cfs_rq()
  538      SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);  in print_cfs_rq()
  [all …]
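print_cfs_rq() produces the per-cfs_rq block of /proc/sched_debug output. A sketch of a caller that dumps every leaf cfs_rq on a CPU (the for_each_leaf_cfs_rq() walk and the RCU locking are assumptions modelled on print_cfs_stats() in fair.c):

/* Sketch only: dump every leaf cfs_rq of one CPU's runqueue. */
void dump_cfs_rqs(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)	/* rq->leaf_cfs_rq_list */
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
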
sched.h
  324  struct cfs_rq;
  362      struct cfs_rq **cfs_rq;  member
  433  extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
  440  extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
  466              struct cfs_rq *prev, struct cfs_rq *next);
  469              struct cfs_rq *prev, struct cfs_rq *next) { }  in set_task_rq_fair()
  480  struct cfs_rq {  struct
  807      struct cfs_rq cfs;
 1305      set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);  in set_task_rq()
 1306      p->se.cfs_rq = tg->cfs_rq[cpu];  in set_task_rq()
  [all …]
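These sched.h hits show struct task_group carrying one cfs_rq pointer per CPU, with set_task_rq() pointing a task's p->se.cfs_rq at the group's queue on its current CPU. A minimal sketch of that indexing (the helper name tg_cfs_rq_on is invented here):

/* Sketch only: one cfs_rq per (task_group, CPU) pair, matching the
 * tg->cfs_rq[cpu] lookup used by set_task_rq() above. */
static inline struct cfs_rq *tg_cfs_rq_on(struct task_group *tg, int cpu)
{
	return tg->cfs_rq[cpu];
}
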
core.c
 2151      p->se.cfs_rq = NULL;  in __sched_fork()
 2982      struct sched_entity *curr = (&p->se)->cfs_rq->curr;  in prefetch_curr_exec_start()
 5930      root_task_group.cfs_rq = (struct cfs_rq **)ptr;  in sched_init()
 6561          struct cfs_rq *cfs_rq = tg->cfs_rq[i];  in tg_set_cfs_bandwidth() local
 6562          struct rq *rq = cfs_rq->rq;  in tg_set_cfs_bandwidth()
 6566          cfs_rq->runtime_enabled = runtime_enabled;  in tg_set_cfs_bandwidth()
 6567          cfs_rq->runtime_remaining = 0;  in tg_set_cfs_bandwidth()
 6569          if (cfs_rq->throttled)  in tg_set_cfs_bandwidth()
 6570              unthrottle_cfs_rq(cfs_rq);  in tg_set_cfs_bandwidth()
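The tg_set_cfs_bandwidth() lines show what happens when a group's CFS bandwidth settings change: every per-CPU cfs_rq gets runtime_enabled flipped, its remaining budget reset, and an unthrottle if it was throttled under the old quota. A sketch of that loop (the for_each_online_cpu() iteration is an assumption about the elided surrounding lines; the rq lock the real code holds is only noted in a comment):

	/* Sketch only: per-CPU bandwidth update, as in the excerpt above. */
	for_each_online_cpu(i) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[i];

		/* the real code takes the rq lock of cfs_rq->rq here */
		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;

		if (cfs_rq->throttled)
			unthrottle_cfs_rq(cfs_rq);
	}
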
/Linux-v4.19/include/linux/
sched.h
   37  struct cfs_rq;
  467      struct cfs_rq *cfs_rq;  member
  469      struct cfs_rq *my_q;
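In include/linux/sched.h the two pointers sit in struct sched_entity: cfs_rq is the queue the entity is queued on, while my_q is the queue the entity owns when it represents a task group (group_cfs_rq() in fair.c returns it). A sketch of the usual test built on that distinction (mirroring the kernel's entity_is_task(); the name se_is_task is invented here):

/* Sketch only: a plain task owns no child queue, so my_q stays NULL;
 * a group entity points my_q at the cfs_rq its children run on. */
static inline bool se_is_task(const struct sched_entity *se)
{
	return se->my_q == NULL;
}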