Searched refs:cfs_rq (Results 1 – 8 of 8) sorted by relevance
/Linux-v5.10/kernel/sched/
fair.c
    271  static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)    in task_cfs_rq()
    273      return p->se.cfs_rq;    in task_cfs_rq()
    277  static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)    in cfs_rq_of()
    279      return se->cfs_rq;    in cfs_rq_of()
    283  static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)    in group_cfs_rq()
    288  static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)    in cfs_rq_tg_path() argument
    293      if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))    in cfs_rq_tg_path()
    294          autogroup_path(cfs_rq->tg, path, len);    in cfs_rq_tg_path()
    295      else if (cfs_rq && cfs_rq->tg->css.cgroup)    in cfs_rq_tg_path()
    296          cgroup_path(cfs_rq->tg->css.cgroup, path, len);    in cfs_rq_tg_path()
    [all …]
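The fair.c hits are the group-scheduling accessors: with CONFIG_FAIR_GROUP_SCHED enabled, each sched_entity carries a back-pointer to the cfs_rq it is queued on (se->cfs_rq), and a group entity additionally owns the child queue it represents (my_q, see the include/linux/sched.h hits further down). Below is a minimal user-space sketch of that shape, not the kernel definitions; struct layouts are trimmed to just the fields the excerpt touches:

#include <stdio.h>

struct cfs_rq {                         /* stand-in; the real struct lives in kernel/sched/sched.h */
	int id;
};

struct sched_entity {
	struct cfs_rq *cfs_rq;          /* queue this entity is enqueued on */
	struct cfs_rq *my_q;            /* queue owned by a group entity, NULL for a plain task */
};

struct task_struct {
	struct sched_entity se;
};

/* mirrors task_cfs_rq(): the queue the task's own entity sits on */
static struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* mirrors cfs_rq_of(): works for task and group entities alike */
static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* mirrors group_cfs_rq(): only a group entity owns a child queue */
static struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

int main(void)
{
	struct cfs_rq root_q = { .id = 0 };
	struct task_struct p = { .se = { .cfs_rq = &root_q, .my_q = NULL } };

	printf("task's queue id=%d, cfs_rq_of matches task_cfs_rq: %d, child queue: %p\n",
	       task_cfs_rq(&p)->id,
	       cfs_rq_of(&p.se) == task_cfs_rq(&p),
	       (void *)group_cfs_rq(&p.se));
	return 0;
}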
pelt.h
      5  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
      6  int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
    152  static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)    in cfs_rq_clock_pelt() argument
    154      if (unlikely(cfs_rq->throttle_count))    in cfs_rq_clock_pelt()
    155          return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;    in cfs_rq_clock_pelt()
    157      return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;    in cfs_rq_clock_pelt()
    160  static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)    in cfs_rq_clock_pelt() argument
    162      return rq_clock_pelt(rq_of(cfs_rq));    in cfs_rq_clock_pelt()
    169  update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)    in update_cfs_rq_load_avg() argument
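The pelt.h match is the per-cfs_rq PELT clock: while a queue is throttled the clock stays pinned at the value it had when throttling began, and once the queue runs again the accumulated throttled time is subtracted, so load tracking does not decay across throttled gaps. A small stand-alone sketch of that arithmetic (plain u64 math only; rq plumbing, locking and the !CONFIG_CFS_BANDWIDTH variant at line 160 are omitted):

#include <stdio.h>
#include <stdint.h>

/* just the three fields the excerpt reads */
struct cfs_rq_model {
	unsigned int throttle_count;            /* > 0 while the queue is throttled */
	uint64_t throttled_clock_task;          /* clock value when throttling began */
	uint64_t throttled_clock_task_time;     /* total time spent throttled so far */
};

/* mirrors cfs_rq_clock_pelt(): freeze the clock while throttled, otherwise
 * report runqueue clock progress minus all previously throttled time */
static uint64_t cfs_rq_clock_pelt(const struct cfs_rq_model *cfs_rq, uint64_t rq_clock_pelt)
{
	if (cfs_rq->throttle_count)
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt - cfs_rq->throttled_clock_task_time;
}

int main(void)
{
	struct cfs_rq_model q = { .throttle_count = 0,
				  .throttled_clock_task = 0,
				  .throttled_clock_task_time = 3000 };

	/* 10000 ns of rq clock, 3000 ns of it spent throttled: PELT sees 7000 ns */
	printf("pelt clock = %llu\n",
	       (unsigned long long)cfs_rq_clock_pelt(&q, 10000));
	return 0;
}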
pelt.c
    310  int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)    in __update_load_avg_se() argument
    313      cfs_rq->curr == se)) {    in __update_load_avg_se()
    324  int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)    in __update_load_avg_cfs_rq() argument
    326      if (___update_load_sum(now, &cfs_rq->avg,    in __update_load_avg_cfs_rq()
    327              scale_load_down(cfs_rq->load.weight),    in __update_load_avg_cfs_rq()
    328              cfs_rq->h_nr_running,    in __update_load_avg_cfs_rq()
    329              cfs_rq->curr != NULL)) {    in __update_load_avg_cfs_rq()
    331          ___update_load_avg(&cfs_rq->avg, 1);    in __update_load_avg_cfs_rq()
    332          trace_pelt_cfs_tp(cfs_rq);    in __update_load_avg_cfs_rq()
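The pelt.c match is the cfs_rq-level update: the queue's current weight, hierarchical running count and "is anything running" flag are fed into the shared ___update_load_sum()/___update_load_avg() machinery, which maintains a geometrically decayed sum over roughly 1024 µs periods with a half-life of 32 periods (y^32 = 1/2), and fires the pelt_cfs_tp tracepoint when the average changes. The following floating-point toy only shows the shape of that decay; the kernel uses fixed-point lookup tables, not floats (link with -lm):

#include <stdio.h>
#include <math.h>

#define PELT_PERIOD_US 1024.0
#define PELT_HALFLIFE  32            /* periods; y^32 == 0.5 */

/* decayed "load sum": older contributions lose half their weight every 32 periods */
static double pelt_sum(double sum, double contrib_per_period, int elapsed_periods)
{
	double y = pow(0.5, 1.0 / PELT_HALFLIFE);

	for (int i = 0; i < elapsed_periods; i++)
		sum = sum * y + contrib_per_period;

	return sum;
}

int main(void)
{
	/* a queue that is continuously busy converges toward a fixed maximum */
	double sum = 0.0;

	for (int rounds = 1; rounds <= 4; rounds++) {
		sum = pelt_sum(sum, PELT_PERIOD_US, 32);
		printf("after %3d periods: sum = %8.1f\n", rounds * 32, sum);
	}
	return 0;
}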
debug.c
    536  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)    in print_cfs_rq() argument
    546      SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));    in print_cfs_rq()
    552          SPLIT_NS(cfs_rq->exec_clock));    in print_cfs_rq()
    555      if (rb_first_cached(&cfs_rq->tasks_timeline))    in print_cfs_rq()
    556          MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;    in print_cfs_rq()
    557      last = __pick_last_entity(cfs_rq);    in print_cfs_rq()
    560      min_vruntime = cfs_rq->min_vruntime;    in print_cfs_rq()
    576          cfs_rq->nr_spread_over);    in print_cfs_rq()
    577      SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);    in print_cfs_rq()
    578      SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);    in print_cfs_rq()
    [all …]
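The debug.c match is the dumper behind the per-cfs_rq blocks in /proc/sched_debug (exec_clock, min/MAX vruntime, nr_running, load and so on), available when the kernel is built with CONFIG_SCHED_DEBUG. As a rough reader for just those blocks, here is a hedged user-space filter that keys off the "cfs_rq[" header print_cfs_rq() emits and the leading-space continuation lines; the heuristics are ours, not part of the kernel:

#include <stdio.h>
#include <string.h>

/* print only the lines of /proc/sched_debug that belong to cfs_rq dumps */
int main(void)
{
	FILE *f = fopen("/proc/sched_debug", "r");
	char line[512];
	int in_cfs_block = 0;

	if (!f) {
		perror("/proc/sched_debug");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "cfs_rq[", 7) == 0)
			in_cfs_block = 1;              /* header emitted by print_cfs_rq() */
		else if (line[0] != ' ' && line[0] != '\n')
			in_cfs_block = 0;              /* next top-level section begins */

		if (in_cfs_block)
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}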
sched.h
    356  struct cfs_rq;
    391      struct cfs_rq **cfs_rq;    member
    472  extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
    479  extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
    505          struct cfs_rq *prev, struct cfs_rq *next);
    508          struct cfs_rq *prev, struct cfs_rq *next) { }    in set_task_rq_fair()
    519  struct cfs_rq {    struct
    931      struct cfs_rq cfs;
   1056  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)    in rq_of() argument
   1058      return cfs_rq->rq;    in rq_of()
    [all …]
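The kernel/sched/sched.h hits sketch the topology: a task_group owns a per-CPU array of cfs_rq pointers (line 391), every runqueue embeds the root cfs_rq directly (line 931, struct cfs_rq cfs), and with group scheduling each cfs_rq keeps an rq back-pointer that rq_of() returns. A compacted stand-alone model of those links, with fields reduced to the ones the excerpt shows:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct rq;

struct cfs_rq {
	struct rq *rq;                  /* owning runqueue (group-scheduling back-pointer) */
	int nr_running;
};

struct rq {
	int cpu;
	struct cfs_rq cfs;              /* root cfs_rq embedded in each runqueue */
};

struct task_group {
	struct cfs_rq **cfs_rq;         /* one cfs_rq per CPU for this group */
};

/* mirrors rq_of(): follow the back-pointer from a queue to its runqueue */
static struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

int main(void)
{
	static struct rq runqueues[NR_CPUS];
	struct task_group tg;

	tg.cfs_rq = calloc(NR_CPUS, sizeof(*tg.cfs_rq));

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		runqueues[cpu].cpu = cpu;
		runqueues[cpu].cfs.rq = &runqueues[cpu];

		/* a real group allocates its own queues; reuse the root ones here */
		tg.cfs_rq[cpu] = &runqueues[cpu].cfs;
	}

	printf("tg's cfs_rq on cpu2 belongs to rq %d\n", rq_of(tg.cfs_rq[2])->cpu);
	free(tg.cfs_rq);
	return 0;
}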
core.c
   3077      p->se.cfs_rq = NULL;    in __sched_fork()
   3928      struct sched_entity *curr = (&p->se)->cfs_rq->curr;    in prefetch_curr_exec_start()
   7091      root_task_group.cfs_rq = (struct cfs_rq **)ptr;    in sched_init()
   7959      struct cfs_rq *cfs_rq = tg->cfs_rq[i];    in tg_set_cfs_bandwidth() local
   7960      struct rq *rq = cfs_rq->rq;    in tg_set_cfs_bandwidth()
   7964      cfs_rq->runtime_enabled = runtime_enabled;    in tg_set_cfs_bandwidth()
   7965      cfs_rq->runtime_remaining = 0;    in tg_set_cfs_bandwidth()
   7967      if (cfs_rq->throttled)    in tg_set_cfs_bandwidth()
   7968          unthrottle_cfs_rq(cfs_rq);    in tg_set_cfs_bandwidth()
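The core.c hits show a bandwidth change being pushed down: tg_set_cfs_bandwidth() walks the group's per-CPU cfs_rq array, flips runtime_enabled, clears the remaining runtime so the next enqueue grabs a fresh slice, and unthrottles any queue that was parked. A reduced stand-alone sketch of that propagation loop; runqueue locking, hrtimers and the cgroup cpu.cfs_quota_us interface are all left out:

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

struct cfs_rq_model {
	bool runtime_enabled;
	long long runtime_remaining;
	bool throttled;
};

/* stand-in for unthrottle_cfs_rq(): put a parked queue back on the runqueue */
static void unthrottle(struct cfs_rq_model *cfs_rq)
{
	cfs_rq->throttled = false;
}

/* mirrors the per-CPU loop in tg_set_cfs_bandwidth() */
static void set_bandwidth(struct cfs_rq_model *per_cpu_q, int nr_cpus, bool runtime_enabled)
{
	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		struct cfs_rq_model *cfs_rq = &per_cpu_q[cpu];

		cfs_rq->runtime_enabled = runtime_enabled;
		cfs_rq->runtime_remaining = 0;    /* force a fresh grab from the pool */

		if (cfs_rq->throttled)
			unthrottle(cfs_rq);
	}
}

int main(void)
{
	struct cfs_rq_model queues[NR_CPUS] = { [1] = { .throttled = true } };

	set_bandwidth(queues, NR_CPUS, true);
	printf("cpu1 throttled after update: %d\n", queues[1].throttled);
	return 0;
}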
/Linux-v5.10/include/trace/events/
sched.h
    610      TP_PROTO(struct cfs_rq *cfs_rq),
    611      TP_ARGS(cfs_rq));
    642      TP_PROTO(struct cfs_rq *cfs_rq),
    643      TP_ARGS(cfs_rq));
/Linux-v5.10/include/linux/
sched.h
     44  struct cfs_rq;
    471      struct cfs_rq *cfs_rq;    member
    473      struct cfs_rq *my_q;
   2071  const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
   2072  char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
   2073  int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
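The include/linux/sched.h hits are the per-entity queue pointers (cfs_rq and my_q inside struct sched_entity) plus the sched_trace_cfs_rq_*() accessors, which exist so tracing code outside kernel/sched/ can read an otherwise opaque struct cfs_rq. They pair naturally with the bare pelt_cfs_tp tracepoint from the include/trace/events/sched.h result above. The kernel-module sketch below is hedged: the register_trace_pelt_cfs_tp()/unregister_trace_pelt_cfs_tp() helper names are assumed to be the ones DECLARE_TRACE() generates for that tracepoint, so verify them against your tree before building:

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch of a pelt_cfs_tp probe; register helper names are assumptions
 * derived from DECLARE_TRACE(), the sched_trace_*() signatures are from the
 * search result above. */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
{
	char path[64];
	const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq);

	pr_info("cfs_rq %s cpu%d util_avg=%lu\n",
		sched_trace_cfs_rq_path(cfs_rq, path, sizeof(path)),
		sched_trace_cfs_rq_cpu(cfs_rq),
		avg ? avg->util_avg : 0UL);
}

static int __init probe_init(void)
{
	return register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
}

static void __exit probe_exit(void)
{
	unregister_trace_pelt_cfs_tp(probe_pelt_cfs, NULL);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");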