Lines Matching refs:cfs (all hits in kernel/sched/fair.c)

448 return &task_rq(p)->cfs; in task_cfs_rq()
456 return &rq->cfs; in cfs_rq_of()
485 for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)
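The first three hits (lines 448, 456, 485) are the !CONFIG_FAIR_GROUP_SCHED variants: with group scheduling compiled out, each runqueue embeds exactly one cfs_rq, so the accessors collapse to &rq->cfs and the leaf-cfs_rq walk visits only that one. A trimmed sketch of that shape (fields reduced to what the hits in this listing touch; the real structs carry far more):

    struct cfs_rq {
            struct load_weight load;
            unsigned int nr_running, h_nr_running, idle_h_nr_running;
            /* ... */
    };

    struct rq {
            unsigned int nr_running;
            struct cfs_rq cfs;      /* the single embedded CFS runqueue */
            /* ... */
    };

    /* line 448: a task's cfs_rq is its CPU runqueue's embedded rq->cfs */
    static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
    {
            return &task_rq(p)->cfs;
    }

    /* line 456: likewise for a sched_entity, which here is always a task */
    static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
    {
            return &task_rq(task_of(se))->cfs;
    }

    /* line 485: the "walk all leaf cfs_rqs" helper degenerates into a
     * single iteration over &rq->cfs; this hit is the macro body of
     * for_each_leaf_cfs_rq_safe(), hence no enclosing function */
    #define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)      \
            for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)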
3113 if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) { in cfs_rq_util_change()
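Line 3113 is the gate in cfs_rq_util_change() that ties cpufreq notifications to the root runqueue: group cfs_rqs are aggregated up into &rq->cfs, so only a change visible at the root, or a migration that may have left utilization stale, should poke the frequency governor. In sketch form:

    /* Notify cpufreq only for the root cfs_rq, or on migration. */
    if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION))
            cpufreq_update_util(rq, flags);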
4176 rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { in set_next_entity()
4597 if (rq->curr == rq->idle && rq->cfs.nr_running) in unthrottle_cfs_rq()
5123 if (rq->cfs.h_nr_running > 1) { in hrtick_start_fair()
5200 util_est_enqueue(&rq->cfs, p); in enqueue_task_fair()
5342 util_est_dequeue(&rq->cfs, p, task_sleep); in dequeue_task_fair()
5369 return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && in sched_idle_cpu()
5375 return cfs_rq_runnable_load_avg(&rq->cfs); in cpu_runnable_load()
5386 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); in cpu_avg_load_per_task()
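Lines 5369-5386 are the per-CPU statistics helpers that wake-up placement and load balancing consume, all derived from the root cfs_rq. A hedged sketch of how the three hits fit together, following the v5.4-era code this listing matches:

    /* line 5369: a CPU is "sched-idle" when every runnable task on it is
     * a SCHED_IDLE fair task, i.e. the total run count equals the CFS
     * hierarchy's idle count and is nonzero. */
    static int sched_idle_cpu(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);

            return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
                            rq->nr_running);
    }

    /* line 5375: a CPU's runnable load is the root cfs_rq's runnable
     * load average. */
    static unsigned long cpu_runnable_load(struct rq *rq)
    {
            return cfs_rq_runnable_load_avg(&rq->cfs);
    }

    /* line 5386: average load per task = runnable load divided by the
     * hierarchical fair-task count, guarding against division by zero. */
    static unsigned long cpu_avg_load_per_task(int cpu)
    {
            struct rq *rq = cpu_rq(cpu);
            unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
            unsigned long load_avg = cpu_runnable_load(rq);

            if (nr_running)
                    return load_avg / nr_running;

            return 0;
    }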
5594 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs); in find_idlest_group()
6080 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util()
6111 cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_without()
6211 struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; in cpu_util_next()
6752 struct cfs_rq *cfs_rq = &rq->cfs; in pick_next_task_fair()
6795 cfs_rq = &rq->cfs; in pick_next_task_fair()
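Lines 6752 and 6795 both anchor pick_next_task_fair() at the root: the pick starts from &rq->cfs and walks down one chosen entity per hierarchy level (line 6795 is the simple path resetting to the root after the previous task is put). A sketch of that descent, with names as in the hits above:

    cfs_rq = &rq->cfs;
    if (!cfs_rq->nr_running)
            goto idle;                              /* no runnable fair task */

    do {
            se = pick_next_entity(cfs_rq, NULL);    /* best entity at this level */
            set_next_entity(cfs_rq, se);            /* make it the running one */
            cfs_rq = group_cfs_rq(se);              /* NULL once se is a task */
    } while (cfs_rq);

    p = task_of(se);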
7644 struct cfs_rq *cfs_rq = &rq->cfs; in update_blocked_averages()
8031 sgs->sum_nr_running += rq->cfs.h_nr_running; in update_sg_lb_stats()
8725 (env->src_rq->cfs.h_nr_running == 1)) { in voluntary_active_balance()
9407 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { in nohz_balancer_kick()
9856 if (this_rq->cfs.h_nr_running && !pulled_task) in newidle_balance()
9864 if (this_rq->nr_running != this_rq->cfs.h_nr_running) in newidle_balance()
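The two newidle_balance() hits (lines 9856, 9864) are the post-balance sanity checks on the root counts, paraphrased here with the intent spelled out:

    /* line 9856: the rq lock was dropped while walking sched domains, so
     * a fair task may have been enqueued meanwhile; as we are not going
     * idle after all, pretend one was pulled. */
    if (this_rq->cfs.h_nr_running && !pulled_task)
            pulled_task = 1;

    /* line 9864: if the total run count exceeds the CFS count, a task of
     * a higher class (RT or deadline) appeared; return -1 so the caller
     * lets that class pick instead of running a fair task. */
    if (this_rq->nr_running != this_rq->cfs.h_nr_running)
            pulled_task = -1;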
10342 se->cfs_rq = &rq->cfs; in init_tg_cfs_entry()
10418 if (rq->cfs.load.weight) in get_rr_interval_fair()