Lines matching refs:tg (references to struct task_group *tg in the scheduler core)
842 int tg_nop(struct task_group *tg, void *data) in tg_nop() argument
1333 struct task_group *tg = &root_task_group; in uclamp_update_root_tg() local
1335 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], in uclamp_update_root_tg()
1337 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], in uclamp_update_root_tg()
7429 static inline void alloc_uclamp_sched_group(struct task_group *tg, in alloc_uclamp_sched_group() argument
7436 uclamp_se_set(&tg->uclamp_req[clamp_id], in alloc_uclamp_sched_group()
7438 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; in alloc_uclamp_sched_group()
7443 static void sched_free_group(struct task_group *tg) in sched_free_group() argument
7445 free_fair_sched_group(tg); in sched_free_group()
7446 free_rt_sched_group(tg); in sched_free_group()
7447 autogroup_free(tg); in sched_free_group()
7448 kmem_cache_free(task_group_cache, tg); in sched_free_group()
7454 struct task_group *tg; in sched_create_group() local
7456 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); in sched_create_group()
7457 if (!tg) in sched_create_group()
7460 if (!alloc_fair_sched_group(tg, parent)) in sched_create_group()
7463 if (!alloc_rt_sched_group(tg, parent)) in sched_create_group()
7466 alloc_uclamp_sched_group(tg, parent); in sched_create_group()
7468 return tg; in sched_create_group()
7471 sched_free_group(tg); in sched_create_group()
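The sched_free_group()/sched_create_group() hits above trace the usual allocate-then-unwind pattern: each allocation either succeeds or the whole group is torn down through one free path. A rough reconstruction from the matched lines (a sketch, not verbatim source; the error label and ERR_PTR returns are filled in as assumptions):

	/* Sketch of sched_create_group(), reconstructed from the hits above. */
	struct task_group *sched_create_group(struct task_group *parent)
	{
		struct task_group *tg;

		tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
		if (!tg)
			return ERR_PTR(-ENOMEM);

		if (!alloc_fair_sched_group(tg, parent))
			goto err;
		if (!alloc_rt_sched_group(tg, parent))
			goto err;

		alloc_uclamp_sched_group(tg, parent);
		return tg;

	err:
		sched_free_group(tg);	/* frees fair/RT state and the tg itself */
		return ERR_PTR(-ENOMEM);
	}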
7475 void sched_online_group(struct task_group *tg, struct task_group *parent) in sched_online_group() argument
7480 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
7485 tg->parent = parent; in sched_online_group()
7486 INIT_LIST_HEAD(&tg->children); in sched_online_group()
7487 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
7490 online_fair_sched_group(tg); in sched_online_group()
7500 void sched_destroy_group(struct task_group *tg) in sched_destroy_group() argument
7503 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_destroy_group()
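The sched_destroy_group() hit shows the free being deferred through RCU rather than done inline. A plausible reconstruction (the sched_free_group_rcu() body is not in the listing and is filled in as an assumption):

	static void sched_free_group_rcu(struct rcu_head *rhp)
	{
		/* Runs after a grace period, once no RCU reader can still see tg */
		sched_free_group(container_of(rhp, struct task_group, rcu));
	}

	void sched_destroy_group(struct task_group *tg)
	{
		/* Defer the free until current readers of the task_groups list finish */
		call_rcu(&tg->rcu, sched_free_group_rcu);
	}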
7506 void sched_offline_group(struct task_group *tg) in sched_offline_group() argument
7511 unregister_fair_sched_group(tg); in sched_offline_group()
7514 list_del_rcu(&tg->list); in sched_offline_group()
7515 list_del_rcu(&tg->siblings); in sched_offline_group()
7521 struct task_group *tg; in sched_change_group() local
7528 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), in sched_change_group()
7530 tg = autogroup_task_group(tsk, tg); in sched_change_group()
7531 tsk->sched_task_group = tg; in sched_change_group()
7592 struct task_group *tg; in cpu_cgroup_css_alloc() local
7599 tg = sched_create_group(parent); in cpu_cgroup_css_alloc()
7600 if (IS_ERR(tg)) in cpu_cgroup_css_alloc()
7603 return &tg->css; in cpu_cgroup_css_alloc()
7609 struct task_group *tg = css_tg(css); in cpu_cgroup_css_online() local
7613 sched_online_group(tg, parent); in cpu_cgroup_css_online()
7625 struct task_group *tg = css_tg(css); in cpu_cgroup_css_released() local
7627 sched_offline_group(tg); in cpu_cgroup_css_released()
7632 struct task_group *tg = css_tg(css); in cpu_cgroup_css_free() local
7637 sched_free_group(tg); in cpu_cgroup_css_free()
7791 struct task_group *tg; in cpu_uclamp_write() local
7802 tg = css_tg(of_css(of)); in cpu_uclamp_write()
7803 if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
7804 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
7810 tg->uclamp_pct[clamp_id] = req.percent; in cpu_uclamp_write()
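cpu_uclamp_write() stores both the raw percentage (tg->uclamp_pct) and a clamp value scaled into the capacity range (tg->uclamp_req). A minimal standalone sketch of that scaling, assuming the usual SCHED_CAPACITY_SCALE of 1024 and a percentage carried with two decimal places; the names and rounding here are illustrative, not the kernel's own helpers:

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1 << SCHED_CAPACITY_SHIFT)	/* 1024 */
	#define PERCENT_SCALE		10000				/* 100.00% as an integer */

	/* Map a "percent with two decimals" value (0..10000) to a utilization clamp. */
	static unsigned int uclamp_percent_to_util(unsigned int percent)
	{
		return (percent * SCHED_CAPACITY_SCALE + PERCENT_SCALE / 2) / PERCENT_SCALE;
	}

	int main(void)
	{
		/* Writing "50.00" to cpu.uclamp.min would request a clamp of ~512 out of 1024 */
		printf("%u\n", uclamp_percent_to_util(5000));
		return 0;
	}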
7838 struct task_group *tg; in cpu_uclamp_print() local
7844 tg = css_tg(seq_css(sf)); in cpu_uclamp_print()
7845 util_clamp = tg->uclamp_req[clamp_id].value; in cpu_uclamp_print()
7853 percent = tg->uclamp_pct[clamp_id]; in cpu_uclamp_print()
7883 struct task_group *tg = css_tg(css); in cpu_shares_read_u64() local
7885 return (u64) scale_load_down(tg->shares); in cpu_shares_read_u64()
7896 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7898 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) in tg_set_cfs_bandwidth() argument
7901 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
7903 if (tg == &root_task_group) in tg_set_cfs_bandwidth()
7934 ret = __cfs_schedulable(tg, period, quota); in tg_set_cfs_bandwidth()
7959 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
7980 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) in tg_set_cfs_quota() argument
7984 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
7992 return tg_set_cfs_bandwidth(tg, period, quota); in tg_set_cfs_quota()
7995 static long tg_get_cfs_quota(struct task_group *tg) in tg_get_cfs_quota() argument
7999 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
8002 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
8008 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) in tg_set_cfs_period() argument
8016 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
8018 return tg_set_cfs_bandwidth(tg, period, quota); in tg_set_cfs_period()
8021 static long tg_get_cfs_period(struct task_group *tg) in tg_get_cfs_period() argument
8025 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
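tg_set_cfs_quota()/tg_set_cfs_period() translate the microsecond values exposed through the cgroup files into the nanosecond quantities kept in cfs_bandwidth, with a negative quota meaning unlimited. A standalone sketch of that unit handling (constants assumed, not taken from the listing):

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_USEC	1000ULL
	#define RUNTIME_INF	(~0ULL)		/* "no limit", as for the root group */

	static uint64_t cfs_quota_us_to_ns(long long cfs_quota_us)
	{
		if (cfs_quota_us < 0)
			return RUNTIME_INF;	/* -1 (or "max" in cpu.max) disables the limit */
		return (uint64_t)cfs_quota_us * NSEC_PER_USEC;
	}

	int main(void)
	{
		/* 100ms of runtime per period becomes 100000000 ns in cfs_bandwidth.quota */
		printf("%llu\n", (unsigned long long)cfs_quota_us_to_ns(100000));
		return 0;
	}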
8056 struct task_group *tg; member
8064 static u64 normalize_cfs_quota(struct task_group *tg, in normalize_cfs_quota() argument
8069 if (tg == d->tg) { in normalize_cfs_quota()
8073 period = tg_get_cfs_period(tg); in normalize_cfs_quota()
8074 quota = tg_get_cfs_quota(tg); in normalize_cfs_quota()
8084 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) in tg_cfs_schedulable_down() argument
8087 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
8090 if (!tg->parent) { in tg_cfs_schedulable_down()
8093 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
8095 quota = normalize_cfs_quota(tg, d); in tg_cfs_schedulable_down()
8117 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) in __cfs_schedulable() argument
8121 .tg = tg, in __cfs_schedulable()
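__cfs_schedulable() walks the group tree with tg_cfs_schedulable_down(), comparing each group's quota/period ratio against its parent so a child is never promised more bandwidth than the parent can provide. A simplified standalone sketch of the fixed-point ratio that normalize_cfs_quota() feeds into that comparison (BW_SHIFT value assumed):

	#include <stdint.h>
	#include <stdio.h>

	#define BW_SHIFT	20	/* fixed-point fraction bits for bandwidth ratios */

	/* quota/period as a fixed-point fraction; 1 << BW_SHIFT means "100% of a CPU" */
	static uint64_t to_ratio(uint64_t period_ns, uint64_t quota_ns)
	{
		if (!period_ns)
			return 0;
		return (quota_ns << BW_SHIFT) / period_ns;
	}

	int main(void)
	{
		/* 50ms quota per 100ms period: half of the full per-CPU ratio */
		printf("child=%llu full=%llu\n",
		       (unsigned long long)to_ratio(100000000ULL, 50000000ULL),
		       (unsigned long long)(1ULL << BW_SHIFT));
		return 0;
	}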
8140 struct task_group *tg = css_tg(seq_css(sf)); in cpu_cfs_stat_show() local
8141 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_cfs_stat_show()
8147 if (schedstat_enabled() && tg != &root_task_group) { in cpu_cfs_stat_show()
8152 ws += schedstat_val(tg->se[i]->statistics.wait_sum); in cpu_cfs_stat_show()
8246 struct task_group *tg = css_tg(css); in cpu_extra_stat_show() local
8247 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_extra_stat_show()
8267 struct task_group *tg = css_tg(css); in cpu_weight_read_u64() local
8268 u64 weight = scale_load_down(tg->shares); in cpu_weight_read_u64()
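cpu_weight_read_u64() presents tg->shares (default 1024 after scale_load_down()) as the cgroup v2 weight, whose default is 100 in the 1..10000 range. A standalone sketch of that mapping, assuming round-to-nearest scaling in both directions:

	#include <stdio.h>

	#define SHARES_DFL	1024	/* default tg->shares after scale_load_down() */
	#define WEIGHT_DFL	100	/* cgroup v2 cpu.weight default */

	static unsigned long shares_to_weight(unsigned long shares)
	{
		return (shares * WEIGHT_DFL + SHARES_DFL / 2) / SHARES_DFL;
	}

	static unsigned long weight_to_shares(unsigned long weight)
	{
		return (weight * SHARES_DFL + WEIGHT_DFL / 2) / WEIGHT_DFL;
	}

	int main(void)
	{
		/* Default shares read back as the default weight, and vice versa */
		printf("%lu %lu\n", shares_to_weight(1024), weight_to_shares(100));
		return 0;
	}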
8361 struct task_group *tg = css_tg(seq_css(sf)); in cpu_max_show() local
8363 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); in cpu_max_show()
8370 struct task_group *tg = css_tg(of_css(of)); in cpu_max_write() local
8371 u64 period = tg_get_cfs_period(tg); in cpu_max_write()
8377 ret = tg_set_cfs_bandwidth(tg, period, quota); in cpu_max_write()
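cpu_max_show()/cpu_max_write() speak the cgroup v2 "cpu.max" format: "$QUOTA $PERIOD", where $QUOTA is either a number of microseconds or the literal "max", and the period may be omitted. A standalone parsing sketch of that format (not the kernel's own helper; the 100ms fallback period is an assumption, since cpu_max_write() actually defaults to the group's current period):

	#include <stdio.h>
	#include <string.h>

	#define RUNTIME_INF	(-1LL)		/* represent "max" as an unlimited quota */

	/* Parse "$QUOTA $PERIOD" (period optional) as used by the cpu.max file. */
	static int parse_cpu_max(const char *buf, long long *quota_us, long long *period_us)
	{
		char tok[32];

		*period_us = 100000;		/* fallback period: 100ms */
		if (sscanf(buf, "%31s %lld", tok, period_us) < 1)
			return -1;
		if (!strcmp(tok, "max"))
			*quota_us = RUNTIME_INF;
		else if (sscanf(tok, "%lld", quota_us) != 1)
			return -1;
		return 0;
	}

	int main(void)
	{
		long long quota, period;

		if (!parse_cpu_max("50000 100000", &quota, &period))
			printf("quota=%lld period=%lld\n", quota, period);
		if (!parse_cpu_max("max", &quota, &period))
			printf("quota=%lld period=%lld\n", quota, period);
		return 0;
	}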