Lines matching references to tg

1196 int tg_nop(struct task_group *tg, void *data)  in tg_nop()  argument
1700 struct task_group *tg = &root_task_group; in uclamp_update_root_tg() local
1702 uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], in uclamp_update_root_tg()
1704 uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], in uclamp_update_root_tg()
9697 static inline void alloc_uclamp_sched_group(struct task_group *tg, in alloc_uclamp_sched_group() argument
9704 uclamp_se_set(&tg->uclamp_req[clamp_id], in alloc_uclamp_sched_group()
9706 tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; in alloc_uclamp_sched_group()
9711 static void sched_free_group(struct task_group *tg) in sched_free_group() argument
9713 free_fair_sched_group(tg); in sched_free_group()
9714 free_rt_sched_group(tg); in sched_free_group()
9715 autogroup_free(tg); in sched_free_group()
9716 kmem_cache_free(task_group_cache, tg); in sched_free_group()
9722 struct task_group *tg; in sched_create_group() local
9724 tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); in sched_create_group()
9725 if (!tg) in sched_create_group()
9728 if (!alloc_fair_sched_group(tg, parent)) in sched_create_group()
9731 if (!alloc_rt_sched_group(tg, parent)) in sched_create_group()
9734 alloc_uclamp_sched_group(tg, parent); in sched_create_group()
9736 return tg; in sched_create_group()
9739 sched_free_group(tg); in sched_create_group()
9743 void sched_online_group(struct task_group *tg, struct task_group *parent) in sched_online_group() argument
9748 list_add_rcu(&tg->list, &task_groups); in sched_online_group()
9753 tg->parent = parent; in sched_online_group()
9754 INIT_LIST_HEAD(&tg->children); in sched_online_group()
9755 list_add_rcu(&tg->siblings, &parent->children); in sched_online_group()
9758 online_fair_sched_group(tg); in sched_online_group()
9768 void sched_destroy_group(struct task_group *tg) in sched_destroy_group() argument
9771 call_rcu(&tg->rcu, sched_free_group_rcu); in sched_destroy_group()
9774 void sched_offline_group(struct task_group *tg) in sched_offline_group() argument
9779 unregister_fair_sched_group(tg); in sched_offline_group()
9782 list_del_rcu(&tg->list); in sched_offline_group()
9783 list_del_rcu(&tg->siblings); in sched_offline_group()
9789 struct task_group *tg; in sched_change_group() local
9796 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), in sched_change_group()
9798 tg = autogroup_task_group(tsk, tg); in sched_change_group()
9799 tsk->sched_task_group = tg; in sched_change_group()
9860 struct task_group *tg; in cpu_cgroup_css_alloc() local
9867 tg = sched_create_group(parent); in cpu_cgroup_css_alloc()
9868 if (IS_ERR(tg)) in cpu_cgroup_css_alloc()
9871 return &tg->css; in cpu_cgroup_css_alloc()
9877 struct task_group *tg = css_tg(css); in cpu_cgroup_css_online() local
9881 sched_online_group(tg, parent); in cpu_cgroup_css_online()
9897 struct task_group *tg = css_tg(css); in cpu_cgroup_css_released() local
9899 sched_offline_group(tg); in cpu_cgroup_css_released()
9904 struct task_group *tg = css_tg(css); in cpu_cgroup_css_free() local
9909 sched_free_group(tg); in cpu_cgroup_css_free()
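
The cpu_cgroup_css_alloc/online/released/free callbacks above run when a cpu cgroup directory is created or removed, and they in turn call sched_create_group(), sched_online_group(), sched_offline_group() and sched_free_group(). A minimal userspace sketch of driving that lifecycle, assuming cgroup v2 is mounted at /sys/fs/cgroup with the cpu controller enabled, using a made-up child group named "demo", and run as root:

/* mkdir  -> cpu_cgroup_css_alloc()    -> sched_create_group()
 *        -> cpu_cgroup_css_online()   -> sched_online_group()
 * rmdir  -> cpu_cgroup_css_released() -> sched_offline_group()
 *        -> cpu_cgroup_css_free()     -> sched_free_group()
 * Mount point and group name are assumptions for illustration.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *grp = "/sys/fs/cgroup/demo";    /* hypothetical group */

	if (mkdir(grp, 0755) && errno != EEXIST) {  /* allocates the task_group */
		perror("mkdir");
		return 1;
	}
	sleep(1);                                   /* window to inspect the group */
	if (rmdir(grp)) {                           /* tears the task_group down */
		perror("rmdir");
		return 1;
	}
	return 0;
}
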
10066 struct task_group *tg; in cpu_uclamp_write() local
10077 tg = css_tg(of_css(of)); in cpu_uclamp_write()
10078 if (tg->uclamp_req[clamp_id].value != req.util) in cpu_uclamp_write()
10079 uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); in cpu_uclamp_write()
10085 tg->uclamp_pct[clamp_id] = req.percent; in cpu_uclamp_write()
10113 struct task_group *tg; in cpu_uclamp_print() local
10119 tg = css_tg(seq_css(sf)); in cpu_uclamp_print()
10120 util_clamp = tg->uclamp_req[clamp_id].value; in cpu_uclamp_print()
10128 percent = tg->uclamp_pct[clamp_id]; in cpu_uclamp_print()
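
cpu_uclamp_write() and cpu_uclamp_print() back the cpu.uclamp.min and cpu.uclamp.max cgroup files, which take a utilization percentage (or "max"); the write path updates tg->uclamp_req[] and records the raw percentage in tg->uclamp_pct[]. A small sketch of setting and reading back a minimum clamp, reusing the hypothetical /sys/fs/cgroup/demo group from the previous example:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/demo/cpu.uclamp.min"; /* assumed path */
	char buf[64];
	FILE *f;

	f = fopen(path, "w");
	if (!f) { perror(path); return 1; }
	fprintf(f, "25.00\n");              /* reaches cpu_uclamp_write() */
	fclose(f);

	f = fopen(path, "r");
	if (!f) { perror(path); return 1; }
	if (fgets(buf, sizeof(buf), f))     /* formatted by cpu_uclamp_print() */
		printf("cpu.uclamp.min = %s", buf);
	fclose(f);
	return 0;
}
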
10158 struct task_group *tg = css_tg(css); in cpu_shares_read_u64() local
10160 return (u64) scale_load_down(tg->shares); in cpu_shares_read_u64()
10171 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
10173 static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, in tg_set_cfs_bandwidth() argument
10177 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_set_cfs_bandwidth()
10179 if (tg == &root_task_group) in tg_set_cfs_bandwidth()
10214 ret = __cfs_schedulable(tg, period, quota); in tg_set_cfs_bandwidth()
10240 struct cfs_rq *cfs_rq = tg->cfs_rq[i]; in tg_set_cfs_bandwidth()
10261 static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) in tg_set_cfs_quota() argument
10265 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_quota()
10266 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_quota()
10274 return tg_set_cfs_bandwidth(tg, period, quota, burst); in tg_set_cfs_quota()
10277 static long tg_get_cfs_quota(struct task_group *tg) in tg_get_cfs_quota() argument
10281 if (tg->cfs_bandwidth.quota == RUNTIME_INF) in tg_get_cfs_quota()
10284 quota_us = tg->cfs_bandwidth.quota; in tg_get_cfs_quota()
10290 static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) in tg_set_cfs_period() argument
10298 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_period()
10299 burst = tg->cfs_bandwidth.burst; in tg_set_cfs_period()
10301 return tg_set_cfs_bandwidth(tg, period, quota, burst); in tg_set_cfs_period()
10304 static long tg_get_cfs_period(struct task_group *tg) in tg_get_cfs_period() argument
10308 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); in tg_get_cfs_period()
10314 static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) in tg_set_cfs_burst() argument
10322 period = ktime_to_ns(tg->cfs_bandwidth.period); in tg_set_cfs_burst()
10323 quota = tg->cfs_bandwidth.quota; in tg_set_cfs_burst()
10325 return tg_set_cfs_bandwidth(tg, period, quota, burst); in tg_set_cfs_burst()
10328 static long tg_get_cfs_burst(struct task_group *tg) in tg_get_cfs_burst() argument
10332 burst_us = tg->cfs_bandwidth.burst; in tg_get_cfs_burst()
10375 struct task_group *tg; member
10383 static u64 normalize_cfs_quota(struct task_group *tg, in normalize_cfs_quota() argument
10388 if (tg == d->tg) { in normalize_cfs_quota()
10392 period = tg_get_cfs_period(tg); in normalize_cfs_quota()
10393 quota = tg_get_cfs_quota(tg); in normalize_cfs_quota()
10403 static int tg_cfs_schedulable_down(struct task_group *tg, void *data) in tg_cfs_schedulable_down() argument
10406 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in tg_cfs_schedulable_down()
10409 if (!tg->parent) { in tg_cfs_schedulable_down()
10412 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; in tg_cfs_schedulable_down()
10414 quota = normalize_cfs_quota(tg, d); in tg_cfs_schedulable_down()
10436 static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) in __cfs_schedulable() argument
10440 .tg = tg, in __cfs_schedulable()
10459 struct task_group *tg = css_tg(seq_css(sf)); in cpu_cfs_stat_show() local
10460 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_cfs_stat_show()
10466 if (schedstat_enabled() && tg != &root_task_group) { in cpu_cfs_stat_show()
10471 ws += schedstat_val(tg->se[i]->statistics.wait_sum); in cpu_cfs_stat_show()
10589 struct task_group *tg = css_tg(css); in cpu_extra_stat_show() local
10590 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; in cpu_extra_stat_show()
10610 struct task_group *tg = css_tg(css); in cpu_weight_read_u64() local
10611 u64 weight = scale_load_down(tg->shares); in cpu_weight_read_u64()
10704 struct task_group *tg = css_tg(seq_css(sf)); in cpu_max_show() local
10706 cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); in cpu_max_show()
10713 struct task_group *tg = css_tg(of_css(of)); in cpu_max_write() local
10714 u64 period = tg_get_cfs_period(tg); in cpu_max_write()
10715 u64 burst = tg_get_cfs_burst(tg); in cpu_max_write()
10721 ret = tg_set_cfs_bandwidth(tg, period, quota, burst); in cpu_max_write()
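
The cpu_max_show()/cpu_max_write() handlers at the end of the listing parse the cgroup v2 cpu.max file ("QUOTA PERIOD", or "max" for no limit) and funnel into tg_set_cfs_bandwidth(), which validates the new settings against the hierarchy via __cfs_schedulable() before updating each tg->cfs_rq[i]. A minimal sketch that caps the hypothetical demo group at half a CPU (50 ms of runtime per 100 ms period):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/demo/cpu.max"; /* assumed path */
	FILE *f = fopen(path, "w");

	if (!f) { perror(path); return 1; }
	if (fprintf(f, "50000 100000\n") < 0)   /* parsed by cpu_max_write() */
		perror("write");
	fclose(f);                              /* quota=50000us, period=100000us */
	return 0;
}

On cgroup v1 the same tg_set_cfs_bandwidth() path is reached through the cpu.cfs_quota_us, cpu.cfs_period_us and cpu.cfs_burst_us files handled by tg_set_cfs_quota(), tg_set_cfs_period() and tg_set_cfs_burst() above.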