Lines Matching full:cs

243 static inline struct cpuset *parent_cs(struct cpuset *cs)  in parent_cs()  argument
245 return css_cs(cs->css.parent); in parent_cs()
261 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
263 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
266 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
268 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
271 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
273 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
276 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
278 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
281 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
283 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
286 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
288 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
291 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
293 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
296 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
298 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
301 static inline int is_partition_valid(const struct cpuset *cs) in is_partition_valid() argument
303 return cs->partition_root_state > 0; in is_partition_valid()
306 static inline int is_partition_invalid(const struct cpuset *cs) in is_partition_invalid() argument
308 return cs->partition_root_state < 0; in is_partition_invalid()
314 static inline void make_partition_invalid(struct cpuset *cs) in make_partition_invalid() argument
316 if (is_partition_valid(cs)) in make_partition_invalid()
317 cs->partition_root_state = -cs->partition_root_state; in make_partition_invalid()
323 static inline void notify_partition_change(struct cpuset *cs, int old_prs) in notify_partition_change() argument
325 if (old_prs == cs->partition_root_state) in notify_partition_change()
327 cgroup_file_notify(&cs->partition_file); in notify_partition_change()
330 if (is_partition_valid(cs)) in notify_partition_change()
331 WRITE_ONCE(cs->prs_err, PERR_NONE); in notify_partition_change()
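
The accessor matches above all follow one pattern: a one-line test_bit() wrapper over cs->flags, plus a signed partition_root_state in which a positive value means a valid partition root and a negative value the same kind of root in an invalid state. A minimal sketch of that pattern, reconstructed from the matched lines (the CS_* bit names come from the matches themselves; anything beyond them is filled in as an assumption):

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
        /* every is_*() helper tests exactly one CS_* bit in cs->flags */
        return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline bool is_cpuset_online(struct cpuset *cs)
{
        /* online = CS_ONLINE set and the css not already on its way out */
        return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline void make_partition_invalid(struct cpuset *cs)
{
        /* negating the state keeps the partition type but marks it invalid */
        if (is_partition_valid(cs))
                cs->partition_root_state = -cs->partition_root_state;
}
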
457 * @cs: partition root to be checked
461 * It is assumed that @cs is a valid partition root. @excluded_child should
464 static inline bool partition_is_populated(struct cpuset *cs, in partition_is_populated() argument
470 if (cs->css.cgroup->nr_populated_csets) in partition_is_populated()
472 if (!excluded_child && !cs->nr_subparts_cpus) in partition_is_populated()
473 return cgroup_is_populated(cs->css.cgroup); in partition_is_populated()
476 cpuset_for_each_child(child, css, cs) { in partition_is_populated()
505 struct cpuset *cs; in guarantee_online_cpus() local
511 cs = task_cs(tsk); in guarantee_online_cpus()
513 while (!cpumask_intersects(cs->effective_cpus, pmask)) { in guarantee_online_cpus()
514 cs = parent_cs(cs); in guarantee_online_cpus()
515 if (unlikely(!cs)) { in guarantee_online_cpus()
526 cpumask_and(pmask, pmask, cs->effective_cpus); in guarantee_online_cpus()
543 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
545 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
546 cs = parent_cs(cs); in guarantee_online_mems()
547 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
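
guarantee_online_cpus() and guarantee_online_mems() share a walk-up idiom: if the starting cpuset has no online CPUs or memory nodes in its effective mask, climb towards the root via parent_cs() until an ancestor does, then AND that ancestor's effective mask into the caller's mask. The memory-side helper can be pieced together almost entirely from the matched lines; the CPU-side version additionally handles running out of parents (the !cs check at line 515), with a system-wide fallback that the listing does not show:

static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
        /* climb until some of the effective mems are actually online */
        while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
                cs = parent_cs(cs);
        /* hand back only the online subset of that ancestor's mems */
        nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}
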
555 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
558 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
563 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
587 * @cs: the cpuset whose cpumasks are to be allocated.
593 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
597 if (cs) { in alloc_cpumasks()
598 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
599 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
600 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
627 * @cs: the cpuset whose cpumasks are to be freed.
630 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
632 if (cs) { in free_cpumasks()
633 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
634 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
635 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
646 * @cs: the cpuset that the trial cpuset duplicates
648 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
652 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
661 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
662 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
668 * @cs: the cpuset to be freed
670 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
672 free_cpumasks(cs, NULL); in free_cpuset()
673 kfree(cs); in free_cpuset()
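
alloc_cpumasks()/free_cpumasks() and alloc_trial_cpuset()/free_cpuset() pair up around the trial-cpuset idea: duplicate the live cpuset, give the copy its own cpumask allocations, mutate and validate the copy, and free it whether or not the change is committed. A sketch of the allocation side assembled from the matched lines (the subparts_cpus handling is abbreviated; treat the exact shape as an assumption):

static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
        struct cpuset *trial;

        /* byte-for-byte copy of the live cpuset ... */
        trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
        if (!trial)
                return NULL;

        /* ... which must not alias cs's cpumask allocations */
        if (alloc_cpumasks(trial, NULL)) {
                kfree(trial);
                return NULL;
        }

        cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
        cpumask_copy(trial->effective_cpus, cs->effective_cpus);
        return trial;
}

/* the inverse: drop the masks first, then the struct itself */
static inline void free_cpuset(struct cpuset *cs)
{
        free_cpumasks(cs, NULL);
        kfree(cs);
}
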
1065 static void update_tasks_root_domain(struct cpuset *cs) in update_tasks_root_domain() argument
1070 css_task_iter_start(&cs->css, 0, &it); in update_tasks_root_domain()
1080 struct cpuset *cs = NULL; in rebuild_root_domains() local
1095 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_root_domains()
1097 if (cpumask_empty(cs->effective_cpus)) { in rebuild_root_domains()
1102 css_get(&cs->css); in rebuild_root_domains()
1106 update_tasks_root_domain(cs); in rebuild_root_domains()
1109 css_put(&cs->css); in rebuild_root_domains()
1140 struct cpuset *cs; in rebuild_sched_domains_locked() local
1166 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_sched_domains_locked()
1167 if (!is_partition_valid(cs)) { in rebuild_sched_domains_locked()
1171 if (!cpumask_subset(cs->effective_cpus, in rebuild_sched_domains_locked()
1203 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1205 * Iterate through each task of @cs updating its cpus_allowed to the
1209 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
1213 bool top_cs = cs == &top_cpuset; in update_tasks_cpumask()
1215 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1223 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
1231 * @cs: the cpuset that needs its new effective_cpus mask recomputed
1240 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1245 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); in compute_effective_cpumask()
1248 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
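
compute_effective_cpumask() encodes the hierarchy rule behind cpuset.cpus.effective: a child runs on its own cpus_allowed restricted to what its parent can actually give it. The two cpumask_and() matches suggest roughly the following shape; the partition-root branch (folding parent->subparts_cpus back in before masking) is an assumption inferred from the first of the two lines:

static void compute_effective_cpumask(struct cpumask *new_cpus,
                                      struct cpuset *cs, struct cpuset *parent)
{
        if (parent->nr_subparts_cpus) {
                /* parent reserved CPUs for child partitions: count them back in */
                cpumask_or(new_cpus, parent->effective_cpus, parent->subparts_cpus);
                cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
        } else {
                /* common case: what the child asked for, limited by the parent */
                cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
        }
}
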
1262 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1302 static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd, in update_parent_subparts_cpumask() argument
1306 struct cpuset *parent = parent_cs(cs); in update_parent_subparts_cpumask()
1324 (!newmask && cpumask_empty(cs->cpus_allowed))) in update_parent_subparts_cpumask()
1332 old_prs = new_prs = cs->partition_root_state; in update_parent_subparts_cpumask()
1338 if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed)) in update_parent_subparts_cpumask()
1345 if (!cpumask_intersects(cs->cpus_allowed, parent->effective_cpus) && in update_parent_subparts_cpumask()
1346 partition_is_populated(parent, cs)) in update_parent_subparts_cpumask()
1349 cpumask_copy(tmp->addmask, cs->cpus_allowed); in update_parent_subparts_cpumask()
1357 cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1367 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1383 cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask); in update_parent_subparts_cpumask()
1397 partition_is_populated(parent, cs)) { in update_parent_subparts_cpumask()
1400 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1422 cpumask_and(tmp->addmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1427 if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) || in update_parent_subparts_cpumask()
1430 partition_is_populated(parent, cs))) { in update_parent_subparts_cpumask()
1435 if (part_error && is_partition_valid(cs) && in update_parent_subparts_cpumask()
1437 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed, in update_parent_subparts_cpumask()
1441 WRITE_ONCE(cs->prs_err, part_error); in update_parent_subparts_cpumask()
1448 switch (cs->partition_root_state) { in update_parent_subparts_cpumask()
1470 if (is_prs_invalid(old_prs) && !is_cpu_exclusive(cs) && in update_parent_subparts_cpumask()
1471 (update_flag(CS_CPU_EXCLUSIVE, cs, 1) < 0)) in update_parent_subparts_cpumask()
1473 if (is_prs_invalid(new_prs) && is_cpu_exclusive(cs)) in update_parent_subparts_cpumask()
1474 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_parent_subparts_cpumask()
1503 cs->partition_root_state = new_prs; in update_parent_subparts_cpumask()
1516 update_flag(CS_SCHED_LOAD_BALANCE, cs, 1); in update_parent_subparts_cpumask()
1518 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in update_parent_subparts_cpumask()
1520 notify_partition_change(cs, old_prs); in update_parent_subparts_cpumask()
1526 * @cs: the cpuset to consider
1537 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, in update_cpumasks_hier() argument
1546 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1587 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
1592 if ((cp != cs) && old_prs) { in update_cpumasks_hier()
1686 * @cs: Current cpuset
1689 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1707 if (sibling == cs) in update_sibling_cpumasks()
1724 * @cs: the cpuset to consider
1728 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1736 if (cs == &top_cpuset) in update_cpumask()
1758 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
1771 retval = validate_change(cs, trialcs); in update_cpumask()
1786 parent = parent_cs(cs); in update_cpumask()
1800 if (cs->partition_root_state) { in update_cpumask()
1802 update_parent_subparts_cpumask(cs, partcmd_invalidate, in update_cpumask()
1805 update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1810 parent_cs(cs)); in update_cpumask()
1812 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1819 if (cs->nr_subparts_cpus) { in update_cpumask()
1820 if (!is_partition_valid(cs) || in update_cpumask()
1821 (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) && in update_cpumask()
1822 partition_is_populated(cs, NULL))) { in update_cpumask()
1823 cs->nr_subparts_cpus = 0; in update_cpumask()
1824 cpumask_clear(cs->subparts_cpus); in update_cpumask()
1826 cpumask_and(cs->subparts_cpus, cs->subparts_cpus, in update_cpumask()
1827 cs->cpus_allowed); in update_cpumask()
1828 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1834 update_cpumasks_hier(cs, &tmp, false); in update_cpumask()
1836 if (cs->partition_root_state) { in update_cpumask()
1837 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1844 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1934 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1936 * Iterate through each task of @cs updating its mems_allowed to the
1940 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1946 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1948 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1960 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
1971 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1973 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1975 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1983 * cs->old_mems_allowed. in update_tasks_nodemask()
1985 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
1993 * @cs: the cpuset to consider
2003 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
2009 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
2055 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2059 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
2068 if (cs == &top_cpuset) { in update_nodemask()
2093 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
2097 retval = validate_change(cs, trialcs); in update_nodemask()
2104 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
2108 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
2124 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
2131 if (val != cs->relax_domain_level) { in update_relax_domain_level()
2132 cs->relax_domain_level = val; in update_relax_domain_level()
2133 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
2134 is_sched_load_balance(cs)) in update_relax_domain_level()
2143 * @cs: the cpuset in which each task's spread flags need to be changed
2145 * Iterate through each task of @cs updating its spread flags. As this
2149 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
2154 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
2156 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
2163 * cs: the cpuset to update
2169 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
2177 trialcs = alloc_trial_cpuset(cs); in update_flag()
2186 err = validate_change(cs, trialcs); in update_flag()
2190 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
2193 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
2194 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
2197 cs->flags = trialcs->flags; in update_flag()
2204 update_tasks_flags(cs); in update_flag()
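
update_flag() is where the trial cpuset earns its keep: copy the cpuset, flip the requested bit on the copy, let validate_change() veto illegal combinations, and only then commit trialcs->flags to the live cpuset before propagating to tasks. A condensed sketch of that flow, assuming a set_bit/clear_bit on the copy and the file's callback_lock around the commit; the load-balance and spread bookkeeping visible at lines 2190-2204 is abbreviated:

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on)
{
        struct cpuset *trialcs;
        int err;

        trialcs = alloc_trial_cpuset(cs);
        if (!trialcs)
                return -ENOMEM;

        /* apply the change to the throwaway copy only */
        if (turning_on)
                set_bit(bit, &trialcs->flags);
        else
                clear_bit(bit, &trialcs->flags);

        /* let the common validator reject exclusivity violations etc. */
        err = validate_change(cs, trialcs);
        if (err < 0)
                goto out;

        /* commit the validated flag word to the live cpuset */
        spin_lock_irq(&callback_lock);
        cs->flags = trialcs->flags;
        spin_unlock_irq(&callback_lock);

        /* spread-flag changes must be pushed down to every task */
        update_tasks_flags(cs);
out:
        free_cpuset(trialcs);
        return err;
}
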
2212 * @cs: the cpuset to update
2218 static int update_prstate(struct cpuset *cs, int new_prs) in update_prstate() argument
2220 int err = PERR_NONE, old_prs = cs->partition_root_state; in update_prstate()
2222 struct cpuset *parent = parent_cs(cs); in update_prstate()
2233 cs->partition_root_state = -new_prs; in update_prstate()
2246 if (cpumask_empty(cs->cpus_allowed)) { in update_prstate()
2251 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); in update_prstate()
2257 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
2260 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2269 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in update_prstate()
2276 update_flag(CS_SCHED_LOAD_BALANCE, cs, (new_prs != PRS_ISOLATED)); in update_prstate()
2284 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, in update_prstate()
2290 if (unlikely(cs->nr_subparts_cpus)) { in update_prstate()
2292 cs->nr_subparts_cpus = 0; in update_prstate()
2293 cpumask_clear(cs->subparts_cpus); in update_prstate()
2294 compute_effective_cpumask(cs->effective_cpus, cs, parent); in update_prstate()
2299 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
2301 if (!is_sched_load_balance(cs)) { in update_prstate()
2303 update_flag(CS_SCHED_LOAD_BALANCE, cs, 1); in update_prstate()
2311 update_sibling_cpumasks(parent, cs, &tmpmask); in update_prstate()
2322 cs->partition_root_state = new_prs; in update_prstate()
2328 if (!list_empty(&cs->css.children)) in update_prstate()
2329 update_cpumasks_hier(cs, &tmpmask, !new_prs); in update_prstate()
2331 notify_partition_change(cs, old_prs); in update_prstate()
2443 struct cpuset *cs; in cpuset_can_attach() local
2449 cs = css_cs(css); in cpuset_can_attach()
2456 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
2462 if (cpumask_empty(cs->effective_cpus)) in cpuset_can_attach()
2466 ret = task_can_attach(task, cs->effective_cpus); in cpuset_can_attach()
2478 cs->attach_in_progress++; in cpuset_can_attach()
2510 struct cpuset *cs; in cpuset_attach() local
2514 cs = css_cs(css); in cpuset_attach()
2519 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2522 if (cs != &top_cpuset) in cpuset_attach()
2533 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
2540 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2555 if (is_memory_migrate(cs)) in cpuset_attach()
2563 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2565 cs->attach_in_progress--; in cpuset_attach()
2566 if (!cs->attach_in_progress) in cpuset_attach()
2596 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2602 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2609 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2612 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2615 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2618 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2621 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2627 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2630 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2645 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2651 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2656 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2674 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2681 * CPU or memory hotunplug may leave @cs w/o any execution in cpuset_write_resmask()
2686 * As writes to "cpus" or "mems" may restore @cs's execution in cpuset_write_resmask()
2695 * protection is okay as we check whether @cs is online after in cpuset_write_resmask()
2699 css_get(&cs->css); in cpuset_write_resmask()
2705 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2708 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2716 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2719 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2731 css_put(&cs->css); in cpuset_write_resmask()
2746 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2754 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
2757 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2760 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2763 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2766 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2778 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2782 return is_cpu_exclusive(cs); in cpuset_read_u64()
2784 return is_mem_exclusive(cs); in cpuset_read_u64()
2786 return is_mem_hardwall(cs); in cpuset_read_u64()
2788 return is_sched_load_balance(cs); in cpuset_read_u64()
2790 return is_memory_migrate(cs); in cpuset_read_u64()
2794 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2796 return is_spread_page(cs); in cpuset_read_u64()
2798 return is_spread_slab(cs); in cpuset_read_u64()
2809 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2813 return cs->relax_domain_level; in cpuset_read_s64()
2824 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
2827 switch (cs->partition_root_state) { in sched_partition_show()
2843 err = perr_strings[READ_ONCE(cs->prs_err)]; in sched_partition_show()
2856 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
2874 css_get(&cs->css); in sched_partition_write()
2877 if (!is_cpuset_online(cs)) in sched_partition_write()
2880 retval = update_prstate(cs, val); in sched_partition_write()
2884 css_put(&cs->css); in sched_partition_write()
3057 struct cpuset *cs; in cpuset_css_alloc() local
3062 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
3063 if (!cs) in cpuset_css_alloc()
3066 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
3067 kfree(cs); in cpuset_css_alloc()
3071 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
3072 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
3073 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
3074 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
3075 cs->relax_domain_level = -1; in cpuset_css_alloc()
3079 __set_bit(CS_MEMORY_MIGRATE, &cs->flags); in cpuset_css_alloc()
3081 return &cs->css; in cpuset_css_alloc()
3086 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
3087 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
3097 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
3099 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
3101 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
3107 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
3108 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
3109 cs->use_parent_ecpus = true; in cpuset_css_online()
3140 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
3141 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
3142 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
3143 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
3164 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
3169 if (is_partition_valid(cs)) in cpuset_css_offline()
3170 update_prstate(cs, 0); in cpuset_css_offline()
3173 is_sched_load_balance(cs)) in cpuset_css_offline()
3174 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
3176 if (cs->use_parent_ecpus) { in cpuset_css_offline()
3177 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
3179 cs->use_parent_ecpus = false; in cpuset_css_offline()
3184 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
3192 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
3194 free_cpuset(cs); in cpuset_css_free()
3281 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
3289 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
3294 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
3296 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
3302 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
3309 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
3310 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
3311 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
3312 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
3319 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
3320 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
3321 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
3322 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
3324 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
3325 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
3335 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
3341 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
3346 if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) in hotplug_update_tasks()
3347 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
3349 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
3352 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
3353 cs->effective_mems = *new_mems; in hotplug_update_tasks()
3357 update_tasks_cpumask(cs); in hotplug_update_tasks()
3359 update_tasks_nodemask(cs); in hotplug_update_tasks()
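
hotplug_update_tasks() is the cgroup v2 response to hot-removal: an emptied cpuset does not strand its tasks but transparently borrows its parent's effective CPUs and memory nodes (unless it is a valid partition root, which is handled separately), after which the per-task masks are refreshed. A sketch assembled from the matched lines, with the locking around the effective-mask update assumed to be the file's callback_lock:

static void hotplug_update_tasks(struct cpuset *cs,
                                 struct cpumask *new_cpus, nodemask_t *new_mems,
                                 bool cpus_updated, bool mems_updated)
{
        /* an emptied non-partition cpuset falls back to its parent's resources */
        if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
                cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
        if (nodes_empty(*new_mems))
                *new_mems = parent_cs(cs)->effective_mems;

        spin_lock_irq(&callback_lock);
        cpumask_copy(cs->effective_cpus, new_cpus);
        cs->effective_mems = *new_mems;
        spin_unlock_irq(&callback_lock);

        /* refresh every task's cpus_allowed / mems_allowed as needed */
        if (cpus_updated)
                update_tasks_cpumask(cs);
        if (mems_updated)
                update_tasks_nodemask(cs);
}
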
3371 * @cs: the cpuset of interest
3374 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3375 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3378 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3386 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3394 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3399 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3400 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3401 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3403 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3408 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3410 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3419 if (cs->nr_subparts_cpus && is_partition_valid(cs) && in cpuset_hotplug_update_tasks()
3420 cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) { in cpuset_hotplug_update_tasks()
3422 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3423 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3425 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3435 if (is_partition_valid(cs) && (!parent->nr_subparts_cpus || in cpuset_hotplug_update_tasks()
3436 (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) { in cpuset_hotplug_update_tasks()
3439 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp); in cpuset_hotplug_update_tasks()
3440 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3442 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3443 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3445 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3448 old_prs = cs->partition_root_state; in cpuset_hotplug_update_tasks()
3450 if (is_partition_valid(cs)) { in cpuset_hotplug_update_tasks()
3452 make_partition_invalid(cs); in cpuset_hotplug_update_tasks()
3455 WRITE_ONCE(cs->prs_err, PERR_INVPARENT); in cpuset_hotplug_update_tasks()
3457 WRITE_ONCE(cs->prs_err, PERR_NOTPART); in cpuset_hotplug_update_tasks()
3459 WRITE_ONCE(cs->prs_err, PERR_HOTPLUG); in cpuset_hotplug_update_tasks()
3460 notify_partition_change(cs, old_prs); in cpuset_hotplug_update_tasks()
3469 else if (is_partition_valid(parent) && is_partition_invalid(cs)) { in cpuset_hotplug_update_tasks()
3470 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp); in cpuset_hotplug_update_tasks()
3471 if (is_partition_valid(cs)) in cpuset_hotplug_update_tasks()
3476 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3477 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3483 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3486 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3580 struct cpuset *cs; in cpuset_hotplug_workfn() local
3584 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3585 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3589 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3592 css_put(&cs->css); in cpuset_hotplug_workfn()
3712 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
3775 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
3777 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
3778 cs = parent_cs(cs); in nearest_hardwall_ancestor()
3779 return cs; in nearest_hardwall_ancestor()
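
nearest_hardwall_ancestor() is the memory-side counterpart of the walk-up idiom: stop at the closest ancestor that is mem_exclusive or mem_hardwall, and __cpuset_node_allowed() then tests the requested node with node_isset() against that ancestor's mems_allowed (lines 3848-3849). The matched lines give the loop essentially verbatim; the locking around the call site is not shown here:

static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
        /* climb until an ancestor erects a memory "hardwall", or we hit the root */
        while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
                cs = parent_cs(cs);
        return cs;
}
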
3824 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
3848 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
3849 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()