Lines matching full:cs (cross-reference of the identifier cs in kernel/cgroup/cpuset.c)

203 static inline struct cpuset *parent_cs(struct cpuset *cs)  in parent_cs()  argument
205 return css_cs(cs->css.parent); in parent_cs()
221 static inline bool is_cpuset_online(struct cpuset *cs) in is_cpuset_online() argument
223 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); in is_cpuset_online()
226 static inline int is_cpu_exclusive(const struct cpuset *cs) in is_cpu_exclusive() argument
228 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags); in is_cpu_exclusive()
231 static inline int is_mem_exclusive(const struct cpuset *cs) in is_mem_exclusive() argument
233 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); in is_mem_exclusive()
236 static inline int is_mem_hardwall(const struct cpuset *cs) in is_mem_hardwall() argument
238 return test_bit(CS_MEM_HARDWALL, &cs->flags); in is_mem_hardwall()
241 static inline int is_sched_load_balance(const struct cpuset *cs) in is_sched_load_balance() argument
243 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in is_sched_load_balance()
246 static inline int is_memory_migrate(const struct cpuset *cs) in is_memory_migrate() argument
248 return test_bit(CS_MEMORY_MIGRATE, &cs->flags); in is_memory_migrate()
251 static inline int is_spread_page(const struct cpuset *cs) in is_spread_page() argument
253 return test_bit(CS_SPREAD_PAGE, &cs->flags); in is_spread_page()
256 static inline int is_spread_slab(const struct cpuset *cs) in is_spread_slab() argument
258 return test_bit(CS_SPREAD_SLAB, &cs->flags); in is_spread_slab()
261 static inline int is_partition_root(const struct cpuset *cs) in is_partition_root() argument
263 return cs->partition_root_state > 0; in is_partition_root()
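The matches above are the small accessor helpers near the top of the file: each one simply tests a bit in cs->flags (or, for is_partition_root(), compares partition_root_state against zero). Below is a minimal userspace sketch of the same pattern; the toy_cpuset struct, the flag enum, and the open-coded bit tests are stand-ins for the kernel's struct cpuset and test_bit(), not the kernel definitions themselves.

```c
/* Toy userspace model of the cpuset flag accessors matched above.
 * Struct and enum names are hypothetical; the kernel versions operate
 * on struct cpuset and use test_bit() on cs->flags.
 */
#include <stdio.h>

enum toy_flagbits {
	TOY_CPU_EXCLUSIVE,
	TOY_MEM_EXCLUSIVE,
	TOY_SCHED_LOAD_BALANCE,
};

struct toy_cpuset {
	unsigned long flags;        /* bitmask of toy_flagbits */
	int partition_root_state;   /* >0 means partition root */
};

static inline int is_cpu_exclusive(const struct toy_cpuset *cs)
{
	return !!(cs->flags & (1UL << TOY_CPU_EXCLUSIVE));
}

static inline int is_partition_root(const struct toy_cpuset *cs)
{
	return cs->partition_root_state > 0;
}

int main(void)
{
	struct toy_cpuset cs = {
		.flags = 1UL << TOY_CPU_EXCLUSIVE,
		.partition_root_state = 1,
	};

	printf("cpu_exclusive=%d partition_root=%d\n",
	       is_cpu_exclusive(&cs), is_partition_root(&cs));
	return 0;
}
```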
380 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) in guarantee_online_cpus() argument
382 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { in guarantee_online_cpus()
383 cs = parent_cs(cs); in guarantee_online_cpus()
384 if (unlikely(!cs)) { in guarantee_online_cpus()
396 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); in guarantee_online_cpus()
410 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) in guarantee_online_mems() argument
412 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems()
413 cs = parent_cs(cs); in guarantee_online_mems()
414 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); in guarantee_online_mems()
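guarantee_online_cpus() and guarantee_online_mems() both climb toward the root until they reach an ancestor whose effective mask still intersects the online set, then return the intersection. A self-contained sketch of that walk follows, with uint64_t masks standing in for cpumask_t and a hypothetical parent pointer; the fallback to the full online mask mirrors what the kernel does when no ancestor qualifies.

```c
/* Standalone model of the guarantee_online_cpus() walk matched above:
 * climb toward the root until an ancestor's effective mask intersects
 * the online mask, then return the intersection.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_cpuset {
	const char *name;
	uint64_t effective_cpus;
	struct toy_cpuset *parent;   /* NULL at the root */
};

static uint64_t guarantee_online_cpus(struct toy_cpuset *cs, uint64_t online)
{
	/* Walk up while the current cpuset has no online CPU left. */
	while (cs && !(cs->effective_cpus & online))
		cs = cs->parent;
	if (!cs)                      /* fall back to all online CPUs */
		return online;
	return cs->effective_cpus & online;
}

int main(void)
{
	struct toy_cpuset root  = { "root",  0xffULL, NULL };
	struct toy_cpuset child = { "child", 0x30ULL, &root };  /* CPUs 4-5 */
	uint64_t online = 0x0fULL;                               /* CPUs 0-3 online */

	/* child's CPUs are all offline, so the root's mask is used instead */
	printf("mask=%#llx\n",
	       (unsigned long long)guarantee_online_cpus(&child, online));
	return 0;
}
```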
422 static void cpuset_update_task_spread_flag(struct cpuset *cs, in cpuset_update_task_spread_flag() argument
425 if (is_spread_page(cs)) in cpuset_update_task_spread_flag()
430 if (is_spread_slab(cs)) in cpuset_update_task_spread_flag()
454 * @cs: the cpuset whose cpumasks are to be allocated.
460 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in alloc_cpumasks() argument
464 if (cs) { in alloc_cpumasks()
465 pmask1 = &cs->cpus_allowed; in alloc_cpumasks()
466 pmask2 = &cs->effective_cpus; in alloc_cpumasks()
467 pmask3 = &cs->subparts_cpus; in alloc_cpumasks()
494 * @cs: the cpuset whose cpumasks are to be freed.
497 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) in free_cpumasks() argument
499 if (cs) { in free_cpumasks()
500 free_cpumask_var(cs->cpus_allowed); in free_cpumasks()
501 free_cpumask_var(cs->effective_cpus); in free_cpumasks()
502 free_cpumask_var(cs->subparts_cpus); in free_cpumasks()
513 * @cs: the cpuset that the trial cpuset duplicates
515 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) in alloc_trial_cpuset() argument
519 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); in alloc_trial_cpuset()
528 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); in alloc_trial_cpuset()
529 cpumask_copy(trial->effective_cpus, cs->effective_cpus); in alloc_trial_cpuset()
535 * @cs: the cpuset to be freed
537 static inline void free_cpuset(struct cpuset *cs) in free_cpuset() argument
539 free_cpumasks(cs, NULL); in free_cpuset()
540 kfree(cs); in free_cpuset()
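alloc_trial_cpuset() and free_cpuset() support the copy-validate-commit pattern used by the update_* paths: duplicate the cpuset, apply the proposed change to the copy, run validate_change(), and only copy the result back if it passes. A hedged userspace sketch of that lifecycle, with malloc/memcpy standing in for kmemdup() and a deliberately trivial validator:

```c
/* Userspace sketch of the trial-cpuset lifecycle visible in the matches
 * above (alloc_trial_cpuset / validate_change / free_cpuset).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_cpuset {
	uint64_t cpus_allowed;
	uint64_t effective_cpus;
};

static struct toy_cpuset *alloc_trial_cpuset(const struct toy_cpuset *cs)
{
	struct toy_cpuset *trial = malloc(sizeof(*trial));

	if (trial)
		memcpy(trial, cs, sizeof(*cs));   /* duplicate, like kmemdup() */
	return trial;
}

/* Toy stand-in for validate_change(): reject an empty CPU mask. */
static int validate_change(const struct toy_cpuset *cur,
			   const struct toy_cpuset *trial)
{
	(void)cur;
	return trial->cpus_allowed ? 0 : -1;
}

int main(void)
{
	struct toy_cpuset cs = { .cpus_allowed = 0x0f, .effective_cpus = 0x0f };
	struct toy_cpuset *trial = alloc_trial_cpuset(&cs);

	if (!trial)
		return 1;

	trial->cpus_allowed = 0x03;                /* proposed change */
	if (validate_change(&cs, trial) == 0)
		cs = *trial;                       /* commit only if valid */

	free(trial);                               /* free_cpuset() analogue */
	printf("cpus_allowed=%#llx\n", (unsigned long long)cs.cpus_allowed);
	return 0;
}
```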
910 static void update_tasks_root_domain(struct cpuset *cs) in update_tasks_root_domain() argument
915 css_task_iter_start(&cs->css, 0, &it); in update_tasks_root_domain()
925 struct cpuset *cs = NULL; in rebuild_root_domains() local
942 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in rebuild_root_domains()
944 if (cpumask_empty(cs->effective_cpus)) { in rebuild_root_domains()
949 css_get(&cs->css); in rebuild_root_domains()
953 update_tasks_root_domain(cs); in rebuild_root_domains()
956 css_put(&cs->css); in rebuild_root_domains()
1027 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1029 * Iterate through each task of @cs updating its cpus_allowed to the
1033 static void update_tasks_cpumask(struct cpuset *cs) in update_tasks_cpumask() argument
1038 css_task_iter_start(&cs->css, 0, &it); in update_tasks_cpumask()
1040 set_cpus_allowed_ptr(task, cs->effective_cpus); in update_tasks_cpumask()
1047 * @cs: the cpuset whose new effective_cpus mask needs to be recomputed
1056 struct cpuset *cs, struct cpuset *parent) in compute_effective_cpumask() argument
1061 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed); in compute_effective_cpumask()
1064 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); in compute_effective_cpumask()
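compute_effective_cpumask() derives a child's effective CPUs from the intersection of its own cpus_allowed and the parent's effective_cpus; when the parent has handed CPUs over to sub-partitions, those are folded back in before the intersection. The standalone model below reflects my reading of the matched lines; the subparts branch is simplified and all names are stand-ins.

```c
/* Model of compute_effective_cpumask() as reflected in the matches above.
 * uint64_t replaces cpumask_t and cpu_active_mask is a plain parameter.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_cpuset {
	uint64_t cpus_allowed;
	uint64_t effective_cpus;
	uint64_t subparts_cpus;
	int nr_subparts_cpus;
};

static uint64_t compute_effective_cpumask(const struct toy_cpuset *cs,
					  const struct toy_cpuset *parent,
					  uint64_t cpu_active_mask)
{
	uint64_t new_cpus;

	if (parent->nr_subparts_cpus) {
		/* Parent donated CPUs to child partitions: start from the
		 * union so this child can still claim them. */
		new_cpus = parent->effective_cpus | parent->subparts_cpus;
		new_cpus &= cs->cpus_allowed;
		new_cpus &= cpu_active_mask;
	} else {
		new_cpus = cs->cpus_allowed & parent->effective_cpus;
	}
	return new_cpus;
}

int main(void)
{
	struct toy_cpuset parent = {
		.cpus_allowed = 0xff, .effective_cpus = 0xf0,
		.subparts_cpus = 0x0f, .nr_subparts_cpus = 4,
	};
	struct toy_cpuset child = { .cpus_allowed = 0x3c };

	printf("effective=%#llx\n", (unsigned long long)
	       compute_effective_cpumask(&child, &parent, 0xff));
	return 0;
}
```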
1284 * @cs: the cpuset to consider
1294 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp) in update_cpumasks_hier() argument
1301 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_cpumasks_hier()
1334 * for cs already in update_cpumask(). We should also call in update_cpumasks_hier()
1338 if ((cp != cs) && cp->partition_root_state) { in update_cpumasks_hier()
1444 * @cs: Current cpuset
1447 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, in update_sibling_cpumasks() argument
1460 if (sibling == cs) in update_sibling_cpumasks()
1472 * @cs: the cpuset to consider
1476 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, in update_cpumask() argument
1483 if (cs == &top_cpuset) in update_cpumask()
1505 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) in update_cpumask()
1508 retval = validate_change(cs, trialcs); in update_cpumask()
1522 if (cs->partition_root_state) { in update_cpumask()
1526 if (update_parent_subparts_cpumask(cs, partcmd_update, in update_cpumask()
1532 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); in update_cpumask()
1537 if (cs->nr_subparts_cpus) { in update_cpumask()
1538 cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, in update_cpumask()
1539 cs->cpus_allowed); in update_cpumask()
1540 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); in update_cpumask()
1544 update_cpumasks_hier(cs, &tmp); in update_cpumask()
1546 if (cs->partition_root_state) { in update_cpumask()
1547 struct cpuset *parent = parent_cs(cs); in update_cpumask()
1554 update_sibling_cpumasks(parent, cs, &tmp); in update_cpumask()
1639 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1641 * Iterate through each task of @cs updating its mems_allowed to the
1645 static void update_tasks_nodemask(struct cpuset *cs) in update_tasks_nodemask() argument
1651 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ in update_tasks_nodemask()
1653 guarantee_online_mems(cs, &newmems); in update_tasks_nodemask()
1665 css_task_iter_start(&cs->css, 0, &it); in update_tasks_nodemask()
1676 migrate = is_memory_migrate(cs); in update_tasks_nodemask()
1678 mpol_rebind_mm(mm, &cs->mems_allowed); in update_tasks_nodemask()
1680 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); in update_tasks_nodemask()
1688 * cs->old_mems_allowed. in update_tasks_nodemask()
1690 cs->old_mems_allowed = newmems; in update_tasks_nodemask()
1698 * @cs: the cpuset to consider
1708 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) in update_nodemasks_hier() argument
1714 cpuset_for_each_descendant_pre(cp, pos_css, cs) { in update_nodemasks_hier()
1760 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1764 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, in update_nodemask() argument
1773 if (cs == &top_cpuset) { in update_nodemask()
1798 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { in update_nodemask()
1802 retval = validate_change(cs, trialcs); in update_nodemask()
1807 cs->mems_allowed = trialcs->mems_allowed; in update_nodemask()
1811 update_nodemasks_hier(cs, &trialcs->mems_allowed); in update_nodemask()
1827 static int update_relax_domain_level(struct cpuset *cs, s64 val) in update_relax_domain_level() argument
1834 if (val != cs->relax_domain_level) { in update_relax_domain_level()
1835 cs->relax_domain_level = val; in update_relax_domain_level()
1836 if (!cpumask_empty(cs->cpus_allowed) && in update_relax_domain_level()
1837 is_sched_load_balance(cs)) in update_relax_domain_level()
1846 * @cs: the cpuset in which each task's spread flags need to be changed
1848 * Iterate through each task of @cs updating its spread flags. As this
1852 static void update_tasks_flags(struct cpuset *cs) in update_tasks_flags() argument
1857 css_task_iter_start(&cs->css, 0, &it); in update_tasks_flags()
1859 cpuset_update_task_spread_flag(cs, task); in update_tasks_flags()
1866 * cs: the cpuset to update
1872 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, in update_flag() argument
1880 trialcs = alloc_trial_cpuset(cs); in update_flag()
1889 err = validate_change(cs, trialcs); in update_flag()
1893 balance_flag_changed = (is_sched_load_balance(cs) != in update_flag()
1896 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) in update_flag()
1897 || (is_spread_page(cs) != is_spread_page(trialcs))); in update_flag()
1900 cs->flags = trialcs->flags; in update_flag()
1907 update_tasks_flags(cs); in update_flag()
1915 * cs: the cpuset to update
1920 static int update_prstate(struct cpuset *cs, int val) in update_prstate() argument
1923 struct cpuset *parent = parent_cs(cs); in update_prstate()
1928 if (val == cs->partition_root_state) in update_prstate()
1935 if (val && cs->partition_root_state) in update_prstate()
1942 if (!cs->partition_root_state) { in update_prstate()
1948 if (cpumask_empty(cs->cpus_allowed)) in update_prstate()
1951 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1); in update_prstate()
1955 err = update_parent_subparts_cpumask(cs, partcmd_enable, in update_prstate()
1958 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
1961 cs->partition_root_state = PRS_ENABLED; in update_prstate()
1967 if (cs->partition_root_state == PRS_ERROR) { in update_prstate()
1968 cs->partition_root_state = 0; in update_prstate()
1969 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
1974 err = update_parent_subparts_cpumask(cs, partcmd_disable, in update_prstate()
1979 cs->partition_root_state = 0; in update_prstate()
1982 update_flag(CS_CPU_EXCLUSIVE, cs, 0); in update_prstate()
1993 update_sibling_cpumasks(parent, cs, &tmp); in update_prstate()
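update_prstate() is the partition-root state machine: enabling requires a non-empty cpus_allowed and implicitly sets CS_CPU_EXCLUSIVE before asking the parent to donate the CPUs; disabling reverses both steps. A reduced sketch of those transitions follows; the PRS_ERROR recovery path and the tmpmasks bookkeeping are omitted, and the parent-side update (update_parent_subparts_cpumask) is stubbed out.

```c
/* Standalone sketch of the update_prstate() transitions matched above. */
#include <stdint.h>
#include <stdio.h>

#define PRS_DISABLED	0
#define PRS_ENABLED	1

struct toy_cpuset {
	uint64_t cpus_allowed;
	int cpu_exclusive;
	int partition_root_state;
};

/* Stub: assume the parent can always donate or reclaim the CPUs. */
static int parent_subparts_update(struct toy_cpuset *cs, int enable)
{
	(void)cs; (void)enable;
	return 0;
}

static int update_prstate(struct toy_cpuset *cs, int val)
{
	if (val == cs->partition_root_state)
		return 0;

	if (val) {					/* enable */
		if (!cs->cpus_allowed)
			return -1;			/* needs some CPUs */
		cs->cpu_exclusive = 1;			/* implicit CPU_EXCLUSIVE */
		if (parent_subparts_update(cs, 1)) {
			cs->cpu_exclusive = 0;
			return -1;
		}
		cs->partition_root_state = PRS_ENABLED;
	} else {					/* disable */
		parent_subparts_update(cs, 0);
		cs->partition_root_state = PRS_DISABLED;
		cs->cpu_exclusive = 0;
	}
	return 0;
}

int main(void)
{
	struct toy_cpuset cs = { .cpus_allowed = 0x0f };

	printf("enable=%d state=%d excl=%d\n", update_prstate(&cs, 1),
	       cs.partition_root_state, cs.cpu_exclusive);
	printf("disable=%d state=%d excl=%d\n", update_prstate(&cs, 0),
	       cs.partition_root_state, cs.cpu_exclusive);
	return 0;
}
```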
2108 struct cpuset *cs; in cpuset_can_attach() local
2114 cs = css_cs(css); in cpuset_can_attach()
2121 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) in cpuset_can_attach()
2125 ret = task_can_attach(task, cs->cpus_allowed); in cpuset_can_attach()
2137 cs->attach_in_progress++; in cpuset_can_attach()
2169 struct cpuset *cs; in cpuset_attach() local
2173 cs = css_cs(css); in cpuset_attach()
2178 if (cs == &top_cpuset) in cpuset_attach()
2181 guarantee_online_cpus(cs, cpus_attach); in cpuset_attach()
2183 guarantee_online_mems(cs, &cpuset_attach_nodemask_to); in cpuset_attach()
2193 cpuset_update_task_spread_flag(cs, task); in cpuset_attach()
2200 cpuset_attach_nodemask_to = cs->effective_mems; in cpuset_attach()
2215 if (is_memory_migrate(cs)) in cpuset_attach()
2223 cs->old_mems_allowed = cpuset_attach_nodemask_to; in cpuset_attach()
2225 cs->attach_in_progress--; in cpuset_attach()
2226 if (!cs->attach_in_progress) in cpuset_attach()
2256 struct cpuset *cs = css_cs(css); in cpuset_write_u64() local
2262 if (!is_cpuset_online(cs)) { in cpuset_write_u64()
2269 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); in cpuset_write_u64()
2272 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); in cpuset_write_u64()
2275 retval = update_flag(CS_MEM_HARDWALL, cs, val); in cpuset_write_u64()
2278 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); in cpuset_write_u64()
2281 retval = update_flag(CS_MEMORY_MIGRATE, cs, val); in cpuset_write_u64()
2287 retval = update_flag(CS_SPREAD_PAGE, cs, val); in cpuset_write_u64()
2290 retval = update_flag(CS_SPREAD_SLAB, cs, val); in cpuset_write_u64()
2305 struct cpuset *cs = css_cs(css); in cpuset_write_s64() local
2311 if (!is_cpuset_online(cs)) in cpuset_write_s64()
2316 retval = update_relax_domain_level(cs, val); in cpuset_write_s64()
2334 struct cpuset *cs = css_cs(of_css(of)); in cpuset_write_resmask() local
2341 * CPU or memory hotunplug may leave @cs w/o any execution in cpuset_write_resmask()
2346 * As writes to "cpus" or "mems" may restore @cs's execution in cpuset_write_resmask()
2355 * protection is okay as we check whether @cs is online after in cpuset_write_resmask()
2359 css_get(&cs->css); in cpuset_write_resmask()
2365 if (!is_cpuset_online(cs)) in cpuset_write_resmask()
2368 trialcs = alloc_trial_cpuset(cs); in cpuset_write_resmask()
2376 retval = update_cpumask(cs, trialcs, buf); in cpuset_write_resmask()
2379 retval = update_nodemask(cs, trialcs, buf); in cpuset_write_resmask()
2391 css_put(&cs->css); in cpuset_write_resmask()
2406 struct cpuset *cs = css_cs(seq_css(sf)); in cpuset_common_seq_show() local
2414 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); in cpuset_common_seq_show()
2417 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); in cpuset_common_seq_show()
2420 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); in cpuset_common_seq_show()
2423 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); in cpuset_common_seq_show()
2426 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus)); in cpuset_common_seq_show()
2438 struct cpuset *cs = css_cs(css); in cpuset_read_u64() local
2442 return is_cpu_exclusive(cs); in cpuset_read_u64()
2444 return is_mem_exclusive(cs); in cpuset_read_u64()
2446 return is_mem_hardwall(cs); in cpuset_read_u64()
2448 return is_sched_load_balance(cs); in cpuset_read_u64()
2450 return is_memory_migrate(cs); in cpuset_read_u64()
2454 return fmeter_getrate(&cs->fmeter); in cpuset_read_u64()
2456 return is_spread_page(cs); in cpuset_read_u64()
2458 return is_spread_slab(cs); in cpuset_read_u64()
2469 struct cpuset *cs = css_cs(css); in cpuset_read_s64() local
2473 return cs->relax_domain_level; in cpuset_read_s64()
2484 struct cpuset *cs = css_cs(seq_css(seq)); in sched_partition_show() local
2486 switch (cs->partition_root_state) { in sched_partition_show()
2503 struct cpuset *cs = css_cs(of_css(of)); in sched_partition_write() local
2519 css_get(&cs->css); in sched_partition_write()
2522 if (!is_cpuset_online(cs)) in sched_partition_write()
2525 retval = update_prstate(cs, val); in sched_partition_write()
2529 css_put(&cs->css); in sched_partition_write()
2701 struct cpuset *cs; in cpuset_css_alloc() local
2706 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in cpuset_css_alloc()
2707 if (!cs) in cpuset_css_alloc()
2710 if (alloc_cpumasks(cs, NULL)) { in cpuset_css_alloc()
2711 kfree(cs); in cpuset_css_alloc()
2715 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); in cpuset_css_alloc()
2716 nodes_clear(cs->mems_allowed); in cpuset_css_alloc()
2717 nodes_clear(cs->effective_mems); in cpuset_css_alloc()
2718 fmeter_init(&cs->fmeter); in cpuset_css_alloc()
2719 cs->relax_domain_level = -1; in cpuset_css_alloc()
2721 return &cs->css; in cpuset_css_alloc()
2726 struct cpuset *cs = css_cs(css); in cpuset_css_online() local
2727 struct cpuset *parent = parent_cs(cs); in cpuset_css_online()
2737 set_bit(CS_ONLINE, &cs->flags); in cpuset_css_online()
2739 set_bit(CS_SPREAD_PAGE, &cs->flags); in cpuset_css_online()
2741 set_bit(CS_SPREAD_SLAB, &cs->flags); in cpuset_css_online()
2747 cpumask_copy(cs->effective_cpus, parent->effective_cpus); in cpuset_css_online()
2748 cs->effective_mems = parent->effective_mems; in cpuset_css_online()
2749 cs->use_parent_ecpus = true; in cpuset_css_online()
2780 cs->mems_allowed = parent->mems_allowed; in cpuset_css_online()
2781 cs->effective_mems = parent->mems_allowed; in cpuset_css_online()
2782 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); in cpuset_css_online()
2783 cpumask_copy(cs->effective_cpus, parent->cpus_allowed); in cpuset_css_online()
2804 struct cpuset *cs = css_cs(css); in cpuset_css_offline() local
2809 if (is_partition_root(cs)) in cpuset_css_offline()
2810 update_prstate(cs, 0); in cpuset_css_offline()
2813 is_sched_load_balance(cs)) in cpuset_css_offline()
2814 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); in cpuset_css_offline()
2816 if (cs->use_parent_ecpus) { in cpuset_css_offline()
2817 struct cpuset *parent = parent_cs(cs); in cpuset_css_offline()
2819 cs->use_parent_ecpus = false; in cpuset_css_offline()
2824 clear_bit(CS_ONLINE, &cs->flags); in cpuset_css_offline()
2832 struct cpuset *cs = css_cs(css); in cpuset_css_free() local
2834 free_cpuset(cs); in cpuset_css_free()
2921 static void remove_tasks_in_empty_cpuset(struct cpuset *cs) in remove_tasks_in_empty_cpuset() argument
2929 parent = parent_cs(cs); in remove_tasks_in_empty_cpuset()
2934 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { in remove_tasks_in_empty_cpuset()
2936 pr_cont_cgroup_name(cs->css.cgroup); in remove_tasks_in_empty_cpuset()
2942 hotplug_update_tasks_legacy(struct cpuset *cs, in hotplug_update_tasks_legacy() argument
2949 cpumask_copy(cs->cpus_allowed, new_cpus); in hotplug_update_tasks_legacy()
2950 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks_legacy()
2951 cs->mems_allowed = *new_mems; in hotplug_update_tasks_legacy()
2952 cs->effective_mems = *new_mems; in hotplug_update_tasks_legacy()
2959 if (cpus_updated && !cpumask_empty(cs->cpus_allowed)) in hotplug_update_tasks_legacy()
2960 update_tasks_cpumask(cs); in hotplug_update_tasks_legacy()
2961 if (mems_updated && !nodes_empty(cs->mems_allowed)) in hotplug_update_tasks_legacy()
2962 update_tasks_nodemask(cs); in hotplug_update_tasks_legacy()
2964 is_empty = cpumask_empty(cs->cpus_allowed) || in hotplug_update_tasks_legacy()
2965 nodes_empty(cs->mems_allowed); in hotplug_update_tasks_legacy()
2975 remove_tasks_in_empty_cpuset(cs); in hotplug_update_tasks_legacy()
2981 hotplug_update_tasks(struct cpuset *cs, in hotplug_update_tasks() argument
2986 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); in hotplug_update_tasks()
2988 *new_mems = parent_cs(cs)->effective_mems; in hotplug_update_tasks()
2991 cpumask_copy(cs->effective_cpus, new_cpus); in hotplug_update_tasks()
2992 cs->effective_mems = *new_mems; in hotplug_update_tasks()
2996 update_tasks_cpumask(cs); in hotplug_update_tasks()
2998 update_tasks_nodemask(cs); in hotplug_update_tasks()
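hotplug_update_tasks() is the cgroup v2 half of the hotplug handling: if hot-unplug left this cpuset with no usable CPUs or memory nodes, it falls back to the parent's effective masks rather than leaving tasks with empty masks, then updates the affected tasks. A small model of that fallback, with uint64_t masks as stand-ins:

```c
/* Sketch of the v2 hotplug fallback seen in hotplug_update_tasks(). */
#include <stdint.h>
#include <stdio.h>

struct toy_cpuset {
	uint64_t effective_cpus;
	uint64_t effective_mems;
	struct toy_cpuset *parent;
};

static void hotplug_update_tasks(struct toy_cpuset *cs,
				 uint64_t new_cpus, uint64_t new_mems)
{
	/* Empty after hotplug? Inherit from the parent (v2 behaviour). */
	if (!new_cpus)
		new_cpus = cs->parent->effective_cpus;
	if (!new_mems)
		new_mems = cs->parent->effective_mems;

	cs->effective_cpus = new_cpus;
	cs->effective_mems = new_mems;
	/* The kernel would now run update_tasks_cpumask()/nodemask(). */
}

int main(void)
{
	struct toy_cpuset root = { .effective_cpus = 0xff, .effective_mems = 0x3 };
	struct toy_cpuset cs   = { .parent = &root };

	hotplug_update_tasks(&cs, 0 /* all CPUs gone */, 0x1);
	printf("cpus=%#llx mems=%#llx\n",
	       (unsigned long long)cs.effective_cpus,
	       (unsigned long long)cs.effective_mems);
	return 0;
}
```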
3010 * @cs: cpuset in interest
3013 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3014 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3017 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) in cpuset_hotplug_update_tasks() argument
3025 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); in cpuset_hotplug_update_tasks()
3033 if (cs->attach_in_progress) { in cpuset_hotplug_update_tasks()
3038 parent = parent_cs(cs); in cpuset_hotplug_update_tasks()
3039 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3040 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); in cpuset_hotplug_update_tasks()
3042 if (cs->nr_subparts_cpus) in cpuset_hotplug_update_tasks()
3047 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3049 if (!tmp || !cs->partition_root_state) in cpuset_hotplug_update_tasks()
3057 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) || in cpuset_hotplug_update_tasks()
3059 if (cs->nr_subparts_cpus) { in cpuset_hotplug_update_tasks()
3060 cs->nr_subparts_cpus = 0; in cpuset_hotplug_update_tasks()
3061 cpumask_clear(cs->subparts_cpus); in cpuset_hotplug_update_tasks()
3062 compute_effective_cpumask(&new_cpus, cs, parent); in cpuset_hotplug_update_tasks()
3073 update_parent_subparts_cpumask(cs, partcmd_disable, in cpuset_hotplug_update_tasks()
3075 cs->partition_root_state = PRS_ERROR; in cpuset_hotplug_update_tasks()
3086 ((cs->partition_root_state == PRS_ERROR) || in cpuset_hotplug_update_tasks()
3088 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp)) in cpuset_hotplug_update_tasks()
3092 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); in cpuset_hotplug_update_tasks()
3093 mems_updated = !nodes_equal(new_mems, cs->effective_mems); in cpuset_hotplug_update_tasks()
3096 hotplug_update_tasks(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3099 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, in cpuset_hotplug_update_tasks()
3186 struct cpuset *cs; in cpuset_hotplug_workfn() local
3190 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { in cpuset_hotplug_workfn()
3191 if (cs == &top_cpuset || !css_tryget_online(&cs->css)) in cpuset_hotplug_workfn()
3195 cpuset_hotplug_update_tasks(cs, ptmp); in cpuset_hotplug_workfn()
3198 css_put(&cs->css); in cpuset_hotplug_workfn()
3308 * But we used cs && cs->cpus_allowed lockless and thus can in cpuset_cpus_allowed_fallback()
3370 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) in nearest_hardwall_ancestor() argument
3372 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) in nearest_hardwall_ancestor()
3373 cs = parent_cs(cs); in nearest_hardwall_ancestor()
3374 return cs; in nearest_hardwall_ancestor()
3419 struct cpuset *cs; /* current cpuset ancestors */ in __cpuset_node_allowed() local
3443 cs = nearest_hardwall_ancestor(task_cs(current)); in __cpuset_node_allowed()
3444 allowed = node_isset(node, cs->mems_allowed); in __cpuset_node_allowed()
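The last group of matches is the hardwall check used by __cpuset_node_allowed(): walk up from the task's cpuset to the nearest mem_exclusive or mem_hardwall ancestor, then test the node against that ancestor's mems_allowed. A self-contained sketch under those assumptions, with simplified types and flag layout:

```c
/* Sketch of nearest_hardwall_ancestor() + the node membership test. */
#include <stdint.h>
#include <stdio.h>

struct toy_cpuset {
	uint64_t mems_allowed;		/* bit n set => node n allowed */
	int mem_exclusive;
	int mem_hardwall;
	struct toy_cpuset *parent;	/* NULL at the root */
};

static struct toy_cpuset *nearest_hardwall_ancestor(struct toy_cpuset *cs)
{
	while (!(cs->mem_exclusive || cs->mem_hardwall) && cs->parent)
		cs = cs->parent;
	return cs;
}

static int node_allowed(struct toy_cpuset *task_cs, int node)
{
	struct toy_cpuset *cs = nearest_hardwall_ancestor(task_cs);

	return !!(cs->mems_allowed & (1ULL << node));
}

int main(void)
{
	struct toy_cpuset root  = { .mems_allowed = 0x3, .mem_hardwall = 1 };
	struct toy_cpuset child = { .mems_allowed = 0x1, .parent = &root };

	/* child is not a hardwall, so the check is done against root */
	printf("node1 allowed=%d\n", node_allowed(&child, 1));
	return 0;
}
```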