1 /*
2 * kernel/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
9 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
12 *
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
18 * by Max Krasnyansky
19 *
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file COPYING in the main directory of the Linux
22 * distribution for more details.
23 */
24
25 #include <linux/cpu.h>
26 #include <linux/cpumask.h>
27 #include <linux/cpuset.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/mempolicy.h>
32 #include <linux/mm.h>
33 #include <linux/memory.h>
34 #include <linux/export.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched.h>
37 #include <linux/sched/deadline.h>
38 #include <linux/sched/mm.h>
39 #include <linux/sched/task.h>
40 #include <linux/security.h>
41 #include <linux/spinlock.h>
42 #include <linux/oom.h>
43 #include <linux/sched/isolation.h>
44 #include <linux/cgroup.h>
45 #include <linux/wait.h>
46
47 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
48 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
49
50 /*
51 * There could be abnormal cpuset configurations for cpu or memory
52 * node binding; this key provides a quick, low-cost check for
53 * such a situation.
54 */
55 DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
56
57 /* See "Frequency meter" comments, below. */
58
59 struct fmeter {
60 int cnt; /* unprocessed events count */
61 int val; /* most recent output value */
62 time64_t time; /* clock (secs) when val computed */
63 spinlock_t lock; /* guards read or write of above */
64 };
65
66 /*
67 * Invalid partition error code
68 */
69 enum prs_errcode {
70 PERR_NONE = 0,
71 PERR_INVCPUS,
72 PERR_INVPARENT,
73 PERR_NOTPART,
74 PERR_NOTEXCL,
75 PERR_NOCPUS,
76 PERR_HOTPLUG,
77 PERR_CPUSEMPTY,
78 };
79
80 static const char * const perr_strings[] = {
81 [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus",
82 [PERR_INVPARENT] = "Parent is an invalid partition root",
83 [PERR_NOTPART] = "Parent is not a partition root",
84 [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive",
85 [PERR_NOCPUS] = "Parent unable to distribute cpu downstream",
86 [PERR_HOTPLUG] = "No cpu available due to hotplug",
87 [PERR_CPUSEMPTY] = "cpuset.cpus is empty",
88 };
89
90 struct cpuset {
91 struct cgroup_subsys_state css;
92
93 unsigned long flags; /* "unsigned long" so bitops work */
94
95 /*
96 * On default hierarchy:
97 *
98 * The user-configured masks can only be changed by writing to
99 * cpuset.cpus and cpuset.mems, and won't be limited by the
100 * parent masks.
101 *
102 * The effective masks are the real masks that apply to the tasks
103 * in the cpuset. They may be changed if the configured masks are
104 * changed or hotplug happens.
105 *
106 * effective_mask == configured_mask & parent's effective_mask,
107 * and if it ends up empty, it will inherit the parent's mask.
108 *
109 *
110 * On legacy hierarchy:
111 *
112 * The user-configured masks are always the same as the effective masks.
113 */
114
115 /* user-configured CPUs and Memory Nodes allowed to tasks */
116 cpumask_var_t cpus_allowed;
117 nodemask_t mems_allowed;
118
119 /* effective CPUs and Memory Nodes allowed to tasks */
120 cpumask_var_t effective_cpus;
121 nodemask_t effective_mems;
122
123 /*
124 * CPUs allocated to child sub-partitions (default hierarchy only)
125 * - CPUs granted by the parent = effective_cpus U subparts_cpus
126 * - effective_cpus and subparts_cpus are mutually exclusive.
127 *
128 * effective_cpus contains only onlined CPUs, but subparts_cpus
129 * may have offlined ones.
130 */
131 cpumask_var_t subparts_cpus;
132
133 /*
134 * These are the old Memory Nodes that tasks in this cpuset took on.
135 *
136 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
137 * - A new cpuset's old_mems_allowed is initialized when some
138 * task is moved into it.
139 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
140 * cpuset.mems_allowed and have tasks' nodemask updated, and
141 * then old_mems_allowed is updated to mems_allowed.
142 */
143 nodemask_t old_mems_allowed;
144
145 struct fmeter fmeter; /* memory_pressure filter */
146
147 /*
148 * Tasks are being attached to this cpuset. Used to prevent
149 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
150 */
151 int attach_in_progress;
152
153 /* partition number for rebuild_sched_domains() */
154 int pn;
155
156 /* for custom sched domain */
157 int relax_domain_level;
158
159 /* number of CPUs in subparts_cpus */
160 int nr_subparts_cpus;
161
162 /* partition root state */
163 int partition_root_state;
164
165 /*
166 * Default hierarchy only:
167 * use_parent_ecpus - set if using parent's effective_cpus
168 * child_ecpus_count - # of children with use_parent_ecpus set
169 */
170 int use_parent_ecpus;
171 int child_ecpus_count;
172
173 /*
174 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
175 * know when to rebuild associated root domain bandwidth information.
176 */
177 int nr_deadline_tasks;
178 int nr_migrate_dl_tasks;
179 u64 sum_migrate_dl_bw;
180
181 /* Invalid partition error code, not lock protected */
182 enum prs_errcode prs_err;
183
184 /* Handle for cpuset.cpus.partition */
185 struct cgroup_file partition_file;
186 };
187
188 /*
189 * Partition root states:
190 *
191 * 0 - member (not a partition root)
192 * 1 - partition root
193 * 2 - partition root without load balancing (isolated)
194 * -1 - invalid partition root
195 * -2 - invalid isolated partition root
196 */
197 #define PRS_MEMBER 0
198 #define PRS_ROOT 1
199 #define PRS_ISOLATED 2
200 #define PRS_INVALID_ROOT -1
201 #define PRS_INVALID_ISOLATED -2
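/*
 * For illustration, these states roughly correspond to what user space
 * sees in the cgroup v2 "cpuset.cpus.partition" file, e.g.:
 *
 *	# echo member   > cpuset.cpus.partition		-> PRS_MEMBER
 *	# echo root     > cpuset.cpus.partition		-> PRS_ROOT
 *	# echo isolated > cpuset.cpus.partition		-> PRS_ISOLATED
 *
 * Reading the file back on an invalid partition reports something like
 * "root invalid (<reason>)", with <reason> taken from perr_strings[].
 */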
202
203 static inline bool is_prs_invalid(int prs_state)
204 {
205 return prs_state < 0;
206 }
207
208 /*
209 * Temporary cpumasks for working with partitions that are passed among
210 * functions to avoid memory allocation in inner functions.
211 */
212 struct tmpmasks {
213 cpumask_var_t addmask, delmask; /* For partition root */
214 cpumask_var_t new_cpus; /* For update_cpumasks_hier() */
215 };
216
217 static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
218 {
219 return css ? container_of(css, struct cpuset, css) : NULL;
220 }
221
222 /* Retrieve the cpuset for a task */
223 static inline struct cpuset *task_cs(struct task_struct *task)
224 {
225 return css_cs(task_css(task, cpuset_cgrp_id));
226 }
227
228 static inline struct cpuset *parent_cs(struct cpuset *cs)
229 {
230 return css_cs(cs->css.parent);
231 }
232
233 void inc_dl_tasks_cs(struct task_struct *p)
234 {
235 struct cpuset *cs = task_cs(p);
236
237 cs->nr_deadline_tasks++;
238 }
239
240 void dec_dl_tasks_cs(struct task_struct *p)
241 {
242 struct cpuset *cs = task_cs(p);
243
244 cs->nr_deadline_tasks--;
245 }
246
247 /* bits in struct cpuset flags field */
248 typedef enum {
249 CS_ONLINE,
250 CS_CPU_EXCLUSIVE,
251 CS_MEM_EXCLUSIVE,
252 CS_MEM_HARDWALL,
253 CS_MEMORY_MIGRATE,
254 CS_SCHED_LOAD_BALANCE,
255 CS_SPREAD_PAGE,
256 CS_SPREAD_SLAB,
257 } cpuset_flagbits_t;
258
259 /* convenient tests for these bits */
260 static inline bool is_cpuset_online(struct cpuset *cs)
261 {
262 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
263 }
264
265 static inline int is_cpu_exclusive(const struct cpuset *cs)
266 {
267 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
268 }
269
270 static inline int is_mem_exclusive(const struct cpuset *cs)
271 {
272 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
273 }
274
275 static inline int is_mem_hardwall(const struct cpuset *cs)
276 {
277 return test_bit(CS_MEM_HARDWALL, &cs->flags);
278 }
279
280 static inline int is_sched_load_balance(const struct cpuset *cs)
281 {
282 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
283 }
284
285 static inline int is_memory_migrate(const struct cpuset *cs)
286 {
287 return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
288 }
289
290 static inline int is_spread_page(const struct cpuset *cs)
291 {
292 return test_bit(CS_SPREAD_PAGE, &cs->flags);
293 }
294
295 static inline int is_spread_slab(const struct cpuset *cs)
296 {
297 return test_bit(CS_SPREAD_SLAB, &cs->flags);
298 }
299
300 static inline int is_partition_valid(const struct cpuset *cs)
301 {
302 return cs->partition_root_state > 0;
303 }
304
305 static inline int is_partition_invalid(const struct cpuset *cs)
306 {
307 return cs->partition_root_state < 0;
308 }
309
310 /*
311 * Callers should hold callback_lock to modify partition_root_state.
312 */
313 static inline void make_partition_invalid(struct cpuset *cs)
314 {
315 if (is_partition_valid(cs))
316 cs->partition_root_state = -cs->partition_root_state;
317 }
318
319 /*
320 * Send a notification event whenever partition_root_state changes.
321 */
322 static inline void notify_partition_change(struct cpuset *cs, int old_prs)
323 {
324 if (old_prs == cs->partition_root_state)
325 return;
326 cgroup_file_notify(&cs->partition_file);
327
328 /* Reset prs_err if not invalid */
329 if (is_partition_valid(cs))
330 WRITE_ONCE(cs->prs_err, PERR_NONE);
331 }
332
333 static struct cpuset top_cpuset = {
334 .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
335 (1 << CS_MEM_EXCLUSIVE)),
336 .partition_root_state = PRS_ROOT,
337 };
338
339 /**
340 * cpuset_for_each_child - traverse online children of a cpuset
341 * @child_cs: loop cursor pointing to the current child
342 * @pos_css: used for iteration
343 * @parent_cs: target cpuset to walk children of
344 *
345 * Walk @child_cs through the online children of @parent_cs. Must be used
346 * with RCU read locked.
347 */
348 #define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
349 css_for_each_child((pos_css), &(parent_cs)->css) \
350 if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
351
352 /**
353 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
354 * @des_cs: loop cursor pointing to the current descendant
355 * @pos_css: used for iteration
356 * @root_cs: target cpuset to walk descendants of
357 *
358 * Walk @des_cs through the online descendants of @root_cs. Must be used
359 * with RCU read locked. The caller may modify @pos_css by calling
360 * css_rightmost_descendant() to skip subtree. @root_cs is included in the
361 * iteration and the first node to be visited.
362 */
363 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
364 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
365 if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
366
367 /*
368 * There are two global locks guarding cpuset structures - cpuset_mutex and
369 * callback_lock. We also require taking task_lock() when dereferencing a
370 * task's cpuset pointer. See "The task_lock() exception", at the end of this
371 * comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems
372 * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
373 * structures. Note that cpuset_mutex needs to be a mutex as it is used in
374 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
375 * correctness.
376 *
377 * A task must hold both locks to modify cpusets. If a task holds
378 * cpuset_mutex, it blocks others, ensuring that it is the only task able to
379 * also acquire callback_lock and be able to modify cpusets. It can perform
380 * various checks on the cpuset structure first, knowing nothing will change.
381 * It can also allocate memory while just holding cpuset_mutex. While it is
382 * performing these checks, various callback routines can briefly acquire
383 * callback_lock to query cpusets. Once it is ready to make the changes, it
384 * takes callback_lock, blocking everyone else.
385 *
386 * Calls to the kernel memory allocator can not be made while holding
387 * callback_lock, as that would risk double tripping on callback_lock
388 * from one of the callbacks into the cpuset code from within
389 * __alloc_pages().
390 *
391 * If a task is only holding callback_lock, then it has read-only
392 * access to cpusets.
393 *
394 * Now, the task_struct fields mems_allowed and mempolicy may be changed
395 * by another task, so we use alloc_lock in the task_struct to protect
396 * them.
397 *
398 * The cpuset_common_file_read() handlers only hold callback_lock across
399 * small pieces of code, such as when reading out possibly multi-word
400 * cpumasks and nodemasks.
401 *
402 * Accessing a task's cpuset should be done in accordance with the
403 * guidelines for accessing subsystem state in kernel/cgroup.c
404 */
405
406 static DEFINE_MUTEX(cpuset_mutex);
407
408 void cpuset_lock(void)
409 {
410 mutex_lock(&cpuset_mutex);
411 }
412
413 void cpuset_unlock(void)
414 {
415 mutex_unlock(&cpuset_mutex);
416 }
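/*
 * A minimal sketch (hypothetical caller, not taken from this file) of how
 * another subsystem can pin cpuset state while making decisions that must
 * not race with cpuset changes, per the locking comment above:
 *
 *	cpuset_lock();
 *	... query or rely on stable cpuset state ...
 *	cpuset_unlock();
 *
 * Within cpuset.c itself, writers take cpuset_mutex first and then
 * callback_lock around the actual updates.
 */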
417
418 static DEFINE_SPINLOCK(callback_lock);
419
420 static struct workqueue_struct *cpuset_migrate_mm_wq;
421
422 /*
423 * CPU / memory hotplug is handled asynchronously.
424 */
425 static void cpuset_hotplug_workfn(struct work_struct *work);
426 static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
427
428 static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
429
430 static inline void check_insane_mems_config(nodemask_t *nodes)
431 {
432 if (!cpusets_insane_config() &&
433 movable_only_nodes(nodes)) {
434 static_branch_enable(&cpusets_insane_config_key);
435 pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
436 "Cpuset allocations might fail even with a lot of memory available.\n",
437 nodemask_pr_args(nodes));
438 }
439 }
440
441 /*
442 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
443 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
444 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
445 * With v2 behavior, "cpus" and "mems" are always what the users have
446 * requested and won't be changed by hotplug events. Only the effective
447 * cpus or mems will be affected.
448 */
449 static inline bool is_in_v2_mode(void)
450 {
451 return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
452 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
453 }
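/*
 * Example (illustrative only): mounting the v1 cpuset hierarchy with the
 * cpuset_v2_mode option, e.g.
 *
 *	# mount -t cgroup -o cpuset,cpuset_v2_mode none /sys/fs/cgroup/cpuset
 *
 * makes is_in_v2_mode() return true even off the default hierarchy, so
 * "cpus"/"mems" keep the user-requested values across hotplug and only
 * the effective masks change.
 */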
454
455 /**
456 * partition_is_populated - check if partition has tasks
457 * @cs: partition root to be checked
458 * @excluded_child: a child cpuset to be excluded in task checking
459 * Return: true if there are tasks, false otherwise
460 *
461 * It is assumed that @cs is a valid partition root. @excluded_child should
462 * be non-NULL when this cpuset is going to become a partition itself.
463 */
464 static inline bool partition_is_populated(struct cpuset *cs,
465 struct cpuset *excluded_child)
466 {
467 struct cgroup_subsys_state *css;
468 struct cpuset *child;
469
470 if (cs->css.cgroup->nr_populated_csets)
471 return true;
472 if (!excluded_child && !cs->nr_subparts_cpus)
473 return cgroup_is_populated(cs->css.cgroup);
474
475 rcu_read_lock();
476 cpuset_for_each_child(child, css, cs) {
477 if (child == excluded_child)
478 continue;
479 if (is_partition_valid(child))
480 continue;
481 if (cgroup_is_populated(child->css.cgroup)) {
482 rcu_read_unlock();
483 return true;
484 }
485 }
486 rcu_read_unlock();
487 return false;
488 }
489
490 /*
491 * Return in pmask the portion of a task's cpuset's cpus_allowed that
492 * are online and are capable of running the task. If none are found,
493 * walk up the cpuset hierarchy until we find one that does have some
494 * appropriate cpus.
495 *
496 * One way or another, we guarantee to return some non-empty subset
497 * of cpu_online_mask.
498 *
499 * Call with callback_lock or cpuset_mutex held.
500 */
501 static void guarantee_online_cpus(struct task_struct *tsk,
502 struct cpumask *pmask)
503 {
504 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
505 struct cpuset *cs;
506
507 if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
508 cpumask_copy(pmask, cpu_online_mask);
509
510 rcu_read_lock();
511 cs = task_cs(tsk);
512
513 while (!cpumask_intersects(cs->effective_cpus, pmask)) {
514 cs = parent_cs(cs);
515 if (unlikely(!cs)) {
516 /*
517 * The top cpuset doesn't have any online cpu as a
518 * consequence of a race between cpuset_hotplug_work
519 * and cpu hotplug notifier. But we know the top
520 * cpuset's effective_cpus is on its way to be
521 * identical to cpu_online_mask.
522 */
523 goto out_unlock;
524 }
525 }
526 cpumask_and(pmask, pmask, cs->effective_cpus);
527
528 out_unlock:
529 rcu_read_unlock();
530 }
531
532 /*
533 * Return in *pmask the portion of a cpuset's mems_allowed that
534 * are online, with memory. If none are online with memory, walk
535 * up the cpuset hierarchy until we find one that does have some
536 * online mems. The top cpuset always has some mems online.
537 *
538 * One way or another, we guarantee to return some non-empty subset
539 * of node_states[N_MEMORY].
540 *
541 * Call with callback_lock or cpuset_mutex held.
542 */
543 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
544 {
545 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
546 cs = parent_cs(cs);
547 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
548 }
549
550 /*
551 * update task's spread flag if cpuset's page/slab spread flag is set
552 *
553 * Call with callback_lock or cpuset_mutex held. The check can be skipped
554 * if on default hierarchy.
555 */
556 static void cpuset_update_task_spread_flags(struct cpuset *cs,
557 struct task_struct *tsk)
558 {
559 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
560 return;
561
562 if (is_spread_page(cs))
563 task_set_spread_page(tsk);
564 else
565 task_clear_spread_page(tsk);
566
567 if (is_spread_slab(cs))
568 task_set_spread_slab(tsk);
569 else
570 task_clear_spread_slab(tsk);
571 }
572
573 /*
574 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
575 *
576 * One cpuset is a subset of another if all its allowed CPUs and
577 * Memory Nodes are a subset of the other, and its exclusive flags
578 * are only set if the other's are set. Call holding cpuset_mutex.
579 */
580
581 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
582 {
583 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
584 nodes_subset(p->mems_allowed, q->mems_allowed) &&
585 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
586 is_mem_exclusive(p) <= is_mem_exclusive(q);
587 }
588
589 /**
590 * alloc_cpumasks - allocate three cpumasks for cpuset
591 * @cs: the cpuset that has cpumasks to be allocated.
592 * @tmp: the tmpmasks structure pointer
593 * Return: 0 if successful, -ENOMEM otherwise.
594 *
595 * Only one of the two input arguments should be non-NULL.
596 */
597 static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
598 {
599 cpumask_var_t *pmask1, *pmask2, *pmask3;
600
601 if (cs) {
602 pmask1 = &cs->cpus_allowed;
603 pmask2 = &cs->effective_cpus;
604 pmask3 = &cs->subparts_cpus;
605 } else {
606 pmask1 = &tmp->new_cpus;
607 pmask2 = &tmp->addmask;
608 pmask3 = &tmp->delmask;
609 }
610
611 if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
612 return -ENOMEM;
613
614 if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
615 goto free_one;
616
617 if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
618 goto free_two;
619
620 return 0;
621
622 free_two:
623 free_cpumask_var(*pmask2);
624 free_one:
625 free_cpumask_var(*pmask1);
626 return -ENOMEM;
627 }
628
629 /**
630 * free_cpumasks - free cpumasks in a tmpmasks structure
631 * @cs: the cpuset whose cpumasks are to be freed.
632 * @tmp: the tmpmasks structure pointer
633 */
634 static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
635 {
636 if (cs) {
637 free_cpumask_var(cs->cpus_allowed);
638 free_cpumask_var(cs->effective_cpus);
639 free_cpumask_var(cs->subparts_cpus);
640 }
641 if (tmp) {
642 free_cpumask_var(tmp->new_cpus);
643 free_cpumask_var(tmp->addmask);
644 free_cpumask_var(tmp->delmask);
645 }
646 }
647
648 /**
649 * alloc_trial_cpuset - allocate a trial cpuset
650 * @cs: the cpuset that the trial cpuset duplicates
651 */
652 static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
653 {
654 struct cpuset *trial;
655
656 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
657 if (!trial)
658 return NULL;
659
660 if (alloc_cpumasks(trial, NULL)) {
661 kfree(trial);
662 return NULL;
663 }
664
665 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
666 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
667 return trial;
668 }
669
670 /**
671 * free_cpuset - free the cpuset
672 * @cs: the cpuset to be freed
673 */
674 static inline void free_cpuset(struct cpuset *cs)
675 {
676 free_cpumasks(cs, NULL);
677 kfree(cs);
678 }
679
680 /*
681 * validate_change_legacy() - Validate conditions specific to legacy (v1)
682 * behavior.
683 */
684 static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
685 {
686 struct cgroup_subsys_state *css;
687 struct cpuset *c, *par;
688 int ret;
689
690 WARN_ON_ONCE(!rcu_read_lock_held());
691
692 /* Each of our child cpusets must be a subset of us */
693 ret = -EBUSY;
694 cpuset_for_each_child(c, css, cur)
695 if (!is_cpuset_subset(c, trial))
696 goto out;
697
698 /* On legacy hierarchy, we must be a subset of our parent cpuset. */
699 ret = -EACCES;
700 par = parent_cs(cur);
701 if (par && !is_cpuset_subset(trial, par))
702 goto out;
703
704 ret = 0;
705 out:
706 return ret;
707 }
708
709 /*
710 * validate_change() - Used to validate that any proposed cpuset change
711 * follows the structural rules for cpusets.
712 *
713 * If we replaced the flag and mask values of the current cpuset
714 * (cur) with those values in the trial cpuset (trial), would
715 * our various subset and exclusive rules still be valid? Presumes
716 * cpuset_mutex held.
717 *
718 * 'cur' is the address of an actual, in-use cpuset. Operations
719 * such as list traversal that depend on the actual address of the
720 * cpuset in the list must use cur below, not trial.
721 *
722 * 'trial' is the address of bulk structure copy of cur, with
723 * perhaps one or more of the fields cpus_allowed, mems_allowed,
724 * or flags changed to new, trial values.
725 *
726 * Return 0 if valid, -errno if not.
727 */
728
729 static int validate_change(struct cpuset *cur, struct cpuset *trial)
730 {
731 struct cgroup_subsys_state *css;
732 struct cpuset *c, *par;
733 int ret = 0;
734
735 rcu_read_lock();
736
737 if (!is_in_v2_mode())
738 ret = validate_change_legacy(cur, trial);
739 if (ret)
740 goto out;
741
742 /* Remaining checks don't apply to root cpuset */
743 if (cur == &top_cpuset)
744 goto out;
745
746 par = parent_cs(cur);
747
748 /*
749 * Cpusets with tasks - existing or newly being attached - can't
750 * be changed to have empty cpus_allowed or mems_allowed.
751 */
752 ret = -ENOSPC;
753 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
754 if (!cpumask_empty(cur->cpus_allowed) &&
755 cpumask_empty(trial->cpus_allowed))
756 goto out;
757 if (!nodes_empty(cur->mems_allowed) &&
758 nodes_empty(trial->mems_allowed))
759 goto out;
760 }
761
762 /*
763 * We can't shrink if we won't have enough room for SCHED_DEADLINE
764 * tasks.
765 */
766 ret = -EBUSY;
767 if (is_cpu_exclusive(cur) &&
768 !cpuset_cpumask_can_shrink(cur->cpus_allowed,
769 trial->cpus_allowed))
770 goto out;
771
772 /*
773 * If either I or some sibling (!= me) is exclusive, we can't
774 * overlap
775 */
776 ret = -EINVAL;
777 cpuset_for_each_child(c, css, par) {
778 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
779 c != cur &&
780 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
781 goto out;
782 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
783 c != cur &&
784 nodes_intersects(trial->mems_allowed, c->mems_allowed))
785 goto out;
786 }
787
788 ret = 0;
789 out:
790 rcu_read_unlock();
791 return ret;
792 }
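/*
 * Typical caller pattern (a simplified sketch of what update_cpumask()
 * and the other update_* helpers later in this file do):
 *
 *	trialcs = alloc_trial_cpuset(cs);
 *	if (!trialcs)
 *		return -ENOMEM;
 *	... apply the proposed change to trialcs ...
 *	err = validate_change(cs, trialcs);
 *	if (!err)
 *		... commit trialcs's values to cs under callback_lock ...
 *	free_cpuset(trialcs);
 */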
793
794 #ifdef CONFIG_SMP
795 /*
796 * Helper routine for generate_sched_domains().
797 * Do cpusets a, b have overlapping effective cpus_allowed masks?
798 */
799 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
800 {
801 return cpumask_intersects(a->effective_cpus, b->effective_cpus);
802 }
803
804 static void
805 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
806 {
807 if (dattr->relax_domain_level < c->relax_domain_level)
808 dattr->relax_domain_level = c->relax_domain_level;
809 return;
810 }
811
812 static void update_domain_attr_tree(struct sched_domain_attr *dattr,
813 struct cpuset *root_cs)
814 {
815 struct cpuset *cp;
816 struct cgroup_subsys_state *pos_css;
817
818 rcu_read_lock();
819 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
820 /* skip the whole subtree if @cp doesn't have any CPU */
821 if (cpumask_empty(cp->cpus_allowed)) {
822 pos_css = css_rightmost_descendant(pos_css);
823 continue;
824 }
825
826 if (is_sched_load_balance(cp))
827 update_domain_attr(dattr, cp);
828 }
829 rcu_read_unlock();
830 }
831
832 /* Must be called with cpuset_mutex held. */
833 static inline int nr_cpusets(void)
834 {
835 /* jump label reference count + the top-level cpuset */
836 return static_key_count(&cpusets_enabled_key.key) + 1;
837 }
838
839 /*
840 * generate_sched_domains()
841 *
842 * This function builds a partial partition of the system's CPUs.
843 * A 'partial partition' is a set of non-overlapping subsets whose
844 * union is a subset of that set.
845 * The output of this function needs to be passed to kernel/sched/core.c
846 * partition_sched_domains() routine, which will rebuild the scheduler's
847 * load balancing domains (sched domains) as specified by that partial
848 * partition.
849 *
850 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
851 * for a background explanation of this.
852 *
853 * Does not return errors, on the theory that the callers of this
854 * routine would rather not worry about failures to rebuild sched
855 * domains when operating in the severe memory shortage situations
856 * that could cause allocation failures below.
857 *
858 * Must be called with cpuset_mutex held.
859 *
860 * The three key local variables below are:
861 * cp - cpuset pointer, used (together with pos_css) to perform a
862 * top-down scan of all cpusets. For our purposes, rebuilding
863 * the scheduler's sched domains, we can ignore !is_sched_load_
864 * balance cpusets.
865 * csa - (for CpuSet Array) Array of pointers to all the cpusets
866 * that need to be load balanced, for convenient iterative
867 * access by the subsequent code that finds the best partition,
868 * i.e. the set of domains (subsets) of CPUs such that the
869 * cpus_allowed of every cpuset marked is_sched_load_balance
870 * is a subset of one of these domains, while there are as
871 * many such domains as possible, each as small as possible.
872 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
873 * the kernel/sched/core.c routine partition_sched_domains() in a
874 * convenient format, that can be easily compared to the prior
875 * value to determine what partition elements (sched domains)
876 * were changed (added or removed.)
877 *
878 * Finding the best partition (set of domains):
879 * The triple nested loops below over i, j, k scan over the
880 * load balanced cpusets (using the array of cpuset pointers in
881 * csa[]) looking for pairs of cpusets that have overlapping
882 * cpus_allowed, but which don't have the same 'pn' partition
883 * number, and gives them the same partition number. It keeps
884 * looping on the 'restart' label until it can no longer find
885 * any such pairs.
886 *
887 * The union of the cpus_allowed masks from the set of
888 * all cpusets having the same 'pn' value then form the one
889 * element of the partition (one sched domain) to be passed to
890 * partition_sched_domains().
891 */
892 static int generate_sched_domains(cpumask_var_t **domains,
893 struct sched_domain_attr **attributes)
894 {
895 struct cpuset *cp; /* top-down scan of cpusets */
896 struct cpuset **csa; /* array of all cpuset ptrs */
897 int csn; /* how many cpuset ptrs in csa so far */
898 int i, j, k; /* indices for partition finding loops */
899 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
900 struct sched_domain_attr *dattr; /* attributes for custom domains */
901 int ndoms = 0; /* number of sched domains in result */
902 int nslot; /* next empty doms[] struct cpumask slot */
903 struct cgroup_subsys_state *pos_css;
904 bool root_load_balance = is_sched_load_balance(&top_cpuset);
905
906 doms = NULL;
907 dattr = NULL;
908 csa = NULL;
909
910 /* Special case for the 99% of systems with one, full, sched domain */
911 if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
912 ndoms = 1;
913 doms = alloc_sched_domains(ndoms);
914 if (!doms)
915 goto done;
916
917 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
918 if (dattr) {
919 *dattr = SD_ATTR_INIT;
920 update_domain_attr_tree(dattr, &top_cpuset);
921 }
922 cpumask_and(doms[0], top_cpuset.effective_cpus,
923 housekeeping_cpumask(HK_TYPE_DOMAIN));
924
925 goto done;
926 }
927
928 csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
929 if (!csa)
930 goto done;
931 csn = 0;
932
933 rcu_read_lock();
934 if (root_load_balance)
935 csa[csn++] = &top_cpuset;
936 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
937 if (cp == &top_cpuset)
938 continue;
939 /*
940 * Continue traversing beyond @cp iff @cp has some CPUs and
941 * isn't load balancing. The former is obvious. The
942 * latter: All child cpusets contain a subset of the
943 * parent's cpus, so just skip them, and then we call
944 * update_domain_attr_tree() to calc relax_domain_level of
945 * the corresponding sched domain.
946 *
947 * If root is load-balancing, we can skip @cp if it
948 * is a subset of the root's effective_cpus.
949 */
950 if (!cpumask_empty(cp->cpus_allowed) &&
951 !(is_sched_load_balance(cp) &&
952 cpumask_intersects(cp->cpus_allowed,
953 housekeeping_cpumask(HK_TYPE_DOMAIN))))
954 continue;
955
956 if (root_load_balance &&
957 cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
958 continue;
959
960 if (is_sched_load_balance(cp) &&
961 !cpumask_empty(cp->effective_cpus))
962 csa[csn++] = cp;
963
964 /* skip @cp's subtree if not a partition root */
965 if (!is_partition_valid(cp))
966 pos_css = css_rightmost_descendant(pos_css);
967 }
968 rcu_read_unlock();
969
970 for (i = 0; i < csn; i++)
971 csa[i]->pn = i;
972 ndoms = csn;
973
974 restart:
975 /* Find the best partition (set of sched domains) */
976 for (i = 0; i < csn; i++) {
977 struct cpuset *a = csa[i];
978 int apn = a->pn;
979
980 for (j = 0; j < csn; j++) {
981 struct cpuset *b = csa[j];
982 int bpn = b->pn;
983
984 if (apn != bpn && cpusets_overlap(a, b)) {
985 for (k = 0; k < csn; k++) {
986 struct cpuset *c = csa[k];
987
988 if (c->pn == bpn)
989 c->pn = apn;
990 }
991 ndoms--; /* one less element */
992 goto restart;
993 }
994 }
995 }
996
997 /*
998 * Now we know how many domains to create.
999 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
1000 */
1001 doms = alloc_sched_domains(ndoms);
1002 if (!doms)
1003 goto done;
1004
1005 /*
1006 * The rest of the code, including the scheduler, can deal with
1007 * dattr==NULL case. No need to abort if alloc fails.
1008 */
1009 dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
1010 GFP_KERNEL);
1011
1012 for (nslot = 0, i = 0; i < csn; i++) {
1013 struct cpuset *a = csa[i];
1014 struct cpumask *dp;
1015 int apn = a->pn;
1016
1017 if (apn < 0) {
1018 /* Skip completed partitions */
1019 continue;
1020 }
1021
1022 dp = doms[nslot];
1023
1024 if (nslot == ndoms) {
1025 static int warnings = 10;
1026 if (warnings) {
1027 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
1028 nslot, ndoms, csn, i, apn);
1029 warnings--;
1030 }
1031 continue;
1032 }
1033
1034 cpumask_clear(dp);
1035 if (dattr)
1036 *(dattr + nslot) = SD_ATTR_INIT;
1037 for (j = i; j < csn; j++) {
1038 struct cpuset *b = csa[j];
1039
1040 if (apn == b->pn) {
1041 cpumask_or(dp, dp, b->effective_cpus);
1042 cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
1043 if (dattr)
1044 update_domain_attr_tree(dattr + nslot, b);
1045
1046 /* Done with this partition */
1047 b->pn = -1;
1048 }
1049 }
1050 nslot++;
1051 }
1052 BUG_ON(nslot != ndoms);
1053
1054 done:
1055 kfree(csa);
1056
1057 /*
1058 * Fallback to the default domain if kmalloc() failed.
1059 * See comments in partition_sched_domains().
1060 */
1061 if (doms == NULL)
1062 ndoms = 1;
1063
1064 *domains = doms;
1065 *attributes = dattr;
1066 return ndoms;
1067 }
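/*
 * Worked example (illustrative): with root load balancing disabled and two
 * sibling cpusets A (cpus 0-3) and B (cpus 4-7), both with
 * sched_load_balance set and non-overlapping effective_cpus, the loops
 * above leave A and B with distinct 'pn' values, so ndoms == 2 and doms[]
 * ends up as { 0-3, 4-7 } (after masking with the housekeeping cpumask),
 * i.e. two independent sched domains.
 */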
1068
1069 static void dl_update_tasks_root_domain(struct cpuset *cs)
1070 {
1071 struct css_task_iter it;
1072 struct task_struct *task;
1073
1074 if (cs->nr_deadline_tasks == 0)
1075 return;
1076
1077 css_task_iter_start(&cs->css, 0, &it);
1078
1079 while ((task = css_task_iter_next(&it)))
1080 dl_add_task_root_domain(task);
1081
1082 css_task_iter_end(&it);
1083 }
1084
1085 static void dl_rebuild_rd_accounting(void)
1086 {
1087 struct cpuset *cs = NULL;
1088 struct cgroup_subsys_state *pos_css;
1089
1090 lockdep_assert_held(&cpuset_mutex);
1091 lockdep_assert_cpus_held();
1092 lockdep_assert_held(&sched_domains_mutex);
1093
1094 rcu_read_lock();
1095
1096 /*
1097 * Clear default root domain DL accounting, it will be computed again
1098 * if a task belongs to it.
1099 */
1100 dl_clear_root_domain(&def_root_domain);
1101
1102 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1103
1104 if (cpumask_empty(cs->effective_cpus)) {
1105 pos_css = css_rightmost_descendant(pos_css);
1106 continue;
1107 }
1108
1109 css_get(&cs->css);
1110
1111 rcu_read_unlock();
1112
1113 dl_update_tasks_root_domain(cs);
1114
1115 rcu_read_lock();
1116 css_put(&cs->css);
1117 }
1118 rcu_read_unlock();
1119 }
1120
1121 static void
1122 partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1123 struct sched_domain_attr *dattr_new)
1124 {
1125 mutex_lock(&sched_domains_mutex);
1126 partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
1127 dl_rebuild_rd_accounting();
1128 mutex_unlock(&sched_domains_mutex);
1129 }
1130
1131 /*
1132 * Rebuild scheduler domains.
1133 *
1134 * If the flag 'sched_load_balance' of any cpuset with non-empty
1135 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1136 * which has that flag enabled, or if any cpuset with a non-empty
1137 * 'cpus' is removed, then call this routine to rebuild the
1138 * scheduler's dynamic sched domains.
1139 *
1140 * Call with cpuset_mutex held. Takes cpus_read_lock().
1141 */
1142 static void rebuild_sched_domains_locked(void)
1143 {
1144 struct cgroup_subsys_state *pos_css;
1145 struct sched_domain_attr *attr;
1146 cpumask_var_t *doms;
1147 struct cpuset *cs;
1148 int ndoms;
1149
1150 lockdep_assert_cpus_held();
1151 lockdep_assert_held(&cpuset_mutex);
1152
1153 /*
1154 * If we have raced with CPU hotplug, return early to avoid
1155 * passing doms with offlined cpu to partition_sched_domains().
1156 * Anyway, cpuset_hotplug_workfn() will rebuild sched domains.
1157 *
1158 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1159 * should be the same as the active CPUs, so checking only top_cpuset
1160 * is enough to detect racing CPU offlines.
1161 */
1162 if (!top_cpuset.nr_subparts_cpus &&
1163 !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1164 return;
1165
1166 /*
1167 * With subpartition CPUs, however, the effective CPUs of a partition
1168 * root should be only a subset of the active CPUs. Since a CPU in any
1169 * partition root could be offlined, all must be checked.
1170 */
1171 if (top_cpuset.nr_subparts_cpus) {
1172 rcu_read_lock();
1173 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1174 if (!is_partition_valid(cs)) {
1175 pos_css = css_rightmost_descendant(pos_css);
1176 continue;
1177 }
1178 if (!cpumask_subset(cs->effective_cpus,
1179 cpu_active_mask)) {
1180 rcu_read_unlock();
1181 return;
1182 }
1183 }
1184 rcu_read_unlock();
1185 }
1186
1187 /* Generate domain masks and attrs */
1188 ndoms = generate_sched_domains(&doms, &attr);
1189
1190 /* Have scheduler rebuild the domains */
1191 partition_and_rebuild_sched_domains(ndoms, doms, attr);
1192 }
1193 #else /* !CONFIG_SMP */
1194 static void rebuild_sched_domains_locked(void)
1195 {
1196 }
1197 #endif /* CONFIG_SMP */
1198
1199 void rebuild_sched_domains(void)
1200 {
1201 cpus_read_lock();
1202 mutex_lock(&cpuset_mutex);
1203 rebuild_sched_domains_locked();
1204 mutex_unlock(&cpuset_mutex);
1205 cpus_read_unlock();
1206 }
1207
1208 /**
1209 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1210 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1211 * @new_cpus: the temp variable for the new effective_cpus mask
1212 *
1213 * Iterate through each task of @cs updating its cpus_allowed to the
1214 * effective cpuset's. As this function is called with cpuset_mutex held,
1215 * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
1216 * is used instead of effective_cpus to make sure all offline CPUs are also
1217 * included as hotplug code won't update cpumasks for tasks in top_cpuset.
1218 */
1219 static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1220 {
1221 struct css_task_iter it;
1222 struct task_struct *task;
1223 bool top_cs = cs == &top_cpuset;
1224
1225 css_task_iter_start(&cs->css, 0, &it);
1226 while ((task = css_task_iter_next(&it))) {
1227 const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1228
1229 if (top_cs) {
1230 /*
1231 * Percpu kthreads in top_cpuset are ignored
1232 */
1233 if (kthread_is_per_cpu(task))
1234 continue;
1235 cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus);
1236 } else {
1237 cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1238 }
1239 set_cpus_allowed_ptr(task, new_cpus);
1240 }
1241 css_task_iter_end(&it);
1242 }
1243
1244 /**
1245 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1246 * @new_cpus: the temp variable for the new effective_cpus mask
1247 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1248 * @parent: the parent cpuset
1249 *
1250 * If the parent has subpartition CPUs, include them in the list of
1251 * allowable CPUs in computing the new effective_cpus mask. Since offlined
1252 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
1253 * to mask those out.
1254 */
1255 static void compute_effective_cpumask(struct cpumask *new_cpus,
1256 struct cpuset *cs, struct cpuset *parent)
1257 {
1258 if (parent->nr_subparts_cpus && is_partition_valid(cs)) {
1259 cpumask_or(new_cpus, parent->effective_cpus,
1260 parent->subparts_cpus);
1261 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
1262 cpumask_and(new_cpus, new_cpus, cpu_active_mask);
1263 } else {
1264 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1265 }
1266 }
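/*
 * Example (illustrative): if the parent has effective_cpus = 0-3 and
 * subparts_cpus = 4-5, a child with cpus_allowed = 2-5 gets
 *
 *   - new_cpus = 2-5 (& cpu_active_mask) if it is a valid partition root,
 *     since CPUs already granted to sub-partitions are also allowable, or
 *   - new_cpus = 2-3 otherwise (only the parent's effective_cpus).
 */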
1267
1268 /*
1269 * Commands for update_parent_subparts_cpumask
1270 */
1271 enum subparts_cmd {
1272 partcmd_enable, /* Enable partition root */
1273 partcmd_disable, /* Disable partition root */
1274 partcmd_update, /* Update parent's subparts_cpus */
1275 partcmd_invalidate, /* Make partition invalid */
1276 };
1277
1278 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1279 int turning_on);
1280 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1281 struct tmpmasks *tmp);
1282
1283 /*
1284 * Update partition exclusive flag
1285 *
1286 * Return: 0 if successful, an error code otherwise
1287 */
1288 static int update_partition_exclusive(struct cpuset *cs, int new_prs)
1289 {
1290 bool exclusive = (new_prs > 0);
1291
1292 if (exclusive && !is_cpu_exclusive(cs)) {
1293 if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1294 return PERR_NOTEXCL;
1295 } else if (!exclusive && is_cpu_exclusive(cs)) {
1296 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1297 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1298 }
1299 return 0;
1300 }
1301
1302 /*
1303 * Update partition load balance flag and/or rebuild sched domain
1304 *
1305 * Changing load balance flag will automatically call
1306 * rebuild_sched_domains_locked().
1307 */
1308 static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1309 {
1310 int new_prs = cs->partition_root_state;
1311 bool new_lb = (new_prs != PRS_ISOLATED);
1312 bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1313
1314 if (new_lb != !!is_sched_load_balance(cs)) {
1315 rebuild_domains = true;
1316 if (new_lb)
1317 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1318 else
1319 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1320 }
1321
1322 if (rebuild_domains)
1323 rebuild_sched_domains_locked();
1324 }
1325
1326 /**
1327 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
1328 * @cs: The cpuset that requests change in partition root state
1329 * @cmd: Partition root state change command
1330 * @newmask: Optional new cpumask for partcmd_update
1331 * @tmp: Temporary addmask and delmask
1332 * Return: 0 or a partition root state error code
1333 *
1334 * For partcmd_enable, the cpuset is being transformed from a non-partition
1335 * root to a partition root. The cpus_allowed mask of the given cpuset will
1336 * be put into parent's subparts_cpus and taken away from parent's
1337 * effective_cpus. The function will return 0 if all the CPUs listed in
1338 * cpus_allowed can be granted or an error code will be returned.
1339 *
1340 * For partcmd_disable, the cpuset is being transformed from a partition
1341 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1342 * parent's subparts_cpus will be taken away from that cpumask and put back
1343 * into parent's effective_cpus. 0 will always be returned.
1344 *
1345 * For partcmd_update, if the optional newmask is specified, the cpu list is
1346 * to be changed from cpus_allowed to newmask. Otherwise, cpus_allowed is
1347 * assumed to remain the same. The cpuset should either be a valid or invalid
1348 * partition root. The partition root state may change from valid to invalid
1349 * or vice versa. An error code will only be returned if transitioning from
1350 * invalid to valid violates the exclusivity rule.
1351 *
1352 * For partcmd_invalidate, the current partition will be made invalid.
1353 *
1354 * The partcmd_enable and partcmd_disable commands are used by
1355 * update_prstate(). An error code may be returned and the caller will check
1356 * for error.
1357 *
1358 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1359 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1360 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1361 * check for error and so partition_root_state and prs_error will be updated
1362 * directly.
1363 */
1364 static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
1365 struct cpumask *newmask,
1366 struct tmpmasks *tmp)
1367 {
1368 struct cpuset *parent = parent_cs(cs);
1369 int adding; /* Moving cpus from effective_cpus to subparts_cpus */
1370 int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
1371 int old_prs, new_prs;
1372 int part_error = PERR_NONE; /* Partition error? */
1373
1374 lockdep_assert_held(&cpuset_mutex);
1375
1376 /*
1377 * The parent must be a partition root.
1378 * The new cpumask, if present, or the current cpus_allowed must
1379 * not be empty.
1380 */
1381 if (!is_partition_valid(parent)) {
1382 return is_partition_invalid(parent)
1383 ? PERR_INVPARENT : PERR_NOTPART;
1384 }
1385 if (!newmask && cpumask_empty(cs->cpus_allowed))
1386 return PERR_CPUSEMPTY;
1387
1388 /*
1389 * new_prs will only be changed for the partcmd_update and
1390 * partcmd_invalidate commands.
1391 */
1392 adding = deleting = false;
1393 old_prs = new_prs = cs->partition_root_state;
1394 if (cmd == partcmd_enable) {
1395 /*
1396 * Enabling partition root is not allowed if cpus_allowed
1397 * doesn't overlap parent's cpus_allowed.
1398 */
1399 if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed))
1400 return PERR_INVCPUS;
1401
1402 /*
1403 * A parent can be left with no CPU as long as there is no
1404 * task directly associated with the parent partition.
1405 */
1406 if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
1407 partition_is_populated(parent, cs))
1408 return PERR_NOCPUS;
1409
1410 cpumask_copy(tmp->addmask, cs->cpus_allowed);
1411 adding = true;
1412 } else if (cmd == partcmd_disable) {
1413 /*
1414 * Need to remove cpus from parent's subparts_cpus for valid
1415 * partition root.
1416 */
1417 deleting = !is_prs_invalid(old_prs) &&
1418 cpumask_and(tmp->delmask, cs->cpus_allowed,
1419 parent->subparts_cpus);
1420 } else if (cmd == partcmd_invalidate) {
1421 if (is_prs_invalid(old_prs))
1422 return 0;
1423
1424 /*
1425 * Make the current partition invalid. It is assumed that
1426 * invalidation is caused by violating cpu exclusivity rule.
1427 */
1428 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1429 parent->subparts_cpus);
1430 if (old_prs > 0) {
1431 new_prs = -old_prs;
1432 part_error = PERR_NOTEXCL;
1433 }
1434 } else if (newmask) {
1435 /*
1436 * partcmd_update with newmask:
1437 *
1438 * Compute add/delete mask to/from subparts_cpus
1439 *
1440 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
1441 * addmask = newmask & parent->cpus_allowed
1442 * & ~parent->subparts_cpus
1443 */
1444 cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask);
1445 deleting = cpumask_and(tmp->delmask, tmp->delmask,
1446 parent->subparts_cpus);
1447
1448 cpumask_and(tmp->addmask, newmask, parent->cpus_allowed);
1449 adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1450 parent->subparts_cpus);
1451 /*
1452 * Empty cpumask is not allowed
1453 */
1454 if (cpumask_empty(newmask)) {
1455 part_error = PERR_CPUSEMPTY;
1456 /*
1457 * Make partition invalid if parent's effective_cpus could
1458 * become empty and there are tasks in the parent.
1459 */
1460 } else if (adding &&
1461 cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1462 !cpumask_intersects(tmp->delmask, cpu_active_mask) &&
1463 partition_is_populated(parent, cs)) {
1464 part_error = PERR_NOCPUS;
1465 adding = false;
1466 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1467 parent->subparts_cpus);
1468 }
1469 } else {
1470 /*
1471 * partcmd_update w/o newmask:
1472 *
1473 * delmask = cpus_allowed & parent->subparts_cpus
1474 * addmask = cpus_allowed & parent->cpus_allowed
1475 * & ~parent->subparts_cpus
1476 *
1477 * This gets invoked either due to a hotplug event or from
1478 * update_cpumasks_hier(). This can cause the state of a
1479 * partition root to transition from valid to invalid or vice
1480 * versa. So we still need to compute the addmask and delmask.
1481 *
1482 * A partition error happens when:
1483 * 1) Cpuset is valid partition, but parent does not distribute
1484 * out any CPUs.
1485 * 2) Parent has tasks and all its effective CPUs will have
1486 * to be distributed out.
1487 */
1488 cpumask_and(tmp->addmask, cs->cpus_allowed,
1489 parent->cpus_allowed);
1490 adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1491 parent->subparts_cpus);
1492
1493 if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) ||
1494 (adding &&
1495 cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1496 partition_is_populated(parent, cs))) {
1497 part_error = PERR_NOCPUS;
1498 adding = false;
1499 }
1500
1501 if (part_error && is_partition_valid(cs) &&
1502 parent->nr_subparts_cpus)
1503 deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1504 parent->subparts_cpus);
1505 }
1506 if (part_error)
1507 WRITE_ONCE(cs->prs_err, part_error);
1508
1509 if (cmd == partcmd_update) {
1510 /*
1511 * Check for possible transition between valid and invalid
1512 * partition root.
1513 */
1514 switch (cs->partition_root_state) {
1515 case PRS_ROOT:
1516 case PRS_ISOLATED:
1517 if (part_error)
1518 new_prs = -old_prs;
1519 break;
1520 case PRS_INVALID_ROOT:
1521 case PRS_INVALID_ISOLATED:
1522 if (!part_error)
1523 new_prs = -old_prs;
1524 break;
1525 }
1526 }
1527
1528 if (!adding && !deleting && (new_prs == old_prs))
1529 return 0;
1530
1531 /*
1532 * Transitioning between invalid to valid or vice versa may require
1533 * changing CS_CPU_EXCLUSIVE.
1534 */
1535 if (old_prs != new_prs) {
1536 int err = update_partition_exclusive(cs, new_prs);
1537
1538 if (err)
1539 return err;
1540 }
1541
1542 /*
1543 * Change the parent's subparts_cpus.
1544 * Newly added CPUs will be removed from effective_cpus and
1545 * newly deleted ones will be added back to effective_cpus.
1546 */
1547 spin_lock_irq(&callback_lock);
1548 if (adding) {
1549 cpumask_or(parent->subparts_cpus,
1550 parent->subparts_cpus, tmp->addmask);
1551 cpumask_andnot(parent->effective_cpus,
1552 parent->effective_cpus, tmp->addmask);
1553 }
1554 if (deleting) {
1555 cpumask_andnot(parent->subparts_cpus,
1556 parent->subparts_cpus, tmp->delmask);
1557 /*
1558 * Some of the CPUs in subparts_cpus might have been offlined.
1559 */
1560 cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
1561 cpumask_or(parent->effective_cpus,
1562 parent->effective_cpus, tmp->delmask);
1563 }
1564
1565 parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
1566
1567 if (old_prs != new_prs)
1568 cs->partition_root_state = new_prs;
1569
1570 spin_unlock_irq(&callback_lock);
1571
1572 if (adding || deleting) {
1573 update_tasks_cpumask(parent, tmp->addmask);
1574 if (parent->child_ecpus_count)
1575 update_sibling_cpumasks(parent, cs, tmp);
1576 }
1577
1578 /*
1579 * For partcmd_update without newmask, it is being called from
1580 * cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken.
1581 * Update the load balance flag and scheduling domain if
1582 * cpus_read_trylock() is successful.
1583 */
1584 if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) {
1585 update_partition_sd_lb(cs, old_prs);
1586 cpus_read_unlock();
1587 }
1588
1589 notify_partition_change(cs, old_prs);
1590 return 0;
1591 }
1592
1593 /*
1594 * update_cpumasks_hier() flags
1595 */
1596 #define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */
1597 #define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */
1598
1599 /*
1600 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1601 * @cs: the cpuset to consider
1602 * @tmp: temp variables for calculating effective_cpus & partition setup
1603 * @flags: HIER_CHECKALL (don't skip any descendant cpusets) and/or HIER_NO_SD_REBUILD
1604 *
1605 * When configured cpumask is changed, the effective cpumasks of this cpuset
1606 * and all its descendants need to be updated.
1607 *
1608 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
1609 *
1610 * Called with cpuset_mutex held
1611 */
1612 static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
1613 int flags)
1614 {
1615 struct cpuset *cp;
1616 struct cgroup_subsys_state *pos_css;
1617 bool need_rebuild_sched_domains = false;
1618 int old_prs, new_prs;
1619
1620 rcu_read_lock();
1621 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1622 struct cpuset *parent = parent_cs(cp);
1623 bool update_parent = false;
1624
1625 compute_effective_cpumask(tmp->new_cpus, cp, parent);
1626
1627 /*
1628 * If it becomes empty, inherit the effective mask of the
1629 * parent, which is guaranteed to have some CPUs unless
1630 * it is a partition root that has explicitly distributed
1631 * out all its CPUs.
1632 */
1633 if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
1634 if (is_partition_valid(cp) &&
1635 cpumask_equal(cp->cpus_allowed, cp->subparts_cpus))
1636 goto update_parent_subparts;
1637
1638 cpumask_copy(tmp->new_cpus, parent->effective_cpus);
1639 if (!cp->use_parent_ecpus) {
1640 cp->use_parent_ecpus = true;
1641 parent->child_ecpus_count++;
1642 }
1643 } else if (cp->use_parent_ecpus) {
1644 cp->use_parent_ecpus = false;
1645 WARN_ON_ONCE(!parent->child_ecpus_count);
1646 parent->child_ecpus_count--;
1647 }
1648
1649 /*
1650 * Skip the whole subtree if
1651 * 1) the cpumask remains the same,
1652 * 2) has no partition root state,
1653 * 3) HIER_CHECKALL flag not set, and
1654 * 4) for v2 load balance state same as its parent.
1655 */
1656 if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
1657 cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
1658 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1659 (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
1660 pos_css = css_rightmost_descendant(pos_css);
1661 continue;
1662 }
1663
1664 update_parent_subparts:
1665 /*
1666 * update_parent_subparts_cpumask() should have been called
1667 * for cs already in update_cpumask(). We should also call
1668 * update_tasks_cpumask() again for tasks in the parent
1669 * cpuset if the parent's subparts_cpus changes.
1670 */
1671 old_prs = new_prs = cp->partition_root_state;
1672 if ((cp != cs) && old_prs) {
1673 switch (parent->partition_root_state) {
1674 case PRS_ROOT:
1675 case PRS_ISOLATED:
1676 update_parent = true;
1677 break;
1678
1679 default:
1680 /*
1681 * When parent is not a partition root or is
1682 * invalid, child partition roots become
1683 * invalid too.
1684 */
1685 if (is_partition_valid(cp))
1686 new_prs = -cp->partition_root_state;
1687 WRITE_ONCE(cp->prs_err,
1688 is_partition_invalid(parent)
1689 ? PERR_INVPARENT : PERR_NOTPART);
1690 break;
1691 }
1692 }
1693
1694 if (!css_tryget_online(&cp->css))
1695 continue;
1696 rcu_read_unlock();
1697
1698 if (update_parent) {
1699 update_parent_subparts_cpumask(cp, partcmd_update, NULL,
1700 tmp);
1701 /*
1702 * The cpuset partition_root_state may become
1703 * invalid. Capture it.
1704 */
1705 new_prs = cp->partition_root_state;
1706 }
1707
1708 spin_lock_irq(&callback_lock);
1709
1710 if (cp->nr_subparts_cpus && !is_partition_valid(cp)) {
1711 /*
1712 * Put all active subparts_cpus back to effective_cpus.
1713 */
1714 cpumask_or(tmp->new_cpus, tmp->new_cpus,
1715 cp->subparts_cpus);
1716 cpumask_and(tmp->new_cpus, tmp->new_cpus,
1717 cpu_active_mask);
1718 cp->nr_subparts_cpus = 0;
1719 cpumask_clear(cp->subparts_cpus);
1720 }
1721
1722 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1723 if (cp->nr_subparts_cpus) {
1724 /*
1725 * Make sure that effective_cpus & subparts_cpus
1726 * are mutually exclusive.
1727 */
1728 cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
1729 cp->subparts_cpus);
1730 }
1731
1732 cp->partition_root_state = new_prs;
1733 spin_unlock_irq(&callback_lock);
1734
1735 notify_partition_change(cp, old_prs);
1736
1737 WARN_ON(!is_in_v2_mode() &&
1738 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
1739
1740 update_tasks_cpumask(cp, tmp->new_cpus);
1741
1742 /*
1743 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
1744 * from parent if current cpuset isn't a valid partition root
1745 * and their load balance states differ.
1746 */
1747 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1748 !is_partition_valid(cp) &&
1749 (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
1750 if (is_sched_load_balance(parent))
1751 set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
1752 else
1753 clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
1754 }
1755
1756 /*
1757 * On legacy hierarchy, if the effective cpumask of any non-
1758 * empty cpuset is changed, we need to rebuild sched domains.
1759 * On default hierarchy, the cpuset needs to be a partition
1760 * root as well.
1761 */
1762 if (!cpumask_empty(cp->cpus_allowed) &&
1763 is_sched_load_balance(cp) &&
1764 (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1765 is_partition_valid(cp)))
1766 need_rebuild_sched_domains = true;
1767
1768 rcu_read_lock();
1769 css_put(&cp->css);
1770 }
1771 rcu_read_unlock();
1772
1773 if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
1774 rebuild_sched_domains_locked();
1775 }
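/*
 * Illustrative example of the effective mask computation above: with a
 * parent whose effective_cpus is 0-3 and a child whose cpus_allowed is 2-5,
 * the child's recomputed effective_cpus is 2-3.  If the intersection were
 * empty, the child would, on the default hierarchy, fall back to the
 * parent's effective_cpus and be accounted via use_parent_ecpus.
 */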
1776
1777 /**
1778 * update_sibling_cpumasks - Update siblings cpumasks
1779 * @parent: Parent cpuset
1780 * @cs: Current cpuset
1781 * @tmp: Temp variables
1782 */
1783 static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1784 struct tmpmasks *tmp)
1785 {
1786 struct cpuset *sibling;
1787 struct cgroup_subsys_state *pos_css;
1788
1789 lockdep_assert_held(&cpuset_mutex);
1790
1791 /*
1792 * Check all its siblings and call update_cpumasks_hier()
1793 * if their use_parent_ecpus flag is set in order for them
1794 * to use the right effective_cpus value.
1795 *
1796 * The update_cpumasks_hier() function may sleep. So we have to
1797 * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
1798 * flag is used to suppress rebuild of sched domains as the callers
1799 * will take care of that.
1800 */
1801 rcu_read_lock();
1802 cpuset_for_each_child(sibling, pos_css, parent) {
1803 if (sibling == cs)
1804 continue;
1805 if (!sibling->use_parent_ecpus)
1806 continue;
1807 if (!css_tryget_online(&sibling->css))
1808 continue;
1809
1810 rcu_read_unlock();
1811 update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
1812 rcu_read_lock();
1813 css_put(&sibling->css);
1814 }
1815 rcu_read_unlock();
1816 }
1817
1818 /**
1819 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1820 * @cs: the cpuset to consider
1821 * @trialcs: trial cpuset
1822 * @buf: buffer of cpu numbers written to this cpuset
1823 */
1824 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
1825 const char *buf)
1826 {
1827 int retval;
1828 struct tmpmasks tmp;
1829 bool invalidate = false;
1830 int old_prs = cs->partition_root_state;
1831
1832 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
1833 if (cs == &top_cpuset)
1834 return -EACCES;
1835
1836 /*
1837 * An empty cpus_allowed is ok only if the cpuset has no tasks.
1838 * Since cpulist_parse() fails on an empty mask, we special case
1839 * that parsing. The validate_change() call ensures that cpusets
1840 * with tasks have cpus.
1841 */
1842 if (!*buf) {
1843 cpumask_clear(trialcs->cpus_allowed);
1844 } else {
1845 retval = cpulist_parse(buf, trialcs->cpus_allowed);
1846 if (retval < 0)
1847 return retval;
1848
1849 if (!cpumask_subset(trialcs->cpus_allowed,
1850 top_cpuset.cpus_allowed))
1851 return -EINVAL;
1852 }
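	/*
	 * Example: a write of "0-3,7" to cpuset.cpus parses into the mask
	 * {0,1,2,3,7}; an empty write clears cpus_allowed, which is only
	 * accepted when the cpuset has no tasks (see validate_change()).
	 */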
1853
1854 /* Nothing to do if the cpus didn't change */
1855 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
1856 return 0;
1857
1858 if (alloc_cpumasks(NULL, &tmp))
1859 return -ENOMEM;
1860
1861 retval = validate_change(cs, trialcs);
1862
1863 if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
1864 struct cpuset *cp, *parent;
1865 struct cgroup_subsys_state *css;
1866
1867 /*
1868 * The -EINVAL error code indicates that partition sibling
1869 * CPU exclusivity rule has been violated. We still allow
1870 * the cpumask change to proceed while invalidating the
1871 * partition. However, any conflicting sibling partitions
1872 * have to be marked as invalid too.
1873 */
1874 invalidate = true;
1875 rcu_read_lock();
1876 parent = parent_cs(cs);
1877 cpuset_for_each_child(cp, css, parent)
1878 if (is_partition_valid(cp) &&
1879 cpumask_intersects(trialcs->cpus_allowed, cp->cpus_allowed)) {
1880 rcu_read_unlock();
1881 update_parent_subparts_cpumask(cp, partcmd_invalidate, NULL, &tmp);
1882 rcu_read_lock();
1883 }
1884 rcu_read_unlock();
1885 retval = 0;
1886 }
1887 if (retval < 0)
1888 goto out_free;
1889
1890 if (cs->partition_root_state) {
1891 if (invalidate)
1892 update_parent_subparts_cpumask(cs, partcmd_invalidate,
1893 NULL, &tmp);
1894 else
1895 update_parent_subparts_cpumask(cs, partcmd_update,
1896 trialcs->cpus_allowed, &tmp);
1897 }
1898
1899 compute_effective_cpumask(trialcs->effective_cpus, trialcs,
1900 parent_cs(cs));
1901 spin_lock_irq(&callback_lock);
1902 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
1903
1904 /*
1905 * Make sure that subparts_cpus, if not empty, is a subset of
1906 * cpus_allowed. Clear subparts_cpus if partition not valid or
1907 * empty effective cpus with tasks.
1908 */
1909 if (cs->nr_subparts_cpus) {
1910 if (!is_partition_valid(cs) ||
1911 (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) &&
1912 partition_is_populated(cs, NULL))) {
1913 cs->nr_subparts_cpus = 0;
1914 cpumask_clear(cs->subparts_cpus);
1915 } else {
1916 cpumask_and(cs->subparts_cpus, cs->subparts_cpus,
1917 cs->cpus_allowed);
1918 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
1919 }
1920 }
1921 spin_unlock_irq(&callback_lock);
1922
1923 /* effective_cpus will be updated here */
1924 update_cpumasks_hier(cs, &tmp, 0);
1925
1926 if (cs->partition_root_state) {
1927 struct cpuset *parent = parent_cs(cs);
1928
1929 /*
1930 * For partition root, update the cpumasks of sibling
1931 * cpusets if they use parent's effective_cpus.
1932 */
1933 if (parent->child_ecpus_count)
1934 update_sibling_cpumasks(parent, cs, &tmp);
1935
1936 /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains */
1937 update_partition_sd_lb(cs, old_prs);
1938 }
1939 out_free:
1940 free_cpumasks(NULL, &tmp);
1941 return retval;
1942 }
1943
1944 /*
1945 * Migrate memory region from one set of nodes to another. This is
1946 * performed asynchronously as it can be called from process migration path
1947 * holding locks involved in process management. All mm migrations are
1948 * performed in the queued order and can be waited for by flushing
1949 * cpuset_migrate_mm_wq.
1950 */
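/*
 * Note: callers hand in an mm reference taken with get_task_mm();
 * ownership passes to cpuset_migrate_mm(), which drops it with mmput()
 * either in the queued work item or on the no-op/allocation-failure paths.
 */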
1951
1952 struct cpuset_migrate_mm_work {
1953 struct work_struct work;
1954 struct mm_struct *mm;
1955 nodemask_t from;
1956 nodemask_t to;
1957 };
1958
1959 static void cpuset_migrate_mm_workfn(struct work_struct *work)
1960 {
1961 struct cpuset_migrate_mm_work *mwork =
1962 container_of(work, struct cpuset_migrate_mm_work, work);
1963
1964 /* on a wq worker, no need to worry about %current's mems_allowed */
1965 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1966 mmput(mwork->mm);
1967 kfree(mwork);
1968 }
1969
1970 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1971 const nodemask_t *to)
1972 {
1973 struct cpuset_migrate_mm_work *mwork;
1974
1975 if (nodes_equal(*from, *to)) {
1976 mmput(mm);
1977 return;
1978 }
1979
1980 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1981 if (mwork) {
1982 mwork->mm = mm;
1983 mwork->from = *from;
1984 mwork->to = *to;
1985 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1986 queue_work(cpuset_migrate_mm_wq, &mwork->work);
1987 } else {
1988 mmput(mm);
1989 }
1990 }
1991
1992 static void cpuset_post_attach(void)
1993 {
1994 flush_workqueue(cpuset_migrate_mm_wq);
1995 }
1996
1997 /*
1998 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1999 * @tsk: the task to change
2000 * @newmems: new nodes that the task will be set
2001 *
2002 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2003 * and rebind the task's mempolicy, if any. If the task is allocating in
2004 * parallel, it might temporarily see an empty intersection, which results in
2005 * a seqlock check and retry before OOM or allocation failure.
2006 */
2007 static void cpuset_change_task_nodemask(struct task_struct *tsk,
2008 nodemask_t *newmems)
2009 {
2010 task_lock(tsk);
2011
2012 local_irq_disable();
2013 write_seqcount_begin(&tsk->mems_allowed_seq);
2014
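	/*
	 * Widen mems_allowed to the union of the old and new masks first so
	 * that a concurrent allocator never observes an empty nodemask; the
	 * assignment below narrows it to *newmems once the mempolicy has
	 * been rebound.
	 */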
2015 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2016 mpol_rebind_task(tsk, newmems);
2017 tsk->mems_allowed = *newmems;
2018
2019 write_seqcount_end(&tsk->mems_allowed_seq);
2020 local_irq_enable();
2021
2022 task_unlock(tsk);
2023 }
2024
2025 static void *cpuset_being_rebound;
2026
2027 /**
2028 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2029 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2030 *
2031 * Iterate through each task of @cs, updating its mems_allowed to the
2032 * cpuset's effective_mems. As this function is called with cpuset_mutex held,
2033 * cpuset membership stays stable.
2034 */
2035 static void update_tasks_nodemask(struct cpuset *cs)
2036 {
2037 static nodemask_t newmems; /* protected by cpuset_mutex */
2038 struct css_task_iter it;
2039 struct task_struct *task;
2040
2041 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
2042
2043 guarantee_online_mems(cs, &newmems);
2044
2045 /*
2046 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2047 * take while holding tasklist_lock. Forks can happen - the
2048 * mpol_dup() cpuset_being_rebound check will catch such forks,
2049 * and rebind their vma mempolicies too. Because we still hold
2050 * the global cpuset_mutex, we know that no other rebind effort
2051 * will be contending for the global variable cpuset_being_rebound.
2052 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2053 * is idempotent. Also migrate pages in each mm to new nodes.
2054 */
2055 css_task_iter_start(&cs->css, 0, &it);
2056 while ((task = css_task_iter_next(&it))) {
2057 struct mm_struct *mm;
2058 bool migrate;
2059
2060 cpuset_change_task_nodemask(task, &newmems);
2061
2062 mm = get_task_mm(task);
2063 if (!mm)
2064 continue;
2065
2066 migrate = is_memory_migrate(cs);
2067
2068 mpol_rebind_mm(mm, &cs->mems_allowed);
2069 if (migrate)
2070 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2071 else
2072 mmput(mm);
2073 }
2074 css_task_iter_end(&it);
2075
2076 /*
2077 * All the tasks' nodemasks have been updated, update
2078 * cs->old_mems_allowed.
2079 */
2080 cs->old_mems_allowed = newmems;
2081
2082 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
2083 cpuset_being_rebound = NULL;
2084 }
2085
2086 /*
2087 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2088 * @cs: the cpuset to consider
2089 * @new_mems: a temp variable for calculating new effective_mems
2090 *
2091 * When configured nodemask is changed, the effective nodemasks of this cpuset
2092 * and all its descendants need to be updated.
2093 *
2094 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
2095 *
2096 * Called with cpuset_mutex held
2097 */
2098 static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2099 {
2100 struct cpuset *cp;
2101 struct cgroup_subsys_state *pos_css;
2102
2103 rcu_read_lock();
2104 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2105 struct cpuset *parent = parent_cs(cp);
2106
2107 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2108
2109 /*
2110 * If it becomes empty, inherit the effective mask of the
2111 * parent, which is guaranteed to have some MEMs.
2112 */
2113 if (is_in_v2_mode() && nodes_empty(*new_mems))
2114 *new_mems = parent->effective_mems;
2115
2116 /* Skip the whole subtree if the nodemask remains the same. */
2117 if (nodes_equal(*new_mems, cp->effective_mems)) {
2118 pos_css = css_rightmost_descendant(pos_css);
2119 continue;
2120 }
2121
2122 if (!css_tryget_online(&cp->css))
2123 continue;
2124 rcu_read_unlock();
2125
2126 spin_lock_irq(&callback_lock);
2127 cp->effective_mems = *new_mems;
2128 spin_unlock_irq(&callback_lock);
2129
2130 WARN_ON(!is_in_v2_mode() &&
2131 !nodes_equal(cp->mems_allowed, cp->effective_mems));
2132
2133 update_tasks_nodemask(cp);
2134
2135 rcu_read_lock();
2136 css_put(&cp->css);
2137 }
2138 rcu_read_unlock();
2139 }
2140
2141 /*
2142 * Handle a user request to change the 'mems' memory placement
2143 * of a cpuset. Needs to validate the request, update the
2144 * cpuset's mems_allowed, and for each task in the cpuset,
2145 * update its mems_allowed, rebind the task's mempolicy and any vma
2146 * mempolicies, and, if the cpuset is marked 'memory_migrate',
2147 * migrate the task's pages to the new memory.
2148 *
2149 * Call with cpuset_mutex held. May take callback_lock during call.
2150 * Will take tasklist_lock, scan the tasklist for tasks in cpuset cs,
2151 * lock each such task's mm->mmap_lock, scan its vma's and rebind
2152 * their mempolicies to the cpuset's new mems_allowed.
2153 */
2154 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2155 const char *buf)
2156 {
2157 int retval;
2158
2159 /*
2160 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2161 * it's read-only
2162 */
2163 if (cs == &top_cpuset) {
2164 retval = -EACCES;
2165 goto done;
2166 }
2167
2168 /*
2169 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2170 * Since nodelist_parse() fails on an empty mask, we special case
2171 * that parsing. The validate_change() call ensures that cpusets
2172 * with tasks have memory.
2173 */
2174 if (!*buf) {
2175 nodes_clear(trialcs->mems_allowed);
2176 } else {
2177 retval = nodelist_parse(buf, trialcs->mems_allowed);
2178 if (retval < 0)
2179 goto done;
2180
2181 if (!nodes_subset(trialcs->mems_allowed,
2182 top_cpuset.mems_allowed)) {
2183 retval = -EINVAL;
2184 goto done;
2185 }
2186 }
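	/*
	 * Example: writing "0,2-3" to cpuset.mems selects memory nodes 0, 2
	 * and 3; an empty write clears mems_allowed and, as above, is only
	 * valid for a cpuset without tasks.
	 */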
2187
2188 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2189 retval = 0; /* Too easy - nothing to do */
2190 goto done;
2191 }
2192 retval = validate_change(cs, trialcs);
2193 if (retval < 0)
2194 goto done;
2195
2196 check_insane_mems_config(&trialcs->mems_allowed);
2197
2198 spin_lock_irq(&callback_lock);
2199 cs->mems_allowed = trialcs->mems_allowed;
2200 spin_unlock_irq(&callback_lock);
2201
2202 /* use trialcs->mems_allowed as a temp variable */
2203 update_nodemasks_hier(cs, &trialcs->mems_allowed);
2204 done:
2205 return retval;
2206 }
2207
2208 bool current_cpuset_is_being_rebound(void)
2209 {
2210 bool ret;
2211
2212 rcu_read_lock();
2213 ret = task_cs(current) == cpuset_being_rebound;
2214 rcu_read_unlock();
2215
2216 return ret;
2217 }
2218
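/*
 * Per Documentation/admin-guide/cgroup-v1/cpusets.rst, the requested level
 * roughly means: -1 no request (use the system default), 0 no search,
 * 1 search siblings (hyperthreads), 2 cores in a package, 3 cpus in a node,
 * with higher values widening the search up to system wide on NUMA systems.
 */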
2219 static int update_relax_domain_level(struct cpuset *cs, s64 val)
2220 {
2221 #ifdef CONFIG_SMP
2222 if (val < -1 || val >= sched_domain_level_max)
2223 return -EINVAL;
2224 #endif
2225
2226 if (val != cs->relax_domain_level) {
2227 cs->relax_domain_level = val;
2228 if (!cpumask_empty(cs->cpus_allowed) &&
2229 is_sched_load_balance(cs))
2230 rebuild_sched_domains_locked();
2231 }
2232
2233 return 0;
2234 }
2235
2236 /**
2237 * update_tasks_flags - update the spread flags of tasks in the cpuset.
2238 * @cs: the cpuset in which each task's spread flags need to be changed
2239 *
2240 * Iterate through each task of @cs updating its spread flags. As this
2241 * function is called with cpuset_mutex held, cpuset membership stays
2242 * stable.
2243 */
2244 static void update_tasks_flags(struct cpuset *cs)
2245 {
2246 struct css_task_iter it;
2247 struct task_struct *task;
2248
2249 css_task_iter_start(&cs->css, 0, &it);
2250 while ((task = css_task_iter_next(&it)))
2251 cpuset_update_task_spread_flags(cs, task);
2252 css_task_iter_end(&it);
2253 }
2254
2255 /*
2256 * update_flag - read a 0 or a 1 in a file and update associated flag
2257 * bit: the bit to update (see cpuset_flagbits_t)
2258 * cs: the cpuset to update
2259 * turning_on: whether the flag is being set or cleared
2260 *
2261 * Call with cpuset_mutex held.
2262 */
2263
2264 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2265 int turning_on)
2266 {
2267 struct cpuset *trialcs;
2268 int balance_flag_changed;
2269 int spread_flag_changed;
2270 int err;
2271
2272 trialcs = alloc_trial_cpuset(cs);
2273 if (!trialcs)
2274 return -ENOMEM;
2275
2276 if (turning_on)
2277 set_bit(bit, &trialcs->flags);
2278 else
2279 clear_bit(bit, &trialcs->flags);
2280
2281 err = validate_change(cs, trialcs);
2282 if (err < 0)
2283 goto out;
2284
2285 balance_flag_changed = (is_sched_load_balance(cs) !=
2286 is_sched_load_balance(trialcs));
2287
2288 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2289 || (is_spread_page(cs) != is_spread_page(trialcs)));
2290
2291 spin_lock_irq(&callback_lock);
2292 cs->flags = trialcs->flags;
2293 spin_unlock_irq(&callback_lock);
2294
2295 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
2296 rebuild_sched_domains_locked();
2297
2298 if (spread_flag_changed)
2299 update_tasks_flags(cs);
2300 out:
2301 free_cpuset(trialcs);
2302 return err;
2303 }
2304
2305 /**
2306 * update_prstate - update partition_root_state
2307 * @cs: the cpuset to update
2308 * @new_prs: new partition root state
2309 * Return: 0 if successful, != 0 if error
2310 *
2311 * Call with cpuset_mutex held.
2312 */
2313 static int update_prstate(struct cpuset *cs, int new_prs)
2314 {
2315 int err = PERR_NONE, old_prs = cs->partition_root_state;
2316 struct cpuset *parent = parent_cs(cs);
2317 struct tmpmasks tmpmask;
2318
2319 if (old_prs == new_prs)
2320 return 0;
2321
2322 /*
2323 * For a previously invalid partition root, leave it invalid
2324 * if new_prs is not "member".
2325 */
2326 if (new_prs && is_prs_invalid(old_prs)) {
2327 cs->partition_root_state = -new_prs;
2328 return 0;
2329 }
2330
2331 if (alloc_cpumasks(NULL, &tmpmask))
2332 return -ENOMEM;
2333
2334 err = update_partition_exclusive(cs, new_prs);
2335 if (err)
2336 goto out;
2337
2338 if (!old_prs) {
2339 /*
2340 * cpus_allowed cannot be empty.
2341 */
2342 if (cpumask_empty(cs->cpus_allowed)) {
2343 err = PERR_CPUSEMPTY;
2344 goto out;
2345 }
2346
2347 err = update_parent_subparts_cpumask(cs, partcmd_enable,
2348 NULL, &tmpmask);
2349 } else if (old_prs && new_prs) {
2350 /*
2351 * A change in load balance state only, no change in cpumasks.
2352 */
2353 ;
2354 } else {
2355 /*
2356 * Switching back to member is always allowed even if it
2357 * disables child partitions.
2358 */
2359 update_parent_subparts_cpumask(cs, partcmd_disable, NULL,
2360 &tmpmask);
2361
2362 /*
2363 * If there are child partitions, they will all become invalid.
2364 */
2365 if (unlikely(cs->nr_subparts_cpus)) {
2366 spin_lock_irq(&callback_lock);
2367 cs->nr_subparts_cpus = 0;
2368 cpumask_clear(cs->subparts_cpus);
2369 compute_effective_cpumask(cs->effective_cpus, cs, parent);
2370 spin_unlock_irq(&callback_lock);
2371 }
2372 }
2373 out:
2374 /*
2375 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
2376 * happens.
2377 */
2378 if (err) {
2379 new_prs = -new_prs;
2380 update_partition_exclusive(cs, new_prs);
2381 }
2382
2383 spin_lock_irq(&callback_lock);
2384 cs->partition_root_state = new_prs;
2385 WRITE_ONCE(cs->prs_err, err);
2386 spin_unlock_irq(&callback_lock);
2387
2388 /*
2389 * Update child cpusets, if present.
2390 * Force update if switching back to member.
2391 */
2392 if (!list_empty(&cs->css.children))
2393 update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
2394
2395 /* Update sched domains and load balance flag */
2396 update_partition_sd_lb(cs, old_prs);
2397
2398 notify_partition_change(cs, old_prs);
2399 free_cpumasks(NULL, &tmpmask);
2400 return 0;
2401 }
2402
2403 /*
2404 * Frequency meter - How fast is some event occurring?
2405 *
2406 * These routines manage a digitally filtered, constant time based,
2407 * event frequency meter. There are four routines:
2408 * fmeter_init() - initialize a frequency meter.
2409 * fmeter_markevent() - called each time the event happens.
2410 * fmeter_getrate() - returns the recent rate of such events.
2411 * fmeter_update() - internal routine used to update fmeter.
2412 *
2413 * A common data structure is passed to each of these routines,
2414 * which is used to keep track of the state required to manage the
2415 * frequency meter and its digital filter.
2416 *
2417 * The filter works on the number of events marked per unit time.
2418 * The filter is single-pole low-pass recursive (IIR). The time unit
2419 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2420 * simulate 3 decimal digits of precision (multiplied by 1000).
2421 *
2422 * With an FM_COEF of 933, and a time base of 1 second, the filter
2423 * has a half-life of 10 seconds, meaning that if the events quit
2424 * happening, then the rate returned from the fmeter_getrate()
2425 * will be cut in half each 10 seconds, until it converges to zero.
2426 *
2427 * It is not worth doing a real infinitely recursive filter. If more
2428 * than FM_MAXTICKS ticks have elapsed since the last filter event,
2429 * just compute FM_MAXTICKS ticks worth, by which point the level
2430 * will be stable.
2431 *
2432 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2433 * arithmetic overflow in the fmeter_update() routine.
2434 *
2435 * Given the simple 32 bit integer arithmetic used, this meter works
2436 * best for reporting rates between one per millisecond (msec) and
2437 * one per 32 (approx) seconds. At constant rates faster than one
2438 * per msec it maxes out at values just under 1,000,000. At constant
2439 * rates between one per msec, and one per second it will stabilize
2440 * to a value N*1000, where N is the rate of events per second.
2441 * At constant rates between one per second and one per 32 seconds,
2442 * it will be choppy, moving up on the seconds that have an event,
2443 * and then decaying until the next event. At rates slower than
2444 * about one in 32 seconds, it decays all the way back to zero between
2445 * each event.
2446 */
2447
2448 #define FM_COEF 933 /* coefficient for half-life of 10 secs */
2449 #define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
2450 #define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
2451 #define FM_SCALE 1000 /* faux fixed point scale */
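/*
 * Worked example: starting from val = 1000 with no further events, one
 * elapsed second gives val = (933 * 1000) / 1000 = 933, two seconds give
 * 870, and after about ten seconds the value has halved (0.933^10 ~= 0.5),
 * matching the 10 second half-life described above.
 */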
2452
2453 /* Initialize a frequency meter */
2454 static void fmeter_init(struct fmeter *fmp)
2455 {
2456 fmp->cnt = 0;
2457 fmp->val = 0;
2458 fmp->time = 0;
2459 spin_lock_init(&fmp->lock);
2460 }
2461
2462 /* Internal meter update - process cnt events and update value */
2463 static void fmeter_update(struct fmeter *fmp)
2464 {
2465 time64_t now;
2466 u32 ticks;
2467
2468 now = ktime_get_seconds();
2469 ticks = now - fmp->time;
2470
2471 if (ticks == 0)
2472 return;
2473
2474 ticks = min(FM_MAXTICKS, ticks);
2475 while (ticks-- > 0)
2476 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2477 fmp->time = now;
2478
2479 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2480 fmp->cnt = 0;
2481 }
2482
2483 /* Process any previous ticks, then bump cnt by one (times scale). */
2484 static void fmeter_markevent(struct fmeter *fmp)
2485 {
2486 spin_lock(&fmp->lock);
2487 fmeter_update(fmp);
2488 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2489 spin_unlock(&fmp->lock);
2490 }
2491
2492 /* Process any previous ticks, then return current value. */
2493 static int fmeter_getrate(struct fmeter *fmp)
2494 {
2495 int val;
2496
2497 spin_lock(&fmp->lock);
2498 fmeter_update(fmp);
2499 val = fmp->val;
2500 spin_unlock(&fmp->lock);
2501 return val;
2502 }
2503
2504 static struct cpuset *cpuset_attach_old_cs;
2505
2506 /*
2507 * Check to see if a cpuset can accept a new task
2508 * For v1, cpus_allowed and mems_allowed can't be empty.
2509 * For v2, effective_cpus can't be empty.
2510 * Note that in v1, effective_cpus = cpus_allowed.
2511 */
2512 static int cpuset_can_attach_check(struct cpuset *cs)
2513 {
2514 if (cpumask_empty(cs->effective_cpus) ||
2515 (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
2516 return -ENOSPC;
2517 return 0;
2518 }
2519
2520 static void reset_migrate_dl_data(struct cpuset *cs)
2521 {
2522 cs->nr_migrate_dl_tasks = 0;
2523 cs->sum_migrate_dl_bw = 0;
2524 }
2525
2526 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2527 static int cpuset_can_attach(struct cgroup_taskset *tset)
2528 {
2529 struct cgroup_subsys_state *css;
2530 struct cpuset *cs, *oldcs;
2531 struct task_struct *task;
2532 bool cpus_updated, mems_updated;
2533 int ret;
2534
2535 /* used later by cpuset_attach() */
2536 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2537 oldcs = cpuset_attach_old_cs;
2538 cs = css_cs(css);
2539
2540 mutex_lock(&cpuset_mutex);
2541
2542 /* Check to see if task is allowed in the cpuset */
2543 ret = cpuset_can_attach_check(cs);
2544 if (ret)
2545 goto out_unlock;
2546
2547 cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
2548 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2549
2550 cgroup_taskset_for_each(task, css, tset) {
2551 ret = task_can_attach(task);
2552 if (ret)
2553 goto out_unlock;
2554
2555 /*
2556 * Skip the rights-over-task check in v2 when nothing changes;
2557 * migration permission derives from hierarchy ownership in
2558 * cgroup_procs_write_permission().
2559 */
2560 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
2561 (cpus_updated || mems_updated)) {
2562 ret = security_task_setscheduler(task);
2563 if (ret)
2564 goto out_unlock;
2565 }
2566
2567 if (dl_task(task)) {
2568 cs->nr_migrate_dl_tasks++;
2569 cs->sum_migrate_dl_bw += task->dl.dl_bw;
2570 }
2571 }
2572
2573 if (!cs->nr_migrate_dl_tasks)
2574 goto out_success;
2575
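	/*
	 * If the destination's effective CPUs are disjoint from the source's,
	 * the deadline tasks being moved end up on a different root domain,
	 * so reserve their combined bandwidth (sum_migrate_dl_bw) up front on
	 * any active destination CPU. The reservation is released again in
	 * cpuset_cancel_attach() if the migration is aborted.
	 */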
2576 if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
2577 int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
2578
2579 if (unlikely(cpu >= nr_cpu_ids)) {
2580 reset_migrate_dl_data(cs);
2581 ret = -EINVAL;
2582 goto out_unlock;
2583 }
2584
2585 ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
2586 if (ret) {
2587 reset_migrate_dl_data(cs);
2588 goto out_unlock;
2589 }
2590 }
2591
2592 out_success:
2593 /*
2594 * Mark that an attach is in progress. This makes validate_change() fail
2595 * changes which zero cpus/mems_allowed.
2596 */
2597 cs->attach_in_progress++;
2598 out_unlock:
2599 mutex_unlock(&cpuset_mutex);
2600 return ret;
2601 }
2602
2603 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2604 {
2605 struct cgroup_subsys_state *css;
2606 struct cpuset *cs;
2607
2608 cgroup_taskset_first(tset, &css);
2609 cs = css_cs(css);
2610
2611 mutex_lock(&cpuset_mutex);
2612 cs->attach_in_progress--;
2613 if (!cs->attach_in_progress)
2614 wake_up(&cpuset_attach_wq);
2615
2616 if (cs->nr_migrate_dl_tasks) {
2617 int cpu = cpumask_any(cs->effective_cpus);
2618
2619 dl_bw_free(cpu, cs->sum_migrate_dl_bw);
2620 reset_migrate_dl_data(cs);
2621 }
2622
2623 mutex_unlock(&cpuset_mutex);
2624 }
2625
2626 /*
2627 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
2628 * but we can't allocate it dynamically there. Define it globally and
2629 * allocate it from cpuset_init().
2630 */
2631 static cpumask_var_t cpus_attach;
2632 static nodemask_t cpuset_attach_nodemask_to;
2633
2634 static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
2635 {
2636 lockdep_assert_held(&cpuset_mutex);
2637
2638 if (cs != &top_cpuset)
2639 guarantee_online_cpus(task, cpus_attach);
2640 else
2641 cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
2642 cs->subparts_cpus);
2643 /*
2644 * can_attach beforehand should guarantee that this doesn't
2645 * fail. TODO: have a better way to handle failure here
2646 */
2647 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2648
2649 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2650 cpuset_update_task_spread_flags(cs, task);
2651 }
2652
2653 static void cpuset_attach(struct cgroup_taskset *tset)
2654 {
2655 struct task_struct *task;
2656 struct task_struct *leader;
2657 struct cgroup_subsys_state *css;
2658 struct cpuset *cs;
2659 struct cpuset *oldcs = cpuset_attach_old_cs;
2660 bool cpus_updated, mems_updated;
2661
2662 cgroup_taskset_first(tset, &css);
2663 cs = css_cs(css);
2664
2665 lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */
2666 mutex_lock(&cpuset_mutex);
2667 cpus_updated = !cpumask_equal(cs->effective_cpus,
2668 oldcs->effective_cpus);
2669 mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2670
2671 /*
2672 * In the default hierarchy, enabling cpuset in the child cgroups
2673 * will trigger a number of cpuset_attach() calls with no change
2674 * in effective cpus and mems. In that case, we can optimize out
2675 * by skipping the task iteration and update.
2676 */
2677 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2678 !cpus_updated && !mems_updated) {
2679 cpuset_attach_nodemask_to = cs->effective_mems;
2680 goto out;
2681 }
2682
2683 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2684
2685 cgroup_taskset_for_each(task, css, tset)
2686 cpuset_attach_task(cs, task);
2687
2688 /*
2689 * Change mm for all threadgroup leaders. This is expensive and may
2690 * sleep and should be moved outside migration path proper. Skip it
2691 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
2692 * not set.
2693 */
2694 cpuset_attach_nodemask_to = cs->effective_mems;
2695 if (!is_memory_migrate(cs) && !mems_updated)
2696 goto out;
2697
2698 cgroup_taskset_for_each_leader(leader, css, tset) {
2699 struct mm_struct *mm = get_task_mm(leader);
2700
2701 if (mm) {
2702 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2703
2704 /*
2705 * old_mems_allowed is the same as mems_allowed
2706 * here, except if this task is being moved
2707 * automatically due to hotplug. In that case
2708 * @mems_allowed has been updated and is empty, so
2709 * @old_mems_allowed is the right nodeset that we
2710 * migrate the mm from.
2711 */
2712 if (is_memory_migrate(cs))
2713 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2714 &cpuset_attach_nodemask_to);
2715 else
2716 mmput(mm);
2717 }
2718 }
2719
2720 out:
2721 cs->old_mems_allowed = cpuset_attach_nodemask_to;
2722
2723 if (cs->nr_migrate_dl_tasks) {
2724 cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
2725 oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
2726 reset_migrate_dl_data(cs);
2727 }
2728
2729 cs->attach_in_progress--;
2730 if (!cs->attach_in_progress)
2731 wake_up(&cpuset_attach_wq);
2732
2733 mutex_unlock(&cpuset_mutex);
2734 }
2735
2736 /* The various types of files and directories in a cpuset file system */
2737
2738 typedef enum {
2739 FILE_MEMORY_MIGRATE,
2740 FILE_CPULIST,
2741 FILE_MEMLIST,
2742 FILE_EFFECTIVE_CPULIST,
2743 FILE_EFFECTIVE_MEMLIST,
2744 FILE_SUBPARTS_CPULIST,
2745 FILE_CPU_EXCLUSIVE,
2746 FILE_MEM_EXCLUSIVE,
2747 FILE_MEM_HARDWALL,
2748 FILE_SCHED_LOAD_BALANCE,
2749 FILE_PARTITION_ROOT,
2750 FILE_SCHED_RELAX_DOMAIN_LEVEL,
2751 FILE_MEMORY_PRESSURE_ENABLED,
2752 FILE_MEMORY_PRESSURE,
2753 FILE_SPREAD_PAGE,
2754 FILE_SPREAD_SLAB,
2755 } cpuset_filetype_t;
2756
2757 static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2758 u64 val)
2759 {
2760 struct cpuset *cs = css_cs(css);
2761 cpuset_filetype_t type = cft->private;
2762 int retval = 0;
2763
2764 cpus_read_lock();
2765 mutex_lock(&cpuset_mutex);
2766 if (!is_cpuset_online(cs)) {
2767 retval = -ENODEV;
2768 goto out_unlock;
2769 }
2770
2771 switch (type) {
2772 case FILE_CPU_EXCLUSIVE:
2773 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2774 break;
2775 case FILE_MEM_EXCLUSIVE:
2776 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2777 break;
2778 case FILE_MEM_HARDWALL:
2779 retval = update_flag(CS_MEM_HARDWALL, cs, val);
2780 break;
2781 case FILE_SCHED_LOAD_BALANCE:
2782 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2783 break;
2784 case FILE_MEMORY_MIGRATE:
2785 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2786 break;
2787 case FILE_MEMORY_PRESSURE_ENABLED:
2788 cpuset_memory_pressure_enabled = !!val;
2789 break;
2790 case FILE_SPREAD_PAGE:
2791 retval = update_flag(CS_SPREAD_PAGE, cs, val);
2792 break;
2793 case FILE_SPREAD_SLAB:
2794 retval = update_flag(CS_SPREAD_SLAB, cs, val);
2795 break;
2796 default:
2797 retval = -EINVAL;
2798 break;
2799 }
2800 out_unlock:
2801 mutex_unlock(&cpuset_mutex);
2802 cpus_read_unlock();
2803 return retval;
2804 }
2805
2806 static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2807 s64 val)
2808 {
2809 struct cpuset *cs = css_cs(css);
2810 cpuset_filetype_t type = cft->private;
2811 int retval = -ENODEV;
2812
2813 cpus_read_lock();
2814 mutex_lock(&cpuset_mutex);
2815 if (!is_cpuset_online(cs))
2816 goto out_unlock;
2817
2818 switch (type) {
2819 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2820 retval = update_relax_domain_level(cs, val);
2821 break;
2822 default:
2823 retval = -EINVAL;
2824 break;
2825 }
2826 out_unlock:
2827 mutex_unlock(&cpuset_mutex);
2828 cpus_read_unlock();
2829 return retval;
2830 }
2831
2832 /*
2833 * Common handling for a write to a "cpus" or "mems" file.
2834 */
2835 static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2836 char *buf, size_t nbytes, loff_t off)
2837 {
2838 struct cpuset *cs = css_cs(of_css(of));
2839 struct cpuset *trialcs;
2840 int retval = -ENODEV;
2841
2842 buf = strstrip(buf);
2843
2844 /*
2845 * CPU or memory hotunplug may leave @cs w/o any execution
2846 * resources, in which case the hotplug code asynchronously updates
2847 * configuration and transfers all tasks to the nearest ancestor
2848 * which can execute.
2849 *
2850 * As writes to "cpus" or "mems" may restore @cs's execution
2851 * resources, wait for the previously scheduled operations before
2852 * proceeding, so that we don't keep removing tasks added
2853 * after execution capability is restored.
2854 *
2855 * cpuset_hotplug_work calls back into cgroup core via
2856 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2857 * operation like this one can lead to a deadlock through kernfs
2858 * active_ref protection. Let's break the protection. Losing the
2859 * protection is okay as we check whether @cs is online after
2860 * grabbing cpuset_mutex anyway. This only happens on the legacy
2861 * hierarchies.
2862 */
2863 css_get(&cs->css);
2864 kernfs_break_active_protection(of->kn);
2865 flush_work(&cpuset_hotplug_work);
2866
2867 cpus_read_lock();
2868 mutex_lock(&cpuset_mutex);
2869 if (!is_cpuset_online(cs))
2870 goto out_unlock;
2871
2872 trialcs = alloc_trial_cpuset(cs);
2873 if (!trialcs) {
2874 retval = -ENOMEM;
2875 goto out_unlock;
2876 }
2877
2878 switch (of_cft(of)->private) {
2879 case FILE_CPULIST:
2880 retval = update_cpumask(cs, trialcs, buf);
2881 break;
2882 case FILE_MEMLIST:
2883 retval = update_nodemask(cs, trialcs, buf);
2884 break;
2885 default:
2886 retval = -EINVAL;
2887 break;
2888 }
2889
2890 free_cpuset(trialcs);
2891 out_unlock:
2892 mutex_unlock(&cpuset_mutex);
2893 cpus_read_unlock();
2894 kernfs_unbreak_active_protection(of->kn);
2895 css_put(&cs->css);
2896 flush_workqueue(cpuset_migrate_mm_wq);
2897 return retval ?: nbytes;
2898 }
2899
2900 /*
2901 * These ascii lists should be read in a single call, by using a user
2902 * buffer large enough to hold the entire map. If read in smaller
2903 * chunks, there is no guarantee of atomicity. Since the display format
2904 * used, list of ranges of sequential numbers, is variable length,
2905 * and since these maps can change value dynamically, one could read
2906 * gibberish by doing partial reads while a list was changing.
2907 */
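/*
 * For example, reading cpuset.cpus.effective may return "0-3,8-11\n":
 * ranges of consecutive numbers separated by commas, as produced by the
 * "%*pbl" bitmap format used below.
 */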
2908 static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2909 {
2910 struct cpuset *cs = css_cs(seq_css(sf));
2911 cpuset_filetype_t type = seq_cft(sf)->private;
2912 int ret = 0;
2913
2914 spin_lock_irq(&callback_lock);
2915
2916 switch (type) {
2917 case FILE_CPULIST:
2918 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
2919 break;
2920 case FILE_MEMLIST:
2921 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2922 break;
2923 case FILE_EFFECTIVE_CPULIST:
2924 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2925 break;
2926 case FILE_EFFECTIVE_MEMLIST:
2927 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2928 break;
2929 case FILE_SUBPARTS_CPULIST:
2930 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2931 break;
2932 default:
2933 ret = -EINVAL;
2934 }
2935
2936 spin_unlock_irq(&callback_lock);
2937 return ret;
2938 }
2939
2940 static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2941 {
2942 struct cpuset *cs = css_cs(css);
2943 cpuset_filetype_t type = cft->private;
2944 switch (type) {
2945 case FILE_CPU_EXCLUSIVE:
2946 return is_cpu_exclusive(cs);
2947 case FILE_MEM_EXCLUSIVE:
2948 return is_mem_exclusive(cs);
2949 case FILE_MEM_HARDWALL:
2950 return is_mem_hardwall(cs);
2951 case FILE_SCHED_LOAD_BALANCE:
2952 return is_sched_load_balance(cs);
2953 case FILE_MEMORY_MIGRATE:
2954 return is_memory_migrate(cs);
2955 case FILE_MEMORY_PRESSURE_ENABLED:
2956 return cpuset_memory_pressure_enabled;
2957 case FILE_MEMORY_PRESSURE:
2958 return fmeter_getrate(&cs->fmeter);
2959 case FILE_SPREAD_PAGE:
2960 return is_spread_page(cs);
2961 case FILE_SPREAD_SLAB:
2962 return is_spread_slab(cs);
2963 default:
2964 BUG();
2965 }
2966
2967 /* Unreachable but makes gcc happy */
2968 return 0;
2969 }
2970
2971 static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2972 {
2973 struct cpuset *cs = css_cs(css);
2974 cpuset_filetype_t type = cft->private;
2975 switch (type) {
2976 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2977 return cs->relax_domain_level;
2978 default:
2979 BUG();
2980 }
2981
2982 /* Unreachable but makes gcc happy */
2983 return 0;
2984 }
2985
2986 static int sched_partition_show(struct seq_file *seq, void *v)
2987 {
2988 struct cpuset *cs = css_cs(seq_css(seq));
2989 const char *err, *type = NULL;
2990
2991 switch (cs->partition_root_state) {
2992 case PRS_ROOT:
2993 seq_puts(seq, "root\n");
2994 break;
2995 case PRS_ISOLATED:
2996 seq_puts(seq, "isolated\n");
2997 break;
2998 case PRS_MEMBER:
2999 seq_puts(seq, "member\n");
3000 break;
3001 case PRS_INVALID_ROOT:
3002 type = "root";
3003 fallthrough;
3004 case PRS_INVALID_ISOLATED:
3005 if (!type)
3006 type = "isolated";
3007 err = perr_strings[READ_ONCE(cs->prs_err)];
3008 if (err)
3009 seq_printf(seq, "%s invalid (%s)\n", type, err);
3010 else
3011 seq_printf(seq, "%s invalid\n", type);
3012 break;
3013 }
3014 return 0;
3015 }
3016
3017 static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
3018 size_t nbytes, loff_t off)
3019 {
3020 struct cpuset *cs = css_cs(of_css(of));
3021 int val;
3022 int retval = -ENODEV;
3023
3024 buf = strstrip(buf);
3025
3026 /*
3027 * Map "root", "member" and "isolated" to PRS_ROOT, PRS_MEMBER and PRS_ISOLATED.
3028 */
3029 if (!strcmp(buf, "root"))
3030 val = PRS_ROOT;
3031 else if (!strcmp(buf, "member"))
3032 val = PRS_MEMBER;
3033 else if (!strcmp(buf, "isolated"))
3034 val = PRS_ISOLATED;
3035 else
3036 return -EINVAL;
3037
3038 css_get(&cs->css);
3039 cpus_read_lock();
3040 mutex_lock(&cpuset_mutex);
3041 if (!is_cpuset_online(cs))
3042 goto out_unlock;
3043
3044 retval = update_prstate(cs, val);
3045 out_unlock:
3046 mutex_unlock(&cpuset_mutex);
3047 cpus_read_unlock();
3048 css_put(&cs->css);
3049 return retval ?: nbytes;
3050 }
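/*
 * Example usage from userspace on the default hierarchy (illustrative):
 *
 *   # echo "0-3" > cpuset.cpus
 *   # echo root > cpuset.cpus.partition      # make this cpuset a partition
 *   # echo isolated > cpuset.cpus.partition  # partition without load balancing
 *   # echo member > cpuset.cpus.partition    # back to an ordinary cpuset
 *
 * Reading the file back reports "root", "isolated", "member" or a
 * "<type> invalid (<reason>)" string, see sched_partition_show() above.
 */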
3051
3052 /*
3053 * for the common functions, 'private' gives the type of file
3054 */
3055
3056 static struct cftype legacy_files[] = {
3057 {
3058 .name = "cpus",
3059 .seq_show = cpuset_common_seq_show,
3060 .write = cpuset_write_resmask,
3061 .max_write_len = (100U + 6 * NR_CPUS),
3062 .private = FILE_CPULIST,
3063 },
3064
3065 {
3066 .name = "mems",
3067 .seq_show = cpuset_common_seq_show,
3068 .write = cpuset_write_resmask,
3069 .max_write_len = (100U + 6 * MAX_NUMNODES),
3070 .private = FILE_MEMLIST,
3071 },
3072
3073 {
3074 .name = "effective_cpus",
3075 .seq_show = cpuset_common_seq_show,
3076 .private = FILE_EFFECTIVE_CPULIST,
3077 },
3078
3079 {
3080 .name = "effective_mems",
3081 .seq_show = cpuset_common_seq_show,
3082 .private = FILE_EFFECTIVE_MEMLIST,
3083 },
3084
3085 {
3086 .name = "cpu_exclusive",
3087 .read_u64 = cpuset_read_u64,
3088 .write_u64 = cpuset_write_u64,
3089 .private = FILE_CPU_EXCLUSIVE,
3090 },
3091
3092 {
3093 .name = "mem_exclusive",
3094 .read_u64 = cpuset_read_u64,
3095 .write_u64 = cpuset_write_u64,
3096 .private = FILE_MEM_EXCLUSIVE,
3097 },
3098
3099 {
3100 .name = "mem_hardwall",
3101 .read_u64 = cpuset_read_u64,
3102 .write_u64 = cpuset_write_u64,
3103 .private = FILE_MEM_HARDWALL,
3104 },
3105
3106 {
3107 .name = "sched_load_balance",
3108 .read_u64 = cpuset_read_u64,
3109 .write_u64 = cpuset_write_u64,
3110 .private = FILE_SCHED_LOAD_BALANCE,
3111 },
3112
3113 {
3114 .name = "sched_relax_domain_level",
3115 .read_s64 = cpuset_read_s64,
3116 .write_s64 = cpuset_write_s64,
3117 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
3118 },
3119
3120 {
3121 .name = "memory_migrate",
3122 .read_u64 = cpuset_read_u64,
3123 .write_u64 = cpuset_write_u64,
3124 .private = FILE_MEMORY_MIGRATE,
3125 },
3126
3127 {
3128 .name = "memory_pressure",
3129 .read_u64 = cpuset_read_u64,
3130 .private = FILE_MEMORY_PRESSURE,
3131 },
3132
3133 {
3134 .name = "memory_spread_page",
3135 .read_u64 = cpuset_read_u64,
3136 .write_u64 = cpuset_write_u64,
3137 .private = FILE_SPREAD_PAGE,
3138 },
3139
3140 {
3141 .name = "memory_spread_slab",
3142 .read_u64 = cpuset_read_u64,
3143 .write_u64 = cpuset_write_u64,
3144 .private = FILE_SPREAD_SLAB,
3145 },
3146
3147 {
3148 .name = "memory_pressure_enabled",
3149 .flags = CFTYPE_ONLY_ON_ROOT,
3150 .read_u64 = cpuset_read_u64,
3151 .write_u64 = cpuset_write_u64,
3152 .private = FILE_MEMORY_PRESSURE_ENABLED,
3153 },
3154
3155 { } /* terminate */
3156 };
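/*
 * On a v1 (legacy) hierarchy the files above are exposed with a "cpuset."
 * prefix added by the cgroup core, e.g. cpuset.cpus, cpuset.mems and
 * cpuset.memory_pressure.
 */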
3157
3158 /*
3159 * This is currently a minimal set for the default hierarchy. It can be
3160 * expanded later on by migrating more features and control files from v1.
3161 */
3162 static struct cftype dfl_files[] = {
3163 {
3164 .name = "cpus",
3165 .seq_show = cpuset_common_seq_show,
3166 .write = cpuset_write_resmask,
3167 .max_write_len = (100U + 6 * NR_CPUS),
3168 .private = FILE_CPULIST,
3169 .flags = CFTYPE_NOT_ON_ROOT,
3170 },
3171
3172 {
3173 .name = "mems",
3174 .seq_show = cpuset_common_seq_show,
3175 .write = cpuset_write_resmask,
3176 .max_write_len = (100U + 6 * MAX_NUMNODES),
3177 .private = FILE_MEMLIST,
3178 .flags = CFTYPE_NOT_ON_ROOT,
3179 },
3180
3181 {
3182 .name = "cpus.effective",
3183 .seq_show = cpuset_common_seq_show,
3184 .private = FILE_EFFECTIVE_CPULIST,
3185 },
3186
3187 {
3188 .name = "mems.effective",
3189 .seq_show = cpuset_common_seq_show,
3190 .private = FILE_EFFECTIVE_MEMLIST,
3191 },
3192
3193 {
3194 .name = "cpus.partition",
3195 .seq_show = sched_partition_show,
3196 .write = sched_partition_write,
3197 .private = FILE_PARTITION_ROOT,
3198 .flags = CFTYPE_NOT_ON_ROOT,
3199 .file_offset = offsetof(struct cpuset, partition_file),
3200 },
3201
3202 {
3203 .name = "cpus.subpartitions",
3204 .seq_show = cpuset_common_seq_show,
3205 .private = FILE_SUBPARTS_CPULIST,
3206 .flags = CFTYPE_DEBUG,
3207 },
3208
3209 { } /* terminate */
3210 };
3211
3212
3213 /**
3214 * cpuset_css_alloc - Allocate a cpuset css
3215 * @parent_css: Parent css of the control group that the new cpuset will be
3216 * part of
3217 * Return: cpuset css on success, -ENOMEM on failure.
3218 *
3219 * Allocate and initialize a new cpuset css for a non-NULL @parent_css;
3220 * return the top cpuset css otherwise.
3221 */
3222 static struct cgroup_subsys_state *
3223 cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3224 {
3225 struct cpuset *cs;
3226
3227 if (!parent_css)
3228 return &top_cpuset.css;
3229
3230 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3231 if (!cs)
3232 return ERR_PTR(-ENOMEM);
3233
3234 if (alloc_cpumasks(cs, NULL)) {
3235 kfree(cs);
3236 return ERR_PTR(-ENOMEM);
3237 }
3238
3239 __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3240 nodes_clear(cs->mems_allowed);
3241 nodes_clear(cs->effective_mems);
3242 fmeter_init(&cs->fmeter);
3243 cs->relax_domain_level = -1;
3244
3245 /* Set CS_MEMORY_MIGRATE for default hierarchy */
3246 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
3247 __set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3248
3249 return &cs->css;
3250 }
3251
3252 static int cpuset_css_online(struct cgroup_subsys_state *css)
3253 {
3254 struct cpuset *cs = css_cs(css);
3255 struct cpuset *parent = parent_cs(cs);
3256 struct cpuset *tmp_cs;
3257 struct cgroup_subsys_state *pos_css;
3258
3259 if (!parent)
3260 return 0;
3261
3262 cpus_read_lock();
3263 mutex_lock(&cpuset_mutex);
3264
3265 set_bit(CS_ONLINE, &cs->flags);
3266 if (is_spread_page(parent))
3267 set_bit(CS_SPREAD_PAGE, &cs->flags);
3268 if (is_spread_slab(parent))
3269 set_bit(CS_SPREAD_SLAB, &cs->flags);
3270
3271 cpuset_inc();
3272
3273 spin_lock_irq(&callback_lock);
3274 if (is_in_v2_mode()) {
3275 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3276 cs->effective_mems = parent->effective_mems;
3277 cs->use_parent_ecpus = true;
3278 parent->child_ecpus_count++;
3279 }
3280
3281 /*
3282 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
3283 */
3284 if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3285 !is_sched_load_balance(parent))
3286 clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3287
3288 spin_unlock_irq(&callback_lock);
3289
3290 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3291 goto out_unlock;
3292
3293 /*
3294 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3295 * set. This flag handling is implemented in cgroup core for
3296 * historical reasons - the flag may be specified during mount.
3297 *
3298 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3299 * refuse to clone the configuration - thereby refusing to admit the
3300 * task, and as a result failing the sys_unshare() or
3301 * clone() which initiated it. If this becomes a problem for some
3302 * users who wish to allow that scenario, then this could be
3303 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3304 * (and likewise for mems) to the new cgroup.
3305 */
3306 rcu_read_lock();
3307 cpuset_for_each_child(tmp_cs, pos_css, parent) {
3308 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3309 rcu_read_unlock();
3310 goto out_unlock;
3311 }
3312 }
3313 rcu_read_unlock();
3314
3315 spin_lock_irq(&callback_lock);
3316 cs->mems_allowed = parent->mems_allowed;
3317 cs->effective_mems = parent->mems_allowed;
3318 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3319 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3320 spin_unlock_irq(&callback_lock);
3321 out_unlock:
3322 mutex_unlock(&cpuset_mutex);
3323 cpus_read_unlock();
3324 return 0;
3325 }
3326
3327 /*
3328 * If the cpuset being removed has its flag 'sched_load_balance'
3329 * enabled, then simulate turning sched_load_balance off, which
3330 * will call rebuild_sched_domains_locked(). That is not needed
3331 * in the default hierarchy where only changes in partition
3332 * will cause repartitioning.
3333 *
3334 * If the cpuset has the 'sched.partition' flag enabled, simulate
3335 * turning 'sched.partition' off.
3336 */
3337
3338 static void cpuset_css_offline(struct cgroup_subsys_state *css)
3339 {
3340 struct cpuset *cs = css_cs(css);
3341
3342 cpus_read_lock();
3343 mutex_lock(&cpuset_mutex);
3344
3345 if (is_partition_valid(cs))
3346 update_prstate(cs, 0);
3347
3348 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3349 is_sched_load_balance(cs))
3350 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3351
3352 if (cs->use_parent_ecpus) {
3353 struct cpuset *parent = parent_cs(cs);
3354
3355 cs->use_parent_ecpus = false;
3356 parent->child_ecpus_count--;
3357 }
3358
3359 cpuset_dec();
3360 clear_bit(CS_ONLINE, &cs->flags);
3361
3362 mutex_unlock(&cpuset_mutex);
3363 cpus_read_unlock();
3364 }
3365
3366 static void cpuset_css_free(struct cgroup_subsys_state *css)
3367 {
3368 struct cpuset *cs = css_cs(css);
3369
3370 free_cpuset(cs);
3371 }
3372
3373 static void cpuset_bind(struct cgroup_subsys_state *root_css)
3374 {
3375 mutex_lock(&cpuset_mutex);
3376 spin_lock_irq(&callback_lock);
3377
3378 if (is_in_v2_mode()) {
3379 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3380 top_cpuset.mems_allowed = node_possible_map;
3381 } else {
3382 cpumask_copy(top_cpuset.cpus_allowed,
3383 top_cpuset.effective_cpus);
3384 top_cpuset.mems_allowed = top_cpuset.effective_mems;
3385 }
3386
3387 spin_unlock_irq(&callback_lock);
3388 mutex_unlock(&cpuset_mutex);
3389 }
3390
3391 /*
3392 * In case the child is cloned into a cpuset different from its parent,
3393 * additional checks are done to see if the move is allowed.
3394 */
3395 static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
3396 {
3397 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3398 bool same_cs;
3399 int ret;
3400
3401 rcu_read_lock();
3402 same_cs = (cs == task_cs(current));
3403 rcu_read_unlock();
3404
3405 if (same_cs)
3406 return 0;
3407
3408 lockdep_assert_held(&cgroup_mutex);
3409 mutex_lock(&cpuset_mutex);
3410
3411 /* Check to see if task is allowed in the cpuset */
3412 ret = cpuset_can_attach_check(cs);
3413 if (ret)
3414 goto out_unlock;
3415
3416 ret = task_can_attach(task);
3417 if (ret)
3418 goto out_unlock;
3419
3420 ret = security_task_setscheduler(task);
3421 if (ret)
3422 goto out_unlock;
3423
3424 /*
3425 * Mark that an attach is in progress. This makes validate_change() fail
3426 * changes which zero cpus/mems_allowed.
3427 */
3428 cs->attach_in_progress++;
3429 out_unlock:
3430 mutex_unlock(&cpuset_mutex);
3431 return ret;
3432 }
3433
3434 static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
3435 {
3436 struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
3437 bool same_cs;
3438
3439 rcu_read_lock();
3440 same_cs = (cs == task_cs(current));
3441 rcu_read_unlock();
3442
3443 if (same_cs)
3444 return;
3445
3446 mutex_lock(&cpuset_mutex);
3447 cs->attach_in_progress--;
3448 if (!cs->attach_in_progress)
3449 wake_up(&cpuset_attach_wq);
3450 mutex_unlock(&cpuset_mutex);
3451 }
3452
3453 /*
3454 * Make sure the new task conforms to the current state of its parent,
3455 * which could have been changed by cpuset just after it inherits the
3456 * state from the parent and before it sits on the cgroup's task list.
3457 */
3458 static void cpuset_fork(struct task_struct *task)
3459 {
3460 struct cpuset *cs;
3461 bool same_cs;
3462
3463 rcu_read_lock();
3464 cs = task_cs(task);
3465 same_cs = (cs == task_cs(current));
3466 rcu_read_unlock();
3467
3468 if (same_cs) {
3469 if (cs == &top_cpuset)
3470 return;
3471
3472 set_cpus_allowed_ptr(task, current->cpus_ptr);
3473 task->mems_allowed = current->mems_allowed;
3474 return;
3475 }
3476
3477 /* CLONE_INTO_CGROUP */
3478 mutex_lock(&cpuset_mutex);
3479 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3480 cpuset_attach_task(cs, task);
3481
3482 cs->attach_in_progress--;
3483 if (!cs->attach_in_progress)
3484 wake_up(&cpuset_attach_wq);
3485
3486 mutex_unlock(&cpuset_mutex);
3487 }
3488
3489 struct cgroup_subsys cpuset_cgrp_subsys = {
3490 .css_alloc = cpuset_css_alloc,
3491 .css_online = cpuset_css_online,
3492 .css_offline = cpuset_css_offline,
3493 .css_free = cpuset_css_free,
3494 .can_attach = cpuset_can_attach,
3495 .cancel_attach = cpuset_cancel_attach,
3496 .attach = cpuset_attach,
3497 .post_attach = cpuset_post_attach,
3498 .bind = cpuset_bind,
3499 .can_fork = cpuset_can_fork,
3500 .cancel_fork = cpuset_cancel_fork,
3501 .fork = cpuset_fork,
3502 .legacy_cftypes = legacy_files,
3503 .dfl_cftypes = dfl_files,
3504 .early_init = true,
3505 .threaded = true,
3506 };
3507
3508 /**
3509 * cpuset_init - initialize cpusets at system boot
3510 *
3511 * Description: Initialize top_cpuset
3512 **/
3513
3514 int __init cpuset_init(void)
3515 {
3516 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3517 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3518 BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
3519
3520 cpumask_setall(top_cpuset.cpus_allowed);
3521 nodes_setall(top_cpuset.mems_allowed);
3522 cpumask_setall(top_cpuset.effective_cpus);
3523 nodes_setall(top_cpuset.effective_mems);
3524
3525 fmeter_init(&top_cpuset.fmeter);
3526 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
3527 top_cpuset.relax_domain_level = -1;
3528
3529 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3530
3531 return 0;
3532 }
3533
3534 /*
3535 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
3536 * or memory nodes, we need to walk over the cpuset hierarchy,
3537 * removing that CPU or node from all cpusets. If this removes the
3538 * last CPU or node from a cpuset, then move the tasks in the empty
3539 * cpuset to its next-highest non-empty parent.
3540 */
3541 static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3542 {
3543 struct cpuset *parent;
3544
3545 /*
3546 * Find its next-highest non-empty parent (the top cpuset
3547 * has online cpus, so it can't be empty).
3548 */
3549 parent = parent_cs(cs);
3550 while (cpumask_empty(parent->cpus_allowed) ||
3551 nodes_empty(parent->mems_allowed))
3552 parent = parent_cs(parent);
3553
3554 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
3555 pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3556 pr_cont_cgroup_name(cs->css.cgroup);
3557 pr_cont("\n");
3558 }
3559 }
3560
3561 static void
3562 hotplug_update_tasks_legacy(struct cpuset *cs,
3563 struct cpumask *new_cpus, nodemask_t *new_mems,
3564 bool cpus_updated, bool mems_updated)
3565 {
3566 bool is_empty;
3567
3568 spin_lock_irq(&callback_lock);
3569 cpumask_copy(cs->cpus_allowed, new_cpus);
3570 cpumask_copy(cs->effective_cpus, new_cpus);
3571 cs->mems_allowed = *new_mems;
3572 cs->effective_mems = *new_mems;
3573 spin_unlock_irq(&callback_lock);
3574
3575 /*
3576 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
3577 * as the tasks will be migrated to an ancestor.
3578 */
3579 if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
3580 update_tasks_cpumask(cs, new_cpus);
3581 if (mems_updated && !nodes_empty(cs->mems_allowed))
3582 update_tasks_nodemask(cs);
3583
3584 is_empty = cpumask_empty(cs->cpus_allowed) ||
3585 nodes_empty(cs->mems_allowed);
3586
3587 /*
3588 * Move tasks to the nearest ancestor with execution resources.
3589 * This is a full cgroup operation which will also call back into
3590 * cpuset, so it should be done outside any lock.
3591 */
3592 if (is_empty) {
3593 mutex_unlock(&cpuset_mutex);
3594 remove_tasks_in_empty_cpuset(cs);
3595 mutex_lock(&cpuset_mutex);
3596 }
3597 }
3598
3599 static void
3600 hotplug_update_tasks(struct cpuset *cs,
3601 struct cpumask *new_cpus, nodemask_t *new_mems,
3602 bool cpus_updated, bool mems_updated)
3603 {
3604 /* A partition root is allowed to have empty effective cpus */
3605 if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3606 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3607 if (nodes_empty(*new_mems))
3608 *new_mems = parent_cs(cs)->effective_mems;
3609
3610 spin_lock_irq(&callback_lock);
3611 cpumask_copy(cs->effective_cpus, new_cpus);
3612 cs->effective_mems = *new_mems;
3613 spin_unlock_irq(&callback_lock);
3614
3615 if (cpus_updated)
3616 update_tasks_cpumask(cs, new_cpus);
3617 if (mems_updated)
3618 update_tasks_nodemask(cs);
3619 }
3620
3621 static bool force_rebuild;
3622
3623 void cpuset_force_rebuild(void)
3624 {
3625 force_rebuild = true;
3626 }
3627
3628 /**
3629 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3630 * @cs: cpuset in interest
3631 * @tmp: the tmpmasks structure pointer
3632 *
3633 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3634 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3635 * all its tasks are moved to the nearest ancestor with both resources.
3636 */
3637 static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3638 {
3639 static cpumask_t new_cpus;
3640 static nodemask_t new_mems;
3641 bool cpus_updated;
3642 bool mems_updated;
3643 struct cpuset *parent;
3644 retry:
3645 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3646
3647 mutex_lock(&cpuset_mutex);
3648
3649 /*
3650 * We have raced with task attaching. We wait until attaching
3651 * is finished, so we won't attach a task to an empty cpuset.
3652 */
3653 if (cs->attach_in_progress) {
3654 mutex_unlock(&cpuset_mutex);
3655 goto retry;
3656 }
3657
3658 parent = parent_cs(cs);
3659 compute_effective_cpumask(&new_cpus, cs, parent);
3660 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3661
3662 if (cs->nr_subparts_cpus)
3663 /*
3664 * Make sure that CPUs allocated to child partitions
3665 * do not show up in effective_cpus.
3666 */
3667 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3668
3669 if (!tmp || !cs->partition_root_state)
3670 goto update_tasks;
3671
3672 /*
3673 * In the unlikely event that a partition root has empty
3674 * effective_cpus with tasks, we will have to invalidate child
3675 * partitions, if present, by setting nr_subparts_cpus to 0 to
3676 * reclaim their cpus.
3677 */
3678 if (cs->nr_subparts_cpus && is_partition_valid(cs) &&
3679 cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) {
3680 spin_lock_irq(&callback_lock);
3681 cs->nr_subparts_cpus = 0;
3682 cpumask_clear(cs->subparts_cpus);
3683 spin_unlock_irq(&callback_lock);
3684 compute_effective_cpumask(&new_cpus, cs, parent);
3685 }
3686
3687 /*
3688 * Force the partition to become invalid if either one of
3689 * the following conditions hold:
3690 * 1) empty effective cpus but not valid empty partition.
3691 * 2) parent is invalid or doesn't grant any cpus to child
3692 * partitions.
3693 */
3694 if (is_partition_valid(cs) && (!parent->nr_subparts_cpus ||
3695 (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
3696 int old_prs, parent_prs;
3697
3698 update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
3699 if (cs->nr_subparts_cpus) {
3700 spin_lock_irq(&callback_lock);
3701 cs->nr_subparts_cpus = 0;
3702 cpumask_clear(cs->subparts_cpus);
3703 spin_unlock_irq(&callback_lock);
3704 compute_effective_cpumask(&new_cpus, cs, parent);
3705 }
3706
3707 old_prs = cs->partition_root_state;
3708 parent_prs = parent->partition_root_state;
3709 if (is_partition_valid(cs)) {
3710 spin_lock_irq(&callback_lock);
3711 make_partition_invalid(cs);
3712 spin_unlock_irq(&callback_lock);
3713 if (is_prs_invalid(parent_prs))
3714 WRITE_ONCE(cs->prs_err, PERR_INVPARENT);
3715 else if (!parent_prs)
3716 WRITE_ONCE(cs->prs_err, PERR_NOTPART);
3717 else
3718 WRITE_ONCE(cs->prs_err, PERR_HOTPLUG);
3719 notify_partition_change(cs, old_prs);
3720 }
3721 cpuset_force_rebuild();
3722 }
3723
3724 /*
3725 * On the other hand, an invalid partition root may be transitioned
3726 * back to a regular one.
3727 */
3728 else if (is_partition_valid(parent) && is_partition_invalid(cs)) {
3729 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp);
3730 if (is_partition_valid(cs))
3731 cpuset_force_rebuild();
3732 }
3733
3734 update_tasks:
3735 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3736 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3737 if (!cpus_updated && !mems_updated)
3738 goto unlock; /* Hotplug doesn't affect this cpuset */
3739
3740 if (mems_updated)
3741 check_insane_mems_config(&new_mems);
3742
3743 if (is_in_v2_mode())
3744 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3745 cpus_updated, mems_updated);
3746 else
3747 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3748 cpus_updated, mems_updated);
3749
3750 unlock:
3751 mutex_unlock(&cpuset_mutex);
3752 }
3753
3754 /**
3755 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3756 * @work: unused
3757 *
3758 * This function is called after either CPU or memory configuration has
3759 * changed and updates cpusets accordingly. The top_cpuset is always
3760 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3761 * order to make cpusets transparent (of no effect) on systems that are
3762 * actively using CPU hotplug but making no active use of cpusets.
3763 *
3764 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3765 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3766 * all descendants.
3767 *
3768 * Note that CPU offlining during suspend is ignored. We don't modify
3769 * cpusets across suspend/resume cycles at all.
3770 */
3771 static void cpuset_hotplug_workfn(struct work_struct *work)
3772 {
3773 static cpumask_t new_cpus;
3774 static nodemask_t new_mems;
3775 bool cpus_updated, mems_updated;
3776 bool on_dfl = is_in_v2_mode();
3777 struct tmpmasks tmp, *ptmp = NULL;
3778
3779 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3780 ptmp = &tmp;
3781
3782 mutex_lock(&cpuset_mutex);
3783
3784 /* fetch the available cpus/mems and find out which changed how */
3785 cpumask_copy(&new_cpus, cpu_active_mask);
3786 new_mems = node_states[N_MEMORY];
3787
3788 /*
3789 * If subparts_cpus is populated, it is likely that the check below
3790 * will produce a false positive on cpus_updated when the cpu list
3791 * isn't changed. It is extra work, but it is better to be safe.
3792 */
3793 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3794 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3795
3796 /*
3797 * In the rare case that hotplug removes all the cpus in subparts_cpus,
3798 * we assume that cpus are updated.
3799 */
3800 if (!cpus_updated && top_cpuset.nr_subparts_cpus)
3801 cpus_updated = true;
3802
3803 /* synchronize cpus_allowed to cpu_active_mask */
3804 if (cpus_updated) {
3805 spin_lock_irq(&callback_lock);
3806 if (!on_dfl)
3807 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3808 /*
3809 * Make sure that CPUs allocated to child partitions
3810 * do not show up in effective_cpus. If no CPU is left,
3811 * we clear the subparts_cpus & let the child partitions
3812 * fight for the CPUs again.
3813 */
3814 if (top_cpuset.nr_subparts_cpus) {
3815 if (cpumask_subset(&new_cpus,
3816 top_cpuset.subparts_cpus)) {
3817 top_cpuset.nr_subparts_cpus = 0;
3818 cpumask_clear(top_cpuset.subparts_cpus);
3819 } else {
3820 cpumask_andnot(&new_cpus, &new_cpus,
3821 top_cpuset.subparts_cpus);
3822 }
3823 }
3824 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3825 spin_unlock_irq(&callback_lock);
3826 /* we don't mess with cpumasks of tasks in top_cpuset */
3827 }
3828
3829 /* synchronize mems_allowed to N_MEMORY */
3830 if (mems_updated) {
3831 spin_lock_irq(&callback_lock);
3832 if (!on_dfl)
3833 top_cpuset.mems_allowed = new_mems;
3834 top_cpuset.effective_mems = new_mems;
3835 spin_unlock_irq(&callback_lock);
3836 update_tasks_nodemask(&top_cpuset);
3837 }
3838
3839 mutex_unlock(&cpuset_mutex);
3840
3841 /* if cpus or mems changed, we need to propagate to descendants */
3842 if (cpus_updated || mems_updated) {
3843 struct cpuset *cs;
3844 struct cgroup_subsys_state *pos_css;
3845
3846 rcu_read_lock();
3847 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3848 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3849 continue;
3850 rcu_read_unlock();
3851
3852 cpuset_hotplug_update_tasks(cs, ptmp);
3853
3854 rcu_read_lock();
3855 css_put(&cs->css);
3856 }
3857 rcu_read_unlock();
3858 }
3859
3860 /* rebuild sched domains if cpus_allowed has changed */
3861 if (cpus_updated || force_rebuild) {
3862 force_rebuild = false;
3863 rebuild_sched_domains();
3864 }
3865
3866 free_cpumasks(NULL, ptmp);
3867 }
3868
3869 void cpuset_update_active_cpus(void)
3870 {
3871 /*
3872 * We're inside the cpu hotplug critical region, which usually nests
3873 * inside cgroup synchronization. Bounce actual hotplug processing
3874 * to a work item to avoid reverse locking order.
3875 */
3876 schedule_work(&cpuset_hotplug_work);
3877 }
3878
3879 void cpuset_wait_for_hotplug(void)
3880 {
3881 flush_work(&cpuset_hotplug_work);
3882 }
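
/*
 * Minimal sketch of the deferral pattern used by the two helpers above
 * (the example_* names are illustrative; the real cpuset_hotplug_work is
 * declared elsewhere in this file): the hotplug callback only schedules a
 * work item, and waiters flush it to guarantee the update has completed.
 */
#if 0
static void example_workfn(struct work_struct *work)
{
	/* heavy processing, done outside the hotplug critical region */
}
static DECLARE_WORK(example_work, example_workfn);

static void example_on_hotplug_event(void)
{
	schedule_work(&example_work);	/* cheap and non-blocking */
}

static void example_wait_for_update(void)
{
	flush_work(&example_work);	/* wait until the update is done */
}
#endif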
3883
3884 /*
3885 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3886 * Call this routine anytime after node_states[N_MEMORY] changes.
3887 * See cpuset_update_active_cpus() for CPU hotplug handling.
3888 */
3889 static int cpuset_track_online_nodes(struct notifier_block *self,
3890 unsigned long action, void *arg)
3891 {
3892 schedule_work(&cpuset_hotplug_work);
3893 return NOTIFY_OK;
3894 }
3895
3896 /**
3897 * cpuset_init_smp - initialize cpus_allowed
3898 *
3899 * Description: Finish top cpuset after the cpu and node maps are initialized
3900 */
3901 void __init cpuset_init_smp(void)
3902 {
3903 /*
3904 * cpus_allowed/mems_allowed set to v2 values in the initial
3905 * cpuset_bind() call will be reset to v1 values in another
3906 * cpuset_bind() call when v1 cpuset is mounted.
3907 */
3908 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3909
3910 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3911 top_cpuset.effective_mems = node_states[N_MEMORY];
3912
3913 hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3914
3915 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3916 BUG_ON(!cpuset_migrate_mm_wq);
3917 }
3918
3919 /**
3920 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3921 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3922 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3923 *
3924 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3925 * attached to the specified @tsk. Guaranteed to return some non-empty
3926 * subset of cpu_online_mask, even if this means going outside the
3927 * task's cpuset, except when the task is in the top cpuset.
3928 **/
3929
3930 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3931 {
3932 unsigned long flags;
3933 struct cpuset *cs;
3934
3935 spin_lock_irqsave(&callback_lock, flags);
3936 rcu_read_lock();
3937
3938 cs = task_cs(tsk);
3939 if (cs != &top_cpuset)
3940 guarantee_online_cpus(tsk, pmask);
3941 /*
3942 * Tasks in the top cpuset won't get updates to their cpumasks
3943 * when a hotplug online/offline event happens. So we include all
3944 * offline cpus in the allowed cpu list.
3945 */
3946 if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
3947 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3948
3949 /*
3950 * We first exclude cpus allocated to partitions. If there is no
3951 * allowable online cpu left, we fall back to all possible cpus.
3952 */
3953 cpumask_andnot(pmask, possible_mask, top_cpuset.subparts_cpus);
3954 if (!cpumask_intersects(pmask, cpu_online_mask))
3955 cpumask_copy(pmask, possible_mask);
3956 }
3957
3958 rcu_read_unlock();
3959 spin_unlock_irqrestore(&callback_lock, flags);
3960 }
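
/*
 * Sketch of a typical caller (compiled out; loosely modeled on the
 * sched_setaffinity() path, with an illustrative helper name): because the
 * mask filled in above is guaranteed to be non-empty, it can safely be
 * used to clamp a user-requested affinity mask.
 */
#if 0
static int example_clamp_affinity(struct task_struct *p,
				  const struct cpumask *requested,
				  struct cpumask *result)
{
	cpumask_var_t allowed;
	int ret = 0;

	if (!alloc_cpumask_var(&allowed, GFP_KERNEL))
		return -ENOMEM;

	cpuset_cpus_allowed(p, allowed);	/* never empty */
	cpumask_and(result, requested, allowed);
	if (cpumask_empty(result))
		ret = -EINVAL;

	free_cpumask_var(allowed);
	return ret;
}
#endif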
3961
3962 /**
3963 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3964 * @tsk: pointer to task_struct with which the scheduler is struggling
3965 *
3966 * Description: In the case that the scheduler cannot find an allowed cpu in
3967 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3968 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3969 * which will not contain a sane cpumask during cases such as cpu hotplugging.
3970 * This is the absolute last resort for the scheduler and it is only used if
3971 * _every_ other avenue has been traveled.
3972 *
3973 * Returns true if the affinity of @tsk was changed, false otherwise.
3974 **/
3975
3976 bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3977 {
3978 const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3979 const struct cpumask *cs_mask;
3980 bool changed = false;
3981
3982 rcu_read_lock();
3983 cs_mask = task_cs(tsk)->cpus_allowed;
3984 if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
3985 do_set_cpus_allowed(tsk, cs_mask);
3986 changed = true;
3987 }
3988 rcu_read_unlock();
3989
3990 /*
3991 * We own tsk->cpus_allowed, nobody can change it under us.
3992 *
3993 * But we used cs && cs->cpus_allowed lockless and thus can
3994 * race with cgroup_attach_task() or update_cpumask() and get
3995 * the wrong tsk->cpus_allowed. However, both cases imply the
3996 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
3997 * which takes task_rq_lock().
3998 *
3999 * If we are called after it dropped the lock we must see all
4000 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4001 * set any mask even if it is not right from task_cs() pov;
4002 * the pending set_cpus_allowed_ptr() will fix things.
4003 *
4004 * select_fallback_rq() will fix things up and set cpu_possible_mask
4005 * if required.
4006 */
4007 return changed;
4008 }
4009
4010 void __init cpuset_init_current_mems_allowed(void)
4011 {
4012 nodes_setall(current->mems_allowed);
4013 }
4014
4015 /**
4016 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
4017 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4018 *
4019 * Description: Returns the nodemask_t mems_allowed of the cpuset
4020 * attached to the specified @tsk. Guaranteed to return some non-empty
4021 * subset of node_states[N_MEMORY], even if this means going outside the
4022 * task's cpuset.
4023 **/
4024
4025 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4026 {
4027 nodemask_t mask;
4028 unsigned long flags;
4029
4030 spin_lock_irqsave(&callback_lock, flags);
4031 rcu_read_lock();
4032 guarantee_online_mems(task_cs(tsk), &mask);
4033 rcu_read_unlock();
4034 spin_unlock_irqrestore(&callback_lock, flags);
4035
4036 return mask;
4037 }
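
/*
 * Sketch of a typical caller (compiled out; loosely modeled on the
 * set_mempolicy() path, with an illustrative helper name): a user-supplied
 * nodemask is intersected with the non-empty mask returned above before
 * being used.
 */
#if 0
static int example_clamp_nodemask(struct task_struct *tsk,
				  const nodemask_t *requested,
				  nodemask_t *result)
{
	nodemask_t allowed = cpuset_mems_allowed(tsk);	/* never empty */

	nodes_and(*result, *requested, allowed);
	return nodes_empty(*result) ? -EINVAL : 0;
}
#endif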
4038
4039 /**
4040 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4041 * @nodemask: the nodemask to be checked
4042 *
4043 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4044 */
4045 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4046 {
4047 return nodes_intersects(*nodemask, current->mems_allowed);
4048 }
4049
4050 /*
4051 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4052 * mem_hardwall ancestor to the specified cpuset. Call holding
4053 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
4054 * (an unusual configuration), then returns the root cpuset.
4055 */
4056 static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4057 {
4058 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4059 cs = parent_cs(cs);
4060 return cs;
4061 }
4062
4063 /*
4064 * cpuset_node_allowed - Can we allocate on a memory node?
4065 * @node: is this an allowed node?
4066 * @gfp_mask: memory allocation flags
4067 *
4068 * If we're in interrupt, yes, we can always allocate. If @node is set in
4069 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
4070 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4071 * yes. If current has access to memory reserves as an oom victim, yes.
4072 * Otherwise, no.
4073 *
4074 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4075 * and do not allow allocations outside the current task's cpuset
4076 * unless the task has been OOM killed.
4077 * GFP_KERNEL allocations are not so marked, so can escape to the
4078 * nearest enclosing hardwalled ancestor cpuset.
4079 *
4080 * Scanning up parent cpusets requires callback_lock. The
4081 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4082 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4083 * current task's mems_allowed came up empty on the first pass over
4084 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
4085 * cpuset are short of memory, might require taking the callback_lock.
4086 *
4087 * The first call here from mm/page_alloc:get_page_from_freelist()
4088 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4089 * so no allocation on a node outside the cpuset is allowed (unless
4090 * in interrupt, of course).
4091 *
4092 * The second pass through get_page_from_freelist() doesn't even call
4093 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
4094 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4095 * in alloc_flags. That logic and the checks below have the combined
4096 * effect that:
4097 * in_interrupt - any node ok (current task context irrelevant)
4098 * GFP_ATOMIC - any node ok
4099 * tsk_is_oom_victim - any node ok
4100 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
4101 * GFP_USER - only nodes in the current task's mems_allowed ok.
4102 */
4103 bool cpuset_node_allowed(int node, gfp_t gfp_mask)
4104 {
4105 struct cpuset *cs; /* current cpuset ancestors */
4106 bool allowed; /* is allocation on this node allowed? */
4107 unsigned long flags;
4108
4109 if (in_interrupt())
4110 return true;
4111 if (node_isset(node, current->mems_allowed))
4112 return true;
4113 /*
4114 * Allow tasks that have access to memory reserves because they have
4115 * been OOM killed to get memory anywhere.
4116 */
4117 if (unlikely(tsk_is_oom_victim(current)))
4118 return true;
4119 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
4120 return false;
4121
4122 if (current->flags & PF_EXITING) /* Let dying task have memory */
4123 return true;
4124
4125 /* Not hardwall and node outside mems_allowed: scan up cpusets */
4126 spin_lock_irqsave(&callback_lock, flags);
4127
4128 rcu_read_lock();
4129 cs = nearest_hardwall_ancestor(task_cs(current));
4130 allowed = node_isset(node, cs->mems_allowed);
4131 rcu_read_unlock();
4132
4133 spin_unlock_irqrestore(&callback_lock, flags);
4134 return allowed;
4135 }
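
/*
 * Sketch of how the allocator-side check described above composes
 * (compiled out; loosely modeled on get_page_from_freelist(), with an
 * illustrative helper name; ALLOC_CPUSET lives in mm/internal.h): only
 * requests that keep ALLOC_CPUSET set consult cpuset_node_allowed(), and
 * within it only __GFP_HARDWALL requests are confined strictly to
 * current->mems_allowed.
 */
#if 0
static bool example_zone_usable(struct zone *zone, gfp_t gfp_mask,
				unsigned int alloc_flags)
{
	if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
	    !cpuset_node_allowed(zone_to_nid(zone), gfp_mask))
		return false;
	return true;
}
#endif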
4136
4137 /**
4138 * cpuset_spread_node() - On which node to begin search for a page
4139 * @rotor: round robin rotor
4140 *
4141 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4142 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4143 * and if the memory allocation used cpuset_mem_spread_node()
4144 * to determine on which node to start looking, as it will for
4145 * certain page cache or slab cache pages such as those used for file
4146 * system buffers and inode caches, then instead of starting on the
4147 * local node to look for a free page, rather spread the starting
4148 * node around the task's mems_allowed nodes.
4149 *
4150 * We don't have to worry about the returned node being offline
4151 * because "it can't happen", and even if it did, it would be ok.
4152 *
4153 * The routines calling guarantee_online_mems() are careful to
4154 * only set nodes in task->mems_allowed that are online. So it
4155 * should not be possible for the following code to return an
4156 * offline node. But if it did, that would be ok, as this routine
4157 * is not returning the node where the allocation must be, only
4158 * the node where the search should start. The zonelist passed to
4159 * __alloc_pages() will include all nodes. If the slab allocator
4160 * is passed an offline node, it will fall back to the local node.
4161 * See kmem_cache_alloc_node().
4162 */
4163 static int cpuset_spread_node(int *rotor)
4164 {
4165 return *rotor = next_node_in(*rotor, current->mems_allowed);
4166 }
4167
4168 /**
4169 * cpuset_mem_spread_node() - On which node to begin search for a file page
4170 */
4171 int cpuset_mem_spread_node(void)
4172 {
4173 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4174 current->cpuset_mem_spread_rotor =
4175 node_random(&current->mems_allowed);
4176
4177 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4178 }
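
/*
 * Sketch of a typical caller (compiled out; loosely modeled on the page
 * cache allocation path, with an illustrative helper name): when the
 * task's cpuset has memory_spread_page set, start the allocation on the
 * rotor node instead of the local one.
 */
#if 0
static struct page *example_spread_alloc(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread())
		return __alloc_pages_node(cpuset_mem_spread_node(), gfp, 0);
	return alloc_pages(gfp, 0);
}
#endif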
4179
4180 /**
4181 * cpuset_slab_spread_node() - On which node to begin search for a slab page
4182 */
4183 int cpuset_slab_spread_node(void)
4184 {
4185 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
4186 current->cpuset_slab_spread_rotor =
4187 node_random(¤t->mems_allowed);
4188
4189 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
4190 }
4191 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
4192
4193 /**
4194 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
4195 * @tsk1: pointer to task_struct of some task.
4196 * @tsk2: pointer to task_struct of some other task.
4197 *
4198 * Description: Return true if @tsk1's mems_allowed intersects the
4199 * mems_allowed of @tsk2. Used by the OOM killer to determine if
4200 * one task's memory usage might impact the memory available
4201 * to the other.
4202 **/
4203
4204 int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
4205 const struct task_struct *tsk2)
4206 {
4207 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
4208 }
4209
4210 /**
4211 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
4212 *
4213 * Description: Prints current's name, cpuset name, and cached copy of its
4214 * mems_allowed to the kernel log.
4215 */
4216 void cpuset_print_current_mems_allowed(void)
4217 {
4218 struct cgroup *cgrp;
4219
4220 rcu_read_lock();
4221
4222 cgrp = task_cs(current)->css.cgroup;
4223 pr_cont(",cpuset=");
4224 pr_cont_cgroup_name(cgrp);
4225 pr_cont(",mems_allowed=%*pbl",
4226 nodemask_pr_args(&current->mems_allowed));
4227
4228 rcu_read_unlock();
4229 }
4230
4231 /*
4232 * Collection of memory_pressure is suppressed unless
4233 * this flag is enabled by writing "1" to the special
4234 * cpuset file 'memory_pressure_enabled' in the root cpuset.
4235 */
4236
4237 int cpuset_memory_pressure_enabled __read_mostly;
4238
4239 /*
4240 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
4241 *
4242 * Keep a running average of the rate of synchronous (direct)
4243 * page reclaim efforts initiated by tasks in each cpuset.
4244 *
4245 * This represents the rate at which some task in the cpuset
4246 * ran low on memory on all nodes it was allowed to use, and
4247 * had to enter the kernel's page reclaim code in an effort to
4248 * create more free memory by tossing clean pages or swapping
4249 * or writing dirty pages.
4250 *
4251 * Display to user space in the per-cpuset read-only file
4252 * "memory_pressure". Value displayed is an integer
4253 * representing the recent rate of entry into the synchronous
4254 * (direct) page reclaim by any task attached to the cpuset.
4255 */
4256
4257 void __cpuset_memory_pressure_bump(void)
4258 {
4259 rcu_read_lock();
4260 fmeter_markevent(&task_cs(current)->fmeter);
4261 rcu_read_unlock();
4262 }
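
/*
 * The intended call pattern, mirroring the cpuset_memory_pressure_bump()
 * wrapper in <linux/cpuset.h> (the example_* name is illustrative):
 * direct-reclaim entry points bump the meter only when collection has been
 * enabled via the root cpuset's "memory_pressure_enabled" file.
 */
#if 0
static inline void example_memory_pressure_bump(void)
{
	if (cpuset_memory_pressure_enabled)
		__cpuset_memory_pressure_bump();
}
#endif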
4263
4264 #ifdef CONFIG_PROC_PID_CPUSET
4265 /*
4266 * proc_cpuset_show()
4267 * - Print task's cpuset path into seq_file.
4268 * - Used for /proc/<pid>/cpuset.
4269 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
4270 * doesn't really matter if tsk->cpuset changes after we read it,
4271 * and we take cpuset_mutex, keeping cpuset_attach() from changing it
4272 * anyway.
4273 */
4274 int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
4275 struct pid *pid, struct task_struct *tsk)
4276 {
4277 char *buf;
4278 struct cgroup_subsys_state *css;
4279 int retval;
4280
4281 retval = -ENOMEM;
4282 buf = kmalloc(PATH_MAX, GFP_KERNEL);
4283 if (!buf)
4284 goto out;
4285
4286 css = task_get_css(tsk, cpuset_cgrp_id);
4287 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
4288 current->nsproxy->cgroup_ns);
4289 css_put(css);
4290 if (retval >= PATH_MAX)
4291 retval = -ENAMETOOLONG;
4292 if (retval < 0)
4293 goto out_free;
4294 seq_puts(m, buf);
4295 seq_putc(m, '\n');
4296 retval = 0;
4297 out_free:
4298 kfree(buf);
4299 out:
4300 return retval;
4301 }
4302 #endif /* CONFIG_PROC_PID_CPUSET */
4303
4304 /* Display task mems_allowed in /proc/<pid>/status file. */
4305 void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4306 {
4307 seq_printf(m, "Mems_allowed:\t%*pb\n",
4308 nodemask_pr_args(&task->mems_allowed));
4309 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4310 nodemask_pr_args(&task->mems_allowed));
4311 }
4312