/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX (100 == sqrt(1 * 10000)) and allows 100x to be expressed in
 * both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;
	struct list_head		*dying_tasks_head;

	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)
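
/*
 * Illustrative sketch: a controller branching on whether it is mounted on
 * the default (v2) hierarchy.  @ss must be the full subsys identifier,
 * e.g. memory_cgrp_subsys; use_v2_interface()/use_v1_interface() below
 * are hypothetical helpers, not part of this header.
 *
 *	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
 *		use_v2_interface();
 *	else
 *		use_v1_interface();
 */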

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void cgroup_enable_task_cg_lists(void);
void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
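
/*
 * Illustrative sketch: iterating the tasks associated with @css via the
 * opaque iterator above.  process_task() is hypothetical; pass
 * CSS_TASK_ITER_PROCS in @flags to visit only threadgroup leaders.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		process_task(task);
 *	css_task_iter_end(&it);
 */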

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
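
/*
 * Illustrative sketch: counting the children of @parent currently visible
 * under RCU.  my_count_children() is hypothetical and only assumes
 * css_for_each_child() above.
 *
 *	static int my_count_children(struct cgroup_subsys_state *parent)
 *	{
 *		struct cgroup_subsys_state *pos;
 *		int n = 0;
 *
 *		rcu_read_lock();
 *		css_for_each_child(pos, parent)
 *			n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */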

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants.  @root is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, root)				\
	for ((pos) = css_next_descendant_pre(NULL, (root)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (root)))
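
/*
 * Illustrative sketch: applying a hypothetical my_apply() to every online
 * descendant, pinning each position across the loop body with the
 * css_tryget_online()/css_put() pair declared later in this header.
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root) {
 *		if (!css_tryget_online(pos))
 *			continue;
 *		my_apply(pos);
 *		css_put(pos);
 *	}
 *	rcu_read_unlock();
 */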

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @root is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the visibility guarantee described for the pre-order walk
 * doesn't apply to post-order walks in the same way.
 */
#define css_for_each_descendant_post(pos, root)			\
	for ((pos) = css_next_descendant_post(NULL, (root)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (root)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
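
/*
 * Illustrative sketch: a cgroup_subsys ->attach() method walking the
 * migrating tasks.  my_attach() and my_move_task() are hypothetical.
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct cgroup_subsys_state *css;
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			my_move_task(task, css);
 *	}
 */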

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 *
 * The trailing if/else pair skips non-leader tasks while keeping the
 * construct a single statement to which the caller's loop body attaches.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is on or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}
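
/*
 * Illustrative sketch: the common RCU + tryget pattern for pinning a
 * task's css beyond the RCU read section.  foo_cgrp_id is a hypothetical
 * subsystem ID; task_css() is defined later in this header.
 *
 *	rcu_read_lock();
 *	css = task_css(task, foo_cgrp_id);
 *	if (!css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 */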

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]
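
/*
 * Illustrative sketch: naming a caller-held lock as the extra condition.
 * my_lock and foo_cgrp_id are hypothetical.
 *
 *	css = task_css_check(task, foo_cgrp_id,
 *			     lockdep_is_held(&my_lock));
 */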

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.  The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it
 * exists and return pointer to it.  Return NULL if @cgrp doesn't have
 * ancestor at @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (cgrp->level < ancestor_level)
		return NULL;
	while (cgrp && cgrp->level > ancestor_level)
		cgrp = cgroup_parent(cgrp);
	return cgrp;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of
 * @ancestor.  It follows all the same rules as cgroup_is_descendant, and
 * only applies to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called from any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
				char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
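
/*
 * Illustrative sketch of the usual rstat pattern: a controller marks a
 * cgroup as having updated per-cpu state from hot paths and flushes
 * before reading aggregated values.  my_counter and read_aggregated()
 * are hypothetical.
 *
 *	// writer, per-cpu hot path
 *	this_cpu_add(my_counter, delta);
 *	cgroup_rstat_updated(cgrp, smp_processor_id());
 *
 *	// reader
 *	cgroup_rstat_flush(cgrp);
 *	read_aggregated(my_counter);
 */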

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}
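
/*
 * Illustrative sketch: resolving the cgroup associated with a socket,
 * assuming a struct sock *sk whose sk_cgrp_data member carries the
 * sock_cgroup_data.
 *
 *	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
 */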

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */