/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 *  cgroup interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

struct kernel_clone_args;

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
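/*
 * Example (editorial sketch): a controller's weight file would typically
 * validate user input against these bounds before applying it;
 * my_set_weight() is hypothetical.
 *
 *	if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
 *		return -ERANGE;
 *	my_set_weight(css, weight);
 */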

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)								\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;		\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)						\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)						\
	static_branch_likely(&ss ## _on_dfl_key)
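/*
 * Example (editorial sketch): both tests take the full subsystem name as
 * generated from cgroup_subsys.h, e.g. memory_cgrp_subsys.
 * handle_v2_case() below is hypothetical.
 *
 *	if (!cgroup_subsys_enabled(memory_cgrp_subsys))
 *		return;
 *	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
 *		handle_v2_case();
 */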

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
struct cgroup *cgroup_v1v2_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);

int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
			   struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
			       struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
			     struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
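/*
 * Example (editorial sketch): counting the threadgroup leaders attached
 * to a css.  The iterator calls are the API declared above;
 * my_count_procs() itself is hypothetical.
 *
 *	static unsigned int my_count_procs(struct cgroup_subsys_state *css)
 *	{
 *		struct css_task_iter it;
 *		struct task_struct *task;
 *		unsigned int n = 0;
 *
 *		css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *		while ((task = css_task_iter_next(&it)))
 *			n++;
 *		css_task_iter_end(&it);
 *		return n;
 *	}
 */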

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
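/*
 * Example (editorial sketch): walking the children of @parent under the
 * required RCU read lock; visit() is hypothetical and the CSS_ONLINE
 * check is one way a subsystem might skip half-constructed or dying
 * children.
 *
 *	rcu_read_lock();
 *	css_for_each_child(pos, parent) {
 *		if (pos->flags & CSS_ONLINE)
 *			visit(pos);
 *	}
 *	rcu_read_unlock();
 */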

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants.  @css is included in the iteration and is the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
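/*
 * Example (editorial sketch of the my_update_state() pattern above,
 * assuming a hypothetical my_css wrapper with ->lock and ->state
 * members):
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, css) {
 *		struct my_css *mc = my_css(pos);
 *
 *		spin_lock(&mc->lock);
 *		if (pos == css)
 *			mc->state = new_state;
 *		else if (pos->flags & CSS_ONLINE)
 *			mc->state = my_css(pos->parent)->state;
 *		spin_unlock(&mc->lock);
 *	}
 *	rcu_read_unlock();
 */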

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and is the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described for the
 * pre-order walk does not carry over to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));		\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
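/*
 * Example (editorial sketch): a hypothetical cgroup_subsys ->attach()
 * method moving per-task accounting to each task's destination css;
 * my_move_charge() is made up for illustration.
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct cgroup_subsys_state *dst_css;
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, dst_css, tset)
 *			my_move_charge(task, dst_css);
 *	}
 */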

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

#ifdef CONFIG_DEBUG_CGROUP_REF
void css_get(struct cgroup_subsys_state *css);
void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
bool css_tryget(struct cgroup_subsys_state *css);
bool css_tryget_online(struct cgroup_subsys_state *css);
void css_put(struct cgroup_subsys_state *css);
void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
#else
#define CGROUP_REF_FN_ATTRS	static inline
#define CGROUP_REF_EXPORT(fn)
#include <linux/cgroup_refcnt.h>
#endif

static inline u64 cgroup_id(const struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

extern struct mutex cgroup_mutex;

static inline void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}

static inline void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		rcu_read_lock_sched_held() ||				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
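/*
 * Example (editorial sketch): a plain RCU read-side section satisfies
 * the check, with false as @__c meaning no extra conditions apply:
 *
 *	rcu_read_lock();
 *	cset = task_css_set_check(task, false);
 *	...use cset without blocking...
 *	rcu_read_unlock();
 */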

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.  The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}
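/*
 * Example (editorial sketch): pinning a task's cpu css across a section
 * that may sleep; the matching css_put() drops the reference.
 * cpu_cgrp_id is the subsys ID generated from cgroup_subsys.h.
 *
 *	css = task_get_css(task, cpu_cgrp_id);
 *	...use css, possibly sleeping...
 *	css_put(css);
 */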

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestors[ancestor->level] == ancestor;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (ancestor_level < 0 || ancestor_level > cgrp->level)
		return NULL;
	return cgrp->ancestors[ancestor_level];
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's cgroup on the default hierarchy is a descendant
 * of @ancestor.  It follows the same rules as cgroup_is_descendant() and
 * only applies to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}
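/*
 * Example (editorial sketch): like the kernfs counterpart, cgroup_path()
 * returns the full path length (>= @buflen means the copy was truncated)
 * or a negative errno:
 *
 *	char buf[PATH_MAX];
 *
 *	if (cgroup_path(cgrp, buf, sizeof(buf)) >= 0)
 *		pr_info("cgroup: %s\n", buf);
 */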

bool cgroup_psi_enabled(void);

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
				  struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
				      struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
				    struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool cgroup_psi_enabled(void)
{
	return false;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);
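/*
 * Example (editorial sketch): the usual split between the two sides.  A
 * hot path cheaply marks that @cgrp has pending per-cpu deltas, and a
 * reader later flushes them up the tree before reporting:
 *
 *	cgroup_rstat_updated(cgrp, smp_processor_id());
 *	...
 *	cgroup_rstat_flush(cgrp);
 */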

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
}

#else	/* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif	/* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
	return skcd->cgroup;
}

#else	/* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif	/* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set          *root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->ns.count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->ns.count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */