/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	atomic_t ref;
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout. With
 * THP, it is incremented by the number of pages. This counter is used to
 * trigger periodic memcg events, which is more straightforward than using
 * jiffies etc. to handle them.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};

struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[0];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];

#ifdef CONFIG_MEMCG_KMEM
	struct memcg_shrinker_map __rcu	*shrinker_map;
#endif
	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded */
	bool			on_tree;
	bool			congested;	/* memcg has many dirty pages */
						/* backed by a congested BDI */

	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* Index in entries[] of the threshold just below or equal to usage */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)      struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif
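
/*
 * On SMP, MEMCG_PADDING() expands to a zero-size, cacheline-aligned member
 * that pushes the fields following it onto a fresh cache line (see
 * _pad1_/_pad2_ in struct mem_cgroup below); on UP it expands to nothing.
 */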

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/*
	 * Should the OOM killer kill all tasks belonging to this cgroup
	 * if it has to kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* memory.events */
	struct cgroup_file events_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	/* memory.stat */
	struct mem_cgroup_stat_cpu __percpu *stat_cpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t		stat[MEMCG_NR_STAT];
	atomic_long_t		events[NR_VM_EVENT_ITEMS];
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace wants to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: a bigger value may be necessary on big iron (large machines).
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
		bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

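/*
 * Charging is a two-step protocol: mem_cgroup_try_charge() charges the
 * page and returns the responsible memcg in @memcgp; the caller then
 * either commits or cancels once the page is (or fails to be) installed.
 * Minimal sketch of a fault path (illustrative, error handling elided):
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		return VM_FAULT_OOM;
 *	...map the page...
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	(on failure: mem_cgroup_cancel_charge(page, memcg, false))
 */
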
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a pgdat and a memcg
 * @pgdat: pglist_data of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @pgdat and
 * @memcg. This can be the node lruvec, if the memory controller is
 * disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

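/*
 * Illustrative caller, e.g. a reclaim path that already holds a memcg
 * and a node (names hypothetical):
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 *	unsigned long nr = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
 */
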
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

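/*
 * get_mem_cgroup_from_mm()/get_mem_cgroup_from_page() return a memcg with
 * its css reference held; drop it with mem_cgroup_put() when done. Minimal
 * sketch of the expected pairing:
 *
 *	memcg = get_mem_cgroup_from_mm(mm);
 *	...use memcg...
 *	mem_cgroup_put(memcg);
 */
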
#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}

struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

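/*
 * Walking up the accounting hierarchy is a common pattern (see e.g.
 * mem_cgroup_under_socket_pressure() below):
 *
 *	for (; memcg; memcg = parent_mem_cgroup(memcg))
 *		...act on each level...
 */
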
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
			      struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
		int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				struct task_struct *p);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	long x = atomic_long_read(&memcg->stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->stat[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->count[idx], x);
}

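/*
 * The per-CPU counter above absorbs small updates and only folds them into
 * the atomic once the pending delta exceeds MEMCG_CHARGE_BATCH. Worked
 * example with a batch of 32: thirty-two consecutive +1 updates on one CPU
 * stay per-CPU (the pending count reaches 32); the thirty-third makes it
 * 33, which is added to memcg->stat[idx] and the per-CPU count resets to 0.
 */
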
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

/**
 * mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	struct mem_cgroup_per_node *pn;
	long x;

	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

	/* Update memcg */
	__mod_memcg_state(pn->memcg, idx, val);

	/* Update lruvec */
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &pn->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

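/*
 * The lruvec helpers above keep three counters in sync: the node-wide
 * vmstat counter, the memcg total, and the per-lruvec (memcg x node)
 * count. Illustrative use, e.g. when accounting a shmem page
 * (hypothetical call site, IRQs already off):
 *
 *	__mod_lruvec_page_state(page, NR_SHMEM, 1);
 */
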
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->events[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->events[idx], x);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

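/*
 * Typical use from the fault path, charging a VM event to the mm owner's
 * memcg (this mirrors how handle_mm_fault() accounts faults):
 *
 *	count_memcg_event_mm(mm, PGFAULT);
 */
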
static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events[event]);
	cgroup_file_notify(&memcg->events_file);
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

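/*
 * Sketch of raising a memory event against the memcg of a task's mm
 * (illustrative only; the event shows up in memory.events and notifies
 * its watchers via the events_file handle above):
 *
 *	memcg_memory_event_mm(mm, MEMCG_OOM);
 */
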
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
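
/*
 * Illustrative caller: the networking core checks this before growing
 * socket buffers (compare sk_under_memory_pressure()), e.g.:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		...back off...
 */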
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
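
/*
 * Illustrative loop (names hypothetical; slab_mutex held and the NULL
 * check performed, as noted above):
 *
 *	for_each_memcg_cache_index(i) {
 *		c = ...memcg cache of cachep at index i...;
 *		if (!c)
 *			continue;
 *		...use c...
 *	}
 */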

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id) { }
#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */