1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  */
24 
25 #include <linux/page_counter.h>
26 #include <linux/memcontrol.h>
27 #include <linux/cgroup.h>
28 #include <linux/pagewalk.h>
29 #include <linux/sched/mm.h>
30 #include <linux/shmem_fs.h>
31 #include <linux/hugetlb.h>
32 #include <linux/pagemap.h>
33 #include <linux/vm_event_item.h>
34 #include <linux/smp.h>
35 #include <linux/page-flags.h>
36 #include <linux/backing-dev.h>
37 #include <linux/bit_spinlock.h>
38 #include <linux/rcupdate.h>
39 #include <linux/limits.h>
40 #include <linux/export.h>
41 #include <linux/mutex.h>
42 #include <linux/rbtree.h>
43 #include <linux/slab.h>
44 #include <linux/swap.h>
45 #include <linux/swapops.h>
46 #include <linux/spinlock.h>
47 #include <linux/eventfd.h>
48 #include <linux/poll.h>
49 #include <linux/sort.h>
50 #include <linux/fs.h>
51 #include <linux/seq_file.h>
52 #include <linux/vmpressure.h>
53 #include <linux/mm_inline.h>
54 #include <linux/swap_cgroup.h>
55 #include <linux/cpu.h>
56 #include <linux/oom.h>
57 #include <linux/lockdep.h>
58 #include <linux/file.h>
59 #include <linux/tracehook.h>
60 #include <linux/psi.h>
61 #include <linux/seq_buf.h>
62 #include "internal.h"
63 #include <net/sock.h>
64 #include <net/ip.h>
65 #include "slab.h"
66 
67 #include <linux/uaccess.h>
68 
69 #include <trace/events/vmscan.h>
70 
71 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
72 EXPORT_SYMBOL(memory_cgrp_subsys);
73 
74 struct mem_cgroup *root_mem_cgroup __read_mostly;
75 
76 /* Active memory cgroup to use from an interrupt context */
77 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
78 
79 /* Socket memory accounting disabled? */
80 static bool cgroup_memory_nosocket;
81 
82 /* Kernel memory accounting disabled? */
83 static bool cgroup_memory_nokmem;
84 
85 /* Whether the swap controller is active */
86 #ifdef CONFIG_MEMCG_SWAP
87 bool cgroup_memory_noswap __read_mostly;
88 #else
89 #define cgroup_memory_noswap		1
90 #endif
91 
92 #ifdef CONFIG_CGROUP_WRITEBACK
93 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
94 #endif
95 
96 /* Whether legacy memory+swap accounting is active */
97 static bool do_memsw_account(void)
98 {
99 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
100 }
101 
102 #define THRESHOLDS_EVENTS_TARGET 128
103 #define SOFTLIMIT_EVENTS_TARGET 1024
104 
105 /*
106  * Cgroups above their limits are maintained in an RB-tree, independent of
107  * their hierarchy representation
108  */
109 
110 struct mem_cgroup_tree_per_node {
111 	struct rb_root rb_root;
112 	struct rb_node *rb_rightmost;
113 	spinlock_t lock;
114 };
115 
116 struct mem_cgroup_tree {
117 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
118 };
119 
120 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
121 
122 /* for OOM */
123 struct mem_cgroup_eventfd_list {
124 	struct list_head list;
125 	struct eventfd_ctx *eventfd;
126 };
127 
128 /*
129  * cgroup_event represents events which userspace wants to receive.
130  */
131 struct mem_cgroup_event {
132 	/*
133 	 * memcg which the event belongs to.
134 	 */
135 	struct mem_cgroup *memcg;
136 	/*
137 	 * eventfd to signal userspace about the event.
138 	 */
139 	struct eventfd_ctx *eventfd;
140 	/*
141 	 * Each of these is stored in a list by the cgroup.
142 	 */
143 	struct list_head list;
144 	/*
145 	 * register_event() callback will be used to add new userspace
146 	 * waiter for changes related to this event.  Use eventfd_signal()
147 	 * on eventfd to send notification to userspace.
148 	 */
149 	int (*register_event)(struct mem_cgroup *memcg,
150 			      struct eventfd_ctx *eventfd, const char *args);
151 	/*
152 	 * unregister_event() callback will be called when userspace closes
153 	 * the eventfd or on cgroup removal.  This callback must be set
154 	 * if you want to provide notification functionality.
155 	 */
156 	void (*unregister_event)(struct mem_cgroup *memcg,
157 				 struct eventfd_ctx *eventfd);
158 	/*
159 	 * All fields below needed to unregister event when
160 	 * userspace closes eventfd.
161 	 */
162 	poll_table pt;
163 	wait_queue_head_t *wqh;
164 	wait_queue_entry_t wait;
165 	struct work_struct remove;
166 };
167 
168 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
169 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
170 
171 /* Stuff for moving charges at task migration. */
172 /*
173  * Types of charges to be moved.
174  */
175 #define MOVE_ANON	0x1U
176 #define MOVE_FILE	0x2U
177 #define MOVE_MASK	(MOVE_ANON | MOVE_FILE)
178 
179 /* "mc" and its members are protected by cgroup_mutex */
180 static struct move_charge_struct {
181 	spinlock_t	  lock; /* for from, to */
182 	struct mm_struct  *mm;
183 	struct mem_cgroup *from;
184 	struct mem_cgroup *to;
185 	unsigned long flags;
186 	unsigned long precharge;
187 	unsigned long moved_charge;
188 	unsigned long moved_swap;
189 	struct task_struct *moving_task;	/* a task moving charges */
190 	wait_queue_head_t waitq;		/* a waitq for other context */
191 } mc = {
192 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
193 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
194 };
195 
196 /*
197  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
198  * limit reclaim to prevent infinite loops, if they ever occur.
199  */
200 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
201 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2
202 
203 /* for encoding cft->private value on file */
204 enum res_type {
205 	_MEM,
206 	_MEMSWAP,
207 	_OOM_TYPE,
208 	_KMEM,
209 	_TCP,
210 };
211 
212 #define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
213 #define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
214 #define MEMFILE_ATTR(val)	((val) & 0xffff)
215 /* Used for OOM notifier */
216 #define OOM_CONTROL		(0)
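
/*
 * Encoding sketch (illustrative, not part of the original code): a control
 * file for the memsw limit would use
 * cft->private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), i.e.
 * (_MEMSWAP << 16) | RES_LIMIT, and the read/write handlers later split it
 * back out with MEMFILE_TYPE() and MEMFILE_ATTR().  RES_LIMIT is assumed
 * here to be the page_counter attribute index used by those handlers.
 */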
217 
218 /*
219  * Iteration constructs for visiting all cgroups (under a tree).  If
220  * loops are exited prematurely (break), mem_cgroup_iter_break() must
221  * be used for reference counting.
222  */
223 #define for_each_mem_cgroup_tree(iter, root)		\
224 	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
225 	     iter != NULL;				\
226 	     iter = mem_cgroup_iter(root, iter, NULL))
227 
228 #define for_each_mem_cgroup(iter)			\
229 	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
230 	     iter != NULL;				\
231 	     iter = mem_cgroup_iter(NULL, iter, NULL))
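
/*
 * Usage sketch (illustrative only, not part of the original code): a walk
 * that bails out early must drop the reference held on the current position:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * should_stop() is a hypothetical predicate; mem_cgroup_scan_tasks() below
 * uses this exact pattern with its @fn callback.
 */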
232 
233 static inline bool should_force_charge(void)
234 {
235 	return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
236 		(current->flags & PF_EXITING);
237 }
238 
239 /* Some nice accessors for the vmpressure. */
240 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
241 {
242 	if (!memcg)
243 		memcg = root_mem_cgroup;
244 	return &memcg->vmpressure;
245 }
246 
247 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
248 {
249 	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
250 }
251 
252 #ifdef CONFIG_MEMCG_KMEM
253 extern spinlock_t css_set_lock;
254 
255 static void obj_cgroup_release(struct percpu_ref *ref)
256 {
257 	struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
258 	struct mem_cgroup *memcg;
259 	unsigned int nr_bytes;
260 	unsigned int nr_pages;
261 	unsigned long flags;
262 
263 	/*
264 	 * At this point all allocated objects are freed, and
265 	 * objcg->nr_charged_bytes can't have an arbitrary byte value.
266 	 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
267 	 *
268 	 * The following sequence can lead to it:
269 	 * 1) CPU0: objcg == stock->cached_objcg
270 	 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
271 	 *          PAGE_SIZE bytes are charged
272 	 * 3) CPU1: a process from another memcg is allocating something,
273 	 *          the stock is flushed,
274 	 *          objcg->nr_charged_bytes = PAGE_SIZE - 92
275 	 * 4) CPU0: we release this object,
276 	 *          92 bytes are added to stock->nr_bytes
277 	 * 5) CPU0: stock is flushed,
278 	 *          92 bytes are added to objcg->nr_charged_bytes
279 	 *
280 	 * As a result, nr_charged_bytes == PAGE_SIZE.
281 	 * This page will be uncharged in obj_cgroup_release().
282 	 */
283 	nr_bytes = atomic_read(&objcg->nr_charged_bytes);
284 	WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
285 	nr_pages = nr_bytes >> PAGE_SHIFT;
286 
287 	spin_lock_irqsave(&css_set_lock, flags);
288 	memcg = obj_cgroup_memcg(objcg);
289 	if (nr_pages)
290 		__memcg_kmem_uncharge(memcg, nr_pages);
291 	list_del(&objcg->list);
292 	mem_cgroup_put(memcg);
293 	spin_unlock_irqrestore(&css_set_lock, flags);
294 
295 	percpu_ref_exit(ref);
296 	kfree_rcu(objcg, rcu);
297 }
298 
299 static struct obj_cgroup *obj_cgroup_alloc(void)
300 {
301 	struct obj_cgroup *objcg;
302 	int ret;
303 
304 	objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
305 	if (!objcg)
306 		return NULL;
307 
308 	ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
309 			      GFP_KERNEL);
310 	if (ret) {
311 		kfree(objcg);
312 		return NULL;
313 	}
314 	INIT_LIST_HEAD(&objcg->list);
315 	return objcg;
316 }
317 
318 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
319 				  struct mem_cgroup *parent)
320 {
321 	struct obj_cgroup *objcg, *iter;
322 
323 	objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
324 
325 	spin_lock_irq(&css_set_lock);
326 
327 	/* Move active objcg to the parent's list */
328 	xchg(&objcg->memcg, parent);
329 	css_get(&parent->css);
330 	list_add(&objcg->list, &parent->objcg_list);
331 
332 	/* Move already reparented objcgs to the parent's list */
333 	list_for_each_entry(iter, &memcg->objcg_list, list) {
334 		css_get(&parent->css);
335 		xchg(&iter->memcg, parent);
336 		css_put(&memcg->css);
337 	}
338 	list_splice(&memcg->objcg_list, &parent->objcg_list);
339 
340 	spin_unlock_irq(&css_set_lock);
341 
342 	percpu_ref_kill(&objcg->refcnt);
343 }
344 
345 /*
346  * This will be used as a shrinker list's index.
347  * The main reason for not using cgroup id for this:
348  *  this works better in sparse environments, where we have a lot of memcgs,
349  *  but only a few are kmem-limited. For instance, with 200 memcgs of which
350  *  only the 200th is kmem-limited, a cgroup-id-indexed array would need
351  *  200 entries.
352  *
353  * The current size of the caches array is stored in memcg_nr_cache_ids. It
354  * will double each time we have to increase it.
355  */
356 static DEFINE_IDA(memcg_cache_ida);
357 int memcg_nr_cache_ids;
358 
359 /* Protects memcg_nr_cache_ids */
360 static DECLARE_RWSEM(memcg_cache_ids_sem);
361 
362 void memcg_get_cache_ids(void)
363 {
364 	down_read(&memcg_cache_ids_sem);
365 }
366 
367 void memcg_put_cache_ids(void)
368 {
369 	up_read(&memcg_cache_ids_sem);
370 }
371 
372 /*
373  * MIN_SIZE is different from 1, because we would like to avoid going through
374  * the alloc/free process all the time. In a small machine, 4 kmem-limited
375  * cgroups is a reasonable guess. In the future, it could be a parameter or
376  * tunable, but that is strictly not necessary.
377  *
378  * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
379  * this constant directly from cgroup, but it is understandable that this is
380  * better kept as an internal representation in cgroup.c. In any case, the
381  * cgrp_id space is not getting any smaller, and we don't have to necessarily
382  * increase ours as well if it increases.
383  */
384 #define MEMCG_CACHES_MIN_SIZE 4
385 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
386 
387 /*
388  * A lot of the calls to the cache allocation functions are expected to be
389  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
390  * conditional on this static branch, we have to allow modules that do
391  * kmem_cache_alloc and the like to see this symbol as well.
392  */
393 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
394 EXPORT_SYMBOL(memcg_kmem_enabled_key);
395 #endif
396 
397 static int memcg_shrinker_map_size;
398 static DEFINE_MUTEX(memcg_shrinker_map_mutex);
399 
400 static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
401 {
402 	kvfree(container_of(head, struct memcg_shrinker_map, rcu));
403 }
404 
405 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
406 					 int size, int old_size)
407 {
408 	struct memcg_shrinker_map *new, *old;
409 	int nid;
410 
411 	lockdep_assert_held(&memcg_shrinker_map_mutex);
412 
413 	for_each_node(nid) {
414 		old = rcu_dereference_protected(
415 			mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
416 		/* Not yet online memcg */
417 		if (!old)
418 			return 0;
419 
420 		new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
421 		if (!new)
422 			return -ENOMEM;
423 
424 		/* Set all old bits, clear all new bits */
425 		memset(new->map, (int)0xff, old_size);
426 		memset((void *)new->map + old_size, 0, size - old_size);
427 
428 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
429 		call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
430 	}
431 
432 	return 0;
433 }
434 
435 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
436 {
437 	struct mem_cgroup_per_node *pn;
438 	struct memcg_shrinker_map *map;
439 	int nid;
440 
441 	if (mem_cgroup_is_root(memcg))
442 		return;
443 
444 	for_each_node(nid) {
445 		pn = mem_cgroup_nodeinfo(memcg, nid);
446 		map = rcu_dereference_protected(pn->shrinker_map, true);
447 		if (map)
448 			kvfree(map);
449 		rcu_assign_pointer(pn->shrinker_map, NULL);
450 	}
451 }
452 
453 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
454 {
455 	struct memcg_shrinker_map *map;
456 	int nid, size, ret = 0;
457 
458 	if (mem_cgroup_is_root(memcg))
459 		return 0;
460 
461 	mutex_lock(&memcg_shrinker_map_mutex);
462 	size = memcg_shrinker_map_size;
463 	for_each_node(nid) {
464 		map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
465 		if (!map) {
466 			memcg_free_shrinker_maps(memcg);
467 			ret = -ENOMEM;
468 			break;
469 		}
470 		rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
471 	}
472 	mutex_unlock(&memcg_shrinker_map_mutex);
473 
474 	return ret;
475 }
476 
477 int memcg_expand_shrinker_maps(int new_id)
478 {
479 	int size, old_size, ret = 0;
480 	struct mem_cgroup *memcg;
481 
482 	size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
483 	old_size = memcg_shrinker_map_size;
484 	if (size <= old_size)
485 		return 0;
486 
487 	mutex_lock(&memcg_shrinker_map_mutex);
488 	if (!root_mem_cgroup)
489 		goto unlock;
490 
491 	for_each_mem_cgroup(memcg) {
492 		if (mem_cgroup_is_root(memcg))
493 			continue;
494 		ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
495 		if (ret) {
496 			mem_cgroup_iter_break(NULL, memcg);
497 			goto unlock;
498 		}
499 	}
500 unlock:
501 	if (!ret)
502 		memcg_shrinker_map_size = size;
503 	mutex_unlock(&memcg_shrinker_map_mutex);
504 	return ret;
505 }
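
/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64): registering
 * shrinker id 100 gives DIV_ROUND_UP(101, 64) == 2 longs, i.e. a 16-byte
 * bitmap appended to each per-node memcg_shrinker_map.
 */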
506 
507 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
508 {
509 	if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
510 		struct memcg_shrinker_map *map;
511 
512 		rcu_read_lock();
513 		map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
514 		/* Pairs with smp mb in shrink_slab() */
515 		smp_mb__before_atomic();
516 		set_bit(shrinker_id, map->map);
517 		rcu_read_unlock();
518 	}
519 }
520 
521 /**
522  * mem_cgroup_css_from_page - css of the memcg associated with a page
523  * @page: page of interest
524  *
525  * If memcg is bound to the default hierarchy, css of the memcg associated
526  * with @page is returned.  The returned css remains associated with @page
527  * until it is released.
528  *
529  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
530  * is returned.
531  */
532 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
533 {
534 	struct mem_cgroup *memcg;
535 
536 	memcg = page->mem_cgroup;
537 
538 	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
539 		memcg = root_mem_cgroup;
540 
541 	return &memcg->css;
542 }
543 
544 /**
545  * page_cgroup_ino - return inode number of the memcg a page is charged to
546  * @page: the page
547  *
548  * Look up the closest online ancestor of the memory cgroup @page is charged to
549  * and return its inode number or 0 if @page is not charged to any cgroup. It
550  * is safe to call this function without holding a reference to @page.
551  *
552  * Note, this function is inherently racy, because there is nothing to prevent
553  * the cgroup inode from getting torn down and potentially reallocated a moment
554  * after page_cgroup_ino() returns, so it only should be used by callers that
555  * do not care (such as procfs interfaces).
556  */
557 ino_t page_cgroup_ino(struct page *page)
558 {
559 	struct mem_cgroup *memcg;
560 	unsigned long ino = 0;
561 
562 	rcu_read_lock();
563 	memcg = page->mem_cgroup;
564 
565 	/*
566 	 * The lowest bit set means that memcg isn't a valid
567 	 * memcg pointer, but an obj_cgroups pointer.
568 	 * In this case the page is shared and doesn't belong
569 	 * to any specific memory cgroup.
570 	 */
571 	if ((unsigned long) memcg & 0x1UL)
572 		memcg = NULL;
573 
574 	while (memcg && !(memcg->css.flags & CSS_ONLINE))
575 		memcg = parent_mem_cgroup(memcg);
576 	if (memcg)
577 		ino = cgroup_ino(memcg->css.cgroup);
578 	rcu_read_unlock();
579 	return ino;
580 }
581 
582 static struct mem_cgroup_per_node *
583 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
584 {
585 	int nid = page_to_nid(page);
586 
587 	return memcg->nodeinfo[nid];
588 }
589 
590 static struct mem_cgroup_tree_per_node *
591 soft_limit_tree_node(int nid)
592 {
593 	return soft_limit_tree.rb_tree_per_node[nid];
594 }
595 
596 static struct mem_cgroup_tree_per_node *
597 soft_limit_tree_from_page(struct page *page)
598 {
599 	int nid = page_to_nid(page);
600 
601 	return soft_limit_tree.rb_tree_per_node[nid];
602 }
603 
604 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
605 					 struct mem_cgroup_tree_per_node *mctz,
606 					 unsigned long new_usage_in_excess)
607 {
608 	struct rb_node **p = &mctz->rb_root.rb_node;
609 	struct rb_node *parent = NULL;
610 	struct mem_cgroup_per_node *mz_node;
611 	bool rightmost = true;
612 
613 	if (mz->on_tree)
614 		return;
615 
616 	mz->usage_in_excess = new_usage_in_excess;
617 	if (!mz->usage_in_excess)
618 		return;
619 	while (*p) {
620 		parent = *p;
621 		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
622 					tree_node);
623 		if (mz->usage_in_excess < mz_node->usage_in_excess) {
624 			p = &(*p)->rb_left;
625 			rightmost = false;
626 		}
627 
628 		/*
629 		 * We can't avoid mem cgroups that are over their soft
630 		 * limit by the same amount
631 		 */
632 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
633 			p = &(*p)->rb_right;
634 	}
635 
636 	if (rightmost)
637 		mctz->rb_rightmost = &mz->tree_node;
638 
639 	rb_link_node(&mz->tree_node, parent, p);
640 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
641 	mz->on_tree = true;
642 }
643 
644 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
645 					 struct mem_cgroup_tree_per_node *mctz)
646 {
647 	if (!mz->on_tree)
648 		return;
649 
650 	if (&mz->tree_node == mctz->rb_rightmost)
651 		mctz->rb_rightmost = rb_prev(&mz->tree_node);
652 
653 	rb_erase(&mz->tree_node, &mctz->rb_root);
654 	mz->on_tree = false;
655 }
656 
657 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
658 				       struct mem_cgroup_tree_per_node *mctz)
659 {
660 	unsigned long flags;
661 
662 	spin_lock_irqsave(&mctz->lock, flags);
663 	__mem_cgroup_remove_exceeded(mz, mctz);
664 	spin_unlock_irqrestore(&mctz->lock, flags);
665 }
666 
667 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
668 {
669 	unsigned long nr_pages = page_counter_read(&memcg->memory);
670 	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
671 	unsigned long excess = 0;
672 
673 	if (nr_pages > soft_limit)
674 		excess = nr_pages - soft_limit;
675 
676 	return excess;
677 }
678 
679 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
680 {
681 	unsigned long excess;
682 	struct mem_cgroup_per_node *mz;
683 	struct mem_cgroup_tree_per_node *mctz;
684 
685 	mctz = soft_limit_tree_from_page(page);
686 	if (!mctz)
687 		return;
688 	/*
689 	 * Necessary to update all ancestors when hierarchy is used,
690 	 * because their event counter is not touched.
691 	 */
692 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
693 		mz = mem_cgroup_page_nodeinfo(memcg, page);
694 		excess = soft_limit_excess(memcg);
695 		/*
696 		 * We have to update the tree if mz is on RB-tree or
697 		 * mem is over its softlimit.
698 		 */
699 		if (excess || mz->on_tree) {
700 			unsigned long flags;
701 
702 			spin_lock_irqsave(&mctz->lock, flags);
703 			/* if on-tree, remove it */
704 			if (mz->on_tree)
705 				__mem_cgroup_remove_exceeded(mz, mctz);
706 			/*
707 			 * Insert again. mz->usage_in_excess will be updated.
708 			 * If excess is 0, no tree ops.
709 			 */
710 			__mem_cgroup_insert_exceeded(mz, mctz, excess);
711 			spin_unlock_irqrestore(&mctz->lock, flags);
712 		}
713 	}
714 }
715 
716 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
717 {
718 	struct mem_cgroup_tree_per_node *mctz;
719 	struct mem_cgroup_per_node *mz;
720 	int nid;
721 
722 	for_each_node(nid) {
723 		mz = mem_cgroup_nodeinfo(memcg, nid);
724 		mctz = soft_limit_tree_node(nid);
725 		if (mctz)
726 			mem_cgroup_remove_exceeded(mz, mctz);
727 	}
728 }
729 
730 static struct mem_cgroup_per_node *
731 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
732 {
733 	struct mem_cgroup_per_node *mz;
734 
735 retry:
736 	mz = NULL;
737 	if (!mctz->rb_rightmost)
738 		goto done;		/* Nothing to reclaim from */
739 
740 	mz = rb_entry(mctz->rb_rightmost,
741 		      struct mem_cgroup_per_node, tree_node);
742 	/*
743 	 * Remove the node now, but someone else can add it back;
744 	 * we will add it back at the end of reclaim to its correct
745 	 * position in the tree.
746 	 */
747 	__mem_cgroup_remove_exceeded(mz, mctz);
748 	if (!soft_limit_excess(mz->memcg) ||
749 	    !css_tryget(&mz->memcg->css))
750 		goto retry;
751 done:
752 	return mz;
753 }
754 
755 static struct mem_cgroup_per_node *
756 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
757 {
758 	struct mem_cgroup_per_node *mz;
759 
760 	spin_lock_irq(&mctz->lock);
761 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
762 	spin_unlock_irq(&mctz->lock);
763 	return mz;
764 }
765 
766 /**
767  * __mod_memcg_state - update cgroup memory statistics
768  * @memcg: the memory cgroup
769  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
770  * @val: delta to add to the counter, can be negative
771  */
772 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
773 {
774 	long x, threshold = MEMCG_CHARGE_BATCH;
775 
776 	if (mem_cgroup_disabled())
777 		return;
778 
779 	if (memcg_stat_item_in_bytes(idx))
780 		threshold <<= PAGE_SHIFT;
781 
782 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
783 	if (unlikely(abs(x) > threshold)) {
784 		struct mem_cgroup *mi;
785 
786 		/*
787 		 * Batch local counters to keep them in sync with
788 		 * the hierarchical ones.
789 		 */
790 		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
791 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
792 			atomic_long_add(x, &mi->vmstats[idx]);
793 		x = 0;
794 	}
795 	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
796 }
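
/*
 * Illustrative example (threshold value assumed, not taken from this file):
 * with a MEMCG_CHARGE_BATCH of 32 pages, charging a 512-page THP pushes |x|
 * past the threshold immediately, so the accumulated delta is flushed to the
 * hierarchical counters with atomic_long_add() and the per-cpu counter
 * resets to 0; smaller deltas just accumulate per cpu until the threshold is
 * crossed.
 */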
797 
798 static struct mem_cgroup_per_node *
799 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
800 {
801 	struct mem_cgroup *parent;
802 
803 	parent = parent_mem_cgroup(pn->memcg);
804 	if (!parent)
805 		return NULL;
806 	return mem_cgroup_nodeinfo(parent, nid);
807 }
808 
809 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
810 			      int val)
811 {
812 	struct mem_cgroup_per_node *pn;
813 	struct mem_cgroup *memcg;
814 	long x, threshold = MEMCG_CHARGE_BATCH;
815 
816 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
817 	memcg = pn->memcg;
818 
819 	/* Update memcg */
820 	__mod_memcg_state(memcg, idx, val);
821 
822 	/* Update lruvec */
823 	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
824 
825 	if (vmstat_item_in_bytes(idx))
826 		threshold <<= PAGE_SHIFT;
827 
828 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
829 	if (unlikely(abs(x) > threshold)) {
830 		pg_data_t *pgdat = lruvec_pgdat(lruvec);
831 		struct mem_cgroup_per_node *pi;
832 
833 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
834 			atomic_long_add(x, &pi->lruvec_stat[idx]);
835 		x = 0;
836 	}
837 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
838 }
839 
840 /**
841  * __mod_lruvec_state - update lruvec memory statistics
842  * @lruvec: the lruvec
843  * @idx: the stat item
844  * @val: delta to add to the counter, can be negative
845  *
846  * The lruvec is the intersection of the NUMA node and a cgroup. This
847  * function updates all three counters that are affected by a
848  * change of state at this level: per-node, per-cgroup, per-lruvec.
849  */
850 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
851 			int val)
852 {
853 	/* Update node */
854 	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
855 
856 	/* Update memcg and lruvec */
857 	if (!mem_cgroup_disabled())
858 		__mod_memcg_lruvec_state(lruvec, idx, val);
859 }
860 
861 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
862 {
863 	pg_data_t *pgdat = page_pgdat(virt_to_page(p));
864 	struct mem_cgroup *memcg;
865 	struct lruvec *lruvec;
866 
867 	rcu_read_lock();
868 	memcg = mem_cgroup_from_obj(p);
869 
870 	/*
871 	 * Untracked pages have no memcg, no lruvec. Update only the
872 	 * node. If we reparent the slab objects to the root memcg,
873 	 * when we free the slab object, we need to update the per-memcg
874 	 * vmstats to keep it correct for the root memcg.
875 	 */
876 	if (!memcg) {
877 		__mod_node_page_state(pgdat, idx, val);
878 	} else {
879 		lruvec = mem_cgroup_lruvec(memcg, pgdat);
880 		__mod_lruvec_state(lruvec, idx, val);
881 	}
882 	rcu_read_unlock();
883 }
884 
885 void mod_memcg_obj_state(void *p, int idx, int val)
886 {
887 	struct mem_cgroup *memcg;
888 
889 	rcu_read_lock();
890 	memcg = mem_cgroup_from_obj(p);
891 	if (memcg)
892 		mod_memcg_state(memcg, idx, val);
893 	rcu_read_unlock();
894 }
895 
896 /**
897  * __count_memcg_events - account VM events in a cgroup
898  * @memcg: the memory cgroup
899  * @idx: the event item
900  * @count: the number of events that occurred
901  */
902 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
903 			  unsigned long count)
904 {
905 	unsigned long x;
906 
907 	if (mem_cgroup_disabled())
908 		return;
909 
910 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
911 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
912 		struct mem_cgroup *mi;
913 
914 		/*
915 		 * Batch local counters to keep them in sync with
916 		 * the hierarchical ones.
917 		 */
918 		__this_cpu_add(memcg->vmstats_local->events[idx], x);
919 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
920 			atomic_long_add(x, &mi->vmevents[idx]);
921 		x = 0;
922 	}
923 	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
924 }
925 
926 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
927 {
928 	return atomic_long_read(&memcg->vmevents[event]);
929 }
930 
931 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
932 {
933 	long x = 0;
934 	int cpu;
935 
936 	for_each_possible_cpu(cpu)
937 		x += per_cpu(memcg->vmstats_local->events[event], cpu);
938 	return x;
939 }
940 
941 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
942 					 struct page *page,
943 					 int nr_pages)
944 {
945 	/* pagein of a big page is an event. So, ignore page size */
946 	if (nr_pages > 0)
947 		__count_memcg_events(memcg, PGPGIN, 1);
948 	else {
949 		__count_memcg_events(memcg, PGPGOUT, 1);
950 		nr_pages = -nr_pages; /* for event */
951 	}
952 
953 	__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
954 }
955 
956 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
957 				       enum mem_cgroup_events_target target)
958 {
959 	unsigned long val, next;
960 
961 	val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
962 	next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
963 	/* from time_after() in jiffies.h */
964 	if ((long)(next - val) < 0) {
965 		switch (target) {
966 		case MEM_CGROUP_TARGET_THRESH:
967 			next = val + THRESHOLDS_EVENTS_TARGET;
968 			break;
969 		case MEM_CGROUP_TARGET_SOFTLIMIT:
970 			next = val + SOFTLIMIT_EVENTS_TARGET;
971 			break;
972 		default:
973 			break;
974 		}
975 		__this_cpu_write(memcg->vmstats_percpu->targets[target], next);
976 		return true;
977 	}
978 	return false;
979 }
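
/*
 * Illustrative note: the (long)(next - val) < 0 test is the wraparound-safe
 * idiom from time_after().  For example, if the event counter has wrapped so
 * that val == 1 while next == ULONG_MAX - 2, then next - val casts to a
 * negative long and the target is correctly treated as already passed.
 */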
980 
981 /*
982  * Check events in order.
983  *
984  */
985 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
986 {
987 	/* threshold event is triggered in finer grain than soft limit */
988 	if (unlikely(mem_cgroup_event_ratelimit(memcg,
989 						MEM_CGROUP_TARGET_THRESH))) {
990 		bool do_softlimit;
991 
992 		do_softlimit = mem_cgroup_event_ratelimit(memcg,
993 						MEM_CGROUP_TARGET_SOFTLIMIT);
994 		mem_cgroup_threshold(memcg);
995 		if (unlikely(do_softlimit))
996 			mem_cgroup_update_tree(memcg, page);
997 	}
998 }
999 
1000 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1001 {
1002 	/*
1003 	 * mm_update_next_owner() may clear mm->owner to NULL
1004 	 * if it races with swapoff, page migration, etc.
1005 	 * So this can be called with p == NULL.
1006 	 */
1007 	if (unlikely(!p))
1008 		return NULL;
1009 
1010 	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1011 }
1012 EXPORT_SYMBOL(mem_cgroup_from_task);
1013 
1014 /**
1015  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1016  * @mm: mm from which memcg should be extracted. It can be NULL.
1017  *
1018  * Obtain a reference on mm->memcg and return it if successful. Otherwise,
1019  * root_mem_cgroup is returned. However, if mem_cgroup is disabled, NULL is
1020  * returned.
1021  */
1022 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1023 {
1024 	struct mem_cgroup *memcg;
1025 
1026 	if (mem_cgroup_disabled())
1027 		return NULL;
1028 
1029 	rcu_read_lock();
1030 	do {
1031 		/*
1032 		 * Page cache insertions can happen without an
1033 		 * actual mm context, e.g. during disk probing
1034 		 * on boot, loopback IO, acct() writes etc.
1035 		 */
1036 		if (unlikely(!mm))
1037 			memcg = root_mem_cgroup;
1038 		else {
1039 			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1040 			if (unlikely(!memcg))
1041 				memcg = root_mem_cgroup;
1042 		}
1043 	} while (!css_tryget(&memcg->css));
1044 	rcu_read_unlock();
1045 	return memcg;
1046 }
1047 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
1048 
1049 /**
1050  * get_mem_cgroup_from_page: Obtain a reference on given page's memcg.
1051  * @page: page from which memcg should be extracted.
1052  *
1053  * Obtain a reference on page->memcg and return it if successful. Otherwise
1054  * root_mem_cgroup is returned.
1055  */
1056 struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
1057 {
1058 	struct mem_cgroup *memcg = page->mem_cgroup;
1059 
1060 	if (mem_cgroup_disabled())
1061 		return NULL;
1062 
1063 	rcu_read_lock();
1064 	/* Page should not get uncharged and its memcg freed under us. */
1065 	if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
1066 		memcg = root_mem_cgroup;
1067 	rcu_read_unlock();
1068 	return memcg;
1069 }
1070 EXPORT_SYMBOL(get_mem_cgroup_from_page);
1071 
1072 static __always_inline struct mem_cgroup *active_memcg(void)
1073 {
1074 	if (in_interrupt())
1075 		return this_cpu_read(int_active_memcg);
1076 	else
1077 		return current->active_memcg;
1078 }
1079 
1080 static __always_inline struct mem_cgroup *get_active_memcg(void)
1081 {
1082 	struct mem_cgroup *memcg;
1083 
1084 	rcu_read_lock();
1085 	memcg = active_memcg();
1086 	if (memcg) {
1087 		/* current->active_memcg must hold a ref. */
1088 		if (WARN_ON_ONCE(!css_tryget(&memcg->css)))
1089 			memcg = root_mem_cgroup;
1090 		else
1091 			memcg = current->active_memcg;
1092 	}
1093 	rcu_read_unlock();
1094 
1095 	return memcg;
1096 }
1097 
1098 static __always_inline bool memcg_kmem_bypass(void)
1099 {
1100 	/* Allow remote memcg charging from any context. */
1101 	if (unlikely(active_memcg()))
1102 		return false;
1103 
1104 	/* Memcg to charge can't be determined. */
1105 	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
1106 		return true;
1107 
1108 	return false;
1109 }
1110 
1111 /**
1112  * If active memcg is set, do not fall back to current->mm->memcg.
1113  */
1114 static __always_inline struct mem_cgroup *get_mem_cgroup_from_current(void)
1115 {
1116 	if (memcg_kmem_bypass())
1117 		return NULL;
1118 
1119 	if (unlikely(active_memcg()))
1120 		return get_active_memcg();
1121 
1122 	return get_mem_cgroup_from_mm(current->mm);
1123 }
1124 
1125 /**
1126  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1127  * @root: hierarchy root
1128  * @prev: previously returned memcg, NULL on first invocation
1129  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1130  *
1131  * Returns references to children of the hierarchy below @root, or
1132  * @root itself, or %NULL after a full round-trip.
1133  *
1134  * Caller must pass the return value in @prev on subsequent
1135  * invocations for reference counting, or use mem_cgroup_iter_break()
1136  * to cancel a hierarchy walk before the round-trip is complete.
1137  *
1138  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1139  * in the hierarchy among all concurrent reclaimers operating on the
1140  * same node.
1141  */
1142 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1143 				   struct mem_cgroup *prev,
1144 				   struct mem_cgroup_reclaim_cookie *reclaim)
1145 {
1146 	struct mem_cgroup_reclaim_iter *iter;
1147 	struct cgroup_subsys_state *css = NULL;
1148 	struct mem_cgroup *memcg = NULL;
1149 	struct mem_cgroup *pos = NULL;
1150 
1151 	if (mem_cgroup_disabled())
1152 		return NULL;
1153 
1154 	if (!root)
1155 		root = root_mem_cgroup;
1156 
1157 	if (prev && !reclaim)
1158 		pos = prev;
1159 
1160 	if (!root->use_hierarchy && root != root_mem_cgroup) {
1161 		if (prev)
1162 			goto out;
1163 		return root;
1164 	}
1165 
1166 	rcu_read_lock();
1167 
1168 	if (reclaim) {
1169 		struct mem_cgroup_per_node *mz;
1170 
1171 		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
1172 		iter = &mz->iter;
1173 
1174 		if (prev && reclaim->generation != iter->generation)
1175 			goto out_unlock;
1176 
1177 		while (1) {
1178 			pos = READ_ONCE(iter->position);
1179 			if (!pos || css_tryget(&pos->css))
1180 				break;
1181 			/*
1182 			 * css reference reached zero, so iter->position will
1183 			 * be cleared by ->css_released. However, we should not
1184 			 * rely on this happening soon, because ->css_released
1185 			 * is called from a work queue, and by busy-waiting we
1186 			 * might block it. So we clear iter->position right
1187 			 * away.
1188 			 */
1189 			(void)cmpxchg(&iter->position, pos, NULL);
1190 		}
1191 	}
1192 
1193 	if (pos)
1194 		css = &pos->css;
1195 
1196 	for (;;) {
1197 		css = css_next_descendant_pre(css, &root->css);
1198 		if (!css) {
1199 			/*
1200 			 * Reclaimers share the hierarchy walk, and a
1201 			 * new one might jump in right at the end of
1202 			 * the hierarchy - make sure they see at least
1203 			 * one group and restart from the beginning.
1204 			 */
1205 			if (!prev)
1206 				continue;
1207 			break;
1208 		}
1209 
1210 		/*
1211 		 * Verify the css and acquire a reference.  The root
1212 		 * is provided by the caller, so we know it's alive
1213 		 * and kicking, and don't take an extra reference.
1214 		 */
1215 		memcg = mem_cgroup_from_css(css);
1216 
1217 		if (css == &root->css)
1218 			break;
1219 
1220 		if (css_tryget(css))
1221 			break;
1222 
1223 		memcg = NULL;
1224 	}
1225 
1226 	if (reclaim) {
1227 		/*
1228 		 * The position could have already been updated by a competing
1229 		 * thread, so check that the value hasn't changed since we read
1230 		 * it to avoid reclaiming from the same cgroup twice.
1231 		 */
1232 		(void)cmpxchg(&iter->position, pos, memcg);
1233 
1234 		if (pos)
1235 			css_put(&pos->css);
1236 
1237 		if (!memcg)
1238 			iter->generation++;
1239 		else if (!prev)
1240 			reclaim->generation = iter->generation;
1241 	}
1242 
1243 out_unlock:
1244 	rcu_read_unlock();
1245 out:
1246 	if (prev && prev != root)
1247 		css_put(&prev->css);
1248 
1249 	return memcg;
1250 }
1251 
1252 /**
1253  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1254  * @root: hierarchy root
1255  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1256  */
1257 void mem_cgroup_iter_break(struct mem_cgroup *root,
1258 			   struct mem_cgroup *prev)
1259 {
1260 	if (!root)
1261 		root = root_mem_cgroup;
1262 	if (prev && prev != root)
1263 		css_put(&prev->css);
1264 }
1265 
1266 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1267 					struct mem_cgroup *dead_memcg)
1268 {
1269 	struct mem_cgroup_reclaim_iter *iter;
1270 	struct mem_cgroup_per_node *mz;
1271 	int nid;
1272 
1273 	for_each_node(nid) {
1274 		mz = mem_cgroup_nodeinfo(from, nid);
1275 		iter = &mz->iter;
1276 		cmpxchg(&iter->position, dead_memcg, NULL);
1277 	}
1278 }
1279 
1280 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1281 {
1282 	struct mem_cgroup *memcg = dead_memcg;
1283 	struct mem_cgroup *last;
1284 
1285 	do {
1286 		__invalidate_reclaim_iterators(memcg, dead_memcg);
1287 		last = memcg;
1288 	} while ((memcg = parent_mem_cgroup(memcg)));
1289 
1290 	/*
1291 	 * When cgroup1 non-hierarchy mode is used,
1292 	 * parent_mem_cgroup() does not walk all the way up to the
1293 	 * cgroup root (root_mem_cgroup). So we have to handle
1294 	 * dead_memcg from cgroup root separately.
1295 	 */
1296 	if (last != root_mem_cgroup)
1297 		__invalidate_reclaim_iterators(root_mem_cgroup,
1298 						dead_memcg);
1299 }
1300 
1301 /**
1302  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1303  * @memcg: hierarchy root
1304  * @fn: function to call for each task
1305  * @arg: argument passed to @fn
1306  *
1307  * This function iterates over tasks attached to @memcg or to any of its
1308  * descendants and calls @fn for each task. If @fn returns a non-zero
1309  * value, the function breaks the iteration loop and returns the value.
1310  * Otherwise, it will iterate over all tasks and return 0.
1311  *
1312  * This function must not be called for the root memory cgroup.
1313  */
1314 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1315 			  int (*fn)(struct task_struct *, void *), void *arg)
1316 {
1317 	struct mem_cgroup *iter;
1318 	int ret = 0;
1319 
1320 	BUG_ON(memcg == root_mem_cgroup);
1321 
1322 	for_each_mem_cgroup_tree(iter, memcg) {
1323 		struct css_task_iter it;
1324 		struct task_struct *task;
1325 
1326 		css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1327 		while (!ret && (task = css_task_iter_next(&it)))
1328 			ret = fn(task, arg);
1329 		css_task_iter_end(&it);
1330 		if (ret) {
1331 			mem_cgroup_iter_break(memcg, iter);
1332 			break;
1333 		}
1334 	}
1335 	return ret;
1336 }
1337 
1338 /**
1339  * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1340  * @page: the page
1341  * @pgdat: pgdat of the page
1342  *
1343  * This function relies on page->mem_cgroup being stable - see the
1344  * access rules in commit_charge().
1345  */
1346 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
1347 {
1348 	struct mem_cgroup_per_node *mz;
1349 	struct mem_cgroup *memcg;
1350 	struct lruvec *lruvec;
1351 
1352 	if (mem_cgroup_disabled()) {
1353 		lruvec = &pgdat->__lruvec;
1354 		goto out;
1355 	}
1356 
1357 	memcg = page->mem_cgroup;
1358 	/*
1359 	 * Swapcache readahead pages are added to the LRU - and
1360 	 * possibly migrated - before they are charged.
1361 	 */
1362 	if (!memcg)
1363 		memcg = root_mem_cgroup;
1364 
1365 	mz = mem_cgroup_page_nodeinfo(memcg, page);
1366 	lruvec = &mz->lruvec;
1367 out:
1368 	/*
1369 	 * Since a node can be onlined after the mem_cgroup was created,
1370 	 * we have to be prepared to initialize lruvec->pgdat here;
1371 	 * and if offlined then reonlined, we need to reinitialize it.
1372 	 */
1373 	if (unlikely(lruvec->pgdat != pgdat))
1374 		lruvec->pgdat = pgdat;
1375 	return lruvec;
1376 }
1377 
1378 /**
1379  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1380  * @lruvec: mem_cgroup per zone lru vector
1381  * @lru: index of lru list the page is sitting on
1382  * @zid: zone id of the accounted pages
1383  * @nr_pages: positive when adding or negative when removing
1384  *
1385  * This function must be called under lru_lock, just before a page is added
1386  * to or just after a page is removed from an lru list (that ordering being
1387  * so as to allow it to check that lru_size 0 is consistent with list_empty).
1388  */
1389 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1390 				int zid, int nr_pages)
1391 {
1392 	struct mem_cgroup_per_node *mz;
1393 	unsigned long *lru_size;
1394 	long size;
1395 
1396 	if (mem_cgroup_disabled())
1397 		return;
1398 
1399 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1400 	lru_size = &mz->lru_zone_size[zid][lru];
1401 
1402 	if (nr_pages < 0)
1403 		*lru_size += nr_pages;
1404 
1405 	size = *lru_size;
1406 	if (WARN_ONCE(size < 0,
1407 		"%s(%p, %d, %d): lru_size %ld\n",
1408 		__func__, lruvec, lru, nr_pages, size)) {
1409 		VM_BUG_ON(1);
1410 		*lru_size = 0;
1411 	}
1412 
1413 	if (nr_pages > 0)
1414 		*lru_size += nr_pages;
1415 }
1416 
1417 /**
1418  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1419  * @memcg: the memory cgroup
1420  *
1421  * Returns the maximum amount of memory @memcg can be charged with, in
1422  * pages.
1423  */
1424 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1425 {
1426 	unsigned long margin = 0;
1427 	unsigned long count;
1428 	unsigned long limit;
1429 
1430 	count = page_counter_read(&memcg->memory);
1431 	limit = READ_ONCE(memcg->memory.max);
1432 	if (count < limit)
1433 		margin = limit - count;
1434 
1435 	if (do_memsw_account()) {
1436 		count = page_counter_read(&memcg->memsw);
1437 		limit = READ_ONCE(memcg->memsw.max);
1438 		if (count < limit)
1439 			margin = min(margin, limit - count);
1440 		else
1441 			margin = 0;
1442 	}
1443 
1444 	return margin;
1445 }
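
/*
 * Worked example (numbers illustrative only): with memory.max == 1000 pages
 * and usage == 800, the margin is 200 pages; if memsw accounting is active
 * with memsw.max == 900 and memsw usage == 850, the margin is clamped to
 * min(200, 50) == 50 pages.
 */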
1446 
1447 /*
1448  * A routine for checking whether "mem" is under move_account() or not.
1449  *
1450  * Checks whether a cgroup is mc.from, mc.to, or in the hierarchy of the
1451  * moving cgroups. This is used for waiting under the high memory pressure
1452  * caused by a "move".
1453  */
1454 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1455 {
1456 	struct mem_cgroup *from;
1457 	struct mem_cgroup *to;
1458 	bool ret = false;
1459 	/*
1460 	 * Unlike task_move routines, we access mc.to, mc.from not under
1461 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1462 	 */
1463 	spin_lock(&mc.lock);
1464 	from = mc.from;
1465 	to = mc.to;
1466 	if (!from)
1467 		goto unlock;
1468 
1469 	ret = mem_cgroup_is_descendant(from, memcg) ||
1470 		mem_cgroup_is_descendant(to, memcg);
1471 unlock:
1472 	spin_unlock(&mc.lock);
1473 	return ret;
1474 }
1475 
1476 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1477 {
1478 	if (mc.moving_task && current != mc.moving_task) {
1479 		if (mem_cgroup_under_move(memcg)) {
1480 			DEFINE_WAIT(wait);
1481 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1482 			/* moving charge context might have finished. */
1483 			if (mc.moving_task)
1484 				schedule();
1485 			finish_wait(&mc.waitq, &wait);
1486 			return true;
1487 		}
1488 	}
1489 	return false;
1490 }
1491 
1492 struct memory_stat {
1493 	const char *name;
1494 	unsigned int ratio;
1495 	unsigned int idx;
1496 };
1497 
1498 static struct memory_stat memory_stats[] = {
1499 	{ "anon", PAGE_SIZE, NR_ANON_MAPPED },
1500 	{ "file", PAGE_SIZE, NR_FILE_PAGES },
1501 	{ "kernel_stack", 1024, NR_KERNEL_STACK_KB },
1502 	{ "percpu", 1, MEMCG_PERCPU_B },
1503 	{ "sock", PAGE_SIZE, MEMCG_SOCK },
1504 	{ "shmem", PAGE_SIZE, NR_SHMEM },
1505 	{ "file_mapped", PAGE_SIZE, NR_FILE_MAPPED },
1506 	{ "file_dirty", PAGE_SIZE, NR_FILE_DIRTY },
1507 	{ "file_writeback", PAGE_SIZE, NR_WRITEBACK },
1508 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1509 	/*
1510 	 * The ratio will be initialized in memory_stats_init(), because
1511 	 * on some architectures the HPAGE_PMD_SIZE macro is not a
1512 	 * constant (e.g. powerpc).
1513 	 */
1514 	{ "anon_thp", 0, NR_ANON_THPS },
1515 #endif
1516 	{ "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
1517 	{ "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
1518 	{ "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE },
1519 	{ "active_file", PAGE_SIZE, NR_ACTIVE_FILE },
1520 	{ "unevictable", PAGE_SIZE, NR_UNEVICTABLE },
1521 
1522 	/*
1523 	 * Note: slab_reclaimable and slab_unreclaimable must be listed
1524 	 * together, with slab_reclaimable in front.
1525 	 */
1526 	{ "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B },
1527 	{ "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B },
1528 
1529 	/* The memory events */
1530 	{ "workingset_refault_anon", 1, WORKINGSET_REFAULT_ANON },
1531 	{ "workingset_refault_file", 1, WORKINGSET_REFAULT_FILE },
1532 	{ "workingset_activate_anon", 1, WORKINGSET_ACTIVATE_ANON },
1533 	{ "workingset_activate_file", 1, WORKINGSET_ACTIVATE_FILE },
1534 	{ "workingset_restore_anon", 1, WORKINGSET_RESTORE_ANON },
1535 	{ "workingset_restore_file", 1, WORKINGSET_RESTORE_FILE },
1536 	{ "workingset_nodereclaim", 1, WORKINGSET_NODERECLAIM },
1537 };
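
/*
 * Illustrative example: NR_KERNEL_STACK_KB is tracked in kilobytes, so a raw
 * value of 64 is reported by memory_stat_format() as 64 * 1024 == 65536
 * bytes; per-page items such as "anon" are scaled by PAGE_SIZE, and items
 * already kept in bytes use a ratio of 1.
 */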
1538 
1539 static int __init memory_stats_init(void)
1540 {
1541 	int i;
1542 
1543 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1544 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1545 		if (memory_stats[i].idx == NR_ANON_THPS)
1546 			memory_stats[i].ratio = HPAGE_PMD_SIZE;
1547 #endif
1548 		VM_BUG_ON(!memory_stats[i].ratio);
1549 		VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT);
1550 	}
1551 
1552 	return 0;
1553 }
1554 pure_initcall(memory_stats_init);
1555 
1556 static char *memory_stat_format(struct mem_cgroup *memcg)
1557 {
1558 	struct seq_buf s;
1559 	int i;
1560 
1561 	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1562 	if (!s.buffer)
1563 		return NULL;
1564 
1565 	/*
1566 	 * Provide statistics on the state of the memory subsystem as
1567 	 * well as cumulative event counters that show past behavior.
1568 	 *
1569 	 * This list is ordered following a combination of these gradients:
1570 	 * 1) generic big picture -> specifics and details
1571 	 * 2) reflecting userspace activity -> reflecting kernel heuristics
1572 	 *
1573 	 * Current memory state:
1574 	 */
1575 
1576 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1577 		u64 size;
1578 
1579 		size = memcg_page_state(memcg, memory_stats[i].idx);
1580 		size *= memory_stats[i].ratio;
1581 		seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1582 
1583 		if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1584 			size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
1585 			       memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B);
1586 			seq_buf_printf(&s, "slab %llu\n", size);
1587 		}
1588 	}
1589 
1590 	/* Accumulated memory events */
1591 
1592 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1593 		       memcg_events(memcg, PGFAULT));
1594 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1595 		       memcg_events(memcg, PGMAJFAULT));
1596 	seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1597 		       memcg_events(memcg, PGREFILL));
1598 	seq_buf_printf(&s, "pgscan %lu\n",
1599 		       memcg_events(memcg, PGSCAN_KSWAPD) +
1600 		       memcg_events(memcg, PGSCAN_DIRECT));
1601 	seq_buf_printf(&s, "pgsteal %lu\n",
1602 		       memcg_events(memcg, PGSTEAL_KSWAPD) +
1603 		       memcg_events(memcg, PGSTEAL_DIRECT));
1604 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1605 		       memcg_events(memcg, PGACTIVATE));
1606 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1607 		       memcg_events(memcg, PGDEACTIVATE));
1608 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1609 		       memcg_events(memcg, PGLAZYFREE));
1610 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1611 		       memcg_events(memcg, PGLAZYFREED));
1612 
1613 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1614 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1615 		       memcg_events(memcg, THP_FAULT_ALLOC));
1616 	seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1617 		       memcg_events(memcg, THP_COLLAPSE_ALLOC));
1618 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1619 
1620 	/* The above should easily fit into one page */
1621 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1622 
1623 	return s.buffer;
1624 }
1625 
1626 #define K(x) ((x) << (PAGE_SHIFT-10))
1627 /**
1628  * mem_cgroup_print_oom_context: Print OOM information relevant to
1629  * memory controller.
1630  * @memcg: The memory cgroup that went over limit
1631  * @p: Task that is going to be killed
1632  *
1633  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1634  * enabled
1635  */
1636 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1637 {
1638 	rcu_read_lock();
1639 
1640 	if (memcg) {
1641 		pr_cont(",oom_memcg=");
1642 		pr_cont_cgroup_path(memcg->css.cgroup);
1643 	} else
1644 		pr_cont(",global_oom");
1645 	if (p) {
1646 		pr_cont(",task_memcg=");
1647 		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1648 	}
1649 	rcu_read_unlock();
1650 }
1651 
1652 /**
1653  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1654  * memory controller.
1655  * @memcg: The memory cgroup that went over limit
1656  */
1657 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1658 {
1659 	char *buf;
1660 
1661 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1662 		K((u64)page_counter_read(&memcg->memory)),
1663 		K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1664 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1665 		pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1666 			K((u64)page_counter_read(&memcg->swap)),
1667 			K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1668 	else {
1669 		pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1670 			K((u64)page_counter_read(&memcg->memsw)),
1671 			K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1672 		pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1673 			K((u64)page_counter_read(&memcg->kmem)),
1674 			K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1675 	}
1676 
1677 	pr_info("Memory cgroup stats for ");
1678 	pr_cont_cgroup_path(memcg->css.cgroup);
1679 	pr_cont(":");
1680 	buf = memory_stat_format(memcg);
1681 	if (!buf)
1682 		return;
1683 	pr_info("%s", buf);
1684 	kfree(buf);
1685 }
1686 
1687 /*
1688  * Return the memory (and swap, if configured) limit for a memcg.
1689  */
1690 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1691 {
1692 	unsigned long max = READ_ONCE(memcg->memory.max);
1693 
1694 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1695 		if (mem_cgroup_swappiness(memcg))
1696 			max += min(READ_ONCE(memcg->swap.max),
1697 				   (unsigned long)total_swap_pages);
1698 	} else { /* v1 */
1699 		if (mem_cgroup_swappiness(memcg)) {
1700 			/* Calculate swap excess capacity from memsw limit */
1701 			unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1702 
1703 			max += min(swap, (unsigned long)total_swap_pages);
1704 		}
1705 	}
1706 	return max;
1707 }
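
/*
 * Illustrative v1 example (numbers assumed): with memory.max == 1000 pages
 * and memsw.max == 1500 pages, the swap excess is 500 pages, so the value
 * returned is 1000 + min(500, total_swap_pages).
 */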
1708 
1709 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1710 {
1711 	return page_counter_read(&memcg->memory);
1712 }
1713 
1714 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1715 				     int order)
1716 {
1717 	struct oom_control oc = {
1718 		.zonelist = NULL,
1719 		.nodemask = NULL,
1720 		.memcg = memcg,
1721 		.gfp_mask = gfp_mask,
1722 		.order = order,
1723 	};
1724 	bool ret = true;
1725 
1726 	if (mutex_lock_killable(&oom_lock))
1727 		return true;
1728 
1729 	if (mem_cgroup_margin(memcg) >= (1 << order))
1730 		goto unlock;
1731 
1732 	/*
1733 	 * A few threads which were not waiting at mutex_lock_killable() can
1734 	 * fail to bail out. Therefore, check again after holding oom_lock.
1735 	 */
1736 	ret = should_force_charge() || out_of_memory(&oc);
1737 
1738 unlock:
1739 	mutex_unlock(&oom_lock);
1740 	return ret;
1741 }
1742 
1743 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1744 				   pg_data_t *pgdat,
1745 				   gfp_t gfp_mask,
1746 				   unsigned long *total_scanned)
1747 {
1748 	struct mem_cgroup *victim = NULL;
1749 	int total = 0;
1750 	int loop = 0;
1751 	unsigned long excess;
1752 	unsigned long nr_scanned;
1753 	struct mem_cgroup_reclaim_cookie reclaim = {
1754 		.pgdat = pgdat,
1755 	};
1756 
1757 	excess = soft_limit_excess(root_memcg);
1758 
1759 	while (1) {
1760 		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1761 		if (!victim) {
1762 			loop++;
1763 			if (loop >= 2) {
1764 				/*
1765 				 * If we have not been able to reclaim
1766 				 * anything, it might be because there are
1767 				 * no reclaimable pages under this hierarchy.
1768 				 */
1769 				if (!total)
1770 					break;
1771 				/*
1772 				 * We want to do more targeted reclaim.
1773 				 * excess >> 2 is not too excessive, so we
1774 				 * don't reclaim too much, nor so little that
1775 				 * we keep coming back to reclaim from this cgroup
1776 				 */
1777 				if (total >= (excess >> 2) ||
1778 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1779 					break;
1780 			}
1781 			continue;
1782 		}
1783 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1784 					pgdat, &nr_scanned);
1785 		*total_scanned += nr_scanned;
1786 		if (!soft_limit_excess(root_memcg))
1787 			break;
1788 	}
1789 	mem_cgroup_iter_break(root_memcg, victim);
1790 	return total;
1791 }
1792 
1793 #ifdef CONFIG_LOCKDEP
1794 static struct lockdep_map memcg_oom_lock_dep_map = {
1795 	.name = "memcg_oom_lock",
1796 };
1797 #endif
1798 
1799 static DEFINE_SPINLOCK(memcg_oom_lock);
1800 
1801 /*
1802  * Check whether the OOM killer is already running under our hierarchy.
1803  * If someone else is running it, return false.
1804  */
1805 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1806 {
1807 	struct mem_cgroup *iter, *failed = NULL;
1808 
1809 	spin_lock(&memcg_oom_lock);
1810 
1811 	for_each_mem_cgroup_tree(iter, memcg) {
1812 		if (iter->oom_lock) {
1813 			/*
1814 			 * This subtree of our hierarchy is already locked,
1815 			 * so we cannot grant the lock.
1816 			 */
1817 			failed = iter;
1818 			mem_cgroup_iter_break(memcg, iter);
1819 			break;
1820 		} else
1821 			iter->oom_lock = true;
1822 	}
1823 
1824 	if (failed) {
1825 		/*
1826 		 * OK, we failed to lock the whole subtree, so we have
1827 		 * to clean up what we already set up, up to the failing subtree.
1828 		 */
1829 		for_each_mem_cgroup_tree(iter, memcg) {
1830 			if (iter == failed) {
1831 				mem_cgroup_iter_break(memcg, iter);
1832 				break;
1833 			}
1834 			iter->oom_lock = false;
1835 		}
1836 	} else
1837 		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1838 
1839 	spin_unlock(&memcg_oom_lock);
1840 
1841 	return !failed;
1842 }
1843 
1844 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1845 {
1846 	struct mem_cgroup *iter;
1847 
1848 	spin_lock(&memcg_oom_lock);
1849 	mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1850 	for_each_mem_cgroup_tree(iter, memcg)
1851 		iter->oom_lock = false;
1852 	spin_unlock(&memcg_oom_lock);
1853 }
1854 
1855 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1856 {
1857 	struct mem_cgroup *iter;
1858 
1859 	spin_lock(&memcg_oom_lock);
1860 	for_each_mem_cgroup_tree(iter, memcg)
1861 		iter->under_oom++;
1862 	spin_unlock(&memcg_oom_lock);
1863 }
1864 
1865 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1866 {
1867 	struct mem_cgroup *iter;
1868 
1869 	/*
1870 	 * Be careful about under_oom underflows because a child memcg
1871 	 * could have been added after mem_cgroup_mark_under_oom.
1872 	 */
1873 	spin_lock(&memcg_oom_lock);
1874 	for_each_mem_cgroup_tree(iter, memcg)
1875 		if (iter->under_oom > 0)
1876 			iter->under_oom--;
1877 	spin_unlock(&memcg_oom_lock);
1878 }
1879 
1880 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1881 
1882 struct oom_wait_info {
1883 	struct mem_cgroup *memcg;
1884 	wait_queue_entry_t	wait;
1885 };
1886 
1887 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1888 	unsigned mode, int sync, void *arg)
1889 {
1890 	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1891 	struct mem_cgroup *oom_wait_memcg;
1892 	struct oom_wait_info *oom_wait_info;
1893 
1894 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1895 	oom_wait_memcg = oom_wait_info->memcg;
1896 
1897 	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1898 	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1899 		return 0;
1900 	return autoremove_wake_function(wait, mode, sync, arg);
1901 }
1902 
1903 static void memcg_oom_recover(struct mem_cgroup *memcg)
1904 {
1905 	/*
1906 	 * For the following lockless ->under_oom test, the only required
1907 	 * guarantee is that it must see the state asserted by an OOM when
1908 	 * this function is called as a result of userland actions
1909 	 * triggered by the notification of the OOM.  This is trivially
1910 	 * achieved by invoking mem_cgroup_mark_under_oom() before
1911 	 * triggering notification.
1912 	 */
1913 	if (memcg && memcg->under_oom)
1914 		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1915 }
1916 
1917 enum oom_status {
1918 	OOM_SUCCESS,
1919 	OOM_FAILED,
1920 	OOM_ASYNC,
1921 	OOM_SKIPPED
1922 };
1923 
1924 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1925 {
1926 	enum oom_status ret;
1927 	bool locked;
1928 
1929 	if (order > PAGE_ALLOC_COSTLY_ORDER)
1930 		return OOM_SKIPPED;
1931 
1932 	memcg_memory_event(memcg, MEMCG_OOM);
1933 
1934 	/*
1935 	 * We are in the middle of the charge context here, so we
1936 	 * don't want to block when potentially sitting on a callstack
1937 	 * that holds all kinds of filesystem and mm locks.
1938 	 *
1939 	 * cgroup1 allows disabling the OOM killer and waiting for outside
1940 	 * handling until the charge can succeed; remember the context and put
1941 	 * the task to sleep at the end of the page fault when all locks are
1942 	 * released.
1943 	 *
1944 	 * On the other hand, in-kernel OOM killer allows for an async victim
1945 	 * memory reclaim (oom_reaper) and that means that we are not solely
1946 	 * relying on the oom victim to make a forward progress and we can
1947 	 * invoke the oom killer here.
1948 	 *
1949 	 * Please note that mem_cgroup_out_of_memory might fail to find a
1950 	 * victim and then we have to bail out from the charge path.
1951 	 */
1952 	if (memcg->oom_kill_disable) {
1953 		if (!current->in_user_fault)
1954 			return OOM_SKIPPED;
1955 		css_get(&memcg->css);
1956 		current->memcg_in_oom = memcg;
1957 		current->memcg_oom_gfp_mask = mask;
1958 		current->memcg_oom_order = order;
1959 
1960 		return OOM_ASYNC;
1961 	}
1962 
1963 	mem_cgroup_mark_under_oom(memcg);
1964 
1965 	locked = mem_cgroup_oom_trylock(memcg);
1966 
1967 	if (locked)
1968 		mem_cgroup_oom_notify(memcg);
1969 
1970 	mem_cgroup_unmark_under_oom(memcg);
1971 	if (mem_cgroup_out_of_memory(memcg, mask, order))
1972 		ret = OOM_SUCCESS;
1973 	else
1974 		ret = OOM_FAILED;
1975 
1976 	if (locked)
1977 		mem_cgroup_oom_unlock(memcg);
1978 
1979 	return ret;
1980 }
1981 
1982 /**
1983  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1984  * @handle: actually kill/wait or just clean up the OOM state
1985  *
1986  * This has to be called at the end of a page fault if the memcg OOM
1987  * handler was enabled.
1988  *
1989  * Memcg supports userspace OOM handling where failed allocations must
1990  * sleep on a waitqueue until the userspace task resolves the
1991  * situation.  Sleeping directly in the charge context with all kinds
1992  * of locks held is not a good idea, instead we remember an OOM state
1993  * in the task and mem_cgroup_oom_synchronize() has to be called at
1994  * the end of the page fault to complete the OOM handling.
1995  *
1996  * Returns %true if an ongoing memcg OOM situation was detected and
1997  * completed, %false otherwise.
1998  */
1999 bool mem_cgroup_oom_synchronize(bool handle)
2000 {
2001 	struct mem_cgroup *memcg = current->memcg_in_oom;
2002 	struct oom_wait_info owait;
2003 	bool locked;
2004 
2005 	/* OOM is global, do not handle */
2006 	if (!memcg)
2007 		return false;
2008 
2009 	if (!handle)
2010 		goto cleanup;
2011 
2012 	owait.memcg = memcg;
2013 	owait.wait.flags = 0;
2014 	owait.wait.func = memcg_oom_wake_function;
2015 	owait.wait.private = current;
2016 	INIT_LIST_HEAD(&owait.wait.entry);
2017 
2018 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2019 	mem_cgroup_mark_under_oom(memcg);
2020 
2021 	locked = mem_cgroup_oom_trylock(memcg);
2022 
2023 	if (locked)
2024 		mem_cgroup_oom_notify(memcg);
2025 
2026 	if (locked && !memcg->oom_kill_disable) {
2027 		mem_cgroup_unmark_under_oom(memcg);
2028 		finish_wait(&memcg_oom_waitq, &owait.wait);
2029 		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
2030 					 current->memcg_oom_order);
2031 	} else {
2032 		schedule();
2033 		mem_cgroup_unmark_under_oom(memcg);
2034 		finish_wait(&memcg_oom_waitq, &owait.wait);
2035 	}
2036 
2037 	if (locked) {
2038 		mem_cgroup_oom_unlock(memcg);
2039 		/*
2040 		 * There is no guarantee that an OOM-lock contender
2041 		 * sees the wakeups triggered by the OOM kill
2042 		 * uncharges.  Wake any sleepers explicitly.
2043 		 */
2044 		memcg_oom_recover(memcg);
2045 	}
2046 cleanup:
2047 	current->memcg_in_oom = NULL;
2048 	css_put(&memcg->css);
2049 	return true;
2050 }
2051 
2052 /**
2053  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2054  * @victim: task to be killed by the OOM killer
2055  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2056  *
2057  * Returns a pointer to a memory cgroup, which has to be cleaned up
2058  * by killing all belonging OOM-killable tasks.
2059  *
2060  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2061  */
2062 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2063 					    struct mem_cgroup *oom_domain)
2064 {
2065 	struct mem_cgroup *oom_group = NULL;
2066 	struct mem_cgroup *memcg;
2067 
2068 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2069 		return NULL;
2070 
2071 	if (!oom_domain)
2072 		oom_domain = root_mem_cgroup;
2073 
2074 	rcu_read_lock();
2075 
2076 	memcg = mem_cgroup_from_task(victim);
2077 	if (memcg == root_mem_cgroup)
2078 		goto out;
2079 
2080 	/*
2081 	 * If the victim task has been asynchronously moved to a different
2082 	 * memory cgroup, we might end up killing tasks outside oom_domain.
2083 	 * In this case it's better to ignore memory.group.oom.
2084 	 */
2085 	if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2086 		goto out;
2087 
2088 	/*
2089 	 * Traverse the memory cgroup hierarchy from the victim task's
2090 	 * cgroup up to the OOMing cgroup (or root) to find the
2091 	 * highest-level memory cgroup with oom.group set.
2092 	 */
2093 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2094 		if (memcg->oom_group)
2095 			oom_group = memcg;
2096 
2097 		if (memcg == oom_domain)
2098 			break;
2099 	}
2100 
2101 	if (oom_group)
2102 		css_get(&oom_group->css);
2103 out:
2104 	rcu_read_unlock();
2105 
2106 	return oom_group;
2107 }
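
/*
 * Illustrative example of the walk above (a hypothetical hierarchy, not
 * taken from the code): with root -> A -> B -> C, an OOM scoped to B
 * (oom_domain == B) whose victim task lives in C walks C, then B, and
 * stops at B.  If B has memory.oom.group set, B is returned and the OOM
 * killer cleans up every OOM-killable task in B's subtree; A is never
 * consulted because it lies outside the OOM domain.
 */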
2108 
2109 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2110 {
2111 	pr_info("Tasks in ");
2112 	pr_cont_cgroup_path(memcg->css.cgroup);
2113 	pr_cont(" are going to be killed due to memory.oom.group set\n");
2114 }
2115 
2116 /**
2117  * lock_page_memcg - lock a page->mem_cgroup binding
2118  * @page: the page
2119  *
2120  * This function protects unlocked LRU pages from being moved to
2121  * another cgroup.
2122  *
2123  * It ensures lifetime of the returned memcg. Caller is responsible
2124  * for the lifetime of the page; __unlock_page_memcg() is available
2125  * when @page might get freed inside the locked section.
2126  */
2127 struct mem_cgroup *lock_page_memcg(struct page *page)
2128 {
2129 	struct page *head = compound_head(page); /* rmap on tail pages */
2130 	struct mem_cgroup *memcg;
2131 	unsigned long flags;
2132 
2133 	/*
2134 	 * The RCU lock is held throughout the transaction.  The fast
2135 	 * path can get away without acquiring the memcg->move_lock
2136 	 * because page moving starts with an RCU grace period.
2137 	 *
2138 	 * The RCU lock also protects the memcg from being freed when
2139 	 * the page state that is going to change is the only thing
2140 	 * preventing the page itself from being freed. E.g. writeback
2141 	 * doesn't hold a page reference and relies on PG_writeback to
2142 	 * keep off truncation, migration and so forth.
2143 	 */
2144 	rcu_read_lock();
2145 
2146 	if (mem_cgroup_disabled())
2147 		return NULL;
2148 again:
2149 	memcg = head->mem_cgroup;
2150 	if (unlikely(!memcg))
2151 		return NULL;
2152 
2153 	if (atomic_read(&memcg->moving_account) <= 0)
2154 		return memcg;
2155 
2156 	spin_lock_irqsave(&memcg->move_lock, flags);
2157 	if (memcg != head->mem_cgroup) {
2158 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2159 		goto again;
2160 	}
2161 
2162 	/*
2163 	 * When charge migration first begins, we can have locked and
2164 	 * unlocked page stat updates happening concurrently.  Track
2165 	 * the task who has the lock for unlock_page_memcg().
2166 	 */
2167 	memcg->move_lock_task = current;
2168 	memcg->move_lock_flags = flags;
2169 
2170 	return memcg;
2171 }
2172 EXPORT_SYMBOL(lock_page_memcg);
2173 
2174 /**
2175  * __unlock_page_memcg - unlock and unpin a memcg
2176  * @memcg: the memcg
2177  *
2178  * Unlock and unpin a memcg returned by lock_page_memcg().
2179  */
2180 void __unlock_page_memcg(struct mem_cgroup *memcg)
2181 {
2182 	if (memcg && memcg->move_lock_task == current) {
2183 		unsigned long flags = memcg->move_lock_flags;
2184 
2185 		memcg->move_lock_task = NULL;
2186 		memcg->move_lock_flags = 0;
2187 
2188 		spin_unlock_irqrestore(&memcg->move_lock, flags);
2189 	}
2190 
2191 	rcu_read_unlock();
2192 }
2193 
2194 /**
2195  * unlock_page_memcg - unlock a page->mem_cgroup binding
2196  * @page: the page
2197  */
2198 void unlock_page_memcg(struct page *page)
2199 {
2200 	struct page *head = compound_head(page);
2201 
2202 	__unlock_page_memcg(head->mem_cgroup);
2203 }
2204 EXPORT_SYMBOL(unlock_page_memcg);
2205 
2206 struct memcg_stock_pcp {
2207 	struct mem_cgroup *cached; /* this is never the root cgroup */
2208 	unsigned int nr_pages;
2209 
2210 #ifdef CONFIG_MEMCG_KMEM
2211 	struct obj_cgroup *cached_objcg;
2212 	unsigned int nr_bytes;
2213 #endif
2214 
2215 	struct work_struct work;
2216 	unsigned long flags;
2217 #define FLUSHING_CACHED_CHARGE	0
2218 };
2219 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2220 static DEFINE_MUTEX(percpu_charge_mutex);
2221 
2222 #ifdef CONFIG_MEMCG_KMEM
2223 static void drain_obj_stock(struct memcg_stock_pcp *stock);
2224 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2225 				     struct mem_cgroup *root_memcg);
2226 
2227 #else
2228 static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
2229 {
2230 }
2231 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2232 				     struct mem_cgroup *root_memcg)
2233 {
2234 	return false;
2235 }
2236 #endif
2237 
2238 /**
2239  * consume_stock: Try to consume stocked charge on this cpu.
2240  * @memcg: memcg to consume from.
2241  * @nr_pages: how many pages to charge.
2242  *
2243  * The charges will only happen if @memcg matches the current cpu's memcg
2244  * stock, and at least @nr_pages are available in that stock.  Failure to
2245  * service an allocation will refill the stock.
2246  *
2247  * returns true if successful, false otherwise.
2248  */
2249 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2250 {
2251 	struct memcg_stock_pcp *stock;
2252 	unsigned long flags;
2253 	bool ret = false;
2254 
2255 	if (nr_pages > MEMCG_CHARGE_BATCH)
2256 		return ret;
2257 
2258 	local_irq_save(flags);
2259 
2260 	stock = this_cpu_ptr(&memcg_stock);
2261 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2262 		stock->nr_pages -= nr_pages;
2263 		ret = true;
2264 	}
2265 
2266 	local_irq_restore(flags);
2267 
2268 	return ret;
2269 }
2270 
2271 /*
2272  * Return the stock cached in the percpu area to the page counters and reset the cached information.
2273  */
2274 static void drain_stock(struct memcg_stock_pcp *stock)
2275 {
2276 	struct mem_cgroup *old = stock->cached;
2277 
2278 	if (!old)
2279 		return;
2280 
2281 	if (stock->nr_pages) {
2282 		page_counter_uncharge(&old->memory, stock->nr_pages);
2283 		if (do_memsw_account())
2284 			page_counter_uncharge(&old->memsw, stock->nr_pages);
2285 		stock->nr_pages = 0;
2286 	}
2287 
2288 	css_put(&old->css);
2289 	stock->cached = NULL;
2290 }
2291 
2292 static void drain_local_stock(struct work_struct *dummy)
2293 {
2294 	struct memcg_stock_pcp *stock;
2295 	unsigned long flags;
2296 
2297 	/*
2298 	 * The only protection from memory hotplug vs. drain_stock races is
2299 	 * that we always operate on local CPU stock here with IRQ disabled
2300 	 */
2301 	local_irq_save(flags);
2302 
2303 	stock = this_cpu_ptr(&memcg_stock);
2304 	drain_obj_stock(stock);
2305 	drain_stock(stock);
2306 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2307 
2308 	local_irq_restore(flags);
2309 }
2310 
2311 /*
2312  * Cache charges (nr_pages) in the local per-cpu area.
2313  * They will be consumed by consume_stock() later.
2314  */
2315 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2316 {
2317 	struct memcg_stock_pcp *stock;
2318 	unsigned long flags;
2319 
2320 	local_irq_save(flags);
2321 
2322 	stock = this_cpu_ptr(&memcg_stock);
2323 	if (stock->cached != memcg) { /* reset if necessary */
2324 		drain_stock(stock);
2325 		css_get(&memcg->css);
2326 		stock->cached = memcg;
2327 	}
2328 	stock->nr_pages += nr_pages;
2329 
2330 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2331 		drain_stock(stock);
2332 
2333 	local_irq_restore(flags);
2334 }
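
/*
 * Illustrative flow (a sketch of how the helpers above are used by
 * try_charge(), not new behaviour): a charge of nr_pages first tries
 * consume_stock(); on a miss, try_charge() charges a full
 * MEMCG_CHARGE_BATCH against the page counters and hands the surplus back
 * via refill_stock(memcg, batch - nr_pages), so that subsequent small
 * charges on this CPU can be served from the stock without touching the
 * shared page counters.
 */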
2335 
2336 /*
2337  * Drain all per-CPU charge caches for the given root_memcg and the whole
2338  * subtree of the hierarchy under it.
2339  */
2340 static void drain_all_stock(struct mem_cgroup *root_memcg)
2341 {
2342 	int cpu, curcpu;
2343 
2344 	/* If someone's already draining, avoid running more workers. */
2345 	if (!mutex_trylock(&percpu_charge_mutex))
2346 		return;
2347 	/*
2348 	 * Notify other cpus that a system-wide "drain" is running.
2349 	 * We do not care about races with the cpu hotplug because cpu down
2350 	 * as well as workers from this path always operate on the local
2351 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2352 	 */
2353 	curcpu = get_cpu();
2354 	for_each_online_cpu(cpu) {
2355 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2356 		struct mem_cgroup *memcg;
2357 		bool flush = false;
2358 
2359 		rcu_read_lock();
2360 		memcg = stock->cached;
2361 		if (memcg && stock->nr_pages &&
2362 		    mem_cgroup_is_descendant(memcg, root_memcg))
2363 			flush = true;
2364 		if (obj_stock_flush_required(stock, root_memcg))
2365 			flush = true;
2366 		rcu_read_unlock();
2367 
2368 		if (flush &&
2369 		    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2370 			if (cpu == curcpu)
2371 				drain_local_stock(&stock->work);
2372 			else
2373 				schedule_work_on(cpu, &stock->work);
2374 		}
2375 	}
2376 	put_cpu();
2377 	mutex_unlock(&percpu_charge_mutex);
2378 }
2379 
2380 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2381 {
2382 	struct memcg_stock_pcp *stock;
2383 	struct mem_cgroup *memcg, *mi;
2384 
2385 	stock = &per_cpu(memcg_stock, cpu);
2386 	drain_stock(stock);
2387 
2388 	for_each_mem_cgroup(memcg) {
2389 		int i;
2390 
2391 		for (i = 0; i < MEMCG_NR_STAT; i++) {
2392 			int nid;
2393 			long x;
2394 
2395 			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
2396 			if (x)
2397 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2398 					atomic_long_add(x, &mi->vmstats[i]);
2399 
2400 			if (i >= NR_VM_NODE_STAT_ITEMS)
2401 				continue;
2402 
2403 			for_each_node(nid) {
2404 				struct mem_cgroup_per_node *pn;
2405 
2406 				pn = mem_cgroup_nodeinfo(memcg, nid);
2407 				x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
2408 				if (x)
2409 					do {
2410 						atomic_long_add(x, &pn->lruvec_stat[i]);
2411 					} while ((pn = parent_nodeinfo(pn, nid)));
2412 			}
2413 		}
2414 
2415 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
2416 			long x;
2417 
2418 			x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
2419 			if (x)
2420 				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
2421 					atomic_long_add(x, &mi->vmevents[i]);
2422 		}
2423 	}
2424 
2425 	return 0;
2426 }
2427 
2428 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2429 				  unsigned int nr_pages,
2430 				  gfp_t gfp_mask)
2431 {
2432 	unsigned long nr_reclaimed = 0;
2433 
2434 	do {
2435 		unsigned long pflags;
2436 
2437 		if (page_counter_read(&memcg->memory) <=
2438 		    READ_ONCE(memcg->memory.high))
2439 			continue;
2440 
2441 		memcg_memory_event(memcg, MEMCG_HIGH);
2442 
2443 		psi_memstall_enter(&pflags);
2444 		nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2445 							     gfp_mask, true);
2446 		psi_memstall_leave(&pflags);
2447 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2448 		 !mem_cgroup_is_root(memcg));
2449 
2450 	return nr_reclaimed;
2451 }
2452 
2453 static void high_work_func(struct work_struct *work)
2454 {
2455 	struct mem_cgroup *memcg;
2456 
2457 	memcg = container_of(work, struct mem_cgroup, high_work);
2458 	reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2459 }
2460 
2461 /*
2462  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2463  * enough to cause a significant slowdown in most cases, while still
2464  * allowing diagnostics and tracing to proceed without becoming stuck.
2465  */
2466 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2467 
2468 /*
2469  * When calculating the delay, we use these on either side of the exponentiation to
2470  * maintain precision and scale to a reasonable number of jiffies (see the table
2471  * below).
2472  *
2473  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2474  *   overage ratio to a delay.
2475  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2476  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2477  *   to produce a reasonable delay curve.
2478  *
2479  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2480  * reasonable delay curve compared to precision-adjusted overage, not
2481  * penalising heavily at first, but still making sure that growth beyond the
2482  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2483  * example, with a high of 100 megabytes:
2484  *
2485  *  +-------+------------------------+
2486  *  | usage | time to allocate in ms |
2487  *  +-------+------------------------+
2488  *  | 100M  |                      0 |
2489  *  | 101M  |                      6 |
2490  *  | 102M  |                     25 |
2491  *  | 103M  |                     57 |
2492  *  | 104M  |                    102 |
2493  *  | 105M  |                    159 |
2494  *  | 106M  |                    230 |
2495  *  | 107M  |                    313 |
2496  *  | 108M  |                    409 |
2497  *  | 109M  |                    518 |
2498  *  | 110M  |                    639 |
2499  *  | 111M  |                    774 |
2500  *  | 112M  |                    921 |
2501  *  | 113M  |                   1081 |
2502  *  | 114M  |                   1254 |
2503  *  | 115M  |                   1439 |
2504  *  | 116M  |                   1638 |
2505  *  | 117M  |                   1849 |
2506  *  | 118M  |                   2000 |
2507  *  | 119M  |                   2000 |
2508  *  | 120M  |                   2000 |
2509  *  +-------+------------------------+
2510  */
2511 #define MEMCG_DELAY_PRECISION_SHIFT 20
2512 #define MEMCG_DELAY_SCALING_SHIFT 14
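
/*
 * Worked example (an illustrative calculation, assuming HZ == 1000 and a
 * full MEMCG_CHARGE_BATCH being charged): with memory.high = 100M and
 * usage = 104M,
 *
 *   overage = ((104M - 100M) << MEMCG_DELAY_PRECISION_SHIFT) / 100M ~= 41943
 *   penalty = (41943 * 41943 * HZ)
 *             >> MEMCG_DELAY_PRECISION_SHIFT
 *             >> MEMCG_DELAY_SCALING_SHIFT       ~= 102 jiffies ~= 102ms
 *
 * which matches the 104M row of the table above.
 */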
2513 
2514 static u64 calculate_overage(unsigned long usage, unsigned long high)
2515 {
2516 	u64 overage;
2517 
2518 	if (usage <= high)
2519 		return 0;
2520 
2521 	/*
2522 	 * Prevent division by 0 in the overage calculation by acting as if
2523 	 * it were a threshold of 1 page
2524 	 */
2525 	high = max(high, 1UL);
2526 
2527 	overage = usage - high;
2528 	overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2529 	return div64_u64(overage, high);
2530 }
2531 
2532 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2533 {
2534 	u64 overage, max_overage = 0;
2535 
2536 	do {
2537 		overage = calculate_overage(page_counter_read(&memcg->memory),
2538 					    READ_ONCE(memcg->memory.high));
2539 		max_overage = max(overage, max_overage);
2540 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2541 		 !mem_cgroup_is_root(memcg));
2542 
2543 	return max_overage;
2544 }
2545 
2546 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2547 {
2548 	u64 overage, max_overage = 0;
2549 
2550 	do {
2551 		overage = calculate_overage(page_counter_read(&memcg->swap),
2552 					    READ_ONCE(memcg->swap.high));
2553 		if (overage)
2554 			memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2555 		max_overage = max(overage, max_overage);
2556 	} while ((memcg = parent_mem_cgroup(memcg)) &&
2557 		 !mem_cgroup_is_root(memcg));
2558 
2559 	return max_overage;
2560 }
2561 
2562 /*
2563  * Get the number of jiffies for which we should penalise a mischievous cgroup
2564  * that is exceeding its memory.high, by checking both it and its ancestors.
2565  */
2566 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2567 					  unsigned int nr_pages,
2568 					  u64 max_overage)
2569 {
2570 	unsigned long penalty_jiffies;
2571 
2572 	if (!max_overage)
2573 		return 0;
2574 
2575 	/*
2576 	 * We use overage compared to memory.high to calculate the number of
2577 	 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2578 	 * fairly lenient on small overages, and increasingly harsh when the
2579 	 * memcg in question makes it clear that it has no intention of stopping
2580 	 * its crazy behaviour, so we exponentially increase the delay based on
2581 	 * overage amount.
2582 	 */
2583 	penalty_jiffies = max_overage * max_overage * HZ;
2584 	penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2585 	penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2586 
2587 	/*
2588 	 * Factor in the task's own contribution to the overage, such that four
2589 	 * N-sized allocations are throttled approximately the same as one
2590 	 * 4N-sized allocation.
2591 	 *
2592 	 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2593 	 * larger the current charge batch is than that.
2594 	 */
2595 	return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2596 }
2597 
2598 /*
2599  * Scheduled by try_charge() to be executed from the userland return path
2600  * and reclaims memory over the high limit.
2601  */
2602 void mem_cgroup_handle_over_high(void)
2603 {
2604 	unsigned long penalty_jiffies;
2605 	unsigned long pflags;
2606 	unsigned long nr_reclaimed;
2607 	unsigned int nr_pages = current->memcg_nr_pages_over_high;
2608 	int nr_retries = MAX_RECLAIM_RETRIES;
2609 	struct mem_cgroup *memcg;
2610 	bool in_retry = false;
2611 
2612 	if (likely(!nr_pages))
2613 		return;
2614 
2615 	memcg = get_mem_cgroup_from_mm(current->mm);
2616 	current->memcg_nr_pages_over_high = 0;
2617 
2618 retry_reclaim:
2619 	/*
2620 	 * The allocating task should reclaim at least the batch size, but for
2621 	 * subsequent retries we only want to do what's necessary to prevent oom
2622 	 * or breaching resource isolation.
2623 	 *
2624 	 * This is distinct from memory.max or page allocator behaviour because
2625 	 * memory.high is currently batched, whereas memory.max and the page
2626 	 * allocator run every time an allocation is made.
2627 	 */
2628 	nr_reclaimed = reclaim_high(memcg,
2629 				    in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2630 				    GFP_KERNEL);
2631 
2632 	/*
2633 	 * memory.high is breached and reclaim is unable to keep up. Throttle
2634 	 * allocators proactively to slow down excessive growth.
2635 	 */
2636 	penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2637 					       mem_find_max_overage(memcg));
2638 
2639 	penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2640 						swap_find_max_overage(memcg));
2641 
2642 	/*
2643 	 * Clamp the max delay per usermode return so as to still keep the
2644 	 * application moving forwards and also permit diagnostics, albeit
2645 	 * extremely slowly.
2646 	 */
2647 	penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2648 
2649 	/*
2650 	 * Don't sleep if the amount of jiffies this memcg owes us is so low
2651 	 * that it's not even worth doing, in an attempt to be nice to those who
2652 	 * go only a small amount over their memory.high value and maybe haven't
2653 	 * been aggressively reclaimed enough yet.
2654 	 */
2655 	if (penalty_jiffies <= HZ / 100)
2656 		goto out;
2657 
2658 	/*
2659 	 * If reclaim is making forward progress but we're still over
2660 	 * memory.high, we want to encourage that rather than doing allocator
2661 	 * throttling.
2662 	 */
2663 	if (nr_reclaimed || nr_retries--) {
2664 		in_retry = true;
2665 		goto retry_reclaim;
2666 	}
2667 
2668 	/*
2669 	 * If we exit early, we're guaranteed to die (since
2670 	 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2671 	 * need to account for any ill-begotten jiffies to pay them off later.
2672 	 */
2673 	psi_memstall_enter(&pflags);
2674 	schedule_timeout_killable(penalty_jiffies);
2675 	psi_memstall_leave(&pflags);
2676 
2677 out:
2678 	css_put(&memcg->css);
2679 }
2680 
2681 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2682 		      unsigned int nr_pages)
2683 {
2684 	unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2685 	int nr_retries = MAX_RECLAIM_RETRIES;
2686 	struct mem_cgroup *mem_over_limit;
2687 	struct page_counter *counter;
2688 	enum oom_status oom_status;
2689 	unsigned long nr_reclaimed;
2690 	bool may_swap = true;
2691 	bool drained = false;
2692 	unsigned long pflags;
2693 
2694 	if (mem_cgroup_is_root(memcg))
2695 		return 0;
2696 retry:
2697 	if (consume_stock(memcg, nr_pages))
2698 		return 0;
2699 
2700 	if (!do_memsw_account() ||
2701 	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2702 		if (page_counter_try_charge(&memcg->memory, batch, &counter))
2703 			goto done_restock;
2704 		if (do_memsw_account())
2705 			page_counter_uncharge(&memcg->memsw, batch);
2706 		mem_over_limit = mem_cgroup_from_counter(counter, memory);
2707 	} else {
2708 		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2709 		may_swap = false;
2710 	}
2711 
2712 	if (batch > nr_pages) {
2713 		batch = nr_pages;
2714 		goto retry;
2715 	}
2716 
2717 	/*
2718 	 * Memcg doesn't have a dedicated reserve for atomic
2719 	 * allocations. But like the global atomic pool, we need to
2720 	 * put the burden of reclaim on regular allocation requests
2721 	 * and let these go through as privileged allocations.
2722 	 */
2723 	if (gfp_mask & __GFP_ATOMIC)
2724 		goto force;
2725 
2726 	/*
2727 	 * Unlike in global OOM situations, memcg is not in a physical
2728 	 * memory shortage.  Allow dying and OOM-killed tasks to
2729 	 * bypass the last charges so that they can exit quickly and
2730 	 * free their memory.
2731 	 */
2732 	if (unlikely(should_force_charge()))
2733 		goto force;
2734 
2735 	/*
2736 	 * Prevent unbounded recursion when reclaim operations need to
2737 	 * allocate memory. This might exceed the limits temporarily,
2738 	 * but we prefer facilitating memory reclaim and getting back
2739 	 * under the limit over triggering OOM kills in these cases.
2740 	 */
2741 	if (unlikely(current->flags & PF_MEMALLOC))
2742 		goto force;
2743 
2744 	if (unlikely(task_in_memcg_oom(current)))
2745 		goto nomem;
2746 
2747 	if (!gfpflags_allow_blocking(gfp_mask))
2748 		goto nomem;
2749 
2750 	memcg_memory_event(mem_over_limit, MEMCG_MAX);
2751 
2752 	psi_memstall_enter(&pflags);
2753 	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2754 						    gfp_mask, may_swap);
2755 	psi_memstall_leave(&pflags);
2756 
2757 	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2758 		goto retry;
2759 
2760 	if (!drained) {
2761 		drain_all_stock(mem_over_limit);
2762 		drained = true;
2763 		goto retry;
2764 	}
2765 
2766 	if (gfp_mask & __GFP_NORETRY)
2767 		goto nomem;
2768 	/*
2769 	 * Even though the limit is exceeded at this point, reclaim
2770 	 * may have been able to free some pages.  Retry the charge
2771 	 * before killing the task.
2772 	 *
2773 	 * Only for regular pages, though: huge pages are rather
2774 	 * unlikely to succeed so close to the limit, and we fall back
2775 	 * to regular pages anyway in case of failure.
2776 	 */
2777 	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2778 		goto retry;
2779 	/*
2780 	 * At task move, charge accounts can be doubly counted. So, it's
2781 	 * better to wait until the end of task_move if something is going on.
2782 	 */
2783 	if (mem_cgroup_wait_acct_move(mem_over_limit))
2784 		goto retry;
2785 
2786 	if (nr_retries--)
2787 		goto retry;
2788 
2789 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
2790 		goto nomem;
2791 
2792 	if (gfp_mask & __GFP_NOFAIL)
2793 		goto force;
2794 
2795 	if (fatal_signal_pending(current))
2796 		goto force;
2797 
2798 	/*
2799 	 * Keep retrying as long as the memcg OOM killer is able to make
2800 	 * forward progress, or bypass the charge if the OOM killer
2801 	 * couldn't make any progress.
2802 	 */
2803 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2804 		       get_order(nr_pages * PAGE_SIZE));
2805 	switch (oom_status) {
2806 	case OOM_SUCCESS:
2807 		nr_retries = MAX_RECLAIM_RETRIES;
2808 		goto retry;
2809 	case OOM_FAILED:
2810 		goto force;
2811 	default:
2812 		goto nomem;
2813 	}
2814 nomem:
2815 	if (!(gfp_mask & __GFP_NOFAIL))
2816 		return -ENOMEM;
2817 force:
2818 	/*
2819 	 * The allocation either can't fail or will lead to more memory
2820 	 * being freed very soon.  Allow memory usage to go over the limit
2821 	 * temporarily by force charging it.
2822 	 */
2823 	page_counter_charge(&memcg->memory, nr_pages);
2824 	if (do_memsw_account())
2825 		page_counter_charge(&memcg->memsw, nr_pages);
2826 
2827 	return 0;
2828 
2829 done_restock:
2830 	if (batch > nr_pages)
2831 		refill_stock(memcg, batch - nr_pages);
2832 
2833 	/*
2834 	 * If the hierarchy is above the normal consumption range, schedule
2835 	 * reclaim on returning to userland.  We can perform reclaim here
2836 	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2837 	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2838 	 * not recorded as it most likely matches current's and won't
2839 	 * change in the meantime.  As high limit is checked again before
2840 	 * reclaim, the cost of mismatch is negligible.
2841 	 */
2842 	do {
2843 		bool mem_high, swap_high;
2844 
2845 		mem_high = page_counter_read(&memcg->memory) >
2846 			READ_ONCE(memcg->memory.high);
2847 		swap_high = page_counter_read(&memcg->swap) >
2848 			READ_ONCE(memcg->swap.high);
2849 
2850 		/* Don't bother a random interrupted task */
2851 		if (in_interrupt()) {
2852 			if (mem_high) {
2853 				schedule_work(&memcg->high_work);
2854 				break;
2855 			}
2856 			continue;
2857 		}
2858 
2859 		if (mem_high || swap_high) {
2860 			/*
2861 			 * The allocating tasks in this cgroup will need to do
2862 			 * reclaim or be throttled to prevent further growth
2863 			 * of the memory or swap footprints.
2864 			 *
2865 			 * Target some best-effort fairness between the tasks,
2866 			 * and distribute reclaim work and delay penalties
2867 			 * based on how much each task is actually allocating.
2868 			 */
2869 			current->memcg_nr_pages_over_high += batch;
2870 			set_notify_resume(current);
2871 			break;
2872 		}
2873 	} while ((memcg = parent_mem_cgroup(memcg)));
2874 
2875 	return 0;
2876 }
2877 
2878 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2879 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2880 {
2881 	if (mem_cgroup_is_root(memcg))
2882 		return;
2883 
2884 	page_counter_uncharge(&memcg->memory, nr_pages);
2885 	if (do_memsw_account())
2886 		page_counter_uncharge(&memcg->memsw, nr_pages);
2887 }
2888 #endif
2889 
2890 static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2891 {
2892 	VM_BUG_ON_PAGE(page->mem_cgroup, page);
2893 	/*
2894 	 * Any of the following ensures page->mem_cgroup stability:
2895 	 *
2896 	 * - the page lock
2897 	 * - LRU isolation
2898 	 * - lock_page_memcg()
2899 	 * - exclusive reference
2900 	 */
2901 	page->mem_cgroup = memcg;
2902 }
2903 
2904 #ifdef CONFIG_MEMCG_KMEM
2905 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2906 				 gfp_t gfp)
2907 {
2908 	unsigned int objects = objs_per_slab_page(s, page);
2909 	void *vec;
2910 
2911 	vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2912 			   page_to_nid(page));
2913 	if (!vec)
2914 		return -ENOMEM;
2915 
2916 	if (cmpxchg(&page->obj_cgroups, NULL,
2917 		    (struct obj_cgroup **) ((unsigned long)vec | 0x1UL)))
2918 		kfree(vec);
2919 	else
2920 		kmemleak_not_leak(vec);
2921 
2922 	return 0;
2923 }
2924 
2925 /*
2926  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2927  *
2928  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2929  * cgroup_mutex, etc.
2930  */
2931 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2932 {
2933 	struct page *page;
2934 
2935 	if (mem_cgroup_disabled())
2936 		return NULL;
2937 
2938 	page = virt_to_head_page(p);
2939 
2940 	/*
2941 	 * If page->mem_cgroup is set, it's either a simple mem_cgroup pointer
2942 	 * or a pointer to obj_cgroup vector. In the latter case the lowest
2943 	 * bit of the pointer is set.
2944 	 * The page->mem_cgroup pointer can be asynchronously changed
2945 	 * from NULL to (obj_cgroup_vec | 0x1UL), but can't be changed
2946 	 * from a valid memcg pointer to objcg vector or back.
2947 	 */
2948 	if (!page->mem_cgroup)
2949 		return NULL;
2950 
2951 	/*
2952 	 * Slab objects are accounted individually, not per-page.
2953 	 * Memcg membership data for each individual object is saved in
2954 	 * the page->obj_cgroups.
2955 	 */
2956 	if (page_has_obj_cgroups(page)) {
2957 		struct obj_cgroup *objcg;
2958 		unsigned int off;
2959 
2960 		off = obj_to_index(page->slab_cache, page, p);
2961 		objcg = page_obj_cgroups(page)[off];
2962 		if (objcg)
2963 			return obj_cgroup_memcg(objcg);
2964 
2965 		return NULL;
2966 	}
2967 
2968 	/* All other pages use page->mem_cgroup */
2969 	return page->mem_cgroup;
2970 }
2971 
2972 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2973 {
2974 	struct obj_cgroup *objcg = NULL;
2975 	struct mem_cgroup *memcg;
2976 
2977 	if (memcg_kmem_bypass())
2978 		return NULL;
2979 
2980 	rcu_read_lock();
2981 	if (unlikely(active_memcg()))
2982 		memcg = active_memcg();
2983 	else
2984 		memcg = mem_cgroup_from_task(current);
2985 
2986 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2987 		objcg = rcu_dereference(memcg->objcg);
2988 		if (objcg && obj_cgroup_tryget(objcg))
2989 			break;
2990 	}
2991 	rcu_read_unlock();
2992 
2993 	return objcg;
2994 }
2995 
2996 static int memcg_alloc_cache_id(void)
2997 {
2998 	int id, size;
2999 	int err;
3000 
3001 	id = ida_simple_get(&memcg_cache_ida,
3002 			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
3003 	if (id < 0)
3004 		return id;
3005 
3006 	if (id < memcg_nr_cache_ids)
3007 		return id;
3008 
3009 	/*
3010 	 * There's no space for the new id in memcg_caches arrays,
3011 	 * so we have to grow them.
3012 	 */
3013 	down_write(&memcg_cache_ids_sem);
3014 
3015 	size = 2 * (id + 1);
3016 	if (size < MEMCG_CACHES_MIN_SIZE)
3017 		size = MEMCG_CACHES_MIN_SIZE;
3018 	else if (size > MEMCG_CACHES_MAX_SIZE)
3019 		size = MEMCG_CACHES_MAX_SIZE;
3020 
3021 	err = memcg_update_all_list_lrus(size);
3022 	if (!err)
3023 		memcg_nr_cache_ids = size;
3024 
3025 	up_write(&memcg_cache_ids_sem);
3026 
3027 	if (err) {
3028 		ida_simple_remove(&memcg_cache_ida, id);
3029 		return err;
3030 	}
3031 	return id;
3032 }
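
/*
 * Example of the growth rule above (hypothetical numbers): if
 * memcg_nr_cache_ids is 64 and the IDA hands out id 64, the per-memcg
 * arrays are grown to 2 * (64 + 1) = 130 entries (clamped to the
 * MEMCG_CACHES_MIN_SIZE..MEMCG_CACHES_MAX_SIZE range) before the id is
 * returned to the caller.
 */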
3033 
3034 static void memcg_free_cache_id(int id)
3035 {
3036 	ida_simple_remove(&memcg_cache_ida, id);
3037 }
3038 
3039 /**
3040  * __memcg_kmem_charge: charge a number of kernel pages to a memcg
3041  * @memcg: memory cgroup to charge
3042  * @gfp: reclaim mode
3043  * @nr_pages: number of pages to charge
3044  *
3045  * Returns 0 on success, an error code on failure.
3046  */
3047 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
3048 			unsigned int nr_pages)
3049 {
3050 	struct page_counter *counter;
3051 	int ret;
3052 
3053 	ret = try_charge(memcg, gfp, nr_pages);
3054 	if (ret)
3055 		return ret;
3056 
3057 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
3058 	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
3059 
3060 		/*
3061 		 * Enforce __GFP_NOFAIL allocation because callers are not
3062 		 * prepared to see failures and likely do not have any failure
3063 		 * handling code.
3064 		 */
3065 		if (gfp & __GFP_NOFAIL) {
3066 			page_counter_charge(&memcg->kmem, nr_pages);
3067 			return 0;
3068 		}
3069 		cancel_charge(memcg, nr_pages);
3070 		return -ENOMEM;
3071 	}
3072 	return 0;
3073 }
3074 
3075 /**
3076  * __memcg_kmem_uncharge: uncharge a number of kernel pages from a memcg
3077  * @memcg: memcg to uncharge
3078  * @nr_pages: number of pages to uncharge
3079  */
3080 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
3081 {
3082 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
3083 		page_counter_uncharge(&memcg->kmem, nr_pages);
3084 
3085 	page_counter_uncharge(&memcg->memory, nr_pages);
3086 	if (do_memsw_account())
3087 		page_counter_uncharge(&memcg->memsw, nr_pages);
3088 }
3089 
3090 /**
3091  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3092  * @page: page to charge
3093  * @gfp: reclaim mode
3094  * @order: allocation order
3095  *
3096  * Returns 0 on success, an error code on failure.
3097  */
3098 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3099 {
3100 	struct mem_cgroup *memcg;
3101 	int ret = 0;
3102 
3103 	memcg = get_mem_cgroup_from_current();
3104 	if (memcg && !mem_cgroup_is_root(memcg)) {
3105 		ret = __memcg_kmem_charge(memcg, gfp, 1 << order);
3106 		if (!ret) {
3107 			page->mem_cgroup = memcg;
3108 			__SetPageKmemcg(page);
3109 			return 0;
3110 		}
3111 		css_put(&memcg->css);
3112 	}
3113 	return ret;
3114 }
3115 
3116 /**
3117  * __memcg_kmem_uncharge_page: uncharge a kmem page
3118  * @page: page to uncharge
3119  * @order: allocation order
3120  */
3121 void __memcg_kmem_uncharge_page(struct page *page, int order)
3122 {
3123 	struct mem_cgroup *memcg = page->mem_cgroup;
3124 	unsigned int nr_pages = 1 << order;
3125 
3126 	if (!memcg)
3127 		return;
3128 
3129 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
3130 	__memcg_kmem_uncharge(memcg, nr_pages);
3131 	page->mem_cgroup = NULL;
3132 	css_put(&memcg->css);
3133 
3134 	/* slab pages do not have PageKmemcg flag set */
3135 	if (PageKmemcg(page))
3136 		__ClearPageKmemcg(page);
3137 }
3138 
3139 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3140 {
3141 	struct memcg_stock_pcp *stock;
3142 	unsigned long flags;
3143 	bool ret = false;
3144 
3145 	local_irq_save(flags);
3146 
3147 	stock = this_cpu_ptr(&memcg_stock);
3148 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3149 		stock->nr_bytes -= nr_bytes;
3150 		ret = true;
3151 	}
3152 
3153 	local_irq_restore(flags);
3154 
3155 	return ret;
3156 }
3157 
3158 static void drain_obj_stock(struct memcg_stock_pcp *stock)
3159 {
3160 	struct obj_cgroup *old = stock->cached_objcg;
3161 
3162 	if (!old)
3163 		return;
3164 
3165 	if (stock->nr_bytes) {
3166 		unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3167 		unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3168 
3169 		if (nr_pages) {
3170 			rcu_read_lock();
3171 			__memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
3172 			rcu_read_unlock();
3173 		}
3174 
3175 		/*
3176 		 * The leftover is flushed to the centralized per-memcg value.
3177 		 * On the next attempt to refill obj stock it will be moved
3178 		 * to a per-cpu stock (probably, on another CPU), see
3179 		 * refill_obj_stock().
3180 		 *
3181 		 * How often it's flushed is a trade-off between the memory
3182 		 * limit enforcement accuracy and potential CPU contention,
3183 		 * so it might be changed in the future.
3184 		 */
3185 		atomic_add(nr_bytes, &old->nr_charged_bytes);
3186 		stock->nr_bytes = 0;
3187 	}
3188 
3189 	obj_cgroup_put(old);
3190 	stock->cached_objcg = NULL;
3191 }
3192 
3193 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3194 				     struct mem_cgroup *root_memcg)
3195 {
3196 	struct mem_cgroup *memcg;
3197 
3198 	if (stock->cached_objcg) {
3199 		memcg = obj_cgroup_memcg(stock->cached_objcg);
3200 		if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3201 			return true;
3202 	}
3203 
3204 	return false;
3205 }
3206 
3207 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3208 {
3209 	struct memcg_stock_pcp *stock;
3210 	unsigned long flags;
3211 
3212 	local_irq_save(flags);
3213 
3214 	stock = this_cpu_ptr(&memcg_stock);
3215 	if (stock->cached_objcg != objcg) { /* reset if necessary */
3216 		drain_obj_stock(stock);
3217 		obj_cgroup_get(objcg);
3218 		stock->cached_objcg = objcg;
3219 		stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
3220 	}
3221 	stock->nr_bytes += nr_bytes;
3222 
3223 	if (stock->nr_bytes > PAGE_SIZE)
3224 		drain_obj_stock(stock);
3225 
3226 	local_irq_restore(flags);
3227 }
3228 
3229 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3230 {
3231 	struct mem_cgroup *memcg;
3232 	unsigned int nr_pages, nr_bytes;
3233 	int ret;
3234 
3235 	if (consume_obj_stock(objcg, size))
3236 		return 0;
3237 
3238 	/*
3239 	 * In theory, objcg->nr_charged_bytes can have enough
3240 	 * pre-charged bytes to satisfy the allocation. However,
3241 	 * flushing objcg->nr_charged_bytes requires two atomic
3242 	 * operations, and objcg->nr_charged_bytes can't be big,
3243 	 * so it's better to ignore it and try to grab some new pages.
3244 	 * objcg->nr_charged_bytes will be flushed in
3245 	 * refill_obj_stock(), called from this function or
3246 	 * independently later.
3247 	 */
3248 	rcu_read_lock();
3249 	memcg = obj_cgroup_memcg(objcg);
3250 	css_get(&memcg->css);
3251 	rcu_read_unlock();
3252 
3253 	nr_pages = size >> PAGE_SHIFT;
3254 	nr_bytes = size & (PAGE_SIZE - 1);
3255 
3256 	if (nr_bytes)
3257 		nr_pages += 1;
3258 
3259 	ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
3260 	if (!ret && nr_bytes)
3261 		refill_obj_stock(objcg, PAGE_SIZE - nr_bytes);
3262 
3263 	css_put(&memcg->css);
3264 	return ret;
3265 }
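
/*
 * Worked example for the charge path above (illustrative, assuming 4K
 * pages): charging 700 bytes with an empty per-cpu stock charges one full
 * page via __memcg_kmem_charge() and then stashes the remaining
 * PAGE_SIZE - 700 = 3396 bytes in the per-cpu obj stock via
 * refill_obj_stock(), so the next sub-page charge for the same objcg can
 * be served from those leftover bytes.
 */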
3266 
3267 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3268 {
3269 	refill_obj_stock(objcg, size);
3270 }
3271 
3272 #endif /* CONFIG_MEMCG_KMEM */
3273 
3274 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3275 
3276 /*
3277  * Tail pages are not charged individually, so copy the memcg binding from
3278  * the head page. We're under pgdat->lru_lock, and migration entries are set up in all page mappings.
3279  */
3280 void mem_cgroup_split_huge_fixup(struct page *head)
3281 {
3282 	struct mem_cgroup *memcg = head->mem_cgroup;
3283 	int i;
3284 
3285 	if (mem_cgroup_disabled())
3286 		return;
3287 
3288 	for (i = 1; i < HPAGE_PMD_NR; i++) {
3289 		css_get(&memcg->css);
3290 		head[i].mem_cgroup = memcg;
3291 	}
3292 }
3293 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
3294 
3295 #ifdef CONFIG_MEMCG_SWAP
3296 /**
3297  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3298  * @entry: swap entry to be moved
3299  * @from:  mem_cgroup which the entry is moved from
3300  * @to:  mem_cgroup which the entry is moved to
3301  *
3302  * It succeeds only when the swap_cgroup's record for this entry is the same
3303  * as the mem_cgroup's id of @from.
3304  *
3305  * Returns 0 on success, -EINVAL on failure.
3306  *
3307  * The caller must have charged to @to, IOW, called page_counter_charge() for
3308  * both memory and memsw, and called css_get().
3309  */
3310 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3311 				struct mem_cgroup *from, struct mem_cgroup *to)
3312 {
3313 	unsigned short old_id, new_id;
3314 
3315 	old_id = mem_cgroup_id(from);
3316 	new_id = mem_cgroup_id(to);
3317 
3318 	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3319 		mod_memcg_state(from, MEMCG_SWAP, -1);
3320 		mod_memcg_state(to, MEMCG_SWAP, 1);
3321 		return 0;
3322 	}
3323 	return -EINVAL;
3324 }
3325 #else
3326 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3327 				struct mem_cgroup *from, struct mem_cgroup *to)
3328 {
3329 	return -EINVAL;
3330 }
3331 #endif
3332 
3333 static DEFINE_MUTEX(memcg_max_mutex);
3334 
3335 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3336 				 unsigned long max, bool memsw)
3337 {
3338 	bool enlarge = false;
3339 	bool drained = false;
3340 	int ret;
3341 	bool limits_invariant;
3342 	struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3343 
3344 	do {
3345 		if (signal_pending(current)) {
3346 			ret = -EINTR;
3347 			break;
3348 		}
3349 
3350 		mutex_lock(&memcg_max_mutex);
3351 		/*
3352 		 * Make sure that the new limit (memsw or memory limit) doesn't
3353 		 * break our basic invariant rule memory.max <= memsw.max.
3354 		 */
3355 		limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3356 					   max <= memcg->memsw.max;
3357 		if (!limits_invariant) {
3358 			mutex_unlock(&memcg_max_mutex);
3359 			ret = -EINVAL;
3360 			break;
3361 		}
3362 		if (max > counter->max)
3363 			enlarge = true;
3364 		ret = page_counter_set_max(counter, max);
3365 		mutex_unlock(&memcg_max_mutex);
3366 
3367 		if (!ret)
3368 			break;
3369 
3370 		if (!drained) {
3371 			drain_all_stock(memcg);
3372 			drained = true;
3373 			continue;
3374 		}
3375 
3376 		if (!try_to_free_mem_cgroup_pages(memcg, 1,
3377 					GFP_KERNEL, !memsw)) {
3378 			ret = -EBUSY;
3379 			break;
3380 		}
3381 	} while (true);
3382 
3383 	if (!ret && enlarge)
3384 		memcg_oom_recover(memcg);
3385 
3386 	return ret;
3387 }
3388 
3389 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3390 					    gfp_t gfp_mask,
3391 					    unsigned long *total_scanned)
3392 {
3393 	unsigned long nr_reclaimed = 0;
3394 	struct mem_cgroup_per_node *mz, *next_mz = NULL;
3395 	unsigned long reclaimed;
3396 	int loop = 0;
3397 	struct mem_cgroup_tree_per_node *mctz;
3398 	unsigned long excess;
3399 	unsigned long nr_scanned;
3400 
3401 	if (order > 0)
3402 		return 0;
3403 
3404 	mctz = soft_limit_tree_node(pgdat->node_id);
3405 
3406 	/*
3407 	 * Do not even bother to check the largest node if the root
3408 	 * is empty. Do it lockless to prevent lock bouncing. Races
3409 	 * are acceptable as soft limit is best effort anyway.
3410 	 */
3411 	if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3412 		return 0;
3413 
3414 	/*
3415 	 * This loop can run for a while, especially if mem_cgroups continuously
3416 	 * keep exceeding their soft limit and putting the system under
3417 	 * pressure
3418 	 */
3419 	do {
3420 		if (next_mz)
3421 			mz = next_mz;
3422 		else
3423 			mz = mem_cgroup_largest_soft_limit_node(mctz);
3424 		if (!mz)
3425 			break;
3426 
3427 		nr_scanned = 0;
3428 		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3429 						    gfp_mask, &nr_scanned);
3430 		nr_reclaimed += reclaimed;
3431 		*total_scanned += nr_scanned;
3432 		spin_lock_irq(&mctz->lock);
3433 		__mem_cgroup_remove_exceeded(mz, mctz);
3434 
3435 		/*
3436 		 * If we failed to reclaim anything from this memory cgroup
3437 		 * it is time to move on to the next cgroup
3438 		 */
3439 		next_mz = NULL;
3440 		if (!reclaimed)
3441 			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3442 
3443 		excess = soft_limit_excess(mz->memcg);
3444 		/*
3445 		 * One school of thought says that we should not add
3446 		 * back the node to the tree if reclaim returns 0.
3447 		 * But our reclaim could return 0 simply because, due
3448 		 * to priority, we are exposing a smaller subset of
3449 		 * memory to reclaim from. Consider this as a longer
3450 		 * term TODO.
3451 		 */
3452 		/* If excess == 0, no tree ops */
3453 		__mem_cgroup_insert_exceeded(mz, mctz, excess);
3454 		spin_unlock_irq(&mctz->lock);
3455 		css_put(&mz->memcg->css);
3456 		loop++;
3457 		/*
3458 		 * Could not reclaim anything and there are no more
3459 		 * mem cgroups to try or we seem to be looping without
3460 		 * reclaiming anything.
3461 		 */
3462 		if (!nr_reclaimed &&
3463 			(next_mz == NULL ||
3464 			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3465 			break;
3466 	} while (!nr_reclaimed);
3467 	if (next_mz)
3468 		css_put(&next_mz->memcg->css);
3469 	return nr_reclaimed;
3470 }
3471 
3472 /*
3473  * Test whether @memcg has children, dead or alive.  Note that this
3474  * function doesn't care whether @memcg has use_hierarchy enabled and
3475  * returns %true if there are child csses according to the cgroup
3476  * hierarchy.  Testing use_hierarchy is the caller's responsibility.
3477  */
3478 static inline bool memcg_has_children(struct mem_cgroup *memcg)
3479 {
3480 	bool ret;
3481 
3482 	rcu_read_lock();
3483 	ret = css_next_child(NULL, &memcg->css);
3484 	rcu_read_unlock();
3485 	return ret;
3486 }
3487 
3488 /*
3489  * Reclaims as many pages from the given memcg as possible.
3490  *
3491  * Caller is responsible for holding css reference for memcg.
3492  */
3493 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3494 {
3495 	int nr_retries = MAX_RECLAIM_RETRIES;
3496 
3497 	/* we call try-to-free pages to make this cgroup empty */
3498 	lru_add_drain_all();
3499 
3500 	drain_all_stock(memcg);
3501 
3502 	/* try to free all pages in this cgroup */
3503 	while (nr_retries && page_counter_read(&memcg->memory)) {
3504 		int progress;
3505 
3506 		if (signal_pending(current))
3507 			return -EINTR;
3508 
3509 		progress = try_to_free_mem_cgroup_pages(memcg, 1,
3510 							GFP_KERNEL, true);
3511 		if (!progress) {
3512 			nr_retries--;
3513 			/* maybe some writeback is necessary */
3514 			congestion_wait(BLK_RW_ASYNC, HZ/10);
3515 		}
3516 
3517 	}
3518 
3519 	return 0;
3520 }
3521 
3522 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3523 					    char *buf, size_t nbytes,
3524 					    loff_t off)
3525 {
3526 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3527 
3528 	if (mem_cgroup_is_root(memcg))
3529 		return -EINVAL;
3530 	return mem_cgroup_force_empty(memcg) ?: nbytes;
3531 }
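
/*
 * Illustrative cgroup v1 usage of the force_empty handlers above (shell
 * example, not kernel code):
 *
 *   # echo 0 > memory.force_empty
 *
 * Any write triggers the reclaim loop in mem_cgroup_force_empty().  The
 * write returns -EINVAL on the root cgroup and -EINTR if a pending
 * signal interrupts the reclaim retries.
 */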
3532 
3533 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3534 				     struct cftype *cft)
3535 {
3536 	return mem_cgroup_from_css(css)->use_hierarchy;
3537 }
3538 
3539 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3540 				      struct cftype *cft, u64 val)
3541 {
3542 	int retval = 0;
3543 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3544 	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
3545 
3546 	if (memcg->use_hierarchy == val)
3547 		return 0;
3548 
3549 	/*
3550 	 * If parent's use_hierarchy is set, we can't make any modifications
3551 	 * in the child subtrees. If it is unset, then the change can
3552 	 * occur, provided the current cgroup has no children.
3553 	 *
3554 	 * For the root cgroup, parent_memcg is NULL; we allow the value to
3555 	 * be set if there are no children.
3556 	 */
3557 	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
3558 				(val == 1 || val == 0)) {
3559 		if (!memcg_has_children(memcg))
3560 			memcg->use_hierarchy = val;
3561 		else
3562 			retval = -EBUSY;
3563 	} else
3564 		retval = -EINVAL;
3565 
3566 	return retval;
3567 }
3568 
3569 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3570 {
3571 	unsigned long val;
3572 
3573 	if (mem_cgroup_is_root(memcg)) {
3574 		val = memcg_page_state(memcg, NR_FILE_PAGES) +
3575 			memcg_page_state(memcg, NR_ANON_MAPPED);
3576 		if (swap)
3577 			val += memcg_page_state(memcg, MEMCG_SWAP);
3578 	} else {
3579 		if (!swap)
3580 			val = page_counter_read(&memcg->memory);
3581 		else
3582 			val = page_counter_read(&memcg->memsw);
3583 	}
3584 	return val;
3585 }
3586 
3587 enum {
3588 	RES_USAGE,
3589 	RES_LIMIT,
3590 	RES_MAX_USAGE,
3591 	RES_FAILCNT,
3592 	RES_SOFT_LIMIT,
3593 };
3594 
3595 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3596 			       struct cftype *cft)
3597 {
3598 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3599 	struct page_counter *counter;
3600 
3601 	switch (MEMFILE_TYPE(cft->private)) {
3602 	case _MEM:
3603 		counter = &memcg->memory;
3604 		break;
3605 	case _MEMSWAP:
3606 		counter = &memcg->memsw;
3607 		break;
3608 	case _KMEM:
3609 		counter = &memcg->kmem;
3610 		break;
3611 	case _TCP:
3612 		counter = &memcg->tcpmem;
3613 		break;
3614 	default:
3615 		BUG();
3616 	}
3617 
3618 	switch (MEMFILE_ATTR(cft->private)) {
3619 	case RES_USAGE:
3620 		if (counter == &memcg->memory)
3621 			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3622 		if (counter == &memcg->memsw)
3623 			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3624 		return (u64)page_counter_read(counter) * PAGE_SIZE;
3625 	case RES_LIMIT:
3626 		return (u64)counter->max * PAGE_SIZE;
3627 	case RES_MAX_USAGE:
3628 		return (u64)counter->watermark * PAGE_SIZE;
3629 	case RES_FAILCNT:
3630 		return counter->failcnt;
3631 	case RES_SOFT_LIMIT:
3632 		return (u64)memcg->soft_limit * PAGE_SIZE;
3633 	default:
3634 		BUG();
3635 	}
3636 }
3637 
3638 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
3639 {
3640 	unsigned long stat[MEMCG_NR_STAT] = {0};
3641 	struct mem_cgroup *mi;
3642 	int node, cpu, i;
3643 
3644 	for_each_online_cpu(cpu)
3645 		for (i = 0; i < MEMCG_NR_STAT; i++)
3646 			stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3647 
3648 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3649 		for (i = 0; i < MEMCG_NR_STAT; i++)
3650 			atomic_long_add(stat[i], &mi->vmstats[i]);
3651 
3652 	for_each_node(node) {
3653 		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3654 		struct mem_cgroup_per_node *pi;
3655 
3656 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3657 			stat[i] = 0;
3658 
3659 		for_each_online_cpu(cpu)
3660 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3661 				stat[i] += per_cpu(
3662 					pn->lruvec_stat_cpu->count[i], cpu);
3663 
3664 		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3665 			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
3666 				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3667 	}
3668 }
3669 
3670 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3671 {
3672 	unsigned long events[NR_VM_EVENT_ITEMS];
3673 	struct mem_cgroup *mi;
3674 	int cpu, i;
3675 
3676 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3677 		events[i] = 0;
3678 
3679 	for_each_online_cpu(cpu)
3680 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3681 			events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3682 					     cpu);
3683 
3684 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3685 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3686 			atomic_long_add(events[i], &mi->vmevents[i]);
3687 }
3688 
3689 #ifdef CONFIG_MEMCG_KMEM
3690 static int memcg_online_kmem(struct mem_cgroup *memcg)
3691 {
3692 	struct obj_cgroup *objcg;
3693 	int memcg_id;
3694 
3695 	if (cgroup_memory_nokmem)
3696 		return 0;
3697 
3698 	BUG_ON(memcg->kmemcg_id >= 0);
3699 	BUG_ON(memcg->kmem_state);
3700 
3701 	memcg_id = memcg_alloc_cache_id();
3702 	if (memcg_id < 0)
3703 		return memcg_id;
3704 
3705 	objcg = obj_cgroup_alloc();
3706 	if (!objcg) {
3707 		memcg_free_cache_id(memcg_id);
3708 		return -ENOMEM;
3709 	}
3710 	objcg->memcg = memcg;
3711 	rcu_assign_pointer(memcg->objcg, objcg);
3712 
3713 	static_branch_enable(&memcg_kmem_enabled_key);
3714 
3715 	/*
3716 	 * A memory cgroup is considered kmem-online as soon as it gets
3717 	 * kmemcg_id. Setting the id after enabling static branching will
3718 	 * guarantee no one starts accounting before all call sites are
3719 	 * patched.
3720 	 */
3721 	memcg->kmemcg_id = memcg_id;
3722 	memcg->kmem_state = KMEM_ONLINE;
3723 
3724 	return 0;
3725 }
3726 
3727 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3728 {
3729 	struct cgroup_subsys_state *css;
3730 	struct mem_cgroup *parent, *child;
3731 	int kmemcg_id;
3732 
3733 	if (memcg->kmem_state != KMEM_ONLINE)
3734 		return;
3735 
3736 	memcg->kmem_state = KMEM_ALLOCATED;
3737 
3738 	parent = parent_mem_cgroup(memcg);
3739 	if (!parent)
3740 		parent = root_mem_cgroup;
3741 
3742 	memcg_reparent_objcgs(memcg, parent);
3743 
3744 	kmemcg_id = memcg->kmemcg_id;
3745 	BUG_ON(kmemcg_id < 0);
3746 
3747 	/*
3748 	 * Change kmemcg_id of this cgroup and all its descendants to the
3749 	 * parent's id, and then move all entries from this cgroup's list_lrus
3750 	 * to ones of the parent. After we have finished, all list_lrus
3751 	 * corresponding to this cgroup are guaranteed to remain empty. The
3752 	 * ordering is imposed by list_lru_node->lock taken by
3753 	 * memcg_drain_all_list_lrus().
3754 	 */
3755 	rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3756 	css_for_each_descendant_pre(css, &memcg->css) {
3757 		child = mem_cgroup_from_css(css);
3758 		BUG_ON(child->kmemcg_id != kmemcg_id);
3759 		child->kmemcg_id = parent->kmemcg_id;
3760 		if (!memcg->use_hierarchy)
3761 			break;
3762 	}
3763 	rcu_read_unlock();
3764 
3765 	memcg_drain_all_list_lrus(kmemcg_id, parent);
3766 
3767 	memcg_free_cache_id(kmemcg_id);
3768 }
3769 
3770 static void memcg_free_kmem(struct mem_cgroup *memcg)
3771 {
3772 	/* css_alloc() failed, offlining didn't happen */
3773 	if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3774 		memcg_offline_kmem(memcg);
3775 }
3776 #else
3777 static int memcg_online_kmem(struct mem_cgroup *memcg)
3778 {
3779 	return 0;
3780 }
3781 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3782 {
3783 }
3784 static void memcg_free_kmem(struct mem_cgroup *memcg)
3785 {
3786 }
3787 #endif /* CONFIG_MEMCG_KMEM */
3788 
3789 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3790 				 unsigned long max)
3791 {
3792 	int ret;
3793 
3794 	mutex_lock(&memcg_max_mutex);
3795 	ret = page_counter_set_max(&memcg->kmem, max);
3796 	mutex_unlock(&memcg_max_mutex);
3797 	return ret;
3798 }
3799 
3800 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3801 {
3802 	int ret;
3803 
3804 	mutex_lock(&memcg_max_mutex);
3805 
3806 	ret = page_counter_set_max(&memcg->tcpmem, max);
3807 	if (ret)
3808 		goto out;
3809 
3810 	if (!memcg->tcpmem_active) {
3811 		/*
3812 		 * The active flag needs to be written after the static_key
3813 		 * update. This is what guarantees that the socket activation
3814 		 * function is the last one to run. See mem_cgroup_sk_alloc()
3815 		 * for details, and note that we don't mark any socket as
3816 		 * belonging to this memcg until that flag is up.
3817 		 *
3818 		 * We need to do this, because static_keys will span multiple
3819 		 * sites, but we can't control their order. If we mark a socket
3820 		 * as accounted, but the accounting functions are not patched in
3821 		 * yet, we'll lose accounting.
3822 		 *
3823 		 * We never race with the readers in mem_cgroup_sk_alloc(),
3824 		 * because when this value change, the code to process it is not
3825 		 * patched in yet.
3826 		 */
3827 		static_branch_inc(&memcg_sockets_enabled_key);
3828 		memcg->tcpmem_active = true;
3829 	}
3830 out:
3831 	mutex_unlock(&memcg_max_mutex);
3832 	return ret;
3833 }
3834 
3835 /*
3836  * The user of this function is...
3837  * RES_LIMIT.
3838  */
3839 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3840 				char *buf, size_t nbytes, loff_t off)
3841 {
3842 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3843 	unsigned long nr_pages;
3844 	int ret;
3845 
3846 	buf = strstrip(buf);
3847 	ret = page_counter_memparse(buf, "-1", &nr_pages);
3848 	if (ret)
3849 		return ret;
3850 
3851 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3852 	case RES_LIMIT:
3853 		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3854 			ret = -EINVAL;
3855 			break;
3856 		}
3857 		switch (MEMFILE_TYPE(of_cft(of)->private)) {
3858 		case _MEM:
3859 			ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3860 			break;
3861 		case _MEMSWAP:
3862 			ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3863 			break;
3864 		case _KMEM:
3865 			pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3866 				     "Please report your usecase to linux-mm@kvack.org if you "
3867 				     "depend on this functionality.\n");
3868 			ret = memcg_update_kmem_max(memcg, nr_pages);
3869 			break;
3870 		case _TCP:
3871 			ret = memcg_update_tcp_max(memcg, nr_pages);
3872 			break;
3873 		}
3874 		break;
3875 	case RES_SOFT_LIMIT:
3876 		memcg->soft_limit = nr_pages;
3877 		ret = 0;
3878 		break;
3879 	}
3880 	return ret ?: nbytes;
3881 }
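
/*
 * Example writes handled by mem_cgroup_write() above (illustrative
 * values only).  page_counter_memparse() accepts byte counts with
 * optional K/M/G suffixes, and the literal "-1" for "unlimited":
 *
 *   # echo 256M > memory.limit_in_bytes
 *   # echo -1 > memory.soft_limit_in_bytes
 *   # echo 64M > memory.kmem.tcp.limit_in_bytes
 */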
3882 
3883 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3884 				size_t nbytes, loff_t off)
3885 {
3886 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3887 	struct page_counter *counter;
3888 
3889 	switch (MEMFILE_TYPE(of_cft(of)->private)) {
3890 	case _MEM:
3891 		counter = &memcg->memory;
3892 		break;
3893 	case _MEMSWAP:
3894 		counter = &memcg->memsw;
3895 		break;
3896 	case _KMEM:
3897 		counter = &memcg->kmem;
3898 		break;
3899 	case _TCP:
3900 		counter = &memcg->tcpmem;
3901 		break;
3902 	default:
3903 		BUG();
3904 	}
3905 
3906 	switch (MEMFILE_ATTR(of_cft(of)->private)) {
3907 	case RES_MAX_USAGE:
3908 		page_counter_reset_watermark(counter);
3909 		break;
3910 	case RES_FAILCNT:
3911 		counter->failcnt = 0;
3912 		break;
3913 	default:
3914 		BUG();
3915 	}
3916 
3917 	return nbytes;
3918 }
3919 
3920 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3921 					struct cftype *cft)
3922 {
3923 	return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3924 }
3925 
3926 #ifdef CONFIG_MMU
3927 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3928 					struct cftype *cft, u64 val)
3929 {
3930 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3931 
3932 	if (val & ~MOVE_MASK)
3933 		return -EINVAL;
3934 
3935 	/*
3936 	 * No kind of locking is needed in here, because ->can_attach() will
3937 	 * check this value once at the beginning of the process, and then carry
3938 	 * on with stale data. This means that changes to this value will only
3939 	 * affect task migrations starting after the change.
3940 	 */
3941 	memcg->move_charge_at_immigrate = val;
3942 	return 0;
3943 }
3944 #else
3945 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3946 					struct cftype *cft, u64 val)
3947 {
3948 	return -ENOSYS;
3949 }
3950 #endif
3951 
3952 #ifdef CONFIG_NUMA
3953 
3954 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3955 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3956 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
3957 
3958 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3959 				int nid, unsigned int lru_mask, bool tree)
3960 {
3961 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3962 	unsigned long nr = 0;
3963 	enum lru_list lru;
3964 
3965 	VM_BUG_ON((unsigned)nid >= nr_node_ids);
3966 
3967 	for_each_lru(lru) {
3968 		if (!(BIT(lru) & lru_mask))
3969 			continue;
3970 		if (tree)
3971 			nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3972 		else
3973 			nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3974 	}
3975 	return nr;
3976 }
3977 
3978 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3979 					     unsigned int lru_mask,
3980 					     bool tree)
3981 {
3982 	unsigned long nr = 0;
3983 	enum lru_list lru;
3984 
3985 	for_each_lru(lru) {
3986 		if (!(BIT(lru) & lru_mask))
3987 			continue;
3988 		if (tree)
3989 			nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3990 		else
3991 			nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3992 	}
3993 	return nr;
3994 }
3995 
3996 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3997 {
3998 	struct numa_stat {
3999 		const char *name;
4000 		unsigned int lru_mask;
4001 	};
4002 
4003 	static const struct numa_stat stats[] = {
4004 		{ "total", LRU_ALL },
4005 		{ "file", LRU_ALL_FILE },
4006 		{ "anon", LRU_ALL_ANON },
4007 		{ "unevictable", BIT(LRU_UNEVICTABLE) },
4008 	};
4009 	const struct numa_stat *stat;
4010 	int nid;
4011 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4012 
4013 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4014 		seq_printf(m, "%s=%lu", stat->name,
4015 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4016 						   false));
4017 		for_each_node_state(nid, N_MEMORY)
4018 			seq_printf(m, " N%d=%lu", nid,
4019 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4020 							stat->lru_mask, false));
4021 		seq_putc(m, '\n');
4022 	}
4023 
4024 	for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4025 
4026 		seq_printf(m, "hierarchical_%s=%lu", stat->name,
4027 			   mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4028 						   true));
4029 		for_each_node_state(nid, N_MEMORY)
4030 			seq_printf(m, " N%d=%lu", nid,
4031 				   mem_cgroup_node_nr_lru_pages(memcg, nid,
4032 							stat->lru_mask, true));
4033 		seq_putc(m, '\n');
4034 	}
4035 
4036 	return 0;
4037 }
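
/*
 * Sample memory.numa_stat output matching the seq_printf() formats above
 * (illustrative numbers for a two-node machine; values are page counts):
 *
 *   total=65486 N0=32011 N1=33475
 *   file=32028 N0=16011 N1=16017
 *   anon=33458 N0=16000 N1=17458
 *   unevictable=0 N0=0 N1=0
 *   hierarchical_total=131072 N0=65536 N1=65536
 *   ...
 *
 * The plain rows are local to this cgroup, the hierarchical_* rows cover
 * the whole subtree.
 */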
4038 #endif /* CONFIG_NUMA */
4039 
4040 static const unsigned int memcg1_stats[] = {
4041 	NR_FILE_PAGES,
4042 	NR_ANON_MAPPED,
4043 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4044 	NR_ANON_THPS,
4045 #endif
4046 	NR_SHMEM,
4047 	NR_FILE_MAPPED,
4048 	NR_FILE_DIRTY,
4049 	NR_WRITEBACK,
4050 	MEMCG_SWAP,
4051 };
4052 
4053 static const char *const memcg1_stat_names[] = {
4054 	"cache",
4055 	"rss",
4056 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4057 	"rss_huge",
4058 #endif
4059 	"shmem",
4060 	"mapped_file",
4061 	"dirty",
4062 	"writeback",
4063 	"swap",
4064 };
4065 
4066 /* Universal VM events cgroup1 shows, original sort order */
4067 static const unsigned int memcg1_events[] = {
4068 	PGPGIN,
4069 	PGPGOUT,
4070 	PGFAULT,
4071 	PGMAJFAULT,
4072 };
4073 
4074 static int memcg_stat_show(struct seq_file *m, void *v)
4075 {
4076 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4077 	unsigned long memory, memsw;
4078 	struct mem_cgroup *mi;
4079 	unsigned int i;
4080 
4081 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4082 
4083 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4084 		unsigned long nr;
4085 
4086 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4087 			continue;
4088 		nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4089 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4090 		if (memcg1_stats[i] == NR_ANON_THPS)
4091 			nr *= HPAGE_PMD_NR;
4092 #endif
4093 		seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
4094 	}
4095 
4096 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4097 		seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4098 			   memcg_events_local(memcg, memcg1_events[i]));
4099 
4100 	for (i = 0; i < NR_LRU_LISTS; i++)
4101 		seq_printf(m, "%s %lu\n", lru_list_name(i),
4102 			   memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4103 			   PAGE_SIZE);
4104 
4105 	/* Hierarchical information */
4106 	memory = memsw = PAGE_COUNTER_MAX;
4107 	for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4108 		memory = min(memory, READ_ONCE(mi->memory.max));
4109 		memsw = min(memsw, READ_ONCE(mi->memsw.max));
4110 	}
4111 	seq_printf(m, "hierarchical_memory_limit %llu\n",
4112 		   (u64)memory * PAGE_SIZE);
4113 	if (do_memsw_account())
4114 		seq_printf(m, "hierarchical_memsw_limit %llu\n",
4115 			   (u64)memsw * PAGE_SIZE);
4116 
4117 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4118 		unsigned long nr;
4119 
4120 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4121 			continue;
4122 		nr = memcg_page_state(memcg, memcg1_stats[i]);
4123 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4124 		if (memcg1_stats[i] == NR_ANON_THPS)
4125 			nr *= HPAGE_PMD_NR;
4126 #endif
4127 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4128 						(u64)nr * PAGE_SIZE);
4129 	}
4130 
4131 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4132 		seq_printf(m, "total_%s %llu\n",
4133 			   vm_event_name(memcg1_events[i]),
4134 			   (u64)memcg_events(memcg, memcg1_events[i]));
4135 
4136 	for (i = 0; i < NR_LRU_LISTS; i++)
4137 		seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4138 			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4139 			   PAGE_SIZE);
4140 
4141 #ifdef CONFIG_DEBUG_VM
4142 	{
4143 		pg_data_t *pgdat;
4144 		struct mem_cgroup_per_node *mz;
4145 		unsigned long anon_cost = 0;
4146 		unsigned long file_cost = 0;
4147 
4148 		for_each_online_pgdat(pgdat) {
4149 			mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
4150 
4151 			anon_cost += mz->lruvec.anon_cost;
4152 			file_cost += mz->lruvec.file_cost;
4153 		}
4154 		seq_printf(m, "anon_cost %lu\n", anon_cost);
4155 		seq_printf(m, "file_cost %lu\n", file_cost);
4156 	}
4157 #endif
4158 
4159 	return 0;
4160 }
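
/*
 * Abridged memory.stat sample produced by memcg_stat_show() above
 * (illustrative values only):
 *
 *   cache 1234567168
 *   rss 87654400
 *   ...
 *   pgpgin 12345
 *   pgpgout 6789
 *   ...
 *   hierarchical_memory_limit 9223372036854771712
 *   total_cache 2345678848
 *   ...
 *
 * The unprefixed counters are local to this cgroup; the total_* counters
 * are hierarchical and include all descendants.
 */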
4161 
4162 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4163 				      struct cftype *cft)
4164 {
4165 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4166 
4167 	return mem_cgroup_swappiness(memcg);
4168 }
4169 
4170 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4171 				       struct cftype *cft, u64 val)
4172 {
4173 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4174 
4175 	if (val > 100)
4176 		return -EINVAL;
4177 
4178 	if (css->parent)
4179 		memcg->swappiness = val;
4180 	else
4181 		vm_swappiness = val;
4182 
4183 	return 0;
4184 }
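
/*
 * Illustrative usage of the swappiness handlers above: memory.swappiness
 * accepts 0-100, larger values are rejected with -EINVAL, and a write to
 * the root cgroup updates the global vm_swappiness instead:
 *
 *   # echo 20 > memory.swappiness
 */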
4185 
4186 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4187 {
4188 	struct mem_cgroup_threshold_ary *t;
4189 	unsigned long usage;
4190 	int i;
4191 
4192 	rcu_read_lock();
4193 	if (!swap)
4194 		t = rcu_dereference(memcg->thresholds.primary);
4195 	else
4196 		t = rcu_dereference(memcg->memsw_thresholds.primary);
4197 
4198 	if (!t)
4199 		goto unlock;
4200 
4201 	usage = mem_cgroup_usage(memcg, swap);
4202 
4203 	/*
4204 	 * current_threshold points to the threshold just below or equal to
4205 	 * usage. If that is not the case, a threshold was crossed after the
4206 	 * last call of __mem_cgroup_threshold().
4207 	 */
4208 	i = t->current_threshold;
4209 
4210 	/*
4211 	 * Iterate backward over array of thresholds starting from
4212 	 * current_threshold and check if a threshold is crossed.
4213 	 * If none of the thresholds below usage is crossed, we read
4214 	 * only one element of the array here.
4215 	 */
4216 	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4217 		eventfd_signal(t->entries[i].eventfd, 1);
4218 
4219 	/* i = current_threshold + 1 */
4220 	i++;
4221 
4222 	/*
4223 	 * Iterate forward over array of thresholds starting from
4224 	 * current_threshold+1 and check if a threshold is crossed.
4225 	 * If none of the thresholds above usage is crossed, we read
4226 	 * only one element of the array here.
4227 	 */
4228 	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4229 		eventfd_signal(t->entries[i].eventfd, 1);
4230 
4231 	/* Update current_threshold */
4232 	t->current_threshold = i - 1;
4233 unlock:
4234 	rcu_read_unlock();
4235 }
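
/*
 * Worked example of the walk above, with hypothetical thresholds
 * {4M, 8M, 16M} (indices 0-2) and current_threshold at index 1 (8M):
 * a usage sample of 6M signals the 8M eventfd via the backward scan and
 * leaves current_threshold at index 0; a later sample of 20M signals the
 * 8M and 16M eventfds via the forward scan and moves current_threshold
 * to index 2.  Thresholds thus fire on both upward and downward
 * crossings.
 */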
4236 
4237 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4238 {
4239 	while (memcg) {
4240 		__mem_cgroup_threshold(memcg, false);
4241 		if (do_memsw_account())
4242 			__mem_cgroup_threshold(memcg, true);
4243 
4244 		memcg = parent_mem_cgroup(memcg);
4245 	}
4246 }
4247 
4248 static int compare_thresholds(const void *a, const void *b)
4249 {
4250 	const struct mem_cgroup_threshold *_a = a;
4251 	const struct mem_cgroup_threshold *_b = b;
4252 
4253 	if (_a->threshold > _b->threshold)
4254 		return 1;
4255 
4256 	if (_a->threshold < _b->threshold)
4257 		return -1;
4258 
4259 	return 0;
4260 }
4261 
4262 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4263 {
4264 	struct mem_cgroup_eventfd_list *ev;
4265 
4266 	spin_lock(&memcg_oom_lock);
4267 
4268 	list_for_each_entry(ev, &memcg->oom_notify, list)
4269 		eventfd_signal(ev->eventfd, 1);
4270 
4271 	spin_unlock(&memcg_oom_lock);
4272 	return 0;
4273 }
4274 
4275 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4276 {
4277 	struct mem_cgroup *iter;
4278 
4279 	for_each_mem_cgroup_tree(iter, memcg)
4280 		mem_cgroup_oom_notify_cb(iter);
4281 }
4282 
4283 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4284 	struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4285 {
4286 	struct mem_cgroup_thresholds *thresholds;
4287 	struct mem_cgroup_threshold_ary *new;
4288 	unsigned long threshold;
4289 	unsigned long usage;
4290 	int i, size, ret;
4291 
4292 	ret = page_counter_memparse(args, "-1", &threshold);
4293 	if (ret)
4294 		return ret;
4295 
4296 	mutex_lock(&memcg->thresholds_lock);
4297 
4298 	if (type == _MEM) {
4299 		thresholds = &memcg->thresholds;
4300 		usage = mem_cgroup_usage(memcg, false);
4301 	} else if (type == _MEMSWAP) {
4302 		thresholds = &memcg->memsw_thresholds;
4303 		usage = mem_cgroup_usage(memcg, true);
4304 	} else
4305 		BUG();
4306 
4307 	/* Check if a threshold crossed before adding a new one */
4308 	if (thresholds->primary)
4309 		__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4310 
4311 	size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4312 
4313 	/* Allocate memory for new array of thresholds */
4314 	new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4315 	if (!new) {
4316 		ret = -ENOMEM;
4317 		goto unlock;
4318 	}
4319 	new->size = size;
4320 
4321 	/* Copy thresholds (if any) to new array */
4322 	if (thresholds->primary)
4323 		memcpy(new->entries, thresholds->primary->entries,
4324 		       flex_array_size(new, entries, size - 1));
4325 
4326 	/* Add new threshold */
4327 	new->entries[size - 1].eventfd = eventfd;
4328 	new->entries[size - 1].threshold = threshold;
4329 
4330 	/* Sort thresholds. Registering a new threshold isn't time-critical */
4331 	sort(new->entries, size, sizeof(*new->entries),
4332 			compare_thresholds, NULL);
4333 
4334 	/* Find current threshold */
4335 	new->current_threshold = -1;
4336 	for (i = 0; i < size; i++) {
4337 		if (new->entries[i].threshold <= usage) {
4338 			/*
4339 			 * new->current_threshold will not be used until
4340 			 * rcu_assign_pointer(), so it's safe to increment
4341 			 * it here.
4342 			 */
4343 			++new->current_threshold;
4344 		} else
4345 			break;
4346 	}
4347 
4348 	/* Free old spare buffer and save old primary buffer as spare */
4349 	kfree(thresholds->spare);
4350 	thresholds->spare = thresholds->primary;
4351 
4352 	rcu_assign_pointer(thresholds->primary, new);
4353 
4354 	/* To be sure that nobody uses thresholds */
4355 	synchronize_rcu();
4356 
4357 unlock:
4358 	mutex_unlock(&memcg->thresholds_lock);
4359 
4360 	return ret;
4361 }
4362 
4363 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4364 	struct eventfd_ctx *eventfd, const char *args)
4365 {
4366 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4367 }
4368 
4369 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4370 	struct eventfd_ctx *eventfd, const char *args)
4371 {
4372 	return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4373 }
4374 
4375 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4376 	struct eventfd_ctx *eventfd, enum res_type type)
4377 {
4378 	struct mem_cgroup_thresholds *thresholds;
4379 	struct mem_cgroup_threshold_ary *new;
4380 	unsigned long usage;
4381 	int i, j, size, entries;
4382 
4383 	mutex_lock(&memcg->thresholds_lock);
4384 
4385 	if (type == _MEM) {
4386 		thresholds = &memcg->thresholds;
4387 		usage = mem_cgroup_usage(memcg, false);
4388 	} else if (type == _MEMSWAP) {
4389 		thresholds = &memcg->memsw_thresholds;
4390 		usage = mem_cgroup_usage(memcg, true);
4391 	} else
4392 		BUG();
4393 
4394 	if (!thresholds->primary)
4395 		goto unlock;
4396 
4397 	/* Check if a threshold crossed before removing */
4398 	__mem_cgroup_threshold(memcg, type == _MEMSWAP);
4399 
4400 	/* Calculate the new number of thresholds */
4401 	size = entries = 0;
4402 	for (i = 0; i < thresholds->primary->size; i++) {
4403 		if (thresholds->primary->entries[i].eventfd != eventfd)
4404 			size++;
4405 		else
4406 			entries++;
4407 	}
4408 
4409 	new = thresholds->spare;
4410 
4411 	/* If no items related to eventfd have been cleared, nothing to do */
4412 	if (!entries)
4413 		goto unlock;
4414 
4415 	/* Set thresholds array to NULL if we don't have thresholds */
4416 	if (!size) {
4417 		kfree(new);
4418 		new = NULL;
4419 		goto swap_buffers;
4420 	}
4421 
4422 	new->size = size;
4423 
4424 	/* Copy thresholds and find current threshold */
4425 	new->current_threshold = -1;
4426 	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4427 		if (thresholds->primary->entries[i].eventfd == eventfd)
4428 			continue;
4429 
4430 		new->entries[j] = thresholds->primary->entries[i];
4431 		if (new->entries[j].threshold <= usage) {
4432 			/*
4433 			 * new->current_threshold will not be used
4434 			 * until rcu_assign_pointer(), so it's safe to increment
4435 			 * it here.
4436 			 */
4437 			++new->current_threshold;
4438 		}
4439 		j++;
4440 	}
4441 
4442 swap_buffers:
4443 	/* Swap primary and spare array */
4444 	thresholds->spare = thresholds->primary;
4445 
4446 	rcu_assign_pointer(thresholds->primary, new);
4447 
4448 	/* To be sure that nobody uses thresholds */
4449 	synchronize_rcu();
4450 
4451 	/* If all events are unregistered, free the spare array */
4452 	if (!new) {
4453 		kfree(thresholds->spare);
4454 		thresholds->spare = NULL;
4455 	}
4456 unlock:
4457 	mutex_unlock(&memcg->thresholds_lock);
4458 }
4459 
4460 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4461 	struct eventfd_ctx *eventfd)
4462 {
4463 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4464 }
4465 
4466 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4467 	struct eventfd_ctx *eventfd)
4468 {
4469 	return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4470 }
4471 
4472 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4473 	struct eventfd_ctx *eventfd, const char *args)
4474 {
4475 	struct mem_cgroup_eventfd_list *event;
4476 
4477 	event = kmalloc(sizeof(*event),	GFP_KERNEL);
4478 	if (!event)
4479 		return -ENOMEM;
4480 
4481 	spin_lock(&memcg_oom_lock);
4482 
4483 	event->eventfd = eventfd;
4484 	list_add(&event->list, &memcg->oom_notify);
4485 
4486 	/* already in OOM ? */
4487 	if (memcg->under_oom)
4488 		eventfd_signal(eventfd, 1);
4489 	spin_unlock(&memcg_oom_lock);
4490 
4491 	return 0;
4492 }
4493 
4494 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4495 	struct eventfd_ctx *eventfd)
4496 {
4497 	struct mem_cgroup_eventfd_list *ev, *tmp;
4498 
4499 	spin_lock(&memcg_oom_lock);
4500 
4501 	list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4502 		if (ev->eventfd == eventfd) {
4503 			list_del(&ev->list);
4504 			kfree(ev);
4505 		}
4506 	}
4507 
4508 	spin_unlock(&memcg_oom_lock);
4509 }
4510 
4511 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4512 {
4513 	struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4514 
4515 	seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4516 	seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4517 	seq_printf(sf, "oom_kill %lu\n",
4518 		   atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4519 	return 0;
4520 }
4521 
4522 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4523 	struct cftype *cft, u64 val)
4524 {
4525 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4526 
4527 	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
4528 	if (!css->parent || !((val == 0) || (val == 1)))
4529 		return -EINVAL;
4530 
4531 	memcg->oom_kill_disable = val;
4532 	if (!val)
4533 		memcg_oom_recover(memcg);
4534 
4535 	return 0;
4536 }
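
/*
 * Illustrative cgroup v1 usage of the oom_control handlers above:
 *
 *   # cat memory.oom_control
 *   oom_kill_disable 0
 *   under_oom 0
 *   oom_kill 0
 *   # echo 1 > memory.oom_control	(disable the OOM killer for this memcg)
 *
 * Writes are rejected with -EINVAL on the root cgroup or for any value
 * other than 0 and 1.
 */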
4537 
4538 #ifdef CONFIG_CGROUP_WRITEBACK
4539 
4540 #include <trace/events/writeback.h>
4541 
4542 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4543 {
4544 	return wb_domain_init(&memcg->cgwb_domain, gfp);
4545 }
4546 
4547 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4548 {
4549 	wb_domain_exit(&memcg->cgwb_domain);
4550 }
4551 
4552 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4553 {
4554 	wb_domain_size_changed(&memcg->cgwb_domain);
4555 }
4556 
4557 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4558 {
4559 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4560 
4561 	if (!memcg->css.parent)
4562 		return NULL;
4563 
4564 	return &memcg->cgwb_domain;
4565 }
4566 
4567 /*
4568  * idx can be of type enum memcg_stat_item or node_stat_item.
4569  * Keep in sync with memcg_exact_page().
4570  */
4571 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
4572 {
4573 	long x = atomic_long_read(&memcg->vmstats[idx]);
4574 	int cpu;
4575 
4576 	for_each_online_cpu(cpu)
4577 		x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx];
4578 	if (x < 0)
4579 		x = 0;
4580 	return x;
4581 }
4582 
4583 /**
4584  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4585  * @wb: bdi_writeback in question
4586  * @pfilepages: out parameter for number of file pages
4587  * @pheadroom: out parameter for number of allocatable pages according to memcg
4588  * @pdirty: out parameter for number of dirty pages
4589  * @pwriteback: out parameter for number of pages under writeback
4590  *
4591  * Determine the numbers of file, headroom, dirty, and writeback pages in
4592  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4593  * is a bit more involved.
4594  *
4595  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4596  * headroom is calculated as the lowest headroom of itself and the
4597  * ancestors.  Note that this doesn't consider the actual amount of
4598  * available memory in the system.  The caller should further cap
4599  * *@pheadroom accordingly.
4600  */
4601 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4602 			 unsigned long *pheadroom, unsigned long *pdirty,
4603 			 unsigned long *pwriteback)
4604 {
4605 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4606 	struct mem_cgroup *parent;
4607 
4608 	*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
4609 
4610 	*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
4611 	*pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) +
4612 			memcg_exact_page_state(memcg, NR_ACTIVE_FILE);
4613 	*pheadroom = PAGE_COUNTER_MAX;
4614 
4615 	while ((parent = parent_mem_cgroup(memcg))) {
4616 		unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4617 					    READ_ONCE(memcg->memory.high));
4618 		unsigned long used = page_counter_read(&memcg->memory);
4619 
4620 		*pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4621 		memcg = parent;
4622 	}
4623 }
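
/*
 * Worked example for the headroom calculation above (made-up numbers):
 * with a memcg at max=1G, high=512M, used=300M whose (non-root) parent
 * has max=2G, high=unlimited, used=1.5G, the per-level ceiling is
 * min(max, high), so the local headroom is 512M - 300M = 212M and the
 * parent's is 2G - 1.5G = 512M; *pheadroom becomes min(212M, 512M) =
 * 212M.
 */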
4624 
4625 /*
4626  * Foreign dirty flushing
4627  *
4628  * There's an inherent mismatch between memcg and writeback.  The former
4629  * tracks ownership per-page while the latter per-inode.  This was a
4630  * deliberate design decision because honoring per-page ownership in the
4631  * writeback path is complicated, may lead to higher CPU and IO overheads
4632  * and deemed unnecessary given that write-sharing an inode across
4633  * different cgroups isn't a common use-case.
4634  *
4635  * Combined with inode majority-writer ownership switching, this works well
4636  * enough in most cases but there are some pathological cases.  For
4637  * example, let's say there are two cgroups A and B which keep writing to
4638  * different but confined parts of the same inode.  B owns the inode and
4639  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4640  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4641  * triggering background writeback.  A will be slowed down without a way to
4642  * make writeback of the dirty pages happen.
4643  *
4644  * Conditions like the above can lead to a cgroup getting repeatedly and
4645  * severely throttled after making some progress after each
4646  * dirty_expire_interval while the underlying IO device is almost
4647  * completely idle.
4648  *
4649  * Solving this problem completely requires matching the ownership tracking
4650  * granularities between memcg and writeback in either direction.  However,
4651  * the more egregious behaviors can be avoided by simply remembering the
4652  * most recent foreign dirtying events and initiating remote flushes on
4653  * them when local writeback isn't enough to keep the memory clean enough.
4654  *
4655  * The following two functions implement such mechanism.  When a foreign
4656  * page - a page whose memcg and writeback ownerships don't match - is
4657  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4658  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4659  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4660  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4661  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4662  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4663  * limited to MEMCG_CGWB_FRN_CNT.
4664  *
4665  * The mechanism only remembers IDs and doesn't hold any object references.
4666  * As being wrong occasionally doesn't matter, updates and accesses to the
4667  * records are lockless and racy.
4668  */
4669 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4670 					     struct bdi_writeback *wb)
4671 {
4672 	struct mem_cgroup *memcg = page->mem_cgroup;
4673 	struct memcg_cgwb_frn *frn;
4674 	u64 now = get_jiffies_64();
4675 	u64 oldest_at = now;
4676 	int oldest = -1;
4677 	int i;
4678 
4679 	trace_track_foreign_dirty(page, wb);
4680 
4681 	/*
4682 	 * Pick the slot to use.  If there is already a slot for @wb, keep
4683 	 * using it.  If not, replace the oldest one which isn't being
4684 	 * written out.
4685 	 */
4686 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4687 		frn = &memcg->cgwb_frn[i];
4688 		if (frn->bdi_id == wb->bdi->id &&
4689 		    frn->memcg_id == wb->memcg_css->id)
4690 			break;
4691 		if (time_before64(frn->at, oldest_at) &&
4692 		    atomic_read(&frn->done.cnt) == 1) {
4693 			oldest = i;
4694 			oldest_at = frn->at;
4695 		}
4696 	}
4697 
4698 	if (i < MEMCG_CGWB_FRN_CNT) {
4699 		/*
4700 		 * Re-using an existing one.  Update timestamp lazily to
4701 		 * avoid making the cacheline hot.  We want them to be
4702 		 * reasonably up-to-date and significantly shorter than
4703 		 * dirty_expire_interval as that's what expires the record.
4704 		 * Use the shorter of 1s and dirty_expire_interval / 8.
4705 		 */
4706 		unsigned long update_intv =
4707 			min_t(unsigned long, HZ,
4708 			      msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4709 
4710 		if (time_before64(frn->at, now - update_intv))
4711 			frn->at = now;
4712 	} else if (oldest >= 0) {
4713 		/* replace the oldest free one */
4714 		frn = &memcg->cgwb_frn[oldest];
4715 		frn->bdi_id = wb->bdi->id;
4716 		frn->memcg_id = wb->memcg_css->id;
4717 		frn->at = now;
4718 	}
4719 }
4720 
4721 /* issue foreign writeback flushes for recorded foreign dirtying events */
4722 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4723 {
4724 	struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4725 	unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4726 	u64 now = jiffies_64;
4727 	int i;
4728 
4729 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4730 		struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4731 
4732 		/*
4733 		 * If the record is older than dirty_expire_interval,
4734 		 * writeback on it has already started.  No need to kick it
4735 		 * off again.  Also, don't start a new one if there's
4736 		 * already one in flight.
4737 		 */
4738 		if (time_after64(frn->at, now - intv) &&
4739 		    atomic_read(&frn->done.cnt) == 1) {
4740 			frn->at = 0;
4741 			trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4742 			cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4743 					       WB_REASON_FOREIGN_FLUSH,
4744 					       &frn->done);
4745 		}
4746 	}
4747 }
4748 
4749 #else	/* CONFIG_CGROUP_WRITEBACK */
4750 
4751 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4752 {
4753 	return 0;
4754 }
4755 
4756 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4757 {
4758 }
4759 
4760 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4761 {
4762 }
4763 
4764 #endif	/* CONFIG_CGROUP_WRITEBACK */
4765 
4766 /*
4767  * DO NOT USE IN NEW FILES.
4768  *
4769  * "cgroup.event_control" implementation.
4770  *
4771  * This is way over-engineered.  It tries to support fully configurable
4772  * events for each user.  Such level of flexibility is completely
4773  * unnecessary especially in the light of the planned unified hierarchy.
4774  *
4775  * Please deprecate this and replace with something simpler if at all
4776  * possible.
4777  */
4778 
4779 /*
4780  * Unregister event and free resources.
4781  *
4782  * Gets called from workqueue.
4783  */
4784 static void memcg_event_remove(struct work_struct *work)
4785 {
4786 	struct mem_cgroup_event *event =
4787 		container_of(work, struct mem_cgroup_event, remove);
4788 	struct mem_cgroup *memcg = event->memcg;
4789 
4790 	remove_wait_queue(event->wqh, &event->wait);
4791 
4792 	event->unregister_event(memcg, event->eventfd);
4793 
4794 	/* Notify userspace the event is going away. */
4795 	eventfd_signal(event->eventfd, 1);
4796 
4797 	eventfd_ctx_put(event->eventfd);
4798 	kfree(event);
4799 	css_put(&memcg->css);
4800 }
4801 
4802 /*
4803  * Gets called on EPOLLHUP on eventfd when user closes it.
4804  *
4805  * Called with wqh->lock held and interrupts disabled.
4806  */
4807 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4808 			    int sync, void *key)
4809 {
4810 	struct mem_cgroup_event *event =
4811 		container_of(wait, struct mem_cgroup_event, wait);
4812 	struct mem_cgroup *memcg = event->memcg;
4813 	__poll_t flags = key_to_poll(key);
4814 
4815 	if (flags & EPOLLHUP) {
4816 		/*
4817 		 * If the event has been detached at cgroup removal, we
4818 		 * can simply return knowing the other side will cleanup
4819 		 * for us.
4820 		 *
4821 		 * We can't race against event freeing since the other
4822 		 * side will require wqh->lock via remove_wait_queue(),
4823 		 * which we hold.
4824 		 */
4825 		spin_lock(&memcg->event_list_lock);
4826 		if (!list_empty(&event->list)) {
4827 			list_del_init(&event->list);
4828 			/*
4829 			 * We are in atomic context, but cgroup_event_remove()
4830 			 * may sleep, so we have to call it in workqueue.
4831 			 */
4832 			schedule_work(&event->remove);
4833 		}
4834 		spin_unlock(&memcg->event_list_lock);
4835 	}
4836 
4837 	return 0;
4838 }
4839 
4840 static void memcg_event_ptable_queue_proc(struct file *file,
4841 		wait_queue_head_t *wqh, poll_table *pt)
4842 {
4843 	struct mem_cgroup_event *event =
4844 		container_of(pt, struct mem_cgroup_event, pt);
4845 
4846 	event->wqh = wqh;
4847 	add_wait_queue(wqh, &event->wait);
4848 }
4849 
4850 /*
4851  * DO NOT USE IN NEW FILES.
4852  *
4853  * Parse input and register new cgroup event handler.
4854  *
4855  * Input must be in format '<event_fd> <control_fd> <args>'.
4856  * Interpretation of args is defined by control file implementation.
4857  */
4858 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4859 					 char *buf, size_t nbytes, loff_t off)
4860 {
4861 	struct cgroup_subsys_state *css = of_css(of);
4862 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4863 	struct mem_cgroup_event *event;
4864 	struct cgroup_subsys_state *cfile_css;
4865 	unsigned int efd, cfd;
4866 	struct fd efile;
4867 	struct fd cfile;
4868 	const char *name;
4869 	char *endp;
4870 	int ret;
4871 
4872 	buf = strstrip(buf);
4873 
4874 	efd = simple_strtoul(buf, &endp, 10);
4875 	if (*endp != ' ')
4876 		return -EINVAL;
4877 	buf = endp + 1;
4878 
4879 	cfd = simple_strtoul(buf, &endp, 10);
4880 	if ((*endp != ' ') && (*endp != '\0'))
4881 		return -EINVAL;
4882 	buf = endp + 1;
4883 
4884 	event = kzalloc(sizeof(*event), GFP_KERNEL);
4885 	if (!event)
4886 		return -ENOMEM;
4887 
4888 	event->memcg = memcg;
4889 	INIT_LIST_HEAD(&event->list);
4890 	init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4891 	init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4892 	INIT_WORK(&event->remove, memcg_event_remove);
4893 
4894 	efile = fdget(efd);
4895 	if (!efile.file) {
4896 		ret = -EBADF;
4897 		goto out_kfree;
4898 	}
4899 
4900 	event->eventfd = eventfd_ctx_fileget(efile.file);
4901 	if (IS_ERR(event->eventfd)) {
4902 		ret = PTR_ERR(event->eventfd);
4903 		goto out_put_efile;
4904 	}
4905 
4906 	cfile = fdget(cfd);
4907 	if (!cfile.file) {
4908 		ret = -EBADF;
4909 		goto out_put_eventfd;
4910 	}
4911 
4912 	/* the process needs read permission on the control file */
4913 	/* AV: shouldn't we check that it's been opened for read instead? */
4914 	ret = inode_permission(file_inode(cfile.file), MAY_READ);
4915 	if (ret < 0)
4916 		goto out_put_cfile;
4917 
4918 	/*
4919 	 * Determine the event callbacks and set them in @event.  This used
4920 	 * to be done via struct cftype but cgroup core no longer knows
4921 	 * about these events.  The following is crude but the whole thing
4922 	 * is for compatibility anyway.
4923 	 *
4924 	 * DO NOT ADD NEW FILES.
4925 	 */
4926 	name = cfile.file->f_path.dentry->d_name.name;
4927 
4928 	if (!strcmp(name, "memory.usage_in_bytes")) {
4929 		event->register_event = mem_cgroup_usage_register_event;
4930 		event->unregister_event = mem_cgroup_usage_unregister_event;
4931 	} else if (!strcmp(name, "memory.oom_control")) {
4932 		event->register_event = mem_cgroup_oom_register_event;
4933 		event->unregister_event = mem_cgroup_oom_unregister_event;
4934 	} else if (!strcmp(name, "memory.pressure_level")) {
4935 		event->register_event = vmpressure_register_event;
4936 		event->unregister_event = vmpressure_unregister_event;
4937 	} else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4938 		event->register_event = memsw_cgroup_usage_register_event;
4939 		event->unregister_event = memsw_cgroup_usage_unregister_event;
4940 	} else {
4941 		ret = -EINVAL;
4942 		goto out_put_cfile;
4943 	}
4944 
4945 	/*
4946 	 * Verify @cfile should belong to @css.  Also, remaining events are
4947 	 * automatically removed on cgroup destruction but the removal is
4948 	 * asynchronous, so take an extra ref on @css.
4949 	 */
4950 	cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4951 					       &memory_cgrp_subsys);
4952 	ret = -EINVAL;
4953 	if (IS_ERR(cfile_css))
4954 		goto out_put_cfile;
4955 	if (cfile_css != css) {
4956 		css_put(cfile_css);
4957 		goto out_put_cfile;
4958 	}
4959 
4960 	ret = event->register_event(memcg, event->eventfd, buf);
4961 	if (ret)
4962 		goto out_put_css;
4963 
4964 	vfs_poll(efile.file, &event->pt);
4965 
4966 	spin_lock(&memcg->event_list_lock);
4967 	list_add(&event->list, &memcg->event_list);
4968 	spin_unlock(&memcg->event_list_lock);
4969 
4970 	fdput(cfile);
4971 	fdput(efile);
4972 
4973 	return nbytes;
4974 
4975 out_put_css:
4976 	css_put(css);
4977 out_put_cfile:
4978 	fdput(cfile);
4979 out_put_eventfd:
4980 	eventfd_ctx_put(event->eventfd);
4981 out_put_efile:
4982 	fdput(efile);
4983 out_kfree:
4984 	kfree(event);
4985 
4986 	return ret;
4987 }
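
/*
 * Illustrative registration sequence for the legacy event interface
 * handled above (C-like pseudo code; file names are the cgroup v1 ones,
 * numbers are examples only):
 *
 *   int efd = eventfd(0, 0);
 *   int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *   int ecfd = open("cgroup.event_control", O_WRONLY);
 *   dprintf(ecfd, "%d %d %llu", efd, cfd, 64ULL << 20);
 *
 * After this, the eventfd becomes readable whenever usage crosses the
 * 64M threshold registered via mem_cgroup_usage_register_event().
 */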
4988 
4989 static struct cftype mem_cgroup_legacy_files[] = {
4990 	{
4991 		.name = "usage_in_bytes",
4992 		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4993 		.read_u64 = mem_cgroup_read_u64,
4994 	},
4995 	{
4996 		.name = "max_usage_in_bytes",
4997 		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4998 		.write = mem_cgroup_reset,
4999 		.read_u64 = mem_cgroup_read_u64,
5000 	},
5001 	{
5002 		.name = "limit_in_bytes",
5003 		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5004 		.write = mem_cgroup_write,
5005 		.read_u64 = mem_cgroup_read_u64,
5006 	},
5007 	{
5008 		.name = "soft_limit_in_bytes",
5009 		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5010 		.write = mem_cgroup_write,
5011 		.read_u64 = mem_cgroup_read_u64,
5012 	},
5013 	{
5014 		.name = "failcnt",
5015 		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5016 		.write = mem_cgroup_reset,
5017 		.read_u64 = mem_cgroup_read_u64,
5018 	},
5019 	{
5020 		.name = "stat",
5021 		.seq_show = memcg_stat_show,
5022 	},
5023 	{
5024 		.name = "force_empty",
5025 		.write = mem_cgroup_force_empty_write,
5026 	},
5027 	{
5028 		.name = "use_hierarchy",
5029 		.write_u64 = mem_cgroup_hierarchy_write,
5030 		.read_u64 = mem_cgroup_hierarchy_read,
5031 	},
5032 	{
5033 		.name = "cgroup.event_control",		/* XXX: for compat */
5034 		.write = memcg_write_event_control,
5035 		.flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5036 	},
5037 	{
5038 		.name = "swappiness",
5039 		.read_u64 = mem_cgroup_swappiness_read,
5040 		.write_u64 = mem_cgroup_swappiness_write,
5041 	},
5042 	{
5043 		.name = "move_charge_at_immigrate",
5044 		.read_u64 = mem_cgroup_move_charge_read,
5045 		.write_u64 = mem_cgroup_move_charge_write,
5046 	},
5047 	{
5048 		.name = "oom_control",
5049 		.seq_show = mem_cgroup_oom_control_read,
5050 		.write_u64 = mem_cgroup_oom_control_write,
5051 		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
5052 	},
5053 	{
5054 		.name = "pressure_level",
5055 	},
5056 #ifdef CONFIG_NUMA
5057 	{
5058 		.name = "numa_stat",
5059 		.seq_show = memcg_numa_stat_show,
5060 	},
5061 #endif
5062 	{
5063 		.name = "kmem.limit_in_bytes",
5064 		.private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5065 		.write = mem_cgroup_write,
5066 		.read_u64 = mem_cgroup_read_u64,
5067 	},
5068 	{
5069 		.name = "kmem.usage_in_bytes",
5070 		.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5071 		.read_u64 = mem_cgroup_read_u64,
5072 	},
5073 	{
5074 		.name = "kmem.failcnt",
5075 		.private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5076 		.write = mem_cgroup_reset,
5077 		.read_u64 = mem_cgroup_read_u64,
5078 	},
5079 	{
5080 		.name = "kmem.max_usage_in_bytes",
5081 		.private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5082 		.write = mem_cgroup_reset,
5083 		.read_u64 = mem_cgroup_read_u64,
5084 	},
5085 #if defined(CONFIG_MEMCG_KMEM) && \
5086 	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5087 	{
5088 		.name = "kmem.slabinfo",
5089 		.seq_show = memcg_slab_show,
5090 	},
5091 #endif
5092 	{
5093 		.name = "kmem.tcp.limit_in_bytes",
5094 		.private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5095 		.write = mem_cgroup_write,
5096 		.read_u64 = mem_cgroup_read_u64,
5097 	},
5098 	{
5099 		.name = "kmem.tcp.usage_in_bytes",
5100 		.private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5101 		.read_u64 = mem_cgroup_read_u64,
5102 	},
5103 	{
5104 		.name = "kmem.tcp.failcnt",
5105 		.private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5106 		.write = mem_cgroup_reset,
5107 		.read_u64 = mem_cgroup_read_u64,
5108 	},
5109 	{
5110 		.name = "kmem.tcp.max_usage_in_bytes",
5111 		.private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5112 		.write = mem_cgroup_reset,
5113 		.read_u64 = mem_cgroup_read_u64,
5114 	},
5115 	{ },	/* terminate */
5116 };
5117 
5118 /*
5119  * Private memory cgroup IDR
5120  *
5121  * Swap-out records and page cache shadow entries need to store memcg
5122  * references in constrained space, so we maintain an ID space that is
5123  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5124  * memory-controlled cgroups to 64k.
5125  *
5126  * However, there usually are many references to the offline CSS after
5127  * the cgroup has been destroyed, such as page cache or reclaimable
5128  * slab objects, that don't need to hang on to the ID. We want to keep
5129  * those dead CSS from occupying IDs, or we might quickly exhaust the
5130  * relatively small ID space and prevent the creation of new cgroups
5131  * even when there are much fewer than 64k cgroups - possibly none.
5132  *
5133  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5134  * be freed and recycled when it's no longer needed, which is usually
5135  * when the CSS is offlined.
5136  *
5137  * The only exception to that are records of swapped out tmpfs/shmem
5138  * pages that need to be attributed to live ancestors on swapin. But
5139  * those references are manageable from userspace.
5140  */
5141 
5142 static DEFINE_IDR(mem_cgroup_idr);
5143 
5144 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5145 {
5146 	if (memcg->id.id > 0) {
5147 		idr_remove(&mem_cgroup_idr, memcg->id.id);
5148 		memcg->id.id = 0;
5149 	}
5150 }
5151 
5152 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5153 						  unsigned int n)
5154 {
5155 	refcount_add(n, &memcg->id.ref);
5156 }
5157 
5158 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5159 {
5160 	if (refcount_sub_and_test(n, &memcg->id.ref)) {
5161 		mem_cgroup_id_remove(memcg);
5162 
5163 		/* Memcg ID pins CSS */
5164 		css_put(&memcg->css);
5165 	}
5166 }
5167 
5168 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5169 {
5170 	mem_cgroup_id_put_many(memcg, 1);
5171 }
5172 
5173 /**
5174  * mem_cgroup_from_id - look up a memcg from a memcg id
5175  * @id: the memcg id to look up
5176  *
5177  * Caller must hold rcu_read_lock().
5178  */
5179 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5180 {
5181 	WARN_ON_ONCE(!rcu_read_lock_held());
5182 	return idr_find(&mem_cgroup_idr, id);
5183 }
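/*
 * A minimal usage sketch (mirroring the swapin path in mem_cgroup_charge()
 * below): the returned memcg is only stable under RCU, so a caller that
 * needs to hold on to it must also take a CSS reference before unlocking:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */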
5184 
5185 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5186 {
5187 	struct mem_cgroup_per_node *pn;
5188 	int tmp = node;
5189 	/*
5190 	 * This routine is called against possible nodes.
5191 	 * But it's a BUG to call kmalloc() against an offline node.
5192 	 *
5193 	 * TODO: this routine can waste a lot of memory for nodes which will
5194 	 *       never be onlined. It's better to use a memory hotplug callback
5195 	 *       function.
5196 	 */
5197 	if (!node_state(node, N_NORMAL_MEMORY))
5198 		tmp = -1;
5199 	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5200 	if (!pn)
5201 		return 1;
5202 
5203 	pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
5204 						 GFP_KERNEL_ACCOUNT);
5205 	if (!pn->lruvec_stat_local) {
5206 		kfree(pn);
5207 		return 1;
5208 	}
5209 
5210 	pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
5211 					       GFP_KERNEL_ACCOUNT);
5212 	if (!pn->lruvec_stat_cpu) {
5213 		free_percpu(pn->lruvec_stat_local);
5214 		kfree(pn);
5215 		return 1;
5216 	}
5217 
5218 	lruvec_init(&pn->lruvec);
5219 	pn->usage_in_excess = 0;
5220 	pn->on_tree = false;
5221 	pn->memcg = memcg;
5222 
5223 	memcg->nodeinfo[node] = pn;
5224 	return 0;
5225 }
5226 
5227 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5228 {
5229 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5230 
5231 	if (!pn)
5232 		return;
5233 
5234 	free_percpu(pn->lruvec_stat_cpu);
5235 	free_percpu(pn->lruvec_stat_local);
5236 	kfree(pn);
5237 }
5238 
5239 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5240 {
5241 	int node;
5242 
5243 	for_each_node(node)
5244 		free_mem_cgroup_per_node_info(memcg, node);
5245 	free_percpu(memcg->vmstats_percpu);
5246 	free_percpu(memcg->vmstats_local);
5247 	kfree(memcg);
5248 }
5249 
5250 static void mem_cgroup_free(struct mem_cgroup *memcg)
5251 {
5252 	memcg_wb_domain_exit(memcg);
5253 	/*
5254 	 * Flush percpu vmstats and vmevents to guarantee the value correctness
5255 	 * on parent's and all ancestor levels.
5256 	 */
5257 	memcg_flush_percpu_vmstats(memcg);
5258 	memcg_flush_percpu_vmevents(memcg);
5259 	__mem_cgroup_free(memcg);
5260 }
5261 
5262 static struct mem_cgroup *mem_cgroup_alloc(void)
5263 {
5264 	struct mem_cgroup *memcg;
5265 	unsigned int size;
5266 	int node;
5267 	int __maybe_unused i;
5268 	long error = -ENOMEM;
5269 
5270 	size = sizeof(struct mem_cgroup);
5271 	size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5272 
5273 	memcg = kzalloc(size, GFP_KERNEL);
5274 	if (!memcg)
5275 		return ERR_PTR(error);
5276 
5277 	memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5278 				 1, MEM_CGROUP_ID_MAX,
5279 				 GFP_KERNEL);
5280 	if (memcg->id.id < 0) {
5281 		error = memcg->id.id;
5282 		goto fail;
5283 	}
5284 
5285 	memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5286 						GFP_KERNEL_ACCOUNT);
5287 	if (!memcg->vmstats_local)
5288 		goto fail;
5289 
5290 	memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5291 						 GFP_KERNEL_ACCOUNT);
5292 	if (!memcg->vmstats_percpu)
5293 		goto fail;
5294 
5295 	for_each_node(node)
5296 		if (alloc_mem_cgroup_per_node_info(memcg, node))
5297 			goto fail;
5298 
5299 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5300 		goto fail;
5301 
5302 	INIT_WORK(&memcg->high_work, high_work_func);
5303 	INIT_LIST_HEAD(&memcg->oom_notify);
5304 	mutex_init(&memcg->thresholds_lock);
5305 	spin_lock_init(&memcg->move_lock);
5306 	vmpressure_init(&memcg->vmpressure);
5307 	INIT_LIST_HEAD(&memcg->event_list);
5308 	spin_lock_init(&memcg->event_list_lock);
5309 	memcg->socket_pressure = jiffies;
5310 #ifdef CONFIG_MEMCG_KMEM
5311 	memcg->kmemcg_id = -1;
5312 	INIT_LIST_HEAD(&memcg->objcg_list);
5313 #endif
5314 #ifdef CONFIG_CGROUP_WRITEBACK
5315 	INIT_LIST_HEAD(&memcg->cgwb_list);
5316 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5317 		memcg->cgwb_frn[i].done =
5318 			__WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5319 #endif
5320 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5321 	spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5322 	INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5323 	memcg->deferred_split_queue.split_queue_len = 0;
5324 #endif
5325 	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5326 	return memcg;
5327 fail:
5328 	mem_cgroup_id_remove(memcg);
5329 	__mem_cgroup_free(memcg);
5330 	return ERR_PTR(error);
5331 }
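/*
 * Note the two-step IDR protocol above: the ID is reserved with a NULL
 * pointer first and only published via idr_replace() once the memcg is
 * fully constructed, so a concurrent mem_cgroup_from_id() never sees a
 * partially initialized memcg.
 */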
5332 
5333 static struct cgroup_subsys_state * __ref
5334 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5335 {
5336 	struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5337 	struct mem_cgroup *memcg, *old_memcg;
5338 	long error = -ENOMEM;
5339 
5340 	old_memcg = set_active_memcg(parent);
5341 	memcg = mem_cgroup_alloc();
5342 	set_active_memcg(old_memcg);
5343 	if (IS_ERR(memcg))
5344 		return ERR_CAST(memcg);
5345 
5346 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5347 	memcg->soft_limit = PAGE_COUNTER_MAX;
5348 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5349 	if (parent) {
5350 		memcg->swappiness = mem_cgroup_swappiness(parent);
5351 		memcg->oom_kill_disable = parent->oom_kill_disable;
5352 	}
5353 	if (!parent) {
5354 		page_counter_init(&memcg->memory, NULL);
5355 		page_counter_init(&memcg->swap, NULL);
5356 		page_counter_init(&memcg->kmem, NULL);
5357 		page_counter_init(&memcg->tcpmem, NULL);
5358 	} else if (parent->use_hierarchy) {
5359 		memcg->use_hierarchy = true;
5360 		page_counter_init(&memcg->memory, &parent->memory);
5361 		page_counter_init(&memcg->swap, &parent->swap);
5362 		page_counter_init(&memcg->kmem, &parent->kmem);
5363 		page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5364 	} else {
5365 		page_counter_init(&memcg->memory, &root_mem_cgroup->memory);
5366 		page_counter_init(&memcg->swap, &root_mem_cgroup->swap);
5367 		page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
5368 		page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem);
5369 		/*
5370 		 * A deeper hierarchy with use_hierarchy == false doesn't make
5371 		 * much sense, so let the cgroup subsystem know about this
5372 		 * unfortunate state in our controller.
5373 		 */
5374 		if (parent != root_mem_cgroup)
5375 			memory_cgrp_subsys.broken_hierarchy = true;
5376 	}
5377 
5378 	/* The following stuff does not apply to the root */
5379 	if (!parent) {
5380 		root_mem_cgroup = memcg;
5381 		return &memcg->css;
5382 	}
5383 
5384 	error = memcg_online_kmem(memcg);
5385 	if (error)
5386 		goto fail;
5387 
5388 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5389 		static_branch_inc(&memcg_sockets_enabled_key);
5390 
5391 	return &memcg->css;
5392 fail:
5393 	mem_cgroup_id_remove(memcg);
5394 	mem_cgroup_free(memcg);
5395 	return ERR_PTR(error);
5396 }
5397 
5398 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5399 {
5400 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5401 
5402 	/*
5403 	 * A memcg must be visible for memcg_expand_shrinker_maps()
5404 	 * by the time the maps are allocated. So, we allocate maps
5405 	 * here, when for_each_mem_cgroup() can't skip it.
5406 	 */
5407 	if (memcg_alloc_shrinker_maps(memcg)) {
5408 		mem_cgroup_id_remove(memcg);
5409 		return -ENOMEM;
5410 	}
5411 
5412 	/* Online state pins memcg ID, memcg ID pins CSS */
5413 	refcount_set(&memcg->id.ref, 1);
5414 	css_get(css);
5415 	return 0;
5416 }
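/*
 * Reference scheme used above: going online sets id.ref to 1, which is
 * dropped again in mem_cgroup_css_offline() via mem_cgroup_id_put(); that
 * ID reference in turn pins the CSS through the css_get() here paired with
 * the css_put() in mem_cgroup_id_put_many().
 */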
5417 
5418 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5419 {
5420 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5421 	struct mem_cgroup_event *event, *tmp;
5422 
5423 	/*
5424 	 * Unregister events and notify userspace.
5425 	 * Notify userspace about cgroup removing only after rmdir of cgroup
5426 	 * directory to avoid race between userspace and kernelspace.
5427 	 */
5428 	spin_lock(&memcg->event_list_lock);
5429 	list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5430 		list_del_init(&event->list);
5431 		schedule_work(&event->remove);
5432 	}
5433 	spin_unlock(&memcg->event_list_lock);
5434 
5435 	page_counter_set_min(&memcg->memory, 0);
5436 	page_counter_set_low(&memcg->memory, 0);
5437 
5438 	memcg_offline_kmem(memcg);
5439 	wb_memcg_offline(memcg);
5440 
5441 	drain_all_stock(memcg);
5442 
5443 	mem_cgroup_id_put(memcg);
5444 }
5445 
5446 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5447 {
5448 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5449 
5450 	invalidate_reclaim_iterators(memcg);
5451 }
5452 
5453 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5454 {
5455 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5456 	int __maybe_unused i;
5457 
5458 #ifdef CONFIG_CGROUP_WRITEBACK
5459 	for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5460 		wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5461 #endif
5462 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5463 		static_branch_dec(&memcg_sockets_enabled_key);
5464 
5465 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5466 		static_branch_dec(&memcg_sockets_enabled_key);
5467 
5468 	vmpressure_cleanup(&memcg->vmpressure);
5469 	cancel_work_sync(&memcg->high_work);
5470 	mem_cgroup_remove_from_trees(memcg);
5471 	memcg_free_shrinker_maps(memcg);
5472 	memcg_free_kmem(memcg);
5473 	mem_cgroup_free(memcg);
5474 }
5475 
5476 /**
5477  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5478  * @css: the target css
5479  *
5480  * Reset the states of the mem_cgroup associated with @css.  This is
5481  * invoked when the userland requests disabling on the default hierarchy
5482  * but the memcg is pinned through dependency.  The memcg should stop
5483  * applying policies and should revert to the vanilla state as it may be
5484  * made visible again.
5485  *
5486  * The current implementation only resets the essential configurations.
5487  * This needs to be expanded to cover all the visible parts.
5488  */
5489 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5490 {
5491 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5492 
5493 	page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5494 	page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5495 	page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5496 	page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5497 	page_counter_set_min(&memcg->memory, 0);
5498 	page_counter_set_low(&memcg->memory, 0);
5499 	page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5500 	memcg->soft_limit = PAGE_COUNTER_MAX;
5501 	page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5502 	memcg_wb_domain_size_changed(memcg);
5503 }
5504 
5505 #ifdef CONFIG_MMU
5506 /* Handlers for move charge at task migration. */
5507 static int mem_cgroup_do_precharge(unsigned long count)
5508 {
5509 	int ret;
5510 
5511 	/* Try a single bulk charge without reclaim first, kswapd may wake */
5512 	ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5513 	if (!ret) {
5514 		mc.precharge += count;
5515 		return ret;
5516 	}
5517 
5518 	/* Try charges one by one with reclaim, but do not retry */
5519 	while (count--) {
5520 		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5521 		if (ret)
5522 			return ret;
5523 		mc.precharge++;
5524 		cond_resched();
5525 	}
5526 	return 0;
5527 }
5528 
5529 union mc_target {
5530 	struct page	*page;
5531 	swp_entry_t	ent;
5532 };
5533 
5534 enum mc_target_type {
5535 	MC_TARGET_NONE = 0,
5536 	MC_TARGET_PAGE,
5537 	MC_TARGET_SWAP,
5538 	MC_TARGET_DEVICE,
5539 };
5540 
5541 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5542 						unsigned long addr, pte_t ptent)
5543 {
5544 	struct page *page = vm_normal_page(vma, addr, ptent);
5545 
5546 	if (!page || !page_mapped(page))
5547 		return NULL;
5548 	if (PageAnon(page)) {
5549 		if (!(mc.flags & MOVE_ANON))
5550 			return NULL;
5551 	} else {
5552 		if (!(mc.flags & MOVE_FILE))
5553 			return NULL;
5554 	}
5555 	if (!get_page_unless_zero(page))
5556 		return NULL;
5557 
5558 	return page;
5559 }
5560 
5561 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5562 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5563 			pte_t ptent, swp_entry_t *entry)
5564 {
5565 	struct page *page = NULL;
5566 	swp_entry_t ent = pte_to_swp_entry(ptent);
5567 
5568 	if (!(mc.flags & MOVE_ANON))
5569 		return NULL;
5570 
5571 	/*
5572 	 * Handle MEMORY_DEVICE_PRIVATE pages, which are ZONE_DEVICE pages
5573 	 * belonging to a device. Because they are not accessible by the CPU,
5574 	 * they are stored as special swap entries in the CPU page table.
5575 	 */
5576 	if (is_device_private_entry(ent)) {
5577 		page = device_private_entry_to_page(ent);
5578 		/*
5579 		 * MEMORY_DEVICE_PRIVATE means a ZONE_DEVICE page, which has
5580 		 * a refcount of 1 when free (unlike a normal page).
5581 		 */
5582 		if (!page_ref_add_unless(page, 1, 1))
5583 			return NULL;
5584 		return page;
5585 	}
5586 
5587 	if (non_swap_entry(ent))
5588 		return NULL;
5589 
5590 	/*
5591 	 * Because lookup_swap_cache() updates some statistics counter,
5592 	 * we call find_get_page() with swapper_space directly.
5593 	 */
5594 	page = find_get_page(swap_address_space(ent), swp_offset(ent));
5595 	entry->val = ent.val;
5596 
5597 	return page;
5598 }
5599 #else
5600 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5601 			pte_t ptent, swp_entry_t *entry)
5602 {
5603 	return NULL;
5604 }
5605 #endif
5606 
5607 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5608 			unsigned long addr, pte_t ptent, swp_entry_t *entry)
5609 {
5610 	if (!vma->vm_file) /* anonymous vma */
5611 		return NULL;
5612 	if (!(mc.flags & MOVE_FILE))
5613 		return NULL;
5614 
5615 	/* The page is moved even if it's not RSS of this task (page-faulted). */
5616 	/* shmem/tmpfs may report a page out on swap: account for that too. */
5617 	return find_get_incore_page(vma->vm_file->f_mapping,
5618 			linear_page_index(vma, addr));
5619 }
5620 
5621 /**
5622  * mem_cgroup_move_account - move account of the page
5623  * @page: the page
5624  * @compound: charge the page as compound or small page
5625  * @from: mem_cgroup which the page is moved from.
5626  * @to:	mem_cgroup which the page is moved to. @from != @to.
5627  *
5628  * The caller must make sure the page is not on LRU (isolate_page() is useful.)
5629  *
5630  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5631  * from old cgroup.
5632  */
5633 static int mem_cgroup_move_account(struct page *page,
5634 				   bool compound,
5635 				   struct mem_cgroup *from,
5636 				   struct mem_cgroup *to)
5637 {
5638 	struct lruvec *from_vec, *to_vec;
5639 	struct pglist_data *pgdat;
5640 	unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5641 	int ret;
5642 
5643 	VM_BUG_ON(from == to);
5644 	VM_BUG_ON_PAGE(PageLRU(page), page);
5645 	VM_BUG_ON(compound && !PageTransHuge(page));
5646 
5647 	/*
5648 	 * Prevent mem_cgroup_migrate() from looking at
5649 	 * page->mem_cgroup of its source page while we change it.
5650 	 */
5651 	ret = -EBUSY;
5652 	if (!trylock_page(page))
5653 		goto out;
5654 
5655 	ret = -EINVAL;
5656 	if (page->mem_cgroup != from)
5657 		goto out_unlock;
5658 
5659 	pgdat = page_pgdat(page);
5660 	from_vec = mem_cgroup_lruvec(from, pgdat);
5661 	to_vec = mem_cgroup_lruvec(to, pgdat);
5662 
5663 	lock_page_memcg(page);
5664 
5665 	if (PageAnon(page)) {
5666 		if (page_mapped(page)) {
5667 			__mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5668 			__mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5669 			if (PageTransHuge(page)) {
5670 				__mod_lruvec_state(from_vec, NR_ANON_THPS,
5671 						   -nr_pages);
5672 				__mod_lruvec_state(to_vec, NR_ANON_THPS,
5673 						   nr_pages);
5674 			}
5675 
5676 		}
5677 	} else {
5678 		__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5679 		__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5680 
5681 		if (PageSwapBacked(page)) {
5682 			__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5683 			__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5684 		}
5685 
5686 		if (page_mapped(page)) {
5687 			__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5688 			__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5689 		}
5690 
5691 		if (PageDirty(page)) {
5692 			struct address_space *mapping = page_mapping(page);
5693 
5694 			if (mapping_can_writeback(mapping)) {
5695 				__mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5696 						   -nr_pages);
5697 				__mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5698 						   nr_pages);
5699 			}
5700 		}
5701 	}
5702 
5703 	if (PageWriteback(page)) {
5704 		__mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5705 		__mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5706 	}
5707 
5708 	/*
5709 	 * All state has been migrated, let's switch to the new memcg.
5710 	 *
5711 	 * It is safe to change page->mem_cgroup here because the page
5712 	 * is referenced, charged, isolated, and locked: we can't race
5713 	 * with (un)charging, migration, LRU putback, or anything else
5714 	 * that would rely on a stable page->mem_cgroup.
5715 	 *
5716 	 * Note that lock_page_memcg is a memcg lock, not a page lock,
5717 	 * to save space. As soon as we switch page->mem_cgroup to a
5718 	 * new memcg that isn't locked, the above state can change
5719 	 * concurrently again. Make sure we're truly done with it.
5720 	 */
5721 	smp_mb();
5722 
5723 	css_get(&to->css);
5724 	css_put(&from->css);
5725 
5726 	page->mem_cgroup = to;
5727 
5728 	__unlock_page_memcg(from);
5729 
5730 	ret = 0;
5731 
5732 	local_irq_disable();
5733 	mem_cgroup_charge_statistics(to, page, nr_pages);
5734 	memcg_check_events(to, page);
5735 	mem_cgroup_charge_statistics(from, page, -nr_pages);
5736 	memcg_check_events(from, page);
5737 	local_irq_enable();
5738 out_unlock:
5739 	unlock_page(page);
5740 out:
5741 	return ret;
5742 }
5743 
5744 /**
5745  * get_mctgt_type - get target type of moving charge
5746  * @vma: the vma the pte to be checked belongs to
5747  * @addr: the address corresponding to the pte to be checked
5748  * @ptent: the pte to be checked
5749  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5750  *
5751  * Returns
5752  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5753  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5754  *     move charge. If @target is not NULL, the page is stored in target->page
5755  *     with an extra refcount taken (callers should handle it).
5756  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5757  *     target for charge migration. If @target is not NULL, the entry is stored
5758  *     in target->ent.
5759  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5760  *     (so a ZONE_DEVICE page and thus not on the lru).
5761  *     For now such a page is charged like a regular page would be, as for all
5762  *     intents and purposes it is just special memory taking the place of a
5763  *     regular page.
5764  *
5765  *     See Documentation/vm/hmm.rst and include/linux/hmm.h
5766  *
5767  * Called with pte lock held.
5768  */
5769 
5770 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5771 		unsigned long addr, pte_t ptent, union mc_target *target)
5772 {
5773 	struct page *page = NULL;
5774 	enum mc_target_type ret = MC_TARGET_NONE;
5775 	swp_entry_t ent = { .val = 0 };
5776 
5777 	if (pte_present(ptent))
5778 		page = mc_handle_present_pte(vma, addr, ptent);
5779 	else if (is_swap_pte(ptent))
5780 		page = mc_handle_swap_pte(vma, ptent, &ent);
5781 	else if (pte_none(ptent))
5782 		page = mc_handle_file_pte(vma, addr, ptent, &ent);
5783 
5784 	if (!page && !ent.val)
5785 		return ret;
5786 	if (page) {
5787 		/*
5788 		 * Only do a loose check w/o serialization.
5789 		 * mem_cgroup_move_account() checks the page is valid or
5790 		 * not under LRU exclusion.
5791 		 */
5792 		if (page->mem_cgroup == mc.from) {
5793 			ret = MC_TARGET_PAGE;
5794 			if (is_device_private_page(page))
5795 				ret = MC_TARGET_DEVICE;
5796 			if (target)
5797 				target->page = page;
5798 		}
5799 		if (!ret || !target)
5800 			put_page(page);
5801 	}
5802 	/*
5803 	 * There is a swap entry and a page doesn't exist or isn't charged.
5804 	 * But we cannot move a tail-page in a THP.
5805 	 */
5806 	if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5807 	    mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5808 		ret = MC_TARGET_SWAP;
5809 		if (target)
5810 			target->ent = ent;
5811 	}
5812 	return ret;
5813 }
5814 
5815 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5816 /*
5817  * We don't consider PMD mapped swapping or file mapped pages because THP does
5818  * not support them for now.
5819  * Caller should make sure that pmd_trans_huge(pmd) is true.
5820  */
5821 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5822 		unsigned long addr, pmd_t pmd, union mc_target *target)
5823 {
5824 	struct page *page = NULL;
5825 	enum mc_target_type ret = MC_TARGET_NONE;
5826 
5827 	if (unlikely(is_swap_pmd(pmd))) {
5828 		VM_BUG_ON(thp_migration_supported() &&
5829 				  !is_pmd_migration_entry(pmd));
5830 		return ret;
5831 	}
5832 	page = pmd_page(pmd);
5833 	VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5834 	if (!(mc.flags & MOVE_ANON))
5835 		return ret;
5836 	if (page->mem_cgroup == mc.from) {
5837 		ret = MC_TARGET_PAGE;
5838 		if (target) {
5839 			get_page(page);
5840 			target->page = page;
5841 		}
5842 	}
5843 	return ret;
5844 }
5845 #else
5846 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5847 		unsigned long addr, pmd_t pmd, union mc_target *target)
5848 {
5849 	return MC_TARGET_NONE;
5850 }
5851 #endif
5852 
5853 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5854 					unsigned long addr, unsigned long end,
5855 					struct mm_walk *walk)
5856 {
5857 	struct vm_area_struct *vma = walk->vma;
5858 	pte_t *pte;
5859 	spinlock_t *ptl;
5860 
5861 	ptl = pmd_trans_huge_lock(pmd, vma);
5862 	if (ptl) {
5863 		/*
5864 		 * Note there can not be MC_TARGET_DEVICE for now, as we do not
5865 		 * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
5866 		 * this might change.
5867 		 */
5868 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5869 			mc.precharge += HPAGE_PMD_NR;
5870 		spin_unlock(ptl);
5871 		return 0;
5872 	}
5873 
5874 	if (pmd_trans_unstable(pmd))
5875 		return 0;
5876 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5877 	for (; addr != end; pte++, addr += PAGE_SIZE)
5878 		if (get_mctgt_type(vma, addr, *pte, NULL))
5879 			mc.precharge++;	/* increment precharge temporarily */
5880 	pte_unmap_unlock(pte - 1, ptl);
5881 	cond_resched();
5882 
5883 	return 0;
5884 }
5885 
5886 static const struct mm_walk_ops precharge_walk_ops = {
5887 	.pmd_entry	= mem_cgroup_count_precharge_pte_range,
5888 };
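/*
 * Charge moving is a two-pass operation: the precharge walk above only
 * counts candidate pages so that mem_cgroup_precharge_mc() can charge them
 * to mc.to in bulk up front; charge_walk_ops further down then moves the
 * accounting page by page, consuming mc.precharge as it goes.
 */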
5889 
5890 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5891 {
5892 	unsigned long precharge;
5893 
5894 	mmap_read_lock(mm);
5895 	walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5896 	mmap_read_unlock(mm);
5897 
5898 	precharge = mc.precharge;
5899 	mc.precharge = 0;
5900 
5901 	return precharge;
5902 }
5903 
5904 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5905 {
5906 	unsigned long precharge = mem_cgroup_count_precharge(mm);
5907 
5908 	VM_BUG_ON(mc.moving_task);
5909 	mc.moving_task = current;
5910 	return mem_cgroup_do_precharge(precharge);
5911 }
5912 
5913 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5914 static void __mem_cgroup_clear_mc(void)
5915 {
5916 	struct mem_cgroup *from = mc.from;
5917 	struct mem_cgroup *to = mc.to;
5918 
5919 	/* we must uncharge all the leftover precharges from mc.to */
5920 	if (mc.precharge) {
5921 		cancel_charge(mc.to, mc.precharge);
5922 		mc.precharge = 0;
5923 	}
5924 	/*
5925 	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5926 	 * we must uncharge here.
5927 	 */
5928 	if (mc.moved_charge) {
5929 		cancel_charge(mc.from, mc.moved_charge);
5930 		mc.moved_charge = 0;
5931 	}
5932 	/* we must fixup refcnts and charges */
5933 	if (mc.moved_swap) {
5934 		/* uncharge swap account from the old cgroup */
5935 		if (!mem_cgroup_is_root(mc.from))
5936 			page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5937 
5938 		mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5939 
5940 		/*
5941 		 * we charged both to->memory and to->memsw, so we
5942 		 * should uncharge to->memory.
5943 		 */
5944 		if (!mem_cgroup_is_root(mc.to))
5945 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5946 
5947 		mc.moved_swap = 0;
5948 	}
5949 	memcg_oom_recover(from);
5950 	memcg_oom_recover(to);
5951 	wake_up_all(&mc.waitq);
5952 }
5953 
5954 static void mem_cgroup_clear_mc(void)
5955 {
5956 	struct mm_struct *mm = mc.mm;
5957 
5958 	/*
5959 	 * we must clear moving_task before waking up waiters at the end of
5960 	 * task migration.
5961 	 */
5962 	mc.moving_task = NULL;
5963 	__mem_cgroup_clear_mc();
5964 	spin_lock(&mc.lock);
5965 	mc.from = NULL;
5966 	mc.to = NULL;
5967 	mc.mm = NULL;
5968 	spin_unlock(&mc.lock);
5969 
5970 	mmput(mm);
5971 }
5972 
5973 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5974 {
5975 	struct cgroup_subsys_state *css;
5976 	struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5977 	struct mem_cgroup *from;
5978 	struct task_struct *leader, *p;
5979 	struct mm_struct *mm;
5980 	unsigned long move_flags;
5981 	int ret = 0;
5982 
5983 	/* charge immigration isn't supported on the default hierarchy */
5984 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5985 		return 0;
5986 
5987 	/*
5988 	 * Multi-process migrations only happen on the default hierarchy
5989 	 * where charge immigration is not used.  Perform charge
5990 	 * immigration if @tset contains a leader and whine if there are
5991 	 * multiple.
5992 	 */
5993 	p = NULL;
5994 	cgroup_taskset_for_each_leader(leader, css, tset) {
5995 		WARN_ON_ONCE(p);
5996 		p = leader;
5997 		memcg = mem_cgroup_from_css(css);
5998 	}
5999 	if (!p)
6000 		return 0;
6001 
6002 	/*
6003 	 * We are now committed to this value whatever it is. Changes in this
6004 	 * tunable will only affect upcoming migrations, not the current one.
6005 	 * So we need to save it, and keep it going.
6006 	 */
6007 	move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6008 	if (!move_flags)
6009 		return 0;
6010 
6011 	from = mem_cgroup_from_task(p);
6012 
6013 	VM_BUG_ON(from == memcg);
6014 
6015 	mm = get_task_mm(p);
6016 	if (!mm)
6017 		return 0;
6018 	/* We move charges only when we move an owner of the mm */
6019 	if (mm->owner == p) {
6020 		VM_BUG_ON(mc.from);
6021 		VM_BUG_ON(mc.to);
6022 		VM_BUG_ON(mc.precharge);
6023 		VM_BUG_ON(mc.moved_charge);
6024 		VM_BUG_ON(mc.moved_swap);
6025 
6026 		spin_lock(&mc.lock);
6027 		mc.mm = mm;
6028 		mc.from = from;
6029 		mc.to = memcg;
6030 		mc.flags = move_flags;
6031 		spin_unlock(&mc.lock);
6032 		/* We set mc.moving_task later */
6033 
6034 		ret = mem_cgroup_precharge_mc(mm);
6035 		if (ret)
6036 			mem_cgroup_clear_mc();
6037 	} else {
6038 		mmput(mm);
6039 	}
6040 	return ret;
6041 }
6042 
6043 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6044 {
6045 	if (mc.to)
6046 		mem_cgroup_clear_mc();
6047 }
6048 
6049 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6050 				unsigned long addr, unsigned long end,
6051 				struct mm_walk *walk)
6052 {
6053 	int ret = 0;
6054 	struct vm_area_struct *vma = walk->vma;
6055 	pte_t *pte;
6056 	spinlock_t *ptl;
6057 	enum mc_target_type target_type;
6058 	union mc_target target;
6059 	struct page *page;
6060 
6061 	ptl = pmd_trans_huge_lock(pmd, vma);
6062 	if (ptl) {
6063 		if (mc.precharge < HPAGE_PMD_NR) {
6064 			spin_unlock(ptl);
6065 			return 0;
6066 		}
6067 		target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6068 		if (target_type == MC_TARGET_PAGE) {
6069 			page = target.page;
6070 			if (!isolate_lru_page(page)) {
6071 				if (!mem_cgroup_move_account(page, true,
6072 							     mc.from, mc.to)) {
6073 					mc.precharge -= HPAGE_PMD_NR;
6074 					mc.moved_charge += HPAGE_PMD_NR;
6075 				}
6076 				putback_lru_page(page);
6077 			}
6078 			put_page(page);
6079 		} else if (target_type == MC_TARGET_DEVICE) {
6080 			page = target.page;
6081 			if (!mem_cgroup_move_account(page, true,
6082 						     mc.from, mc.to)) {
6083 				mc.precharge -= HPAGE_PMD_NR;
6084 				mc.moved_charge += HPAGE_PMD_NR;
6085 			}
6086 			put_page(page);
6087 		}
6088 		spin_unlock(ptl);
6089 		return 0;
6090 	}
6091 
6092 	if (pmd_trans_unstable(pmd))
6093 		return 0;
6094 retry:
6095 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6096 	for (; addr != end; addr += PAGE_SIZE) {
6097 		pte_t ptent = *(pte++);
6098 		bool device = false;
6099 		swp_entry_t ent;
6100 
6101 		if (!mc.precharge)
6102 			break;
6103 
6104 		switch (get_mctgt_type(vma, addr, ptent, &target)) {
6105 		case MC_TARGET_DEVICE:
6106 			device = true;
6107 			fallthrough;
6108 		case MC_TARGET_PAGE:
6109 			page = target.page;
6110 			/*
6111 			 * We can have a part of the split pmd here. Moving it
6112 			 * can be done but it would be too convoluted so simply
6113 			 * ignore such a partial THP and keep it in the original
6114 			 * memcg. There should be somebody mapping the head.
6115 			 */
6116 			if (PageTransCompound(page))
6117 				goto put;
6118 			if (!device && isolate_lru_page(page))
6119 				goto put;
6120 			if (!mem_cgroup_move_account(page, false,
6121 						mc.from, mc.to)) {
6122 				mc.precharge--;
6123 				/* we uncharge from mc.from later. */
6124 				mc.moved_charge++;
6125 			}
6126 			if (!device)
6127 				putback_lru_page(page);
6128 put:			/* get_mctgt_type() gets the page */
6129 			put_page(page);
6130 			break;
6131 		case MC_TARGET_SWAP:
6132 			ent = target.ent;
6133 			if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6134 				mc.precharge--;
6135 				mem_cgroup_id_get_many(mc.to, 1);
6136 				/* we fixup other refcnts and charges later. */
6137 				mc.moved_swap++;
6138 			}
6139 			break;
6140 		default:
6141 			break;
6142 		}
6143 	}
6144 	pte_unmap_unlock(pte - 1, ptl);
6145 	cond_resched();
6146 
6147 	if (addr != end) {
6148 		/*
6149 		 * We have consumed all precharges we got in can_attach().
6150 		 * We try to charge one by one, but don't do any additional
6151 		 * charges to mc.to if we have failed to charge once in the attach()
6152 		 * phase.
6153 		 */
6154 		ret = mem_cgroup_do_precharge(1);
6155 		if (!ret)
6156 			goto retry;
6157 	}
6158 
6159 	return ret;
6160 }
6161 
6162 static const struct mm_walk_ops charge_walk_ops = {
6163 	.pmd_entry	= mem_cgroup_move_charge_pte_range,
6164 };
6165 
6166 static void mem_cgroup_move_charge(void)
6167 {
6168 	lru_add_drain_all();
6169 	/*
6170 	 * Signal lock_page_memcg() to take the memcg's move_lock
6171 	 * while we're moving its pages to another memcg. Then wait
6172 	 * for already started RCU-only updates to finish.
6173 	 */
6174 	atomic_inc(&mc.from->moving_account);
6175 	synchronize_rcu();
6176 retry:
6177 	if (unlikely(!mmap_read_trylock(mc.mm))) {
6178 		/*
6179 		 * Someone who is holding the mmap_lock might be waiting in
6180 		 * the waitq. So we cancel all extra charges, wake up all waiters,
6181 		 * and retry. Because we cancel precharges, we might not be able
6182 		 * to move enough charges, but moving charge is a best-effort
6183 		 * feature anyway, so it wouldn't be a big problem.
6184 		 */
6185 		__mem_cgroup_clear_mc();
6186 		cond_resched();
6187 		goto retry;
6188 	}
6189 	/*
6190 	 * When we have consumed all precharges and failed in doing
6191 	 * additional charge, the page walk just aborts.
6192 	 */
6193 	walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6194 			NULL);
6195 
6196 	mmap_read_unlock(mc.mm);
6197 	atomic_dec(&mc.from->moving_account);
6198 }
6199 
6200 static void mem_cgroup_move_task(void)
6201 {
6202 	if (mc.to) {
6203 		mem_cgroup_move_charge();
6204 		mem_cgroup_clear_mc();
6205 	}
6206 }
6207 #else	/* !CONFIG_MMU */
6208 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6209 {
6210 	return 0;
6211 }
6212 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6213 {
6214 }
6215 static void mem_cgroup_move_task(void)
6216 {
6217 }
6218 #endif
6219 
6220 /*
6221  * Cgroup retains root cgroups across [un]mount cycles making it necessary
6222  * to verify whether we're attached to the default hierarchy on each mount
6223  * attempt.
6224  */
6225 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
6226 {
6227 	/*
6228 	 * use_hierarchy is forced on the default hierarchy.  cgroup core
6229 	 * guarantees that @root doesn't have any children, so turning it
6230 	 * on for the root memcg is enough.
6231 	 */
6232 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6233 		root_mem_cgroup->use_hierarchy = true;
6234 	else
6235 		root_mem_cgroup->use_hierarchy = false;
6236 }
6237 
6238 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6239 {
6240 	if (value == PAGE_COUNTER_MAX)
6241 		seq_puts(m, "max\n");
6242 	else
6243 		seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6244 
6245 	return 0;
6246 }
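/*
 * For example (illustrative numbers, assuming 4KiB pages): a setting of
 * 25600 pages is shown as "104857600\n", while PAGE_COUNTER_MAX is shown
 * as "max\n".
 */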
6247 
6248 static u64 memory_current_read(struct cgroup_subsys_state *css,
6249 			       struct cftype *cft)
6250 {
6251 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6252 
6253 	return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6254 }
6255 
6256 static int memory_min_show(struct seq_file *m, void *v)
6257 {
6258 	return seq_puts_memcg_tunable(m,
6259 		READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6260 }
6261 
6262 static ssize_t memory_min_write(struct kernfs_open_file *of,
6263 				char *buf, size_t nbytes, loff_t off)
6264 {
6265 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6266 	unsigned long min;
6267 	int err;
6268 
6269 	buf = strstrip(buf);
6270 	err = page_counter_memparse(buf, "max", &min);
6271 	if (err)
6272 		return err;
6273 
6274 	page_counter_set_min(&memcg->memory, min);
6275 
6276 	return nbytes;
6277 }
6278 
6279 static int memory_low_show(struct seq_file *m, void *v)
6280 {
6281 	return seq_puts_memcg_tunable(m,
6282 		READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6283 }
6284 
6285 static ssize_t memory_low_write(struct kernfs_open_file *of,
6286 				char *buf, size_t nbytes, loff_t off)
6287 {
6288 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6289 	unsigned long low;
6290 	int err;
6291 
6292 	buf = strstrip(buf);
6293 	err = page_counter_memparse(buf, "max", &low);
6294 	if (err)
6295 		return err;
6296 
6297 	page_counter_set_low(&memcg->memory, low);
6298 
6299 	return nbytes;
6300 }
6301 
6302 static int memory_high_show(struct seq_file *m, void *v)
6303 {
6304 	return seq_puts_memcg_tunable(m,
6305 		READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6306 }
6307 
6308 static ssize_t memory_high_write(struct kernfs_open_file *of,
6309 				 char *buf, size_t nbytes, loff_t off)
6310 {
6311 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6312 	unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6313 	bool drained = false;
6314 	unsigned long high;
6315 	int err;
6316 
6317 	buf = strstrip(buf);
6318 	err = page_counter_memparse(buf, "max", &high);
6319 	if (err)
6320 		return err;
6321 
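	/*
	 * Best-effort shrink of usage down to the new high limit: bail out
	 * on a pending signal, drain the percpu charge caches once, and give
	 * up after MAX_RECLAIM_RETRIES reclaim passes that make no progress.
	 * The new limit is installed below regardless of how far this got.
	 */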
6322 	for (;;) {
6323 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6324 		unsigned long reclaimed;
6325 
6326 		if (nr_pages <= high)
6327 			break;
6328 
6329 		if (signal_pending(current))
6330 			break;
6331 
6332 		if (!drained) {
6333 			drain_all_stock(memcg);
6334 			drained = true;
6335 			continue;
6336 		}
6337 
6338 		reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6339 							 GFP_KERNEL, true);
6340 
6341 		if (!reclaimed && !nr_retries--)
6342 			break;
6343 	}
6344 
6345 	page_counter_set_high(&memcg->memory, high);
6346 
6347 	memcg_wb_domain_size_changed(memcg);
6348 
6349 	return nbytes;
6350 }
6351 
6352 static int memory_max_show(struct seq_file *m, void *v)
6353 {
6354 	return seq_puts_memcg_tunable(m,
6355 		READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6356 }
6357 
6358 static ssize_t memory_max_write(struct kernfs_open_file *of,
6359 				char *buf, size_t nbytes, loff_t off)
6360 {
6361 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6362 	unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6363 	bool drained = false;
6364 	unsigned long max;
6365 	int err;
6366 
6367 	buf = strstrip(buf);
6368 	err = page_counter_memparse(buf, "max", &max);
6369 	if (err)
6370 		return err;
6371 
6372 	xchg(&memcg->memory.max, max);
6373 
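	/*
	 * Unlike memory.high above, memory.max is a hard limit: the new max
	 * is installed first (xchg above), and if draining and repeated
	 * reclaim cannot bring usage below it, the memcg OOM killer is
	 * invoked until usage fits, a signal is pending, or no more victims
	 * can be found.
	 */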
6374 	for (;;) {
6375 		unsigned long nr_pages = page_counter_read(&memcg->memory);
6376 
6377 		if (nr_pages <= max)
6378 			break;
6379 
6380 		if (signal_pending(current))
6381 			break;
6382 
6383 		if (!drained) {
6384 			drain_all_stock(memcg);
6385 			drained = true;
6386 			continue;
6387 		}
6388 
6389 		if (nr_reclaims) {
6390 			if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6391 							  GFP_KERNEL, true))
6392 				nr_reclaims--;
6393 			continue;
6394 		}
6395 
6396 		memcg_memory_event(memcg, MEMCG_OOM);
6397 		if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6398 			break;
6399 	}
6400 
6401 	memcg_wb_domain_size_changed(memcg);
6402 	return nbytes;
6403 }
6404 
6405 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6406 {
6407 	seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6408 	seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6409 	seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6410 	seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6411 	seq_printf(m, "oom_kill %lu\n",
6412 		   atomic_long_read(&events[MEMCG_OOM_KILL]));
6413 }
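/*
 * Both memory.events and memory.events.local are rendered by the helper
 * above; a freshly created cgroup would read, for example:
 *
 *	low 0
 *	high 0
 *	max 0
 *	oom 0
 *	oom_kill 0
 */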
6414 
6415 static int memory_events_show(struct seq_file *m, void *v)
6416 {
6417 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6418 
6419 	__memory_events_show(m, memcg->memory_events);
6420 	return 0;
6421 }
6422 
6423 static int memory_events_local_show(struct seq_file *m, void *v)
6424 {
6425 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6426 
6427 	__memory_events_show(m, memcg->memory_events_local);
6428 	return 0;
6429 }
6430 
6431 static int memory_stat_show(struct seq_file *m, void *v)
6432 {
6433 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6434 	char *buf;
6435 
6436 	buf = memory_stat_format(memcg);
6437 	if (!buf)
6438 		return -ENOMEM;
6439 	seq_puts(m, buf);
6440 	kfree(buf);
6441 	return 0;
6442 }
6443 
6444 #ifdef CONFIG_NUMA
6445 static int memory_numa_stat_show(struct seq_file *m, void *v)
6446 {
6447 	int i;
6448 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6449 
6450 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6451 		int nid;
6452 
6453 		if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6454 			continue;
6455 
6456 		seq_printf(m, "%s", memory_stats[i].name);
6457 		for_each_node_state(nid, N_MEMORY) {
6458 			u64 size;
6459 			struct lruvec *lruvec;
6460 
6461 			lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6462 			size = lruvec_page_state(lruvec, memory_stats[i].idx);
6463 			size *= memory_stats[i].ratio;
6464 			seq_printf(m, " N%d=%llu", nid, size);
6465 		}
6466 		seq_putc(m, '\n');
6467 	}
6468 
6469 	return 0;
6470 }
6471 #endif
6472 
6473 static int memory_oom_group_show(struct seq_file *m, void *v)
6474 {
6475 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6476 
6477 	seq_printf(m, "%d\n", memcg->oom_group);
6478 
6479 	return 0;
6480 }
6481 
6482 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6483 				      char *buf, size_t nbytes, loff_t off)
6484 {
6485 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6486 	int ret, oom_group;
6487 
6488 	buf = strstrip(buf);
6489 	if (!buf)
6490 		return -EINVAL;
6491 
6492 	ret = kstrtoint(buf, 0, &oom_group);
6493 	if (ret)
6494 		return ret;
6495 
6496 	if (oom_group != 0 && oom_group != 1)
6497 		return -EINVAL;
6498 
6499 	memcg->oom_group = oom_group;
6500 
6501 	return nbytes;
6502 }
6503 
6504 static struct cftype memory_files[] = {
6505 	{
6506 		.name = "current",
6507 		.flags = CFTYPE_NOT_ON_ROOT,
6508 		.read_u64 = memory_current_read,
6509 	},
6510 	{
6511 		.name = "min",
6512 		.flags = CFTYPE_NOT_ON_ROOT,
6513 		.seq_show = memory_min_show,
6514 		.write = memory_min_write,
6515 	},
6516 	{
6517 		.name = "low",
6518 		.flags = CFTYPE_NOT_ON_ROOT,
6519 		.seq_show = memory_low_show,
6520 		.write = memory_low_write,
6521 	},
6522 	{
6523 		.name = "high",
6524 		.flags = CFTYPE_NOT_ON_ROOT,
6525 		.seq_show = memory_high_show,
6526 		.write = memory_high_write,
6527 	},
6528 	{
6529 		.name = "max",
6530 		.flags = CFTYPE_NOT_ON_ROOT,
6531 		.seq_show = memory_max_show,
6532 		.write = memory_max_write,
6533 	},
6534 	{
6535 		.name = "events",
6536 		.flags = CFTYPE_NOT_ON_ROOT,
6537 		.file_offset = offsetof(struct mem_cgroup, events_file),
6538 		.seq_show = memory_events_show,
6539 	},
6540 	{
6541 		.name = "events.local",
6542 		.flags = CFTYPE_NOT_ON_ROOT,
6543 		.file_offset = offsetof(struct mem_cgroup, events_local_file),
6544 		.seq_show = memory_events_local_show,
6545 	},
6546 	{
6547 		.name = "stat",
6548 		.seq_show = memory_stat_show,
6549 	},
6550 #ifdef CONFIG_NUMA
6551 	{
6552 		.name = "numa_stat",
6553 		.seq_show = memory_numa_stat_show,
6554 	},
6555 #endif
6556 	{
6557 		.name = "oom.group",
6558 		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6559 		.seq_show = memory_oom_group_show,
6560 		.write = memory_oom_group_write,
6561 	},
6562 	{ }	/* terminate */
6563 };
6564 
6565 struct cgroup_subsys memory_cgrp_subsys = {
6566 	.css_alloc = mem_cgroup_css_alloc,
6567 	.css_online = mem_cgroup_css_online,
6568 	.css_offline = mem_cgroup_css_offline,
6569 	.css_released = mem_cgroup_css_released,
6570 	.css_free = mem_cgroup_css_free,
6571 	.css_reset = mem_cgroup_css_reset,
6572 	.can_attach = mem_cgroup_can_attach,
6573 	.cancel_attach = mem_cgroup_cancel_attach,
6574 	.post_attach = mem_cgroup_move_task,
6575 	.bind = mem_cgroup_bind,
6576 	.dfl_cftypes = memory_files,
6577 	.legacy_cftypes = mem_cgroup_legacy_files,
6578 	.early_init = 0,
6579 };
6580 
6581 /*
6582  * This function calculates an individual cgroup's effective
6583  * protection which is derived from its own memory.min/low, its
6584  * parent's and siblings' settings, as well as the actual memory
6585  * distribution in the tree.
6586  *
6587  * The following rules apply to the effective protection values:
6588  *
6589  * 1. At the first level of reclaim, effective protection is equal to
6590  *    the declared protection in memory.min and memory.low.
6591  *
6592  * 2. To enable safe delegation of the protection configuration, at
6593  *    subsequent levels the effective protection is capped to the
6594  *    parent's effective protection.
6595  *
6596  * 3. To make complex and dynamic subtrees easier to configure, the
6597  *    user is allowed to overcommit the declared protection at a given
6598  *    level. If that is the case, the parent's effective protection is
6599  *    distributed to the children in proportion to how much protection
6600  *    they have declared and how much of it they are utilizing.
6601  *
6602  *    This makes distribution proportional, but also work-conserving:
6603  *    if one cgroup claims much more protection than it uses memory,
6604  *    the unused remainder is available to its siblings.
6605  *
6606  * 4. Conversely, when the declared protection is undercommitted at a
6607  *    given level, the distribution of the larger parental protection
6608  *    budget is NOT proportional. A cgroup's protection from a sibling
6609  *    is capped to its own memory.min/low setting.
6610  *
6611  * 5. However, to allow protecting recursive subtrees from each other
6612  *    without having to declare each individual cgroup's fixed share
6613  *    of the ancestor's claim to protection, any unutilized -
6614  *    "floating" - protection from up the tree is distributed in
6615  *    proportion to each cgroup's *usage*. This makes the protection
6616  *    neutral wrt sibling cgroups and lets them compete freely over
6617  *    the shared parental protection budget, but it protects the
6618  *    subtree as a whole from neighboring subtrees.
6619  *
6620  * Note that 4. and 5. are not in conflict: 4. is about protecting
6621  * against immediate siblings whereas 5. is about protecting against
6622  * neighboring subtrees.
6623  */
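/*
 * An illustrative example of rule 3 (numbers made up): a parent with an
 * effective protection of 8G has two children, each declaring
 * memory.low = 8G (16G combined, i.e. overcommitted). If child A uses 6G
 * and child B uses 4G, siblings_protected is 10G and each child receives
 * a share proportional to its utilization:
 *
 *	A: 6G * 8G / 10G = 4.8G        B: 4G * 8G / 10G = 3.2G
 *
 * which together consume exactly the parent's 8G budget.
 */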
6624 static unsigned long effective_protection(unsigned long usage,
6625 					  unsigned long parent_usage,
6626 					  unsigned long setting,
6627 					  unsigned long parent_effective,
6628 					  unsigned long siblings_protected)
6629 {
6630 	unsigned long protected;
6631 	unsigned long ep;
6632 
6633 	protected = min(usage, setting);
6634 	/*
6635 	 * If all cgroups at this level combined claim and use more
6636 	 * protection than what the parent affords them, distribute
6637 	 * shares in proportion to utilization.
6638 	 *
6639 	 * We are using actual utilization rather than the statically
6640 	 * claimed protection in order to be work-conserving: claimed
6641 	 * but unused protection is available to siblings that would
6642 	 * otherwise get a smaller chunk than what they claimed.
6643 	 */
6644 	if (siblings_protected > parent_effective)
6645 		return protected * parent_effective / siblings_protected;
6646 
6647 	/*
6648 	 * Ok, utilized protection of all children is within what the
6649 	 * parent affords them, so we know whatever this child claims
6650 	 * and utilizes is effectively protected.
6651 	 *
6652 	 * If there is unprotected usage beyond this value, reclaim
6653 	 * will apply pressure in proportion to that amount.
6654 	 *
6655 	 * If there is unutilized protection, the cgroup will be fully
6656 	 * shielded from reclaim, but we do return a smaller value for
6657 	 * protection than what the group could enjoy in theory. This
6658 	 * is okay. With the overcommit distribution above, effective
6659 	 * protection is always dependent on how memory is actually
6660 	 * consumed among the siblings anyway.
6661 	 */
6662 	ep = protected;
6663 
6664 	/*
6665 	 * If the children aren't claiming (all of) the protection
6666 	 * afforded to them by the parent, distribute the remainder in
6667 	 * proportion to the (unprotected) memory of each cgroup. That
6668 	 * way, cgroups that aren't explicitly prioritized wrt each
6669 	 * other compete freely over the allowance, but they are
6670 	 * collectively protected from neighboring trees.
6671 	 *
6672 	 * We're using unprotected memory for the weight so that if
6673 	 * some cgroups DO claim explicit protection, we don't protect
6674 	 * the same bytes twice.
6675 	 *
6676 	 * Check both usage and parent_usage against the respective
6677 	 * protected values. One should imply the other, but they
6678 	 * aren't read atomically - make sure the division is sane.
6679 	 */
6680 	if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6681 		return ep;
6682 	if (parent_effective > siblings_protected &&
6683 	    parent_usage > siblings_protected &&
6684 	    usage > protected) {
6685 		unsigned long unclaimed;
6686 
6687 		unclaimed = parent_effective - siblings_protected;
6688 		unclaimed *= usage - protected;
6689 		unclaimed /= parent_usage - siblings_protected;
6690 
6691 		ep += unclaimed;
6692 	}
6693 
6694 	return ep;
6695 }
6696 
6697 /**
6698  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6699  * @root: the top ancestor of the sub-tree being checked
6700  * @memcg: the memory cgroup to check
6701  *
6702  * WARNING: This function is not stateless! It can only be used as part
6703  *          of a top-down tree iteration, not for isolated queries.
6704  */
6705 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6706 				     struct mem_cgroup *memcg)
6707 {
6708 	unsigned long usage, parent_usage;
6709 	struct mem_cgroup *parent;
6710 
6711 	if (mem_cgroup_disabled())
6712 		return;
6713 
6714 	if (!root)
6715 		root = root_mem_cgroup;
6716 
6717 	/*
6718 	 * Effective values of the reclaim targets are ignored so they
6719 	 * can be stale. Have a look at mem_cgroup_protection for more
6720 	 * details.
6721 	 * TODO: calculation should be more robust so that we do not need
6722 	 * that special casing.
6723 	 */
6724 	if (memcg == root)
6725 		return;
6726 
6727 	usage = page_counter_read(&memcg->memory);
6728 	if (!usage)
6729 		return;
6730 
6731 	parent = parent_mem_cgroup(memcg);
6732 	/* No parent means a non-hierarchical mode on v1 memcg */
6733 	if (!parent)
6734 		return;
6735 
6736 	if (parent == root) {
6737 		memcg->memory.emin = READ_ONCE(memcg->memory.min);
6738 		memcg->memory.elow = READ_ONCE(memcg->memory.low);
6739 		return;
6740 	}
6741 
6742 	parent_usage = page_counter_read(&parent->memory);
6743 
6744 	WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6745 			READ_ONCE(memcg->memory.min),
6746 			READ_ONCE(parent->memory.emin),
6747 			atomic_long_read(&parent->memory.children_min_usage)));
6748 
6749 	WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6750 			READ_ONCE(memcg->memory.low),
6751 			READ_ONCE(parent->memory.elow),
6752 			atomic_long_read(&parent->memory.children_low_usage)));
6753 }
6754 
6755 /**
6756  * mem_cgroup_charge - charge a newly allocated page to a cgroup
6757  * @page: page to charge
6758  * @mm: mm context of the victim
6759  * @gfp_mask: reclaim mode
6760  *
6761  * Try to charge @page to the memcg that @mm belongs to, reclaiming
6762  * pages according to @gfp_mask if necessary.
6763  *
6764  * Returns 0 on success. Otherwise, an error code is returned.
6765  */
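/*
 * A minimal sketch of a typical call site (the anonymous fault path is one
 * example; the surrounding details are illustrative, not taken from this
 * file):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) {
 *		put_page(page);
 *		return VM_FAULT_OOM;
 *	}
 */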
6766 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
6767 {
6768 	unsigned int nr_pages = thp_nr_pages(page);
6769 	struct mem_cgroup *memcg = NULL;
6770 	int ret = 0;
6771 
6772 	if (mem_cgroup_disabled())
6773 		goto out;
6774 
6775 	if (PageSwapCache(page)) {
6776 		swp_entry_t ent = { .val = page_private(page), };
6777 		unsigned short id;
6778 
6779 		/*
6780 		 * Every swap fault against a single page tries to charge the
6781 		 * page, bail as early as possible.  shmem_unuse() encounters
6782 		 * already charged pages, too.  page->mem_cgroup is protected
6783 		 * by the page lock, which serializes swap cache removal, which
6784 		 * in turn serializes uncharging.
6785 		 */
6786 		VM_BUG_ON_PAGE(!PageLocked(page), page);
6787 		if (compound_head(page)->mem_cgroup)
6788 			goto out;
6789 
6790 		id = lookup_swap_cgroup_id(ent);
6791 		rcu_read_lock();
6792 		memcg = mem_cgroup_from_id(id);
6793 		if (memcg && !css_tryget_online(&memcg->css))
6794 			memcg = NULL;
6795 		rcu_read_unlock();
6796 	}
6797 
6798 	if (!memcg)
6799 		memcg = get_mem_cgroup_from_mm(mm);
6800 
6801 	ret = try_charge(memcg, gfp_mask, nr_pages);
6802 	if (ret)
6803 		goto out_put;
6804 
6805 	css_get(&memcg->css);
6806 	commit_charge(page, memcg);
6807 
6808 	local_irq_disable();
6809 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
6810 	memcg_check_events(memcg, page);
6811 	local_irq_enable();
6812 
6813 	if (PageSwapCache(page)) {
6814 		swp_entry_t entry = { .val = page_private(page) };
6815 		/*
6816 		 * The swap entry might not get freed for a long time,
6817 		 * let's not wait for it.  The page already received a
6818 		 * memory+swap charge, drop the swap entry duplicate.
6819 		 */
6820 		mem_cgroup_uncharge_swap(entry, nr_pages);
6821 	}
6822 
6823 out_put:
6824 	css_put(&memcg->css);
6825 out:
6826 	return ret;
6827 }
6828 
6829 struct uncharge_gather {
6830 	struct mem_cgroup *memcg;
6831 	unsigned long nr_pages;
6832 	unsigned long pgpgout;
6833 	unsigned long nr_kmem;
6834 	struct page *dummy_page;
6835 };
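/*
 * The uncharge path below batches its work through this gather struct:
 * pages are accumulated per memcg while a list is walked, and the page
 * counters and vmstats are only touched in uncharge_batch() when the memcg
 * changes or the walk ends - one update per batch instead of one per page.
 */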
6836 
6837 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6838 {
6839 	memset(ug, 0, sizeof(*ug));
6840 }
6841 
6842 static void uncharge_batch(const struct uncharge_gather *ug)
6843 {
6844 	unsigned long flags;
6845 
6846 	if (!mem_cgroup_is_root(ug->memcg)) {
6847 		page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
6848 		if (do_memsw_account())
6849 			page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
6850 		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6851 			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6852 		memcg_oom_recover(ug->memcg);
6853 	}
6854 
6855 	local_irq_save(flags);
6856 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6857 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
6858 	memcg_check_events(ug->memcg, ug->dummy_page);
6859 	local_irq_restore(flags);
6860 
6861 	/* drop reference from uncharge_page */
6862 	css_put(&ug->memcg->css);
6863 }
6864 
6865 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6866 {
6867 	unsigned long nr_pages;
6868 
6869 	VM_BUG_ON_PAGE(PageLRU(page), page);
6870 
6871 	if (!page->mem_cgroup)
6872 		return;
6873 
6874 	/*
6875 	 * Nobody should be changing or seriously looking at
6876 	 * page->mem_cgroup at this point, we have fully
6877 	 * exclusive access to the page.
6878 	 */
6879 
6880 	if (ug->memcg != page->mem_cgroup) {
6881 		if (ug->memcg) {
6882 			uncharge_batch(ug);
6883 			uncharge_gather_clear(ug);
6884 		}
6885 		ug->memcg = page->mem_cgroup;
6886 
6887 		/* pairs with css_put in uncharge_batch */
6888 		css_get(&ug->memcg->css);
6889 	}
6890 
6891 	nr_pages = compound_nr(page);
6892 	ug->nr_pages += nr_pages;
6893 
6894 	if (!PageKmemcg(page)) {
6895 		ug->pgpgout++;
6896 	} else {
6897 		ug->nr_kmem += nr_pages;
6898 		__ClearPageKmemcg(page);
6899 	}
6900 
6901 	ug->dummy_page = page;
6902 	page->mem_cgroup = NULL;
6903 	css_put(&ug->memcg->css);
6904 }
6905 
6906 static void uncharge_list(struct list_head *page_list)
6907 {
6908 	struct uncharge_gather ug;
6909 	struct list_head *next;
6910 
6911 	uncharge_gather_clear(&ug);
6912 
6913 	/*
6914 	 * Note that the list can be a single page->lru; hence the
6915 	 * do-while loop instead of a simple list_for_each_entry().
6916 	 */
6917 	next = page_list->next;
6918 	do {
6919 		struct page *page;
6920 
6921 		page = list_entry(next, struct page, lru);
6922 		next = page->lru.next;
6923 
6924 		uncharge_page(page, &ug);
6925 	} while (next != page_list);
6926 
6927 	if (ug.memcg)
6928 		uncharge_batch(&ug);
6929 }
6930 
6931 /**
6932  * mem_cgroup_uncharge - uncharge a page
6933  * @page: page to uncharge
6934  *
6935  * Uncharge a page previously charged with mem_cgroup_charge().
6936  */
6937 void mem_cgroup_uncharge(struct page *page)
6938 {
6939 	struct uncharge_gather ug;
6940 
6941 	if (mem_cgroup_disabled())
6942 		return;
6943 
6944 	/* Don't touch page->lru of any random page, pre-check: */
6945 	if (!page->mem_cgroup)
6946 		return;
6947 
6948 	uncharge_gather_clear(&ug);
6949 	uncharge_page(page, &ug);
6950 	uncharge_batch(&ug);
6951 }
6952 
6953 /**
6954  * mem_cgroup_uncharge_list - uncharge a list of pages
6955  * @page_list: list of pages to uncharge
6956  *
6957  * Uncharge a list of pages previously charged with
6958  * mem_cgroup_charge().
6959  */
6960 void mem_cgroup_uncharge_list(struct list_head *page_list)
6961 {
6962 	if (mem_cgroup_disabled())
6963 		return;
6964 
6965 	if (!list_empty(page_list))
6966 		uncharge_list(page_list);
6967 }
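
/*
 * Sketch of a typical caller (hypothetical; modelled on the reclaim
 * free path): pages isolated onto a private list are uncharged in one
 * batch before being handed back to the page allocator:
 *
 *	LIST_HEAD(free_pages);
 *	...
 *	mem_cgroup_uncharge_list(&free_pages);
 *	free_unref_page_list(&free_pages);
 */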
6968 
6969 /**
6970  * mem_cgroup_migrate - charge a page's replacement
6971  * @oldpage: currently circulating page
6972  * @newpage: replacement page
6973  *
6974  * Charge @newpage as a replacement page for @oldpage. @oldpage will
6975  * be uncharged upon free.
6976  *
6977  * Both pages must be locked, @newpage->mapping must be set up.
6978  */
6979 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
6980 {
6981 	struct mem_cgroup *memcg;
6982 	unsigned int nr_pages;
6983 	unsigned long flags;
6984 
6985 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
6986 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
6987 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
6988 	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
6989 		       newpage);
6990 
6991 	if (mem_cgroup_disabled())
6992 		return;
6993 
6994 	/* Page cache replacement: new page already charged? */
6995 	if (newpage->mem_cgroup)
6996 		return;
6997 
6998 	/* Swapcache readahead pages can get replaced before being charged */
6999 	memcg = oldpage->mem_cgroup;
7000 	if (!memcg)
7001 		return;
7002 
7003 	/* Force-charge the new page. The old one will be freed soon */
7004 	nr_pages = thp_nr_pages(newpage);
7005 
7006 	page_counter_charge(&memcg->memory, nr_pages);
7007 	if (do_memsw_account())
7008 		page_counter_charge(&memcg->memsw, nr_pages);
7009 
7010 	css_get(&memcg->css);
7011 	commit_charge(newpage, memcg);
7012 
7013 	local_irq_save(flags);
7014 	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
7015 	memcg_check_events(memcg, newpage);
7016 	local_irq_restore(flags);
7017 }
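
/*
 * Illustrative caller shape (hypothetical; mirrors the page migration
 * and page cache replacement paths): both pages are locked and the new
 * page's mapping is set up before the charge is transferred:
 *
 *	newpage->mapping = oldpage->mapping;
 *	newpage->index = oldpage->index;
 *	mem_cgroup_migrate(oldpage, newpage);
 */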
7018 
7019 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7020 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7021 
7022 void mem_cgroup_sk_alloc(struct sock *sk)
7023 {
7024 	struct mem_cgroup *memcg;
7025 
7026 	if (!mem_cgroup_sockets_enabled)
7027 		return;
7028 
7029 	/* Do not associate the sock with an unrelated interrupted task's memcg. */
7030 	if (in_interrupt())
7031 		return;
7032 
7033 	rcu_read_lock();
7034 	memcg = mem_cgroup_from_task(current);
7035 	if (memcg == root_mem_cgroup)
7036 		goto out;
7037 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7038 		goto out;
7039 	if (css_tryget(&memcg->css))
7040 		sk->sk_memcg = memcg;
7041 out:
7042 	rcu_read_unlock();
7043 }
7044 
7045 void mem_cgroup_sk_free(struct sock *sk)
7046 {
7047 	if (sk->sk_memcg)
7048 		css_put(&sk->sk_memcg->css);
7049 }
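
/*
 * The association made in mem_cgroup_sk_alloc() holds a css reference
 * for the lifetime of the socket; mem_cgroup_sk_free() drops it on
 * destruction, so sk->sk_memcg can be dereferenced by the charge and
 * uncharge helpers below without additional locking.
 */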
7050 
7051 /**
7052  * mem_cgroup_charge_skmem - charge socket memory
7053  * @memcg: memcg to charge
7054  * @nr_pages: number of pages to charge
7055  *
7056  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7057  * @memcg's configured limit, %false if the charge had to be forced.
7058  */
7059 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7060 {
7061 	gfp_t gfp_mask = GFP_KERNEL;
7062 
7063 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7064 		struct page_counter *fail;
7065 
7066 		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7067 			memcg->tcpmem_pressure = 0;
7068 			return true;
7069 		}
7070 		page_counter_charge(&memcg->tcpmem, nr_pages);
7071 		memcg->tcpmem_pressure = 1;
7072 		return false;
7073 	}
7074 
7075 	/* Don't block in the packet receive path */
7076 	if (in_softirq())
7077 		gfp_mask = GFP_NOWAIT;
7078 
7079 	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7080 
7081 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
7082 		return true;
7083 
7084 	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
7085 	return false;
7086 }
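
/*
 * Sketch of how the return value is consumed (hypothetical; modelled on
 * the __sk_mem_raise_allocated() path in net/core/sock.c): a false
 * return, i.e. a forced charge, makes the caller suppress the
 * allocation and enter memory pressure:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
 *		goto suppress_allocation;
 */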
7087 
7088 /**
7089  * mem_cgroup_uncharge_skmem - uncharge socket memory
7090  * @memcg: memcg to uncharge
7091  * @nr_pages: number of pages to uncharge
7092  */
7093 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7094 {
7095 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7096 		page_counter_uncharge(&memcg->tcpmem, nr_pages);
7097 		return;
7098 	}
7099 
7100 	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7101 
7102 	refill_stock(memcg, nr_pages);
7103 }
7104 
7105 static int __init cgroup_memory(char *s)
7106 {
7107 	char *token;
7108 
7109 	while ((token = strsep(&s, ",")) != NULL) {
7110 		if (!*token)
7111 			continue;
7112 		if (!strcmp(token, "nosocket"))
7113 			cgroup_memory_nosocket = true;
7114 		if (!strcmp(token, "nokmem"))
7115 			cgroup_memory_nokmem = true;
7116 	}
7117 	return 0;
7118 }
7119 __setup("cgroup.memory=", cgroup_memory);
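
/*
 * Example (kernel command line): "cgroup.memory=nosocket,nokmem"
 * disables socket memory accounting and kernel memory accounting;
 * unrecognized tokens are silently ignored by the parser above.
 */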
7120 
7121 /*
7122  * subsys_initcall() for memory controller.
7123  *
7124  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7125  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7126  * basically everything that doesn't depend on a specific mem_cgroup structure
7127  * should be initialized from here.
7128  */
7129 static int __init mem_cgroup_init(void)
7130 {
7131 	int cpu, node;
7132 
7133 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7134 				  memcg_hotplug_cpu_dead);
7135 
7136 	for_each_possible_cpu(cpu)
7137 		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7138 			  drain_local_stock);
7139 
7140 	for_each_node(node) {
7141 		struct mem_cgroup_tree_per_node *rtpn;
7142 
7143 		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7144 				    node_online(node) ? node : NUMA_NO_NODE);
7145 
7146 		rtpn->rb_root = RB_ROOT;
7147 		rtpn->rb_rightmost = NULL;
7148 		spin_lock_init(&rtpn->lock);
7149 		soft_limit_tree.rb_tree_per_node[node] = rtpn;
7150 	}
7151 
7152 	return 0;
7153 }
7154 subsys_initcall(mem_cgroup_init);
7155 
7156 #ifdef CONFIG_MEMCG_SWAP
7157 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7158 {
7159 	while (!refcount_inc_not_zero(&memcg->id.ref)) {
7160 		/*
7161 		 * The root cgroup cannot be destroyed, so its refcount must
7162 		 * always be >= 1.
7163 		 */
7164 		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7165 			VM_BUG_ON(1);
7166 			break;
7167 		}
7168 		memcg = parent_mem_cgroup(memcg);
7169 		if (!memcg)
7170 			memcg = root_mem_cgroup;
7171 	}
7172 	return memcg;
7173 }
7174 
7175 /**
7176  * mem_cgroup_swapout - transfer a memsw charge to swap
7177  * @page: page whose memsw charge to transfer
7178  * @entry: swap entry to move the charge to
7179  *
7180  * Transfer the memsw charge of @page to @entry.
7181  */
7182 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
7183 {
7184 	struct mem_cgroup *memcg, *swap_memcg;
7185 	unsigned int nr_entries;
7186 	unsigned short oldid;
7187 
7188 	VM_BUG_ON_PAGE(PageLRU(page), page);
7189 	VM_BUG_ON_PAGE(page_count(page), page);
7190 
7191 	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7192 		return;
7193 
7194 	memcg = page->mem_cgroup;
7195 
7196 	/* Readahead page, never charged */
7197 	if (!memcg)
7198 		return;
7199 
7200 	/*
7201 	 * In case the memcg owning these pages has been offlined and doesn't
7202 	 * have an ID allocated to it anymore, charge the closest online
7203 	 * ancestor for the swap instead and transfer the memory+swap charge.
7204 	 */
7205 	swap_memcg = mem_cgroup_id_get_online(memcg);
7206 	nr_entries = thp_nr_pages(page);
7207 	/* Get references for the tail pages, too */
7208 	if (nr_entries > 1)
7209 		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7210 	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7211 				   nr_entries);
7212 	VM_BUG_ON_PAGE(oldid, page);
7213 	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7214 
7215 	page->mem_cgroup = NULL;
7216 
7217 	if (!mem_cgroup_is_root(memcg))
7218 		page_counter_uncharge(&memcg->memory, nr_entries);
7219 
7220 	if (!cgroup_memory_noswap && memcg != swap_memcg) {
7221 		if (!mem_cgroup_is_root(swap_memcg))
7222 			page_counter_charge(&swap_memcg->memsw, nr_entries);
7223 		page_counter_uncharge(&memcg->memsw, nr_entries);
7224 	}
7225 
7226 	/*
7227 	 * Interrupts should be disabled here because the caller holds the
7228 	 * i_pages lock which is taken with interrupts-off. It is
7229 	 * important here to have the interrupts disabled because it is the
7230 	 * only synchronisation we have for updating the per-CPU variables.
7231 	 */
7232 	VM_BUG_ON(!irqs_disabled());
7233 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
7234 	memcg_check_events(memcg, page);
7235 
7236 	css_put(&memcg->css);
7237 }
7238 
7239 /**
7240  * mem_cgroup_try_charge_swap - try charging swap space for a page
7241  * @page: page being added to swap
7242  * @entry: swap entry to charge
7243  *
7244  * Try to charge @page's memcg for the swap space at @entry.
7245  *
7246  * Returns 0 on success, -ENOMEM on failure.
7247  */
7248 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7249 {
7250 	unsigned int nr_pages = thp_nr_pages(page);
7251 	struct page_counter *counter;
7252 	struct mem_cgroup *memcg;
7253 	unsigned short oldid;
7254 
7255 	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7256 		return 0;
7257 
7258 	memcg = page->mem_cgroup;
7259 
7260 	/* Readahead page, never charged */
7261 	if (!memcg)
7262 		return 0;
7263 
7264 	if (!entry.val) {
7265 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7266 		return 0;
7267 	}
7268 
7269 	memcg = mem_cgroup_id_get_online(memcg);
7270 
7271 	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7272 	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7273 		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7274 		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7275 		mem_cgroup_id_put(memcg);
7276 		return -ENOMEM;
7277 	}
7278 
7279 	/* Get references for the tail pages, too */
7280 	if (nr_pages > 1)
7281 		mem_cgroup_id_get_many(memcg, nr_pages - 1);
7282 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7283 	VM_BUG_ON_PAGE(oldid, page);
7284 	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7285 
7286 	return 0;
7287 }
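
/*
 * Sketch of the calling convention (hypothetical; modelled on the
 * get_swap_page() path): the freshly allocated swap slot is given back
 * if the memcg swap charge fails:
 *
 *	if (mem_cgroup_try_charge_swap(page, entry)) {
 *		put_swap_page(page, entry);
 *		entry.val = 0;
 *	}
 */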
7288 
7289 /**
7290  * mem_cgroup_uncharge_swap - uncharge swap space
7291  * @entry: swap entry to uncharge
7292  * @nr_pages: the amount of swap space to uncharge
7293  */
7294 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7295 {
7296 	struct mem_cgroup *memcg;
7297 	unsigned short id;
7298 
7299 	id = swap_cgroup_record(entry, 0, nr_pages);
7300 	rcu_read_lock();
7301 	memcg = mem_cgroup_from_id(id);
7302 	if (memcg) {
7303 		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7304 			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7305 				page_counter_uncharge(&memcg->swap, nr_pages);
7306 			else
7307 				page_counter_uncharge(&memcg->memsw, nr_pages);
7308 		}
7309 		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7310 		mem_cgroup_id_put_many(memcg, nr_pages);
7311 	}
7312 	rcu_read_unlock();
7313 }
7314 
7315 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7316 {
7317 	long nr_swap_pages = get_nr_swap_pages();
7318 
7319 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7320 		return nr_swap_pages;
7321 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7322 		nr_swap_pages = min_t(long, nr_swap_pages,
7323 				      READ_ONCE(memcg->swap.max) -
7324 				      page_counter_read(&memcg->swap));
7325 	return nr_swap_pages;
7326 }
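
/*
 * Worked example (hypothetical numbers): with 8G of free swap globally,
 * an ancestor whose swap.max is 1G and whose swap usage is 768M caps
 * the result at 256M, since each level contributes
 * min(so_far, max - usage).
 */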
7327 
7328 bool mem_cgroup_swap_full(struct page *page)
7329 {
7330 	struct mem_cgroup *memcg;
7331 
7332 	VM_BUG_ON_PAGE(!PageLocked(page), page);
7333 
7334 	if (vm_swap_full())
7335 		return true;
7336 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7337 		return false;
7338 
7339 	memcg = page->mem_cgroup;
7340 	if (!memcg)
7341 		return false;
7342 
7343 	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7344 		unsigned long usage = page_counter_read(&memcg->swap);
7345 
7346 		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7347 		    usage * 2 >= READ_ONCE(memcg->swap.max))
7348 			return true;
7349 	}
7350 
7351 	return false;
7352 }
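
/*
 * Example of the 50% heuristic above (hypothetical numbers): with
 * memory.swap.max = 1G, pages of this cgroup are treated as "swap full"
 * once swap usage reaches 512M (usage * 2 >= max), mirroring the global
 * vm_swap_full() threshold.
 */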
7353 
7354 static int __init setup_swap_account(char *s)
7355 {
7356 	if (!strcmp(s, "1"))
7357 		cgroup_memory_noswap = 0;
7358 	else if (!strcmp(s, "0"))
7359 		cgroup_memory_noswap = 1;
7360 	return 1;
7361 }
7362 __setup("swapaccount=", setup_swap_account);
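
/*
 * Example (kernel command line): "swapaccount=0" disables swap
 * accounting by setting cgroup_memory_noswap, "swapaccount=1" enables
 * it; any other value leaves the current setting unchanged.
 */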
7363 
7364 static u64 swap_current_read(struct cgroup_subsys_state *css,
7365 			     struct cftype *cft)
7366 {
7367 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7368 
7369 	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7370 }
7371 
7372 static int swap_high_show(struct seq_file *m, void *v)
7373 {
7374 	return seq_puts_memcg_tunable(m,
7375 		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7376 }
7377 
7378 static ssize_t swap_high_write(struct kernfs_open_file *of,
7379 			       char *buf, size_t nbytes, loff_t off)
7380 {
7381 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7382 	unsigned long high;
7383 	int err;
7384 
7385 	buf = strstrip(buf);
7386 	err = page_counter_memparse(buf, "max", &high);
7387 	if (err)
7388 		return err;
7389 
7390 	page_counter_set_high(&memcg->swap, high);
7391 
7392 	return nbytes;
7393 }
7394 
7395 static int swap_max_show(struct seq_file *m, void *v)
7396 {
7397 	return seq_puts_memcg_tunable(m,
7398 		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7399 }
7400 
7401 static ssize_t swap_max_write(struct kernfs_open_file *of,
7402 			      char *buf, size_t nbytes, loff_t off)
7403 {
7404 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7405 	unsigned long max;
7406 	int err;
7407 
7408 	buf = strstrip(buf);
7409 	err = page_counter_memparse(buf, "max", &max);
7410 	if (err)
7411 		return err;
7412 
7413 	xchg(&memcg->swap.max, max);
7414 
7415 	return nbytes;
7416 }
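
/*
 * Userspace interface example (cgroup2; the mount point is
 * illustrative):
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.swap.max
 *	echo max  > /sys/fs/cgroup/<group>/memory.swap.max
 *
 * page_counter_memparse() accepts human-readable sizes and the literal
 * "max" for "no limit", as parsed above.
 */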
7417 
7418 static int swap_events_show(struct seq_file *m, void *v)
7419 {
7420 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7421 
7422 	seq_printf(m, "high %lu\n",
7423 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7424 	seq_printf(m, "max %lu\n",
7425 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7426 	seq_printf(m, "fail %lu\n",
7427 		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7428 
7429 	return 0;
7430 }
7431 
7432 static struct cftype swap_files[] = {
7433 	{
7434 		.name = "swap.current",
7435 		.flags = CFTYPE_NOT_ON_ROOT,
7436 		.read_u64 = swap_current_read,
7437 	},
7438 	{
7439 		.name = "swap.high",
7440 		.flags = CFTYPE_NOT_ON_ROOT,
7441 		.seq_show = swap_high_show,
7442 		.write = swap_high_write,
7443 	},
7444 	{
7445 		.name = "swap.max",
7446 		.flags = CFTYPE_NOT_ON_ROOT,
7447 		.seq_show = swap_max_show,
7448 		.write = swap_max_write,
7449 	},
7450 	{
7451 		.name = "swap.events",
7452 		.flags = CFTYPE_NOT_ON_ROOT,
7453 		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
7454 		.seq_show = swap_events_show,
7455 	},
7456 	{ }	/* terminate */
7457 };
7458 
7459 static struct cftype memsw_files[] = {
7460 	{
7461 		.name = "memsw.usage_in_bytes",
7462 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7463 		.read_u64 = mem_cgroup_read_u64,
7464 	},
7465 	{
7466 		.name = "memsw.max_usage_in_bytes",
7467 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7468 		.write = mem_cgroup_reset,
7469 		.read_u64 = mem_cgroup_read_u64,
7470 	},
7471 	{
7472 		.name = "memsw.limit_in_bytes",
7473 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7474 		.write = mem_cgroup_write,
7475 		.read_u64 = mem_cgroup_read_u64,
7476 	},
7477 	{
7478 		.name = "memsw.failcnt",
7479 		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7480 		.write = mem_cgroup_reset,
7481 		.read_u64 = mem_cgroup_read_u64,
7482 	},
7483 	{ },	/* terminate */
7484 };
7485 
7486 /*
7487  * If mem_cgroup_swap_init() were implemented as a subsys_initcall()
7488  * instead of a core_initcall(), cgroup_memory_noswap could still be false
7489  * even when memcg is disabled via the "cgroup_disable=memory" boot
7490  * parameter. This may result in a premature oops inside the
7491  * mem_cgroup_get_nr_swap_pages() function in corner cases.
7492  */
7493 static int __init mem_cgroup_swap_init(void)
7494 {
7495 	/* No memory control -> no swap control */
7496 	if (mem_cgroup_disabled())
7497 		cgroup_memory_noswap = true;
7498 
7499 	if (cgroup_memory_noswap)
7500 		return 0;
7501 
7502 	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7503 	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7504 
7505 	return 0;
7506 }
7507 core_initcall(mem_cgroup_swap_init);
7508 
7509 #endif /* CONFIG_MEMCG_SWAP */
7510