Lines matching refs:memcg (identifier cross-references in mm/memcontrol.c: source line number, matching line, enclosing function, and role — member/argument/local)

144 	struct mem_cgroup *memcg;  member
158 int (*register_event)(struct mem_cgroup *memcg,
165 void (*unregister_event)(struct mem_cgroup *memcg,
177 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
178 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
249 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
251 if (!memcg) in memcg_to_vmpressure()
252 memcg = root_mem_cgroup; in memcg_to_vmpressure()
253 return &memcg->vmpressure; in memcg_to_vmpressure()
333 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, in memcg_reparent_objcgs() argument
338 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
343 list_add(&objcg->list, &memcg->objcg_list); in memcg_reparent_objcgs()
345 list_for_each_entry(iter, &memcg->objcg_list, list) in memcg_reparent_objcgs()
346 WRITE_ONCE(iter->memcg, parent); in memcg_reparent_objcgs()
348 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
420 struct mem_cgroup *memcg; in mem_cgroup_css_from_page() local
422 memcg = page_memcg(page); in mem_cgroup_css_from_page()
424 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) in mem_cgroup_css_from_page()
425 memcg = root_mem_cgroup; in mem_cgroup_css_from_page()
427 return &memcg->css; in mem_cgroup_css_from_page()
445 struct mem_cgroup *memcg; in page_cgroup_ino() local
449 memcg = page_memcg_check(page); in page_cgroup_ino()
451 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
452 memcg = parent_mem_cgroup(memcg); in page_cgroup_ino()
453 if (memcg) in page_cgroup_ino()
454 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
460 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_page_nodeinfo() argument
464 return memcg->nodeinfo[nid]; in mem_cgroup_page_nodeinfo()
539 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) in soft_limit_excess() argument
541 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
542 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
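
The soft_limit_excess() hits above fit the standard pattern: read current usage, read the soft limit, and report how far usage sits above it. A reconstructed sketch — the two reads are verbatim from the fragments; the comparison and return are my reconstruction of the surrounding source and may differ in detail:

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
        unsigned long nr_pages = page_counter_read(&memcg->memory);
        unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
        unsigned long excess = 0;

        /* Excess is how far current usage sits above the soft limit. */
        if (nr_pages > soft_limit)
                excess = nr_pages - soft_limit;

        return excess;
}
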
551 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_update_tree() argument
564 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_update_tree()
565 mz = mem_cgroup_page_nodeinfo(memcg, page); in mem_cgroup_update_tree()
566 excess = soft_limit_excess(memcg); in mem_cgroup_update_tree()
588 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) in mem_cgroup_remove_from_trees() argument
595 mz = memcg->nodeinfo[nid]; in mem_cgroup_remove_from_trees()
620 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
621 !css_tryget(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
644 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) in __mod_memcg_state() argument
649 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); in __mod_memcg_state()
650 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); in __mod_memcg_state()
654 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) in memcg_page_state_local() argument
660 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu); in memcg_page_state_local()
672 struct mem_cgroup *memcg; in __mod_memcg_lruvec_state() local
675 memcg = pn->memcg; in __mod_memcg_lruvec_state()
678 __mod_memcg_state(memcg, idx, val); in __mod_memcg_lruvec_state()
709 struct mem_cgroup *memcg; in __mod_lruvec_page_state() local
714 memcg = page_memcg(head); in __mod_lruvec_page_state()
716 if (!memcg) { in __mod_lruvec_page_state()
722 lruvec = mem_cgroup_lruvec(memcg, pgdat); in __mod_lruvec_page_state()
731 struct mem_cgroup *memcg; in __mod_lruvec_kmem_state() local
735 memcg = mem_cgroup_from_obj(p); in __mod_lruvec_kmem_state()
743 if (!memcg) { in __mod_lruvec_kmem_state()
746 lruvec = mem_cgroup_lruvec(memcg, pgdat); in __mod_lruvec_kmem_state()
760 struct mem_cgroup *memcg; in mod_objcg_mlstate() local
764 memcg = obj_cgroup_memcg(objcg); in mod_objcg_mlstate()
765 lruvec = mem_cgroup_lruvec(memcg, pgdat); in mod_objcg_mlstate()
776 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, in __count_memcg_events() argument
782 __this_cpu_add(memcg->vmstats_percpu->events[idx], count); in __count_memcg_events()
783 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); in __count_memcg_events()
786 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) in memcg_events() argument
788 return READ_ONCE(memcg->vmstats.events[event]); in memcg_events()
791 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) in memcg_events_local() argument
797 x += per_cpu(memcg->vmstats_percpu->events[event], cpu); in memcg_events_local()
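
The pair of hits above contrasts the two stat-reading paths: memcg_events() returns the hierarchical counter that rstat flushing keeps up to date, while memcg_events_local() folds the raw per-CPU deltas by hand. A sketch of how the fragments likely assemble — the for_each_possible_cpu() loop is my reconstruction:

/* Hierarchical count, pre-aggregated by cgroup_rstat flushing. */
static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
        return READ_ONCE(memcg->vmstats.events[event]);
}

/* Local (non-hierarchical) count: sum the per-CPU deltas directly. */
static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
        long x = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
        return x;
}
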
801 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, in mem_cgroup_charge_statistics() argument
807 __count_memcg_events(memcg, PGPGIN, 1); in mem_cgroup_charge_statistics()
809 __count_memcg_events(memcg, PGPGOUT, 1); in mem_cgroup_charge_statistics()
813 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
816 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, in mem_cgroup_event_ratelimit() argument
821 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); in mem_cgroup_event_ratelimit()
822 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); in mem_cgroup_event_ratelimit()
835 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); in mem_cgroup_event_ratelimit()
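
The three per-CPU accesses above belong to the event ratelimiter that throttles threshold and soft-limit work to every few page events. The val/next comparison is a time_after()-style wraparound-safe test; the switch body below is my reconstruction of how the next target is likely advanced (THRESHOLDS_EVENTS_TARGET and SOFTLIMIT_EVENTS_TARGET are the spacing constants defined earlier in the file):

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
                                       enum mem_cgroup_events_target target)
{
        unsigned long val, next;

        val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
        next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
        /* from time_after(): wraparound-safe "val has passed next" test */
        if ((long)(next - val) < 0) {
                switch (target) {
                case MEM_CGROUP_TARGET_THRESH:
                        next = val + THRESHOLDS_EVENTS_TARGET;
                        break;
                case MEM_CGROUP_TARGET_SOFTLIMIT:
                        next = val + SOFTLIMIT_EVENTS_TARGET;
                        break;
                default:
                        break;
                }
                __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
                return true;
        }
        return false;
}
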
845 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) in memcg_check_events() argument
848 if (unlikely(mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
852 do_softlimit = mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
854 mem_cgroup_threshold(memcg); in memcg_check_events()
856 mem_cgroup_update_tree(memcg, page); in memcg_check_events()
895 struct mem_cgroup *memcg; in get_mem_cgroup_from_mm() local
910 memcg = active_memcg(); in get_mem_cgroup_from_mm()
911 if (unlikely(memcg)) { in get_mem_cgroup_from_mm()
913 css_get(&memcg->css); in get_mem_cgroup_from_mm()
914 return memcg; in get_mem_cgroup_from_mm()
923 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
924 if (unlikely(!memcg)) in get_mem_cgroup_from_mm()
925 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
926 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
928 return memcg; in get_mem_cgroup_from_mm()
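
The get_mem_cgroup_from_mm() hits show the lookup order: a remote ("active") memcg set on the task wins; otherwise the mm's owner task is resolved under RCU, retrying css_tryget() until a reference on a live css is obtained. A sketch of the likely shape — the mem_cgroup_disabled() early-out and the !mm fallbacks are my reconstruction:

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return NULL;

        if (unlikely(!mm)) {
                /* A remote charging context overrides the mm lookup. */
                memcg = active_memcg();
                if (unlikely(memcg)) {
                        /* remote memcg must already hold a ref */
                        css_get(&memcg->css);
                        return memcg;
                }
                mm = current->mm;
                if (unlikely(!mm))
                        return root_mem_cgroup;
        }

        rcu_read_lock();
        do {
                memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
                if (unlikely(!memcg))
                        memcg = root_mem_cgroup;
        } while (!css_tryget(&memcg->css));     /* retry if css is dying */
        rcu_read_unlock();

        return memcg;
}
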
968 struct mem_cgroup *memcg = NULL; in mem_cgroup_iter() local
1029 memcg = mem_cgroup_from_css(css); in mem_cgroup_iter()
1037 memcg = NULL; in mem_cgroup_iter()
1046 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
1051 if (!memcg) in mem_cgroup_iter()
1062 return memcg; in mem_cgroup_iter()
1095 struct mem_cgroup *memcg = dead_memcg; in invalidate_reclaim_iterators() local
1099 __invalidate_reclaim_iterators(memcg, dead_memcg); in invalidate_reclaim_iterators()
1100 last = memcg; in invalidate_reclaim_iterators()
1101 } while ((memcg = parent_mem_cgroup(memcg))); in invalidate_reclaim_iterators()
1127 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, in mem_cgroup_scan_tasks() argument
1133 BUG_ON(memcg == root_mem_cgroup); in mem_cgroup_scan_tasks()
1135 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_scan_tasks()
1144 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_scan_tasks()
1154 struct mem_cgroup *memcg; in lruvec_memcg_debug() local
1159 memcg = page_memcg(page); in lruvec_memcg_debug()
1161 if (!memcg) in lruvec_memcg_debug()
1164 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page); in lruvec_memcg_debug()
1260 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) in mem_cgroup_margin() argument
1266 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1267 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1272 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1273 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
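
mem_cgroup_margin() reads usage and limit for the memory counter, and again for memsw when combined memory+swap accounting is active; the margin is how many pages can still be charged. A sketch assembling the four reads above — the min()/clamping logic is my reconstruction:

static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
        unsigned long margin = 0;
        unsigned long count;
        unsigned long limit;

        count = page_counter_read(&memcg->memory);
        limit = READ_ONCE(memcg->memory.max);
        if (count < limit)
                margin = limit - count;

        if (do_memsw_account()) {
                count = page_counter_read(&memcg->memsw);
                limit = READ_ONCE(memcg->memsw.max);
                if (count < limit)
                        margin = min(margin, limit - count);
                else
                        margin = 0;
        }

        return margin;
}
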
1290 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) in mem_cgroup_under_move() argument
1305 ret = mem_cgroup_is_descendant(from, memcg) || in mem_cgroup_under_move()
1306 mem_cgroup_is_descendant(to, memcg); in mem_cgroup_under_move()
1312 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) in mem_cgroup_wait_acct_move() argument
1315 if (mem_cgroup_under_move(memcg)) { in mem_cgroup_wait_acct_move()
1392 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, in memcg_page_state_output() argument
1395 return memcg_page_state(memcg, item) * memcg_page_state_unit(item); in memcg_page_state_output()
1398 static char *memory_stat_format(struct mem_cgroup *memcg) in memory_stat_format() argument
1417 cgroup_rstat_flush(memcg->css.cgroup); in memory_stat_format()
1422 size = memcg_page_state_output(memcg, memory_stats[i].idx); in memory_stat_format()
1426 size += memcg_page_state_output(memcg, in memory_stat_format()
1435 memcg_events(memcg, PGFAULT)); in memory_stat_format()
1437 memcg_events(memcg, PGMAJFAULT)); in memory_stat_format()
1439 memcg_events(memcg, PGREFILL)); in memory_stat_format()
1441 memcg_events(memcg, PGSCAN_KSWAPD) + in memory_stat_format()
1442 memcg_events(memcg, PGSCAN_DIRECT)); in memory_stat_format()
1444 memcg_events(memcg, PGSTEAL_KSWAPD) + in memory_stat_format()
1445 memcg_events(memcg, PGSTEAL_DIRECT)); in memory_stat_format()
1447 memcg_events(memcg, PGACTIVATE)); in memory_stat_format()
1449 memcg_events(memcg, PGDEACTIVATE)); in memory_stat_format()
1451 memcg_events(memcg, PGLAZYFREE)); in memory_stat_format()
1453 memcg_events(memcg, PGLAZYFREED)); in memory_stat_format()
1457 memcg_events(memcg, THP_FAULT_ALLOC)); in memory_stat_format()
1459 memcg_events(memcg, THP_COLLAPSE_ALLOC)); in memory_stat_format()
1478 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) in mem_cgroup_print_oom_context() argument
1482 if (memcg) { in mem_cgroup_print_oom_context()
1484 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1499 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) in mem_cgroup_print_oom_meminfo() argument
1504 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1505 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1508 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1509 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1512 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1513 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1515 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1516 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1520 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1522 buf = memory_stat_format(memcg); in mem_cgroup_print_oom_meminfo()
1532 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) in mem_cgroup_get_max() argument
1534 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1537 if (mem_cgroup_swappiness(memcg)) in mem_cgroup_get_max()
1538 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
1541 if (mem_cgroup_swappiness(memcg)) { in mem_cgroup_get_max()
1543 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1551 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) in mem_cgroup_size() argument
1553 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1556 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument
1562 .memcg = memcg, in mem_cgroup_out_of_memory()
1571 if (mem_cgroup_margin(memcg) >= (1 << order)) in mem_cgroup_out_of_memory()
1647 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) in mem_cgroup_oom_trylock() argument
1653 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1660 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1671 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1673 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1686 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) in mem_cgroup_oom_unlock() argument
1692 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_unlock()
1697 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_mark_under_oom() argument
1702 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_mark_under_oom()
1707 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_unmark_under_oom() argument
1716 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_unmark_under_oom()
1725 struct mem_cgroup *memcg; member
1737 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1745 static void memcg_oom_recover(struct mem_cgroup *memcg) in memcg_oom_recover() argument
1755 if (memcg && memcg->under_oom) in memcg_oom_recover()
1756 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); in memcg_oom_recover()
1766 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) in mem_cgroup_oom() argument
1774 memcg_memory_event(memcg, MEMCG_OOM); in mem_cgroup_oom()
1794 if (memcg->oom_kill_disable) { in mem_cgroup_oom()
1797 css_get(&memcg->css); in mem_cgroup_oom()
1798 current->memcg_in_oom = memcg; in mem_cgroup_oom()
1805 mem_cgroup_mark_under_oom(memcg); in mem_cgroup_oom()
1807 locked = mem_cgroup_oom_trylock(memcg); in mem_cgroup_oom()
1810 mem_cgroup_oom_notify(memcg); in mem_cgroup_oom()
1812 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom()
1813 if (mem_cgroup_out_of_memory(memcg, mask, order)) in mem_cgroup_oom()
1819 mem_cgroup_oom_unlock(memcg); in mem_cgroup_oom()
1843 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize() local
1848 if (!memcg) in mem_cgroup_oom_synchronize()
1854 owait.memcg = memcg; in mem_cgroup_oom_synchronize()
1861 mem_cgroup_mark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1863 locked = mem_cgroup_oom_trylock(memcg); in mem_cgroup_oom_synchronize()
1866 mem_cgroup_oom_notify(memcg); in mem_cgroup_oom_synchronize()
1868 if (locked && !memcg->oom_kill_disable) { in mem_cgroup_oom_synchronize()
1869 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1871 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, in mem_cgroup_oom_synchronize()
1875 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1880 mem_cgroup_oom_unlock(memcg); in mem_cgroup_oom_synchronize()
1886 memcg_oom_recover(memcg); in mem_cgroup_oom_synchronize()
1890 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
1908 struct mem_cgroup *memcg; in mem_cgroup_get_oom_group() local
1918 memcg = mem_cgroup_from_task(victim); in mem_cgroup_get_oom_group()
1919 if (memcg == root_mem_cgroup) in mem_cgroup_get_oom_group()
1927 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) in mem_cgroup_get_oom_group()
1935 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_get_oom_group()
1936 if (memcg->oom_group) in mem_cgroup_get_oom_group()
1937 oom_group = memcg; in mem_cgroup_get_oom_group()
1939 if (memcg == oom_domain) in mem_cgroup_get_oom_group()
1951 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) in mem_cgroup_print_oom_group() argument
1954 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
1971 struct mem_cgroup *memcg; in lock_page_memcg() local
1984 memcg = page_memcg(head); in lock_page_memcg()
1985 if (unlikely(!memcg)) in lock_page_memcg()
1990 might_lock(&memcg->move_lock); in lock_page_memcg()
1994 if (atomic_read(&memcg->moving_account) <= 0) in lock_page_memcg()
1997 spin_lock_irqsave(&memcg->move_lock, flags); in lock_page_memcg()
1998 if (memcg != page_memcg(head)) { in lock_page_memcg()
1999 spin_unlock_irqrestore(&memcg->move_lock, flags); in lock_page_memcg()
2009 memcg->move_lock_task = current; in lock_page_memcg()
2010 memcg->move_lock_flags = flags; in lock_page_memcg()
2014 static void __unlock_page_memcg(struct mem_cgroup *memcg) in __unlock_page_memcg() argument
2016 if (memcg && memcg->move_lock_task == current) { in __unlock_page_memcg()
2017 unsigned long flags = memcg->move_lock_flags; in __unlock_page_memcg()
2019 memcg->move_lock_task = NULL; in __unlock_page_memcg()
2020 memcg->move_lock_flags = 0; in __unlock_page_memcg()
2022 spin_unlock_irqrestore(&memcg->move_lock, flags); in __unlock_page_memcg()
2127 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in consume_stock() argument
2139 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { in consume_stock()
2196 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in refill_stock() argument
2204 if (stock->cached != memcg) { /* reset if necessary */ in refill_stock()
2206 css_get(&memcg->css); in refill_stock()
2207 stock->cached = memcg; in refill_stock()
2237 struct mem_cgroup *memcg; in drain_all_stock() local
2241 memcg = stock->cached; in drain_all_stock()
2242 if (memcg && stock->nr_pages && in drain_all_stock()
2243 mem_cgroup_is_descendant(memcg, root_memcg)) in drain_all_stock()
2271 static unsigned long reclaim_high(struct mem_cgroup *memcg, in reclaim_high() argument
2280 if (page_counter_read(&memcg->memory) <= in reclaim_high()
2281 READ_ONCE(memcg->memory.high)) in reclaim_high()
2284 memcg_memory_event(memcg, MEMCG_HIGH); in reclaim_high()
2287 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, in reclaim_high()
2290 } while ((memcg = parent_mem_cgroup(memcg)) && in reclaim_high()
2291 !mem_cgroup_is_root(memcg)); in reclaim_high()
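
reclaim_high() walks from the over-high memcg up to (but not including) the root, recording a MEMCG_HIGH event and reclaiming nr_pages at each ancestor whose usage exceeds memory.high. A sketch under that reading — the psi_memstall_enter()/leave() bracketing is my reconstruction and may not match the file exactly:

static unsigned long reclaim_high(struct mem_cgroup *memcg,
                                  unsigned int nr_pages,
                                  gfp_t gfp_mask)
{
        unsigned long nr_reclaimed = 0;

        do {
                unsigned long pflags;

                /* Skip levels already back under their high mark. */
                if (page_counter_read(&memcg->memory) <=
                    READ_ONCE(memcg->memory.high))
                        continue;

                memcg_memory_event(memcg, MEMCG_HIGH);

                psi_memstall_enter(&pflags);
                nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
                                                             gfp_mask, true);
                psi_memstall_leave(&pflags);
        } while ((memcg = parent_mem_cgroup(memcg)) &&
                 !mem_cgroup_is_root(memcg));

        return nr_reclaimed;
}
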
2298 struct mem_cgroup *memcg; in high_work_func() local
2300 memcg = container_of(work, struct mem_cgroup, high_work); in high_work_func()
2301 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); in high_work_func()
2375 static u64 mem_find_max_overage(struct mem_cgroup *memcg) in mem_find_max_overage() argument
2380 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
2381 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2383 } while ((memcg = parent_mem_cgroup(memcg)) && in mem_find_max_overage()
2384 !mem_cgroup_is_root(memcg)); in mem_find_max_overage()
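
mem_find_max_overage() computes, for each ancestor, how far memory usage overshoots memory.high as a fixed-point ratio and keeps the worst offender; the calculate_overage() helper below is my reconstruction of what the fragment calls (MEMCG_DELAY_PRECISION_SHIFT is the fixed-point shift defined earlier in the file):

static u64 calculate_overage(unsigned long usage, unsigned long high)
{
        u64 overage;

        if (usage <= high)
                return 0;

        /* Treat high == 0 as one page to avoid dividing by zero. */
        high = max(high, 1UL);

        overage = usage - high;
        overage <<= MEMCG_DELAY_PRECISION_SHIFT;
        return div64_u64(overage, high);
}

static u64 mem_find_max_overage(struct mem_cgroup *memcg)
{
        u64 overage, max_overage = 0;

        do {
                overage = calculate_overage(page_counter_read(&memcg->memory),
                                            READ_ONCE(memcg->memory.high));
                max_overage = max(overage, max_overage);
        } while ((memcg = parent_mem_cgroup(memcg)) &&
                 !mem_cgroup_is_root(memcg));

        return max_overage;
}
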
2389 static u64 swap_find_max_overage(struct mem_cgroup *memcg) in swap_find_max_overage() argument
2394 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2395 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2397 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); in swap_find_max_overage()
2399 } while ((memcg = parent_mem_cgroup(memcg)) && in swap_find_max_overage()
2400 !mem_cgroup_is_root(memcg)); in swap_find_max_overage()
2409 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, in calculate_high_delay() argument
2452 struct mem_cgroup *memcg; in mem_cgroup_handle_over_high() local
2458 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2471 nr_reclaimed = reclaim_high(memcg, in mem_cgroup_handle_over_high()
2479 penalty_jiffies = calculate_high_delay(memcg, nr_pages, in mem_cgroup_handle_over_high()
2480 mem_find_max_overage(memcg)); in mem_cgroup_handle_over_high()
2482 penalty_jiffies += calculate_high_delay(memcg, nr_pages, in mem_cgroup_handle_over_high()
2483 swap_find_max_overage(memcg)); in mem_cgroup_handle_over_high()
2521 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2524 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge_memcg() argument
2538 if (consume_stock(memcg, nr_pages)) in try_charge_memcg()
2542 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge_memcg()
2543 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge_memcg()
2546 page_counter_uncharge(&memcg->memsw, batch); in try_charge_memcg()
2661 page_counter_charge(&memcg->memory, nr_pages); in try_charge_memcg()
2663 page_counter_charge(&memcg->memsw, nr_pages); in try_charge_memcg()
2669 refill_stock(memcg, batch - nr_pages); in try_charge_memcg()
2683 mem_high = page_counter_read(&memcg->memory) > in try_charge_memcg()
2684 READ_ONCE(memcg->memory.high); in try_charge_memcg()
2685 swap_high = page_counter_read(&memcg->swap) > in try_charge_memcg()
2686 READ_ONCE(memcg->swap.high); in try_charge_memcg()
2691 schedule_work(&memcg->high_work); in try_charge_memcg()
2711 } while ((memcg = parent_mem_cgroup(memcg))); in try_charge_memcg()
2716 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument
2719 if (mem_cgroup_is_root(memcg)) in try_charge()
2722 return try_charge_memcg(memcg, gfp_mask, nr_pages); in try_charge()
2726 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) in cancel_charge() argument
2728 if (mem_cgroup_is_root(memcg)) in cancel_charge()
2731 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2733 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2737 static void commit_charge(struct page *page, struct mem_cgroup *memcg) in commit_charge() argument
2748 page->memcg_data = (unsigned long)memcg; in commit_charge()
2753 struct mem_cgroup *memcg; in get_mem_cgroup_from_objcg() local
2757 memcg = obj_cgroup_memcg(objcg); in get_mem_cgroup_from_objcg()
2758 if (unlikely(!css_tryget(&memcg->css))) in get_mem_cgroup_from_objcg()
2762 return memcg; in get_mem_cgroup_from_objcg()
2859 struct mem_cgroup *memcg; in get_obj_cgroup_from_current() local
2866 memcg = active_memcg(); in get_obj_cgroup_from_current()
2868 memcg = mem_cgroup_from_task(current); in get_obj_cgroup_from_current()
2870 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { in get_obj_cgroup_from_current()
2871 objcg = rcu_dereference(memcg->objcg); in get_obj_cgroup_from_current()
2932 struct mem_cgroup *memcg; in obj_cgroup_uncharge_pages() local
2934 memcg = get_mem_cgroup_from_objcg(objcg); in obj_cgroup_uncharge_pages()
2937 page_counter_uncharge(&memcg->kmem, nr_pages); in obj_cgroup_uncharge_pages()
2938 refill_stock(memcg, nr_pages); in obj_cgroup_uncharge_pages()
2940 css_put(&memcg->css); in obj_cgroup_uncharge_pages()
2955 struct mem_cgroup *memcg; in obj_cgroup_charge_pages() local
2958 memcg = get_mem_cgroup_from_objcg(objcg); in obj_cgroup_charge_pages()
2960 ret = try_charge_memcg(memcg, gfp, nr_pages); in obj_cgroup_charge_pages()
2965 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { in obj_cgroup_charge_pages()
2973 page_counter_charge(&memcg->kmem, nr_pages); in obj_cgroup_charge_pages()
2976 cancel_charge(memcg, nr_pages); in obj_cgroup_charge_pages()
2980 css_put(&memcg->css); in obj_cgroup_charge_pages()
3160 struct mem_cgroup *memcg; in obj_stock_flush_required() local
3163 memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg); in obj_stock_flush_required()
3164 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) in obj_stock_flush_required()
3168 memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg); in obj_stock_flush_required()
3169 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) in obj_stock_flush_required()
3260 struct mem_cgroup *memcg = page_memcg(head); in split_page_memcg() local
3263 if (mem_cgroup_disabled() || !memcg) in split_page_memcg()
3272 css_get_many(&memcg->css, nr - 1); in split_page_memcg()
3315 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, in mem_cgroup_resize_max() argument
3322 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
3335 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : in mem_cgroup_resize_max()
3336 max <= memcg->memsw.max; in mem_cgroup_resize_max()
3351 drain_all_stock(memcg); in mem_cgroup_resize_max()
3356 if (!try_to_free_mem_cgroup_pages(memcg, 1, in mem_cgroup_resize_max()
3364 memcg_oom_recover(memcg); in mem_cgroup_resize_max()
3408 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in mem_cgroup_soft_limit_reclaim()
3423 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
3435 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3448 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3457 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) in mem_cgroup_force_empty() argument
3464 drain_all_stock(memcg); in mem_cgroup_force_empty()
3467 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
3473 progress = try_to_free_mem_cgroup_pages(memcg, 1, in mem_cgroup_force_empty()
3490 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_force_empty_write() local
3492 if (mem_cgroup_is_root(memcg)) in mem_cgroup_force_empty_write()
3494 return mem_cgroup_force_empty(memcg) ?: nbytes; in mem_cgroup_force_empty_write()
3516 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) in mem_cgroup_usage() argument
3520 if (mem_cgroup_is_root(memcg)) { in mem_cgroup_usage()
3522 cgroup_rstat_flush_irqsafe(memcg->css.cgroup); in mem_cgroup_usage()
3523 val = memcg_page_state(memcg, NR_FILE_PAGES) + in mem_cgroup_usage()
3524 memcg_page_state(memcg, NR_ANON_MAPPED); in mem_cgroup_usage()
3526 val += memcg_page_state(memcg, MEMCG_SWAP); in mem_cgroup_usage()
3529 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3531 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3547 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_read_u64() local
3552 counter = &memcg->memory; in mem_cgroup_read_u64()
3555 counter = &memcg->memsw; in mem_cgroup_read_u64()
3558 counter = &memcg->kmem; in mem_cgroup_read_u64()
3561 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
3569 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3570 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; in mem_cgroup_read_u64()
3571 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3572 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; in mem_cgroup_read_u64()
3581 return (u64)memcg->soft_limit * PAGE_SIZE; in mem_cgroup_read_u64()
3588 static int memcg_online_kmem(struct mem_cgroup *memcg) in memcg_online_kmem() argument
3596 BUG_ON(memcg->kmemcg_id >= 0); in memcg_online_kmem()
3597 BUG_ON(memcg->kmem_state); in memcg_online_kmem()
3608 objcg->memcg = memcg; in memcg_online_kmem()
3609 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3613 memcg->kmemcg_id = memcg_id; in memcg_online_kmem()
3614 memcg->kmem_state = KMEM_ONLINE; in memcg_online_kmem()
3619 static void memcg_offline_kmem(struct mem_cgroup *memcg) in memcg_offline_kmem() argument
3625 if (memcg->kmem_state != KMEM_ONLINE) in memcg_offline_kmem()
3628 memcg->kmem_state = KMEM_ALLOCATED; in memcg_offline_kmem()
3630 parent = parent_mem_cgroup(memcg); in memcg_offline_kmem()
3634 memcg_reparent_objcgs(memcg, parent); in memcg_offline_kmem()
3636 kmemcg_id = memcg->kmemcg_id; in memcg_offline_kmem()
3648 css_for_each_descendant_pre(css, &memcg->css) { in memcg_offline_kmem()
3660 static void memcg_free_kmem(struct mem_cgroup *memcg) in memcg_free_kmem() argument
3663 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) in memcg_free_kmem()
3664 memcg_offline_kmem(memcg); in memcg_free_kmem()
3667 static int memcg_online_kmem(struct mem_cgroup *memcg) in memcg_online_kmem() argument
3671 static void memcg_offline_kmem(struct mem_cgroup *memcg) in memcg_offline_kmem() argument
3674 static void memcg_free_kmem(struct mem_cgroup *memcg) in memcg_free_kmem() argument
3679 static int memcg_update_kmem_max(struct mem_cgroup *memcg, in memcg_update_kmem_max() argument
3685 ret = page_counter_set_max(&memcg->kmem, max); in memcg_update_kmem_max()
3690 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) in memcg_update_tcp_max() argument
3696 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
3700 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
3718 memcg->tcpmem_active = true; in memcg_update_tcp_max()
3732 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_write() local
3743 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ in mem_cgroup_write()
3749 ret = mem_cgroup_resize_max(memcg, nr_pages, false); in mem_cgroup_write()
3752 ret = mem_cgroup_resize_max(memcg, nr_pages, true); in mem_cgroup_write()
3758 ret = memcg_update_kmem_max(memcg, nr_pages); in mem_cgroup_write()
3761 ret = memcg_update_tcp_max(memcg, nr_pages); in mem_cgroup_write()
3766 memcg->soft_limit = nr_pages; in mem_cgroup_write()
3776 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_reset() local
3781 counter = &memcg->memory; in mem_cgroup_reset()
3784 counter = &memcg->memsw; in mem_cgroup_reset()
3787 counter = &memcg->kmem; in mem_cgroup_reset()
3790 counter = &memcg->tcpmem; in mem_cgroup_reset()
3820 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_move_charge_write() local
3831 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3848 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_node_nr_lru_pages() argument
3851 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); in mem_cgroup_node_nr_lru_pages()
3868 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_nr_lru_pages() argument
3879 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); in mem_cgroup_nr_lru_pages()
3881 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); in mem_cgroup_nr_lru_pages()
3901 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memcg_numa_stat_show() local
3903 cgroup_rstat_flush(memcg->css.cgroup); in memcg_numa_stat_show()
3907 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
3911 mem_cgroup_node_nr_lru_pages(memcg, nid, in memcg_numa_stat_show()
3919 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
3923 mem_cgroup_node_nr_lru_pages(memcg, nid, in memcg_numa_stat_show()
3968 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memcg_stat_show() local
3975 cgroup_rstat_flush(memcg->css.cgroup); in memcg_stat_show()
3982 nr = memcg_page_state_local(memcg, memcg1_stats[i]); in memcg_stat_show()
3988 memcg_events_local(memcg, memcg1_events[i])); in memcg_stat_show()
3992 memcg_page_state_local(memcg, NR_LRU_BASE + i) * in memcg_stat_show()
3997 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { in memcg_stat_show()
4012 nr = memcg_page_state(memcg, memcg1_stats[i]); in memcg_stat_show()
4020 (u64)memcg_events(memcg, memcg1_events[i])); in memcg_stat_show()
4024 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * in memcg_stat_show()
4035 mz = memcg->nodeinfo[pgdat->node_id]; in memcg_stat_show()
4051 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_read() local
4053 return mem_cgroup_swappiness(memcg); in mem_cgroup_swappiness_read()
4059 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_write() local
4064 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_swappiness_write()
4065 memcg->swappiness = val; in mem_cgroup_swappiness_write()
4072 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) in __mem_cgroup_threshold() argument
4080 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
4082 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
4087 usage = mem_cgroup_usage(memcg, swap); in __mem_cgroup_threshold()
4123 static void mem_cgroup_threshold(struct mem_cgroup *memcg) in mem_cgroup_threshold() argument
4125 while (memcg) { in mem_cgroup_threshold()
4126 __mem_cgroup_threshold(memcg, false); in mem_cgroup_threshold()
4128 __mem_cgroup_threshold(memcg, true); in mem_cgroup_threshold()
4130 memcg = parent_mem_cgroup(memcg); in mem_cgroup_threshold()
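
mem_cgroup_threshold() fans the check out over the whole ancestry: every level's memory thresholds are re-evaluated, plus the memsw thresholds when memory+swap accounting applies. A sketch — the do_memsw_account() guard is my reconstruction:

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
        while (memcg) {
                __mem_cgroup_threshold(memcg, false);
                if (do_memsw_account())
                        __mem_cgroup_threshold(memcg, true);

                memcg = parent_mem_cgroup(memcg);
        }
}
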
4148 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) in mem_cgroup_oom_notify_cb() argument
4154 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
4161 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) in mem_cgroup_oom_notify() argument
4165 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_notify()
4169 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_register_event() argument
4182 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4185 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
4186 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_register_event()
4188 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
4189 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_register_event()
4195 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_register_event()
4244 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4249 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in mem_cgroup_usage_register_event() argument
4252 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); in mem_cgroup_usage_register_event()
4255 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_register_event() argument
4258 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); in memsw_cgroup_usage_register_event()
4261 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_unregister_event() argument
4269 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4272 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
4273 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_unregister_event()
4275 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
4276 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_unregister_event()
4284 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_unregister_event()
4343 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4346 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_usage_unregister_event() argument
4349 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); in mem_cgroup_usage_unregister_event()
4352 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_unregister_event() argument
4355 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); in memsw_cgroup_usage_unregister_event()
4358 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, in mem_cgroup_oom_register_event() argument
4370 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
4373 if (memcg->under_oom) in mem_cgroup_oom_register_event()
4380 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_oom_unregister_event() argument
4387 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
4399 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); in mem_cgroup_oom_control_read() local
4401 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); in mem_cgroup_oom_control_read()
4402 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
4404 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
4411 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_oom_control_write() local
4414 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1))) in mem_cgroup_oom_control_write()
4417 memcg->oom_kill_disable = val; in mem_cgroup_oom_control_write()
4419 memcg_oom_recover(memcg); in mem_cgroup_oom_control_write()
4428 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
4430 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
4433 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
4435 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
4438 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
4440 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
4445 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain() local
4447 if (!memcg->css.parent) in mem_cgroup_wb_domain()
4450 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
4475 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats() local
4478 cgroup_rstat_flush_irqsafe(memcg->css.cgroup); in mem_cgroup_wb_stats()
4480 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); in mem_cgroup_wb_stats()
4481 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); in mem_cgroup_wb_stats()
4482 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + in mem_cgroup_wb_stats()
4483 memcg_page_state(memcg, NR_ACTIVE_FILE); in mem_cgroup_wb_stats()
4486 while ((parent = parent_mem_cgroup(memcg))) { in mem_cgroup_wb_stats()
4487 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
4488 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
4489 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
4492 memcg = parent; in mem_cgroup_wb_stats()
4543 struct mem_cgroup *memcg = page_memcg(page); in mem_cgroup_track_foreign_dirty_slowpath() local
4558 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
4585 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
4595 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign() local
4601 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
4622 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
4627 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
4631 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
4659 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove() local
4663 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4670 css_put(&memcg->css); in memcg_event_remove()
4683 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake() local
4696 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
4705 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
4733 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memcg_write_event_control() local
4759 event->memcg = memcg; in memcg_write_event_control()
4831 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
4837 spin_lock_irq(&memcg->event_list_lock); in memcg_write_event_control()
4838 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
4839 spin_unlock_irq(&memcg->event_list_lock); in memcg_write_event_control()
5015 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) in mem_cgroup_id_remove() argument
5017 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
5018 idr_remove(&mem_cgroup_idr, memcg->id.id); in mem_cgroup_id_remove()
5019 memcg->id.id = 0; in mem_cgroup_id_remove()
5023 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, in mem_cgroup_id_get_many() argument
5026 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
5029 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) in mem_cgroup_id_put_many() argument
5031 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
5032 mem_cgroup_id_remove(memcg); in mem_cgroup_id_put_many()
5035 css_put(&memcg->css); in mem_cgroup_id_put_many()
5039 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) in mem_cgroup_id_put() argument
5041 mem_cgroup_id_put_many(memcg, 1); in mem_cgroup_id_put()
5056 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in alloc_mem_cgroup_per_node_info() argument
5084 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
5086 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
5090 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in free_mem_cgroup_per_node_info() argument
5092 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
5101 static void __mem_cgroup_free(struct mem_cgroup *memcg) in __mem_cgroup_free() argument
5106 free_mem_cgroup_per_node_info(memcg, node); in __mem_cgroup_free()
5107 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
5108 kfree(memcg); in __mem_cgroup_free()
5111 static void mem_cgroup_free(struct mem_cgroup *memcg) in mem_cgroup_free() argument
5113 memcg_wb_domain_exit(memcg); in mem_cgroup_free()
5114 __mem_cgroup_free(memcg); in mem_cgroup_free()
5119 struct mem_cgroup *memcg; in mem_cgroup_alloc() local
5128 memcg = kzalloc(size, GFP_KERNEL); in mem_cgroup_alloc()
5129 if (!memcg) in mem_cgroup_alloc()
5132 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, in mem_cgroup_alloc()
5135 if (memcg->id.id < 0) { in mem_cgroup_alloc()
5136 error = memcg->id.id; in mem_cgroup_alloc()
5140 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
5142 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
5146 if (alloc_mem_cgroup_per_node_info(memcg, node)) in mem_cgroup_alloc()
5149 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) in mem_cgroup_alloc()
5152 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
5153 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_alloc()
5154 mutex_init(&memcg->thresholds_lock); in mem_cgroup_alloc()
5155 spin_lock_init(&memcg->move_lock); in mem_cgroup_alloc()
5156 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
5157 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_alloc()
5158 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_alloc()
5159 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
5161 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
5162 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
5165 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
5167 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
5171 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
5172 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
5173 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
5175 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); in mem_cgroup_alloc()
5176 return memcg; in mem_cgroup_alloc()
5178 mem_cgroup_id_remove(memcg); in mem_cgroup_alloc()
5179 __mem_cgroup_free(memcg); in mem_cgroup_alloc()
5187 struct mem_cgroup *memcg, *old_memcg; in mem_cgroup_css_alloc() local
5191 memcg = mem_cgroup_alloc(); in mem_cgroup_css_alloc()
5193 if (IS_ERR(memcg)) in mem_cgroup_css_alloc()
5194 return ERR_CAST(memcg); in mem_cgroup_css_alloc()
5196 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5197 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
5198 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5200 memcg->swappiness = mem_cgroup_swappiness(parent); in mem_cgroup_css_alloc()
5201 memcg->oom_kill_disable = parent->oom_kill_disable; in mem_cgroup_css_alloc()
5203 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_alloc()
5204 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
5205 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_alloc()
5206 page_counter_init(&memcg->tcpmem, &parent->tcpmem); in mem_cgroup_css_alloc()
5208 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
5209 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
5210 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
5211 page_counter_init(&memcg->tcpmem, NULL); in mem_cgroup_css_alloc()
5213 root_mem_cgroup = memcg; in mem_cgroup_css_alloc()
5214 return &memcg->css; in mem_cgroup_css_alloc()
5218 error = memcg_online_kmem(memcg); in mem_cgroup_css_alloc()
5225 return &memcg->css; in mem_cgroup_css_alloc()
5227 mem_cgroup_id_remove(memcg); in mem_cgroup_css_alloc()
5228 mem_cgroup_free(memcg); in mem_cgroup_css_alloc()
5234 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_online() local
5241 if (alloc_shrinker_info(memcg)) { in mem_cgroup_css_online()
5242 mem_cgroup_id_remove(memcg); in mem_cgroup_css_online()
5247 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
5250 if (unlikely(mem_cgroup_is_root(memcg))) in mem_cgroup_css_online()
5258 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_offline() local
5266 spin_lock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5267 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
5271 spin_unlock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5273 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
5274 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
5276 memcg_offline_kmem(memcg); in mem_cgroup_css_offline()
5277 reparent_shrinker_deferred(memcg); in mem_cgroup_css_offline()
5278 wb_memcg_offline(memcg); in mem_cgroup_css_offline()
5280 drain_all_stock(memcg); in mem_cgroup_css_offline()
5282 mem_cgroup_id_put(memcg); in mem_cgroup_css_offline()
5287 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_released() local
5289 invalidate_reclaim_iterators(memcg); in mem_cgroup_css_released()
5294 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_free() local
5299 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
5304 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) in mem_cgroup_css_free()
5307 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
5308 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
5309 mem_cgroup_remove_from_trees(memcg); in mem_cgroup_css_free()
5310 free_shrinker_info(memcg); in mem_cgroup_css_free()
5311 memcg_free_kmem(memcg); in mem_cgroup_css_free()
5312 mem_cgroup_free(memcg); in mem_cgroup_css_free()
5330 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_reset() local
5332 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5333 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5334 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5335 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5336 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
5337 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
5338 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5339 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
5340 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5341 memcg_wb_domain_size_changed(memcg); in mem_cgroup_css_reset()
5361 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_rstat_flush() local
5362 struct mem_cgroup *parent = parent_mem_cgroup(memcg); in mem_cgroup_css_rstat_flush()
5367 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_css_rstat_flush()
5375 delta = memcg->vmstats.state_pending[i]; in mem_cgroup_css_rstat_flush()
5377 memcg->vmstats.state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5390 memcg->vmstats.state[i] += delta; in mem_cgroup_css_rstat_flush()
5396 delta = memcg->vmstats.events_pending[i]; in mem_cgroup_css_rstat_flush()
5398 memcg->vmstats.events_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5409 memcg->vmstats.events[i] += delta; in mem_cgroup_css_rstat_flush()
5415 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
5915 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ in mem_cgroup_can_attach() local
5936 memcg = mem_cgroup_from_css(css); in mem_cgroup_can_attach()
5946 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
5952 VM_BUG_ON(from == memcg); in mem_cgroup_can_attach()
5968 mc.to = memcg; in mem_cgroup_can_attach()
6172 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memory_current_read() local
6174 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
6186 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_min_write() local
6195 page_counter_set_min(&memcg->memory, min); in memory_min_write()
6209 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_low_write() local
6218 page_counter_set_low(&memcg->memory, low); in memory_low_write()
6232 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_high_write() local
6243 page_counter_set_high(&memcg->memory, high); in memory_high_write()
6246 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
6256 drain_all_stock(memcg); in memory_high_write()
6261 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
6268 memcg_wb_domain_size_changed(memcg); in memory_high_write()
6281 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_max_write() local
6292 xchg(&memcg->memory.max, max); in memory_max_write()
6295 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
6304 drain_all_stock(memcg); in memory_max_write()
6310 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
6316 memcg_memory_event(memcg, MEMCG_OOM); in memory_max_write()
6317 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) in memory_max_write()
6321 memcg_wb_domain_size_changed(memcg); in memory_max_write()
6337 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_events_show() local
6339 __memory_events_show(m, memcg->memory_events); in memory_events_show()
6345 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_events_local_show() local
6347 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
6353 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_stat_show() local
6356 buf = memory_stat_format(memcg); in memory_stat_show()
6374 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_numa_stat_show() local
6376 cgroup_rstat_flush(memcg->css.cgroup); in memory_numa_stat_show()
6389 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); in memory_numa_stat_show()
6403 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_oom_group_show() local
6405 seq_printf(m, "%d\n", memcg->oom_group); in memory_oom_group_show()
6413 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_oom_group_write() local
6427 memcg->oom_group = oom_group; in memory_oom_group_write()
6634 struct mem_cgroup *memcg) in mem_cgroup_calculate_protection() argument
6652 if (memcg == root) in mem_cgroup_calculate_protection()
6655 usage = page_counter_read(&memcg->memory); in mem_cgroup_calculate_protection()
6659 parent = parent_mem_cgroup(memcg); in mem_cgroup_calculate_protection()
6665 memcg->memory.emin = READ_ONCE(memcg->memory.min); in mem_cgroup_calculate_protection()
6666 memcg->memory.elow = READ_ONCE(memcg->memory.low); in mem_cgroup_calculate_protection()
6672 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6673 READ_ONCE(memcg->memory.min), in mem_cgroup_calculate_protection()
6677 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6678 READ_ONCE(memcg->memory.low), in mem_cgroup_calculate_protection()
6683 static int charge_memcg(struct page *page, struct mem_cgroup *memcg, gfp_t gfp) in charge_memcg() argument
6688 ret = try_charge(memcg, gfp, nr_pages); in charge_memcg()
6692 css_get(&memcg->css); in charge_memcg()
6693 commit_charge(page, memcg); in charge_memcg()
6696 mem_cgroup_charge_statistics(memcg, page, nr_pages); in charge_memcg()
6697 memcg_check_events(memcg, page); in charge_memcg()
6720 struct mem_cgroup *memcg; in __mem_cgroup_charge() local
6723 memcg = get_mem_cgroup_from_mm(mm); in __mem_cgroup_charge()
6724 ret = charge_memcg(page, memcg, gfp_mask); in __mem_cgroup_charge()
6725 css_put(&memcg->css); in __mem_cgroup_charge()
6745 struct mem_cgroup *memcg; in mem_cgroup_swapin_charge_page() local
6754 memcg = mem_cgroup_from_id(id); in mem_cgroup_swapin_charge_page()
6755 if (!memcg || !css_tryget_online(&memcg->css)) in mem_cgroup_swapin_charge_page()
6756 memcg = get_mem_cgroup_from_mm(mm); in mem_cgroup_swapin_charge_page()
6759 ret = charge_memcg(page, memcg, gfp); in mem_cgroup_swapin_charge_page()
6761 css_put(&memcg->css); in mem_cgroup_swapin_charge_page()
6799 struct mem_cgroup *memcg; member
6816 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); in uncharge_batch()
6818 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); in uncharge_batch()
6820 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); in uncharge_batch()
6821 memcg_oom_recover(ug->memcg); in uncharge_batch()
6825 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); in uncharge_batch()
6826 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); in uncharge_batch()
6827 memcg_check_events(ug->memcg, ug->dummy_page); in uncharge_batch()
6831 css_put(&ug->memcg->css); in uncharge_batch()
6837 struct mem_cgroup *memcg; in uncharge_page() local
6854 memcg = get_mem_cgroup_from_objcg(objcg); in uncharge_page()
6856 memcg = __page_memcg(page); in uncharge_page()
6859 if (!memcg) in uncharge_page()
6862 if (ug->memcg != memcg) { in uncharge_page()
6863 if (ug->memcg) { in uncharge_page()
6867 ug->memcg = memcg; in uncharge_page()
6871 css_get(&memcg->css); in uncharge_page()
6884 if (!mem_cgroup_is_root(memcg)) in uncharge_page()
6891 css_put(&memcg->css); in uncharge_page()
6928 if (ug.memcg) in __mem_cgroup_uncharge_list()
6944 struct mem_cgroup *memcg; in mem_cgroup_migrate() local
6961 memcg = page_memcg(oldpage); in mem_cgroup_migrate()
6962 VM_WARN_ON_ONCE_PAGE(!memcg, oldpage); in mem_cgroup_migrate()
6963 if (!memcg) in mem_cgroup_migrate()
6969 if (!mem_cgroup_is_root(memcg)) { in mem_cgroup_migrate()
6970 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_migrate()
6972 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_migrate()
6975 css_get(&memcg->css); in mem_cgroup_migrate()
6976 commit_charge(newpage, memcg); in mem_cgroup_migrate()
6979 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); in mem_cgroup_migrate()
6980 memcg_check_events(memcg, newpage); in mem_cgroup_migrate()
6989 struct mem_cgroup *memcg; in mem_cgroup_sk_alloc() local
6999 memcg = mem_cgroup_from_task(current); in mem_cgroup_sk_alloc()
7000 if (memcg == root_mem_cgroup) in mem_cgroup_sk_alloc()
7002 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) in mem_cgroup_sk_alloc()
7004 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
7005 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
7025 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, in mem_cgroup_charge_skmem() argument
7031 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
7032 memcg->tcpmem_pressure = 0; in mem_cgroup_charge_skmem()
7035 memcg->tcpmem_pressure = 1; in mem_cgroup_charge_skmem()
7037 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
7043 if (try_charge(memcg, gfp_mask, nr_pages) == 0) { in mem_cgroup_charge_skmem()
7044 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); in mem_cgroup_charge_skmem()
7056 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_uncharge_skmem() argument
7059 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
7063 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
7065 refill_stock(memcg, nr_pages); in mem_cgroup_uncharge_skmem()
7128 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) in mem_cgroup_id_get_online() argument
7130 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
7135 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { in mem_cgroup_id_get_online()
7139 memcg = parent_mem_cgroup(memcg); in mem_cgroup_id_get_online()
7140 if (!memcg) in mem_cgroup_id_get_online()
7141 memcg = root_mem_cgroup; in mem_cgroup_id_get_online()
7143 return memcg; in mem_cgroup_id_get_online()
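
mem_cgroup_id_get_online() pins the nearest online ancestor: if this memcg's ID refcount has already dropped to zero (the group is offline), it climbs toward the root, which by construction can never be offline. A sketch of the likely body:

static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
        while (!refcount_inc_not_zero(&memcg->id.ref)) {
                /*
                 * The root cgroup cannot be destroyed, so its refcount
                 * must always be >= 1; tripping this would be a bug.
                 */
                if (WARN_ON_ONCE(memcg == root_mem_cgroup))
                        break;
                memcg = parent_mem_cgroup(memcg);
                if (!memcg)
                        memcg = root_mem_cgroup;
        }
        return memcg;
}
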
7155 struct mem_cgroup *memcg, *swap_memcg; in mem_cgroup_swapout() local
7168 memcg = page_memcg(page); in mem_cgroup_swapout()
7170 VM_WARN_ON_ONCE_PAGE(!memcg, page); in mem_cgroup_swapout()
7171 if (!memcg) in mem_cgroup_swapout()
7179 swap_memcg = mem_cgroup_id_get_online(memcg); in mem_cgroup_swapout()
7191 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_swapout()
7192 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
7194 if (!cgroup_memory_noswap && memcg != swap_memcg) { in mem_cgroup_swapout()
7197 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
7207 mem_cgroup_charge_statistics(memcg, page, -nr_entries); in mem_cgroup_swapout()
7208 memcg_check_events(memcg, page); in mem_cgroup_swapout()
7210 css_put(&memcg->css); in mem_cgroup_swapout()
7226 struct mem_cgroup *memcg; in __mem_cgroup_try_charge_swap() local
7232 memcg = page_memcg(page); in __mem_cgroup_try_charge_swap()
7234 VM_WARN_ON_ONCE_PAGE(!memcg, page); in __mem_cgroup_try_charge_swap()
7235 if (!memcg) in __mem_cgroup_try_charge_swap()
7239 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); in __mem_cgroup_try_charge_swap()
7243 memcg = mem_cgroup_id_get_online(memcg); in __mem_cgroup_try_charge_swap()
7245 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && in __mem_cgroup_try_charge_swap()
7246 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
7247 memcg_memory_event(memcg, MEMCG_SWAP_MAX); in __mem_cgroup_try_charge_swap()
7248 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); in __mem_cgroup_try_charge_swap()
7249 mem_cgroup_id_put(memcg); in __mem_cgroup_try_charge_swap()
7255 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
7256 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); in __mem_cgroup_try_charge_swap()
7258 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); in __mem_cgroup_try_charge_swap()
7270 struct mem_cgroup *memcg; in __mem_cgroup_uncharge_swap() local
7275 memcg = mem_cgroup_from_id(id); in __mem_cgroup_uncharge_swap()
7276 if (memcg) { in __mem_cgroup_uncharge_swap()
7277 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { in __mem_cgroup_uncharge_swap()
7279 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
7281 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
7283 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
7284 mem_cgroup_id_put_many(memcg, nr_pages); in __mem_cgroup_uncharge_swap()
7289 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) in mem_cgroup_get_nr_swap_pages() argument
7295 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) in mem_cgroup_get_nr_swap_pages()
7297 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
7298 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
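
mem_cgroup_get_nr_swap_pages() clamps the global free-swap figure by every ancestor's remaining swap quota (swap.max minus current swap usage), so the tightest limit on the path to the root wins. A sketch — the cgroup_memory_noswap early-out is my reconstruction:

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
        long nr_swap_pages = get_nr_swap_pages();

        if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return nr_swap_pages;
        for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
                nr_swap_pages = min_t(long, nr_swap_pages,
                                      READ_ONCE(memcg->swap.max) -
                                      page_counter_read(&memcg->swap));
        return nr_swap_pages;
}
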
7304 struct mem_cgroup *memcg; in mem_cgroup_swap_full() local
7313 memcg = page_memcg(page); in mem_cgroup_swap_full()
7314 if (!memcg) in mem_cgroup_swap_full()
7317 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_swap_full()
7318 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
7320 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
7321 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
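
mem_cgroup_swap_full() reports pressure when any ancestor has consumed at least half of its swap.high or swap.max budget — the cgroup-aware analogue of vm_swap_full(). A sketch; the early-outs before the loop are my reconstruction:

bool mem_cgroup_swap_full(struct page *page)
{
        struct mem_cgroup *memcg;

        VM_BUG_ON_PAGE(!PageLocked(page), page);

        if (vm_swap_full())
                return true;
        if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
                return false;

        memcg = page_memcg(page);
        if (!memcg)
                return false;

        for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
                unsigned long usage = page_counter_read(&memcg->swap);

                if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
                    usage * 2 >= READ_ONCE(memcg->swap.max))
                        return true;
        }

        return false;
}
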
7341 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in swap_current_read() local
7343 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
7355 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in swap_high_write() local
7364 page_counter_set_high(&memcg->swap, high); in swap_high_write()
7378 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in swap_max_write() local
7387 xchg(&memcg->swap.max, max); in swap_max_write()
7394 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in swap_events_show() local
7397 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
7399 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
7401 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()