Lines Matching +full:precharge +full:- +full:current
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
115 * Cgroups above their limits are maintained in an RB-tree, independent of
195 unsigned long precharge; member
212 /* for encoding cft->private value on file */
244 return tsk_is_oom_victim(current) || fatal_signal_pending(current) || in should_force_charge()
245 (current->flags & PF_EXITING); in should_force_charge()
253 return &memcg->vmpressure; in memcg_to_vmpressure()
281 * objcg->nr_charged_bytes can't have an arbitrary byte value. in obj_cgroup_release()
285 * 1) CPU0: objcg == stock->cached_objcg in obj_cgroup_release()
290 * objcg->nr_charged_bytes = PAGE_SIZE - 92 in obj_cgroup_release()
292 * 92 bytes are added to stock->nr_bytes in obj_cgroup_release()
294 * 92 bytes are added to objcg->nr_charged_bytes in obj_cgroup_release()
299 nr_bytes = atomic_read(&objcg->nr_charged_bytes); in obj_cgroup_release()
300 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); in obj_cgroup_release()
307 list_del(&objcg->list); in obj_cgroup_release()
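
The obj_cgroup_release() fragments above explain why objcg->nr_charged_bytes is always page-aligned at release time: byte remainders parked on the objcg and in per-cpu stocks cancel out. A minimal userspace model of that arithmetic, with illustrative names standing in for the kernel structures:

    /* Illustrative model of the byte-remainder flow; not kernel code. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long nr_charged_bytes = 0; /* objcg->nr_charged_bytes */
        unsigned long stock_nr_bytes = 0;   /* per-cpu stock->nr_bytes */

        /* CPU0: a 92-byte object is freed; the unused tail of its page
         * stays pre-charged on the objcg. */
        nr_charged_bytes += PAGE_SIZE - 92;

        /* CPU1: another 92 bytes land in a stock and are later drained
         * back into the central counter. */
        stock_nr_bytes += 92;
        nr_charged_bytes += stock_nr_bytes;
        stock_nr_bytes = 0;

        /* The remainders cancel: the total is page-aligned, which is
         * what WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)) asserts. */
        assert((nr_charged_bytes & (PAGE_SIZE - 1)) == 0);
        printf("%lu bytes = %lu page(s)\n", nr_charged_bytes,
               nr_charged_bytes / PAGE_SIZE);
        return 0;
    }
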
323 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, in obj_cgroup_alloc()
329 INIT_LIST_HEAD(&objcg->list); in obj_cgroup_alloc()
338 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
343 list_add(&objcg->list, &memcg->objcg_list); in memcg_reparent_objcgs()
345 list_for_each_entry(iter, &memcg->objcg_list, list) in memcg_reparent_objcgs()
346 WRITE_ONCE(iter->memcg, parent); in memcg_reparent_objcgs()
348 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
352 percpu_ref_kill(&objcg->refcnt); in memcg_reparent_objcgs()
359 * but only a few kmem-limited. Or also, if we have, for instance, 200
360 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
363 * The current size of the caches array is stored in memcg_nr_cache_ids. It
384 * the alloc/free process all the time. In a small machine, 4 kmem-limited
408 * mem_cgroup_css_from_page - css of the memcg associated with a page
427 return &memcg->css; in mem_cgroup_css_from_page()
431 * page_cgroup_ino - return inode number of the memcg a page is charged to
451 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
454 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
464 return memcg->nodeinfo[nid]; in mem_cgroup_page_nodeinfo()
485 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded()
490 if (mz->on_tree) in __mem_cgroup_insert_exceeded()
493 mz->usage_in_excess = new_usage_in_excess; in __mem_cgroup_insert_exceeded()
494 if (!mz->usage_in_excess) in __mem_cgroup_insert_exceeded()
500 if (mz->usage_in_excess < mz_node->usage_in_excess) { in __mem_cgroup_insert_exceeded()
501 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
504 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
509 mctz->rb_rightmost = &mz->tree_node; in __mem_cgroup_insert_exceeded()
511 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
512 rb_insert_color(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_insert_exceeded()
513 mz->on_tree = true; in __mem_cgroup_insert_exceeded()
519 if (!mz->on_tree) in __mem_cgroup_remove_exceeded()
522 if (&mz->tree_node == mctz->rb_rightmost) in __mem_cgroup_remove_exceeded()
523 mctz->rb_rightmost = rb_prev(&mz->tree_node); in __mem_cgroup_remove_exceeded()
525 rb_erase(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_remove_exceeded()
526 mz->on_tree = false; in __mem_cgroup_remove_exceeded()
534 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
536 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
541 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
542 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
546 excess = nr_pages - soft_limit; in soft_limit_excess()
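
soft_limit_excess() boils down to a clamped subtraction of the soft limit from current usage. A sketch of that calculation, with plain unsigned longs standing in for the page counters:

    /* Illustrative: excess is zero unless usage exceeds the soft limit. */
    static unsigned long soft_limit_excess(unsigned long nr_pages,
                                           unsigned long soft_limit)
    {
        return nr_pages > soft_limit ? nr_pages - soft_limit : 0;
    }
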
568 * We have to update the tree if mz is on the RB-tree or in mem_cgroup_update_tree()
571 if (excess || mz->on_tree) { in mem_cgroup_update_tree()
574 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_update_tree()
575 /* if on-tree, remove it */ in mem_cgroup_update_tree()
576 if (mz->on_tree) in mem_cgroup_update_tree()
579 * Insert again. mz->usage_in_excess will be updated. in mem_cgroup_update_tree()
583 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_update_tree()
595 mz = memcg->nodeinfo[nid]; in mem_cgroup_remove_from_trees()
609 if (!mctz->rb_rightmost) in __mem_cgroup_largest_soft_limit_node()
612 mz = rb_entry(mctz->rb_rightmost, in __mem_cgroup_largest_soft_limit_node()
620 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
621 !css_tryget(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
632 spin_lock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
634 spin_unlock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
639 * __mod_memcg_state - update cgroup memory statistics
641 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
649 __this_cpu_add(memcg->vmstats_percpu->state[idx], val); in __mod_memcg_state()
650 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); in __mod_memcg_state()
660 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu); in memcg_page_state_local()
675 memcg = pn->memcg; in __mod_memcg_lruvec_state()
681 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); in __mod_memcg_lruvec_state()
685 * __mod_lruvec_state - update lruvec memory statistics
692 * change of state at this level: per-node, per-cgroup, per-lruvec.
740 * when we free the slab object, we need to update the per-memcg in __mod_lruvec_kmem_state()
771 * __count_memcg_events - account VM events in a cgroup
782 __this_cpu_add(memcg->vmstats_percpu->events[idx], count); in __count_memcg_events()
783 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id()); in __count_memcg_events()
788 return READ_ONCE(memcg->vmstats.events[event]); in memcg_events()
797 x += per_cpu(memcg->vmstats_percpu->events[event], cpu); in memcg_events_local()
810 nr_pages = -nr_pages; /* for event */ in mem_cgroup_charge_statistics()
813 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
821 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); in mem_cgroup_event_ratelimit()
822 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); in mem_cgroup_event_ratelimit()
824 if ((long)(next - val) < 0) { in mem_cgroup_event_ratelimit()
835 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); in mem_cgroup_event_ratelimit()
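
The `(long)(next - val) < 0` test in mem_cgroup_event_ratelimit() is a wraparound-safe way of asking whether the event counter has passed its target. A userspace model of the same idiom (THRESH and event_due are illustrative, not kernel names):

    #include <stdbool.h>

    #define THRESH 1024UL /* illustrative stand-in for the target spacing */

    static bool event_due(unsigned long val, unsigned long *next)
    {
        /* The signed difference stays correct even after the unsigned
         * counters wrap around. */
        if ((long)(*next - val) < 0) {
            *next = val + THRESH; /* arm the next target */
            return true;
        }
        return false;
    }
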
863 * mm_update_next_owner() may clear mm->owner to NULL in mem_cgroup_from_task()
879 return current->active_memcg; in active_memcg()
886 * Obtain a reference on mm->memcg and return it if successful. If mm
889 * 2) current->mm->memcg, if available
913 css_get(&memcg->css); in get_mem_cgroup_from_mm()
916 mm = current->mm; in get_mem_cgroup_from_mm()
923 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
926 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
939 if (!in_task() || !current->mm || (current->flags & PF_KTHREAD)) in memcg_kmem_bypass()
946 * mem_cgroup_iter - iterate over memory cgroup hierarchy
952 * @root itself, or %NULL after a full round-trip.
956 * to cancel a hierarchy walk before the round-trip is complete.
985 mz = root->nodeinfo[reclaim->pgdat->node_id]; in mem_cgroup_iter()
986 iter = &mz->iter; in mem_cgroup_iter()
988 if (prev && reclaim->generation != iter->generation) in mem_cgroup_iter()
992 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
993 if (!pos || css_tryget(&pos->css)) in mem_cgroup_iter()
996 * css reference reached zero, so iter->position will in mem_cgroup_iter()
997 * be cleared by ->css_released. However, we should not in mem_cgroup_iter()
998 * rely on this happening soon, because ->css_released in mem_cgroup_iter()
999 * is called from a work queue, and by busy-waiting we in mem_cgroup_iter()
1000 * might block it. So we clear iter->position right in mem_cgroup_iter()
1003 (void)cmpxchg(&iter->position, pos, NULL); in mem_cgroup_iter()
1008 css = &pos->css; in mem_cgroup_iter()
1011 css = css_next_descendant_pre(css, &root->css); in mem_cgroup_iter()
1016 * the hierarchy - make sure they see at least in mem_cgroup_iter()
1031 if (css == &root->css) in mem_cgroup_iter()
1046 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
1049 css_put(&pos->css); in mem_cgroup_iter()
1052 iter->generation++; in mem_cgroup_iter()
1054 reclaim->generation = iter->generation; in mem_cgroup_iter()
1060 css_put(&prev->css); in mem_cgroup_iter()
1066 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1076 css_put(&prev->css); in mem_cgroup_iter_break()
1087 mz = from->nodeinfo[nid]; in __invalidate_reclaim_iterators()
1088 iter = &mz->iter; in __invalidate_reclaim_iterators()
1089 cmpxchg(&iter->position, dead_memcg, NULL); in __invalidate_reclaim_iterators()
1104 * When cgroup1 non-hierarchy mode is used, in invalidate_reclaim_iterators()
1115 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1121 * descendants and calls @fn for each task. If @fn returns a non-zero
1139 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); in mem_cgroup_scan_tasks()
1169 * lock_page_lruvec - lock and return lruvec for a given page.
1173 * - page locked
1174 * - PageLRU cleared
1175 * - lock_page_memcg()
1176 * - page->_refcount is zero
1183 spin_lock(&lruvec->lru_lock); in lock_page_lruvec()
1195 spin_lock_irq(&lruvec->lru_lock); in lock_page_lruvec_irq()
1207 spin_lock_irqsave(&lruvec->lru_lock, *flags); in lock_page_lruvec_irqsave()
1215 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1236 lru_size = &mz->lru_zone_size[zid][lru]; in mem_cgroup_update_lru_size()
1254 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1266 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1267 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1269 margin = limit - count; in mem_cgroup_margin()
1272 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1273 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1275 margin = min(margin, limit - count); in mem_cgroup_margin()
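
mem_cgroup_margin() returns the smaller of the two remaining headrooms when swap accounting is on. A sketch of that logic with plain counters (the do_memsw_account flag is passed in here purely for illustration):

    /* Illustrative model of the margin calculation. */
    static unsigned long margin(unsigned long mem_count, unsigned long mem_max,
                                unsigned long memsw_count,
                                unsigned long memsw_max, int do_memsw_account)
    {
        unsigned long m = mem_count < mem_max ? mem_max - mem_count : 0;

        if (m && do_memsw_account) {
            unsigned long m2 = memsw_count < memsw_max
                             ? memsw_max - memsw_count : 0;
            if (m2 < m)
                m = m2; /* the memsw limit can be the tighter bound */
        }
        return m;
    }
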
1287 * moving cgroups. This is for waiting at high-memory pressure
1314 if (mc.moving_task && current != mc.moving_task) { in mem_cgroup_wait_acct_move()
1412 * 1) generic big picture -> specifics and details in memory_stat_format()
1413 * 2) reflecting userspace activity -> reflecting kernel heuristics in memory_stat_format()
1415 * Current memory state: in memory_stat_format()
1417 cgroup_rstat_flush(memcg->css.cgroup); in memory_stat_format()
1468 #define K(x) ((x) << (PAGE_SHIFT-10))
1484 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1504 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1505 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1508 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1509 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1512 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1513 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1515 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1516 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1520 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1534 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1538 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
1543 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
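
mem_cgroup_get_max() computes the most memory a cgroup can consume: on cgroup2 the memory limit plus usable swap, on cgroup1 the swap share derived from the memsw limit. A hedged userspace sketch of that arithmetic (parameter names are illustrative; memsw >= memory is assumed, as the kernel enforces elsewhere):

    static unsigned long get_max(unsigned long mem_max, unsigned long swap_max,
                                 unsigned long memsw_max,
                                 unsigned long total_swap_pages,
                                 int cgroup2, int swappiness)
    {
        unsigned long max = mem_max;

        if (!swappiness)
            return max;
        if (cgroup2) {
            max += swap_max < total_swap_pages ? swap_max : total_swap_pages;
        } else {
            /* swap capacity is whatever memsw allows beyond memory */
            unsigned long swap = memsw_max - max;

            max += swap < total_swap_pages ? swap : total_swap_pages;
        }
        return max;
    }
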
1553 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1644 * Check whether the OOM killer is already running under our hierarchy.
1654 if (iter->oom_lock) { in mem_cgroup_oom_trylock()
1663 iter->oom_lock = true; in mem_cgroup_oom_trylock()
1676 iter->oom_lock = false; in mem_cgroup_oom_trylock()
1693 iter->oom_lock = false; in mem_cgroup_oom_unlock()
1703 iter->under_oom++; in mem_cgroup_mark_under_oom()
1717 if (iter->under_oom > 0) in mem_cgroup_unmark_under_oom()
1718 iter->under_oom--; in mem_cgroup_unmark_under_oom()
1737 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1748 * For the following lockless ->under_oom test, the only required in memcg_oom_recover()
1755 if (memcg && memcg->under_oom) in memcg_oom_recover()
1786 * On the other hand, in-kernel OOM killer allows for an async victim in mem_cgroup_oom()
1794 if (memcg->oom_kill_disable) { in mem_cgroup_oom()
1795 if (!current->in_user_fault) in mem_cgroup_oom()
1797 css_get(&memcg->css); in mem_cgroup_oom()
1798 current->memcg_in_oom = memcg; in mem_cgroup_oom()
1799 current->memcg_oom_gfp_mask = mask; in mem_cgroup_oom()
1800 current->memcg_oom_order = order; in mem_cgroup_oom()
1825 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1843 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize()
1857 owait.wait.private = current; in mem_cgroup_oom_synchronize()
1868 if (locked && !memcg->oom_kill_disable) { in mem_cgroup_oom_synchronize()
1871 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, in mem_cgroup_oom_synchronize()
1872 current->memcg_oom_order); in mem_cgroup_oom_synchronize()
1882 * There is no guarantee that an OOM-lock contender in mem_cgroup_oom_synchronize()
1889 current->memcg_in_oom = NULL; in mem_cgroup_oom_synchronize()
1890 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
1895 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1897 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1900 * by killing all belonging OOM-killable tasks.
1902 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1933 * highest-level memory cgroup with oom.group set. in mem_cgroup_get_oom_group()
1936 if (memcg->oom_group) in mem_cgroup_get_oom_group()
1944 css_get(&oom_group->css); in mem_cgroup_get_oom_group()
1954 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
1959 * lock_page_memcg - lock a page and memcg binding
1976 * path can get away without acquiring the memcg->move_lock in lock_page_memcg()
1990 might_lock(&memcg->move_lock); in lock_page_memcg()
1994 if (atomic_read(&memcg->moving_account) <= 0) in lock_page_memcg()
1997 spin_lock_irqsave(&memcg->move_lock, flags); in lock_page_memcg()
1999 spin_unlock_irqrestore(&memcg->move_lock, flags); in lock_page_memcg()
2005 * critical sections holding the fast-path RCU lock and one in lock_page_memcg()
2009 memcg->move_lock_task = current; in lock_page_memcg()
2010 memcg->move_lock_flags = flags; in lock_page_memcg()
2016 if (memcg && memcg->move_lock_task == current) { in __unlock_page_memcg()
2017 unsigned long flags = memcg->move_lock_flags; in __unlock_page_memcg()
2019 memcg->move_lock_task = NULL; in __unlock_page_memcg()
2020 memcg->move_lock_flags = 0; in __unlock_page_memcg()
2022 spin_unlock_irqrestore(&memcg->move_lock, flags); in __unlock_page_memcg()
2029 * unlock_page_memcg - unlock a page and memcg binding
2088 * which is cheap in a non-preempt kernel. The interrupt context object stock
2100 return &stock->task_obj; in get_obj_stock()
2105 return &stock->irq_obj; in get_obj_stock()
2121 * The charges will only happen if @memcg matches the current cpu's memcg
2139 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { in consume_stock()
2140 stock->nr_pages -= nr_pages; in consume_stock()
2154 struct mem_cgroup *old = stock->cached; in drain_stock()
2159 if (stock->nr_pages) { in drain_stock()
2160 page_counter_uncharge(&old->memory, stock->nr_pages); in drain_stock()
2162 page_counter_uncharge(&old->memsw, stock->nr_pages); in drain_stock()
2163 stock->nr_pages = 0; in drain_stock()
2166 css_put(&old->css); in drain_stock()
2167 stock->cached = NULL; in drain_stock()
2183 drain_obj_stock(&stock->irq_obj); in drain_local_stock()
2185 drain_obj_stock(&stock->task_obj); in drain_local_stock()
2187 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); in drain_local_stock()
2204 if (stock->cached != memcg) { /* reset if necessary */ in refill_stock()
2206 css_get(&memcg->css); in refill_stock()
2207 stock->cached = memcg; in refill_stock()
2209 stock->nr_pages += nr_pages; in refill_stock()
2211 if (stock->nr_pages > MEMCG_CHARGE_BATCH) in refill_stock()
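
The consume_stock()/refill_stock()/drain_stock() fragments above form the per-CPU charge cache: charges are served from a cached batch when the cgroup matches, and the cache is drained once it grows past the batch size. A minimal single-threaded model of the pattern (names and the drain callback are illustrative; the real code pins the CPU and takes references):

    #include <stdbool.h>

    #define CHARGE_BATCH 64UL /* stands in for MEMCG_CHARGE_BATCH */

    struct stock { void *cached; unsigned long nr_pages; };

    static bool consume_stock(struct stock *s, void *memcg, unsigned long nr)
    {
        if (s->cached == memcg && s->nr_pages >= nr) {
            s->nr_pages -= nr; /* fast path: no page_counter update */
            return true;
        }
        return false;          /* caller falls back to the slow path */
    }

    static void refill_stock(struct stock *s, void *memcg, unsigned long nr,
                             void (*drain)(struct stock *))
    {
        if (s->cached != memcg) { /* reset if caching another cgroup */
            drain(s);
            s->cached = memcg;
        }
        s->nr_pages += nr;
        if (s->nr_pages > CHARGE_BATCH)
            drain(s);
    }
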
2218 * Drains all per-CPU charge caches for the given root_memcg and its subtree
2229 * Notify other cpus that system-wide "drain" is running in drain_all_stock()
2232 * per-cpu data. CPU up doesn't touch memcg_stock at all. in drain_all_stock()
2241 memcg = stock->cached; in drain_all_stock()
2242 if (memcg && stock->nr_pages && in drain_all_stock()
2250 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { in drain_all_stock()
2252 drain_local_stock(&stock->work); in drain_all_stock()
2254 schedule_work_on(cpu, &stock->work); in drain_all_stock()
2280 if (page_counter_read(&memcg->memory) <= in reclaim_high()
2281 READ_ONCE(memcg->memory.high)) in reclaim_high()
2316 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2318 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2323 * reasonable delay curve compared to precision-adjusted overage, not
2328 * +-------+------------------------+
2330 * +-------+------------------------+
2352 * +-------+------------------------+
2370 overage = usage - high; in calculate_overage()
2380 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
2381 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2394 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2395 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2432 * N-sized allocations are throttled approximately the same as one in calculate_high_delay()
2433 * 4N-sized allocation. in calculate_high_delay()
2436 * larger the current charge batch is than that. in calculate_high_delay()
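
The delay-curve fragments above scale the overage by extra precision bits before dividing by the high limit, so small overages keep fractional resolution. A worked sketch of that calculation (the shift value and names are illustrative of the scheme, not copied from a specific kernel version):

    #define DELAY_PRECISION_SHIFT 6 /* illustrative precision bits */

    static unsigned long long calc_overage(unsigned long usage,
                                           unsigned long high)
    {
        unsigned long long overage;

        if (usage <= high)
            return 0;
        if (high == 0)
            high = 1; /* act as if the threshold were one page */
        overage = (unsigned long long)(usage - high)
                  << DELAY_PRECISION_SHIFT;
        /* e.g. usage == 3 * high gives (2 * high << 6) / high == 128 */
        return overage / high;
    }
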
2450 unsigned int nr_pages = current->memcg_nr_pages_over_high; in mem_cgroup_handle_over_high()
2458 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2459 current->memcg_nr_pages_over_high = 0; in mem_cgroup_handle_over_high()
2506 if (nr_reclaimed || nr_retries--) { in mem_cgroup_handle_over_high()
2514 * need to account for any ill-begotten jiffies to pay them off later. in mem_cgroup_handle_over_high()
2521 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2542 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge_memcg()
2543 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge_memcg()
2546 page_counter_uncharge(&memcg->memsw, batch); in try_charge_memcg()
2569 * memory shortage. Allow dying and OOM-killed tasks to in try_charge_memcg()
2582 if (unlikely(current->flags & PF_MEMALLOC)) in try_charge_memcg()
2585 if (unlikely(task_in_memcg_oom(current))) in try_charge_memcg()
2627 if (nr_retries--) in try_charge_memcg()
2633 if (fatal_signal_pending(current)) in try_charge_memcg()
2654 return -ENOMEM; in try_charge_memcg()
2661 page_counter_charge(&memcg->memory, nr_pages); in try_charge_memcg()
2663 page_counter_charge(&memcg->memsw, nr_pages); in try_charge_memcg()
2669 refill_stock(memcg, batch - nr_pages); in try_charge_memcg()
2676 * not recorded as it most likely matches current's and won't in try_charge_memcg()
2683 mem_high = page_counter_read(&memcg->memory) > in try_charge_memcg()
2684 READ_ONCE(memcg->memory.high); in try_charge_memcg()
2685 swap_high = page_counter_read(&memcg->swap) > in try_charge_memcg()
2686 READ_ONCE(memcg->swap.high); in try_charge_memcg()
2691 schedule_work(&memcg->high_work); in try_charge_memcg()
2703 * Target some best-effort fairness between the tasks, in try_charge_memcg()
2707 current->memcg_nr_pages_over_high += batch; in try_charge_memcg()
2708 set_notify_resume(current); in try_charge_memcg()
2731 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2733 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2743 * - the page lock in commit_charge()
2744 * - LRU isolation in commit_charge()
2745 * - lock_page_memcg() in commit_charge()
2746 * - exclusive reference in commit_charge()
2748 page->memcg_data = (unsigned long)memcg; in commit_charge()
2758 if (unlikely(!css_tryget(&memcg->css))) in get_mem_cgroup_from_objcg()
2784 return -ENOMEM; in memcg_alloc_page_obj_cgroups()
2793 page->memcg_data = memcg_data; in memcg_alloc_page_obj_cgroups()
2794 } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) { in memcg_alloc_page_obj_cgroups()
2830 * Slab objects are accounted individually, not per-page. in mem_cgroup_from_obj()
2832 * the page->obj_cgroups. in mem_cgroup_from_obj()
2838 off = obj_to_index(page->slab_cache, page, p); in mem_cgroup_from_obj()
2868 memcg = mem_cgroup_from_task(current); in get_obj_cgroup_from_current()
2871 objcg = rcu_dereference(memcg->objcg); in get_obj_cgroup_from_current()
2937 page_counter_uncharge(&memcg->kmem, nr_pages); in obj_cgroup_uncharge_pages()
2940 css_put(&memcg->css); in obj_cgroup_uncharge_pages()
2965 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { in obj_cgroup_charge_pages()
2973 page_counter_charge(&memcg->kmem, nr_pages); in obj_cgroup_charge_pages()
2977 ret = -ENOMEM; in obj_cgroup_charge_pages()
2980 css_put(&memcg->css); in obj_cgroup_charge_pages()
2986 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3002 page->memcg_data = (unsigned long)objcg | in __memcg_kmem_charge_page()
3026 page->memcg_data = 0; in __memcg_kmem_uncharge_page()
3042 if (stock->cached_objcg != objcg) { in mod_objcg_state()
3045 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in mod_objcg_state()
3046 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in mod_objcg_state()
3047 stock->cached_objcg = objcg; in mod_objcg_state()
3048 stock->cached_pgdat = pgdat; in mod_objcg_state()
3049 } else if (stock->cached_pgdat != pgdat) { in mod_objcg_state()
3051 struct pglist_data *oldpg = stock->cached_pgdat; in mod_objcg_state()
3053 if (stock->nr_slab_reclaimable_b) { in mod_objcg_state()
3055 stock->nr_slab_reclaimable_b); in mod_objcg_state()
3056 stock->nr_slab_reclaimable_b = 0; in mod_objcg_state()
3058 if (stock->nr_slab_unreclaimable_b) { in mod_objcg_state()
3060 stock->nr_slab_unreclaimable_b); in mod_objcg_state()
3061 stock->nr_slab_unreclaimable_b = 0; in mod_objcg_state()
3063 stock->cached_pgdat = pgdat; in mod_objcg_state()
3066 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b in mod_objcg_state()
3067 : &stock->nr_slab_unreclaimable_b; in mod_objcg_state()
3096 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { in consume_obj_stock()
3097 stock->nr_bytes -= nr_bytes; in consume_obj_stock()
3108 struct obj_cgroup *old = stock->cached_objcg; in drain_obj_stock()
3113 if (stock->nr_bytes) { in drain_obj_stock()
3114 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; in drain_obj_stock()
3115 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); in drain_obj_stock()
3121 * The leftover is flushed to the centralized per-memcg value. in drain_obj_stock()
3123 * to a per-cpu stock (probably, on another CPU), see in drain_obj_stock()
3126 * How often it's flushed is a trade-off between the memory in drain_obj_stock()
3130 atomic_add(nr_bytes, &old->nr_charged_bytes); in drain_obj_stock()
3131 stock->nr_bytes = 0; in drain_obj_stock()
3135 * Flush the vmstat data in the current stock in drain_obj_stock()
3137 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
3138 if (stock->nr_slab_reclaimable_b) { in drain_obj_stock()
3139 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
3141 stock->nr_slab_reclaimable_b); in drain_obj_stock()
3142 stock->nr_slab_reclaimable_b = 0; in drain_obj_stock()
3144 if (stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
3145 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
3147 stock->nr_slab_unreclaimable_b); in drain_obj_stock()
3148 stock->nr_slab_unreclaimable_b = 0; in drain_obj_stock()
3150 stock->cached_pgdat = NULL; in drain_obj_stock()
3154 stock->cached_objcg = NULL; in drain_obj_stock()
3162 if (in_task() && stock->task_obj.cached_objcg) { in obj_stock_flush_required()
3163 memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg); in obj_stock_flush_required()
3167 if (stock->irq_obj.cached_objcg) { in obj_stock_flush_required()
3168 memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg); in obj_stock_flush_required()
3183 if (stock->cached_objcg != objcg) { /* reset if necessary */ in refill_obj_stock()
3186 stock->cached_objcg = objcg; in refill_obj_stock()
3187 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in refill_obj_stock()
3188 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in refill_obj_stock()
3191 stock->nr_bytes += nr_bytes; in refill_obj_stock()
3193 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { in refill_obj_stock()
3194 nr_pages = stock->nr_bytes >> PAGE_SHIFT; in refill_obj_stock()
3195 stock->nr_bytes &= (PAGE_SIZE - 1); in refill_obj_stock()
3213 * In theory, objcg->nr_charged_bytes can have enough in obj_cgroup_charge()
3214 * pre-charged bytes to satisfy the allocation. However, in obj_cgroup_charge()
3215 * flushing objcg->nr_charged_bytes requires two atomic in obj_cgroup_charge()
3216 * operations, and objcg->nr_charged_bytes can't be big. in obj_cgroup_charge()
3217 * The shared objcg->nr_charged_bytes can also become a in obj_cgroup_charge()
3221 * objcg->nr_charged_bytes later on when objcg changes. in obj_cgroup_charge()
3223 * The stock's nr_bytes may contain enough pre-charged bytes in obj_cgroup_charge()
3225 * on the pre-charged bytes not being changed outside of in obj_cgroup_charge()
3227 * pre-charged bytes as well when charging pages. To avoid a in obj_cgroup_charge()
3230 * to temporarily allow the pre-charged bytes to exceed the page in obj_cgroup_charge()
3231 * size limit. The maximum reachable value of the pre-charged in obj_cgroup_charge()
3232 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data in obj_cgroup_charge()
3236 nr_bytes = size & (PAGE_SIZE - 1); in obj_cgroup_charge()
3243 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false); in obj_cgroup_charge()
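
The obj_cgroup_charge() fragments above split an object size into whole pages plus a sub-page remainder: whole pages are charged up front, and the unused tail of the last page is handed back to the stock as pre-charged bytes. A sketch of that split (the callbacks are illustrative stand-ins for the charge and refill paths):

    #define PAGE_SIZE 4096UL
    #define PAGE_SHIFT 12

    static void charge_object(unsigned long size,
                              int (*charge_pages)(unsigned long),
                              void (*refill_bytes)(unsigned long))
    {
        unsigned long nr_pages = size >> PAGE_SHIFT;
        unsigned long nr_bytes = size & (PAGE_SIZE - 1);

        if (nr_bytes)
            nr_pages += 1;                      /* round the charge up */
        if (charge_pages(nr_pages) == 0 && nr_bytes)
            refill_bytes(PAGE_SIZE - nr_bytes); /* keep the tail
                                                   pre-charged */
    }
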
3267 head[i].memcg_data = head->memcg_data; in split_page_memcg()
3270 obj_cgroup_get_many(__page_objcg(head), nr - 1); in split_page_memcg()
3272 css_get_many(&memcg->css, nr - 1); in split_page_memcg()
3277 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3285 * Returns 0 on success, -EINVAL on failure.
3299 mod_memcg_state(from, MEMCG_SWAP, -1); in mem_cgroup_move_swap_account()
3303 return -EINVAL; in mem_cgroup_move_swap_account()
3309 return -EINVAL; in mem_cgroup_move_swap_account()
3322 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
3325 if (signal_pending(current)) { in mem_cgroup_resize_max()
3326 ret = -EINTR; in mem_cgroup_resize_max()
3335 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : in mem_cgroup_resize_max()
3336 max <= memcg->memsw.max; in mem_cgroup_resize_max()
3339 ret = -EINVAL; in mem_cgroup_resize_max()
3342 if (max > counter->max) in mem_cgroup_resize_max()
3358 ret = -EBUSY; in mem_cgroup_resize_max()
3384 mctz = soft_limit_tree_node(pgdat->node_id); in mem_cgroup_soft_limit_reclaim()
3391 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) in mem_cgroup_soft_limit_reclaim()
3408 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in mem_cgroup_soft_limit_reclaim()
3412 spin_lock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3423 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
3434 spin_unlock_irq(&mctz->lock); in mem_cgroup_soft_limit_reclaim()
3435 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3448 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3461 /* we call try-to-free pages to make this cgroup empty */ in mem_cgroup_force_empty()
3467 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
3470 if (signal_pending(current)) in mem_cgroup_force_empty()
3471 return -EINTR; in mem_cgroup_force_empty()
3476 nr_retries--; in mem_cgroup_force_empty()
3493 return -EINVAL; in mem_cgroup_force_empty_write()
3509 pr_warn_once("Non-hierarchical mode is deprecated. " in mem_cgroup_hierarchy_write()
3510 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_hierarchy_write()
3513 return -EINVAL; in mem_cgroup_hierarchy_write()
3522 cgroup_rstat_flush_irqsafe(memcg->css.cgroup); in mem_cgroup_usage()
3529 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3531 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3550 switch (MEMFILE_TYPE(cft->private)) { in mem_cgroup_read_u64()
3552 counter = &memcg->memory; in mem_cgroup_read_u64()
3555 counter = &memcg->memsw; in mem_cgroup_read_u64()
3558 counter = &memcg->kmem; in mem_cgroup_read_u64()
3561 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
3567 switch (MEMFILE_ATTR(cft->private)) { in mem_cgroup_read_u64()
3569 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3571 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3575 return (u64)counter->max * PAGE_SIZE; in mem_cgroup_read_u64()
3577 return (u64)counter->watermark * PAGE_SIZE; in mem_cgroup_read_u64()
3579 return counter->failcnt; in mem_cgroup_read_u64()
3581 return (u64)memcg->soft_limit * PAGE_SIZE; in mem_cgroup_read_u64()
3596 BUG_ON(memcg->kmemcg_id >= 0); in memcg_online_kmem()
3597 BUG_ON(memcg->kmem_state); in memcg_online_kmem()
3606 return -ENOMEM; in memcg_online_kmem()
3608 objcg->memcg = memcg; in memcg_online_kmem()
3609 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3613 memcg->kmemcg_id = memcg_id; in memcg_online_kmem()
3614 memcg->kmem_state = KMEM_ONLINE; in memcg_online_kmem()
3625 if (memcg->kmem_state != KMEM_ONLINE) in memcg_offline_kmem()
3628 memcg->kmem_state = KMEM_ALLOCATED; in memcg_offline_kmem()
3636 kmemcg_id = memcg->kmemcg_id; in memcg_offline_kmem()
3644 * ordering is imposed by list_lru_node->lock taken by in memcg_offline_kmem()
3648 css_for_each_descendant_pre(css, &memcg->css) { in memcg_offline_kmem()
3650 BUG_ON(child->kmemcg_id != kmemcg_id); in memcg_offline_kmem()
3651 child->kmemcg_id = parent->kmemcg_id; in memcg_offline_kmem()
3663 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) in memcg_free_kmem()
3685 ret = page_counter_set_max(&memcg->kmem, max); in memcg_update_kmem_max()
3696 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
3700 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
3718 memcg->tcpmem_active = true; in memcg_update_tcp_max()
3737 ret = page_counter_memparse(buf, "-1", &nr_pages); in mem_cgroup_write()
3741 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_write()
3744 ret = -EINVAL; in mem_cgroup_write()
3747 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_write()
3756 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_write()
3766 memcg->soft_limit = nr_pages; in mem_cgroup_write()
3779 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_reset()
3781 counter = &memcg->memory; in mem_cgroup_reset()
3784 counter = &memcg->memsw; in mem_cgroup_reset()
3787 counter = &memcg->kmem; in mem_cgroup_reset()
3790 counter = &memcg->tcpmem; in mem_cgroup_reset()
3796 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_reset()
3801 counter->failcnt = 0; in mem_cgroup_reset()
3813 return mem_cgroup_from_css(css)->move_charge_at_immigrate; in mem_cgroup_move_charge_read()
3823 return -EINVAL; in mem_cgroup_move_charge_write()
3826 * No kind of locking is needed in here, because ->can_attach() will in mem_cgroup_move_charge_write()
3831 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3838 return -ENOSYS; in mem_cgroup_move_charge_write()
3846 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
3903 cgroup_rstat_flush(memcg->css.cgroup); in memcg_numa_stat_show()
3906 seq_printf(m, "%s=%lu", stat->name, in memcg_numa_stat_show()
3907 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
3912 stat->lru_mask, false)); in memcg_numa_stat_show()
3918 seq_printf(m, "hierarchical_%s=%lu", stat->name, in memcg_numa_stat_show()
3919 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
3924 stat->lru_mask, true)); in memcg_numa_stat_show()
3975 cgroup_rstat_flush(memcg->css.cgroup); in memcg_stat_show()
3998 memory = min(memory, READ_ONCE(mi->memory.max)); in memcg_stat_show()
3999 memsw = min(memsw, READ_ONCE(mi->memsw.max)); in memcg_stat_show()
4035 mz = memcg->nodeinfo[pgdat->node_id]; in memcg_stat_show()
4037 anon_cost += mz->lruvec.anon_cost; in memcg_stat_show()
4038 file_cost += mz->lruvec.file_cost; in memcg_stat_show()
4062 return -EINVAL; in mem_cgroup_swappiness_write()
4065 memcg->swappiness = val; in mem_cgroup_swappiness_write()
4080 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
4082 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
4094 i = t->current_threshold; in __mem_cgroup_threshold()
4102 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
4103 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
4114 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
4115 eventfd_signal(t->entries[i].eventfd, 1); in __mem_cgroup_threshold()
4118 t->current_threshold = i - 1; in __mem_cgroup_threshold()
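
__mem_cgroup_threshold() walks the sorted threshold array in both directions from the last crossed index, signaling every threshold the usage has passed, then records the new current index. A userspace model of that walk (printf stands in for eventfd_signal(); names are illustrative):

    #include <stdio.h>

    struct thresholds {
        unsigned long *entries;
        int size;
        int current_threshold;
    };

    static void check_thresholds(struct thresholds *t, unsigned long usage)
    {
        int i = t->current_threshold;

        /* usage fell: signal thresholds now above it, walking down */
        for (; i >= 0 && t->entries[i] > usage; i--)
            printf("signal below %lu\n", t->entries[i]);
        i++; /* i is now the first candidate at or under usage */
        /* usage rose: signal thresholds now at or below it, walking up */
        for (; i < t->size && t->entries[i] <= usage; i++)
            printf("signal above %lu\n", t->entries[i]);
        t->current_threshold = i - 1;
    }
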
4139 if (_a->threshold > _b->threshold) in compare_thresholds()
4142 if (_a->threshold < _b->threshold) in compare_thresholds()
4143 return -1; in compare_thresholds()
4154 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
4155 eventfd_signal(ev->eventfd, 1); in mem_cgroup_oom_notify_cb()
4178 ret = page_counter_memparse(args, "-1", &threshold); in __mem_cgroup_usage_register_event()
4182 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4185 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
4188 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
4194 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4197 size = thresholds->primary ? thresholds->primary->size + 1 : 1; in __mem_cgroup_usage_register_event()
4202 ret = -ENOMEM; in __mem_cgroup_usage_register_event()
4205 new->size = size; in __mem_cgroup_usage_register_event()
4208 if (thresholds->primary) in __mem_cgroup_usage_register_event()
4209 memcpy(new->entries, thresholds->primary->entries, in __mem_cgroup_usage_register_event()
4210 flex_array_size(new, entries, size - 1)); in __mem_cgroup_usage_register_event()
4213 new->entries[size - 1].eventfd = eventfd; in __mem_cgroup_usage_register_event()
4214 new->entries[size - 1].threshold = threshold; in __mem_cgroup_usage_register_event()
4216 /* Sort thresholds. Registering a new threshold isn't time-critical */ in __mem_cgroup_usage_register_event()
4217 sort(new->entries, size, sizeof(*new->entries), in __mem_cgroup_usage_register_event()
4220 /* Find current threshold */ in __mem_cgroup_usage_register_event()
4221 new->current_threshold = -1; in __mem_cgroup_usage_register_event()
4223 if (new->entries[i].threshold <= usage) { in __mem_cgroup_usage_register_event()
4225 * new->current_threshold will not be used until in __mem_cgroup_usage_register_event()
4229 ++new->current_threshold; in __mem_cgroup_usage_register_event()
4235 kfree(thresholds->spare); in __mem_cgroup_usage_register_event()
4236 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_register_event()
4238 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_register_event()
4244 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
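
Registration, per the fragments above, copies the old array, appends the new entry, sorts, and recomputes the index of the last threshold at or below current usage before swapping the new array in RCU-style. A hedged userspace stand-in using qsort() (all names illustrative):

    #include <stdlib.h>
    #include <string.h>

    static int cmp_ul(const void *a, const void *b)
    {
        unsigned long x = *(const unsigned long *)a;
        unsigned long y = *(const unsigned long *)b;

        return (x > y) - (x < y);
    }

    static unsigned long *register_threshold(const unsigned long *old,
                                             int old_size,
                                             unsigned long thresh,
                                             unsigned long usage, int *cur)
    {
        int size = old_size + 1, i;
        unsigned long *new = malloc(size * sizeof(*new));

        if (!new)
            return NULL;
        if (old_size)
            memcpy(new, old, old_size * sizeof(*new));
        new[size - 1] = thresh;
        qsort(new, size, sizeof(*new), cmp_ul);

        *cur = -1; /* index of the last entry at or below usage */
        for (i = 0; i < size && new[i] <= usage; i++)
            ++*cur;
        return new; /* caller publishes this and keeps old as spare */
    }
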
4269 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4272 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
4275 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
4280 if (!thresholds->primary) in __mem_cgroup_usage_unregister_event()
4288 for (i = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4289 if (thresholds->primary->entries[i].eventfd != eventfd) in __mem_cgroup_usage_unregister_event()
4295 new = thresholds->spare; in __mem_cgroup_usage_unregister_event()
4308 new->size = size; in __mem_cgroup_usage_unregister_event()
4310 /* Copy thresholds and find current threshold */ in __mem_cgroup_usage_unregister_event()
4311 new->current_threshold = -1; in __mem_cgroup_usage_unregister_event()
4312 for (i = 0, j = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
4313 if (thresholds->primary->entries[i].eventfd == eventfd) in __mem_cgroup_usage_unregister_event()
4316 new->entries[j] = thresholds->primary->entries[i]; in __mem_cgroup_usage_unregister_event()
4317 if (new->entries[j].threshold <= usage) { in __mem_cgroup_usage_unregister_event()
4319 * new->current_threshold will not be used in __mem_cgroup_usage_unregister_event()
4323 ++new->current_threshold; in __mem_cgroup_usage_unregister_event()
4330 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_unregister_event()
4332 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_unregister_event()
4339 kfree(thresholds->spare); in __mem_cgroup_usage_unregister_event()
4340 thresholds->spare = NULL; in __mem_cgroup_usage_unregister_event()
4343 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4365 return -ENOMEM; in mem_cgroup_oom_register_event()
4369 event->eventfd = eventfd; in mem_cgroup_oom_register_event()
4370 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
4373 if (memcg->under_oom) in mem_cgroup_oom_register_event()
4387 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
4388 if (ev->eventfd == eventfd) { in mem_cgroup_oom_unregister_event()
4389 list_del(&ev->list); in mem_cgroup_oom_unregister_event()
4401 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); in mem_cgroup_oom_control_read()
4402 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
4404 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
4415 return -EINVAL; in mem_cgroup_oom_control_write()
4417 memcg->oom_kill_disable = val; in mem_cgroup_oom_control_write()
4430 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
4435 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
4440 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
4445 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain()
4447 if (!memcg->css.parent) in mem_cgroup_wb_domain()
4450 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
4454 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4462 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4465 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4475 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats()
4478 cgroup_rstat_flush_irqsafe(memcg->css.cgroup); in mem_cgroup_wb_stats()
4487 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
4488 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
4489 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
4491 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); in mem_cgroup_wb_stats()
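
mem_cgroup_wb_stats() clamps the writeback headroom at every level between the wb's memcg and the root: each level contributes "ceiling - used" with the ceiling being min(max, high). A sketch of that clamp over a flattened ancestor array (illustrative model, not the kernel's css walk):

    struct level { unsigned long max, high, used; };

    static unsigned long wb_headroom(const struct level *lv, int depth)
    {
        unsigned long headroom = (unsigned long)-1; /* stands in for
                                                       PAGE_COUNTER_MAX */
        int i;

        for (i = 0; i < depth; i++) {
            unsigned long ceiling = lv[i].max < lv[i].high ? lv[i].max
                                                           : lv[i].high;
            unsigned long used = lv[i].used < ceiling ? lv[i].used
                                                      : ceiling;

            if (ceiling - used < headroom)
                headroom = ceiling - used; /* tightest level wins */
        }
        return headroom;
    }
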
4500 * tracks ownership per-page while the latter per-inode. This was a
4501 * deliberate design decision because honoring per-page ownership in the
4503 * and deemed unnecessary given that write-sharing an inode across
4504 * different cgroups isn't a common use-case.
4506 * Combined with inode majority-writer ownership switching, this works well
4527 * page - a page whose memcg and writeback ownerships don't match - is
4533 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4547 int oldest = -1; in mem_cgroup_track_foreign_dirty_slowpath()
4558 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
4559 if (frn->bdi_id == wb->bdi->id && in mem_cgroup_track_foreign_dirty_slowpath()
4560 frn->memcg_id == wb->memcg_css->id) in mem_cgroup_track_foreign_dirty_slowpath()
4562 if (time_before64(frn->at, oldest_at) && in mem_cgroup_track_foreign_dirty_slowpath()
4563 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_track_foreign_dirty_slowpath()
4565 oldest_at = frn->at; in mem_cgroup_track_foreign_dirty_slowpath()
4571 * Re-using an existing one. Update timestamp lazily to in mem_cgroup_track_foreign_dirty_slowpath()
4573 * reasonably up-to-date and significantly shorter than in mem_cgroup_track_foreign_dirty_slowpath()
4581 if (time_before64(frn->at, now - update_intv)) in mem_cgroup_track_foreign_dirty_slowpath()
4582 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4585 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
4586 frn->bdi_id = wb->bdi->id; in mem_cgroup_track_foreign_dirty_slowpath()
4587 frn->memcg_id = wb->memcg_css->id; in mem_cgroup_track_foreign_dirty_slowpath()
4588 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
4595 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign()
4601 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
4609 if (time_after64(frn->at, now - intv) && in mem_cgroup_flush_foreign()
4610 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_flush_foreign()
4611 frn->at = 0; in mem_cgroup_flush_foreign()
4612 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); in mem_cgroup_flush_foreign()
4613 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, in mem_cgroup_flush_foreign()
4615 &frn->done); in mem_cgroup_flush_foreign()
4642 * This is way over-engineered. It tries to support fully configurable
4659 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove()
4661 remove_wait_queue(event->wqh, &event->wait); in memcg_event_remove()
4663 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4666 eventfd_signal(event->eventfd, 1); in memcg_event_remove()
4668 eventfd_ctx_put(event->eventfd); in memcg_event_remove()
4670 css_put(&memcg->css); in memcg_event_remove()
4676 * Called with wqh->lock held and interrupts disabled.
4683 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake()
4693 * side will require wqh->lock via remove_wait_queue(), in memcg_event_wake()
4696 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
4697 if (!list_empty(&event->list)) { in memcg_event_wake()
4698 list_del_init(&event->list); in memcg_event_wake()
4703 schedule_work(&event->remove); in memcg_event_wake()
4705 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
4717 event->wqh = wqh; in memcg_event_ptable_queue_proc()
4718 add_wait_queue(wqh, &event->wait); in memcg_event_ptable_queue_proc()
4747 return -EINVAL; in memcg_write_event_control()
4752 return -EINVAL; in memcg_write_event_control()
4757 return -ENOMEM; in memcg_write_event_control()
4759 event->memcg = memcg; in memcg_write_event_control()
4760 INIT_LIST_HEAD(&event->list); in memcg_write_event_control()
4761 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); in memcg_write_event_control()
4762 init_waitqueue_func_entry(&event->wait, memcg_event_wake); in memcg_write_event_control()
4763 INIT_WORK(&event->remove, memcg_event_remove); in memcg_write_event_control()
4767 ret = -EBADF; in memcg_write_event_control()
4771 event->eventfd = eventfd_ctx_fileget(efile.file); in memcg_write_event_control()
4772 if (IS_ERR(event->eventfd)) { in memcg_write_event_control()
4773 ret = PTR_ERR(event->eventfd); in memcg_write_event_control()
4779 ret = -EBADF; in memcg_write_event_control()
4797 name = cfile.file->f_path.dentry->d_name.name; in memcg_write_event_control()
4800 event->register_event = mem_cgroup_usage_register_event; in memcg_write_event_control()
4801 event->unregister_event = mem_cgroup_usage_unregister_event; in memcg_write_event_control()
4803 event->register_event = mem_cgroup_oom_register_event; in memcg_write_event_control()
4804 event->unregister_event = mem_cgroup_oom_unregister_event; in memcg_write_event_control()
4806 event->register_event = vmpressure_register_event; in memcg_write_event_control()
4807 event->unregister_event = vmpressure_unregister_event; in memcg_write_event_control()
4809 event->register_event = memsw_cgroup_usage_register_event; in memcg_write_event_control()
4810 event->unregister_event = memsw_cgroup_usage_unregister_event; in memcg_write_event_control()
4812 ret = -EINVAL; in memcg_write_event_control()
4821 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent, in memcg_write_event_control()
4823 ret = -EINVAL; in memcg_write_event_control()
4831 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
4835 vfs_poll(efile.file, &event->pt); in memcg_write_event_control()
4837 spin_lock_irq(&memcg->event_list_lock); in memcg_write_event_control()
4838 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
4839 spin_unlock_irq(&memcg->event_list_lock); in memcg_write_event_control()
4851 eventfd_ctx_put(event->eventfd); in memcg_write_event_control()
4992 * Swap-out records and page cache shadow entries need to store memcg
4995 * memory-controlled cgroups to 64k.
5002 * even when there are far fewer than 64k cgroups - possibly none.
5004 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5017 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
5018 idr_remove(&mem_cgroup_idr, memcg->id.id); in mem_cgroup_id_remove()
5019 memcg->id.id = 0; in mem_cgroup_id_remove()
5026 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
5031 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
5035 css_put(&memcg->css); in mem_cgroup_id_put_many()
5045 * mem_cgroup_from_id - look up a memcg from a memcg id
5069 tmp = -1; in alloc_mem_cgroup_per_node_info()
5074 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, in alloc_mem_cgroup_per_node_info()
5076 if (!pn->lruvec_stats_percpu) { in alloc_mem_cgroup_per_node_info()
5081 lruvec_init(&pn->lruvec); in alloc_mem_cgroup_per_node_info()
5082 pn->usage_in_excess = 0; in alloc_mem_cgroup_per_node_info()
5083 pn->on_tree = false; in alloc_mem_cgroup_per_node_info()
5084 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
5086 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
5092 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
5097 free_percpu(pn->lruvec_stats_percpu); in free_mem_cgroup_per_node_info()
5107 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
5123 long error = -ENOMEM; in mem_cgroup_alloc()
5132 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, in mem_cgroup_alloc()
5135 if (memcg->id.id < 0) { in mem_cgroup_alloc()
5136 error = memcg->id.id; in mem_cgroup_alloc()
5140 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
5142 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
5152 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
5153 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_alloc()
5154 mutex_init(&memcg->thresholds_lock); in mem_cgroup_alloc()
5155 spin_lock_init(&memcg->move_lock); in mem_cgroup_alloc()
5156 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
5157 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_alloc()
5158 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_alloc()
5159 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
5161 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
5162 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
5165 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
5167 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
5171 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
5172 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
5173 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
5175 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); in mem_cgroup_alloc()
5188 long error = -ENOMEM; in mem_cgroup_css_alloc()
5196 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5197 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
5198 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5200 memcg->swappiness = mem_cgroup_swappiness(parent); in mem_cgroup_css_alloc()
5201 memcg->oom_kill_disable = parent->oom_kill_disable; in mem_cgroup_css_alloc()
5203 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_alloc()
5204 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
5205 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_alloc()
5206 page_counter_init(&memcg->tcpmem, &parent->tcpmem); in mem_cgroup_css_alloc()
5208 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
5209 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
5210 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
5211 page_counter_init(&memcg->tcpmem, NULL); in mem_cgroup_css_alloc()
5214 return &memcg->css; in mem_cgroup_css_alloc()
5225 return &memcg->css; in mem_cgroup_css_alloc()
5243 return -ENOMEM; in mem_cgroup_css_online()
5247 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
5266 spin_lock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5267 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
5268 list_del_init(&event->list); in mem_cgroup_css_offline()
5269 schedule_work(&event->remove); in mem_cgroup_css_offline()
5271 spin_unlock_irq(&memcg->event_list_lock); in mem_cgroup_css_offline()
5273 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
5274 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
5299 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
5304 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) in mem_cgroup_css_free()
5307 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
5308 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
5316 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5325 * The current implementation only resets the essential configurations.
5332 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5333 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5334 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5335 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5336 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
5337 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
5338 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5339 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
5340 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5349 cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup); in mem_cgroup_flush_stats()
5367 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_css_rstat_flush()
5372 * below us. We're in a per-cpu loop here and this is in mem_cgroup_css_rstat_flush()
5375 delta = memcg->vmstats.state_pending[i]; in mem_cgroup_css_rstat_flush()
5377 memcg->vmstats.state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5380 v = READ_ONCE(statc->state[i]); in mem_cgroup_css_rstat_flush()
5381 if (v != statc->state_prev[i]) { in mem_cgroup_css_rstat_flush()
5382 delta += v - statc->state_prev[i]; in mem_cgroup_css_rstat_flush()
5383 statc->state_prev[i] = v; in mem_cgroup_css_rstat_flush()
5390 memcg->vmstats.state[i] += delta; in mem_cgroup_css_rstat_flush()
5392 parent->vmstats.state_pending[i] += delta; in mem_cgroup_css_rstat_flush()
5396 delta = memcg->vmstats.events_pending[i]; in mem_cgroup_css_rstat_flush()
5398 memcg->vmstats.events_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5400 v = READ_ONCE(statc->events[i]); in mem_cgroup_css_rstat_flush()
5401 if (v != statc->events_prev[i]) { in mem_cgroup_css_rstat_flush()
5402 delta += v - statc->events_prev[i]; in mem_cgroup_css_rstat_flush()
5403 statc->events_prev[i] = v; in mem_cgroup_css_rstat_flush()
5409 memcg->vmstats.events[i] += delta; in mem_cgroup_css_rstat_flush()
5411 parent->vmstats.events_pending[i] += delta; in mem_cgroup_css_rstat_flush()
5415 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
5420 ppn = parent->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
5422 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); in mem_cgroup_css_rstat_flush()
5425 delta = pn->lruvec_stats.state_pending[i]; in mem_cgroup_css_rstat_flush()
5427 pn->lruvec_stats.state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
5429 v = READ_ONCE(lstatc->state[i]); in mem_cgroup_css_rstat_flush()
5430 if (v != lstatc->state_prev[i]) { in mem_cgroup_css_rstat_flush()
5431 delta += v - lstatc->state_prev[i]; in mem_cgroup_css_rstat_flush()
5432 lstatc->state_prev[i] = v; in mem_cgroup_css_rstat_flush()
5438 pn->lruvec_stats.state[i] += delta; in mem_cgroup_css_rstat_flush()
5440 ppn->lruvec_stats.state_pending[i] += delta; in mem_cgroup_css_rstat_flush()
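
The mem_cgroup_css_rstat_flush() fragments above repeat one pattern per counter: fold the per-cpu delta since the last flush into this level's total, then pass the same delta up as the parent's pending work. A single-counter model of that propagation (illustrative struct, not the kernel's vmstats layout):

    struct node {
        long total;       /* vmstats.state[i]         */
        long pending;     /* vmstats.state_pending[i] */
        long percpu_prev; /* statc->state_prev[i]     */
    };

    static void flush_one(struct node *n, struct node *parent,
                          long percpu_now)
    {
        long delta = n->pending;

        n->pending = 0;
        if (percpu_now != n->percpu_prev) {
            delta += percpu_now - n->percpu_prev;
            n->percpu_prev = percpu_now; /* remember the flushed value */
        }
        if (delta) {
            n->total += delta;
            if (parent)
                parent->pending += delta; /* propagate toward the root */
        }
    }
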
5454 mc.precharge += count; in mem_cgroup_do_precharge()
5459 while (count--) { in mem_cgroup_do_precharge()
5463 mc.precharge++; in mem_cgroup_do_precharge()
5535 entry->val = ent.val; in mc_handle_swap_pte()
5550 if (!vma->vm_file) /* anonymous vma */ in mc_handle_file_pte()
5555 /* page is moved even if it's not RSS of this task (page-faulted). */ in mc_handle_file_pte()
5557 return find_get_incore_page(vma->vm_file->f_mapping, in mc_handle_file_pte()
5562 * mem_cgroup_move_account - move account of the page
5591 ret = -EBUSY; in mem_cgroup_move_account()
5595 ret = -EINVAL; in mem_cgroup_move_account()
5607 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages); in mem_cgroup_move_account()
5611 -nr_pages); in mem_cgroup_move_account()
5617 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages); in mem_cgroup_move_account()
5621 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages); in mem_cgroup_move_account()
5626 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages); in mem_cgroup_move_account()
5635 -nr_pages); in mem_cgroup_move_account()
5643 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages); in mem_cgroup_move_account()
5662 css_get(&to->css); in mem_cgroup_move_account()
5663 css_put(&from->css); in mem_cgroup_move_account()
5665 page->memcg_data = (unsigned long)to; in mem_cgroup_move_account()
5674 mem_cgroup_charge_statistics(from, page, -nr_pages); in mem_cgroup_move_account()
5684 * get_mctgt_type - get target type of moving charge
5693 * move charge. if @target is not NULL, the page is stored in target->page
5697 * in target->ent.
5736 target->page = page; in get_mctgt_type()
5743 * But we cannot move a tail-page in a THP. in get_mctgt_type()
5749 target->ent = ent; in get_mctgt_type()
5779 target->page = page; in get_mctgt_type_thp()
5796 struct vm_area_struct *vma = walk->vma; in mem_cgroup_count_precharge_pte_range()
5808 mc.precharge += HPAGE_PMD_NR; in mem_cgroup_count_precharge_pte_range()
5815 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
5818 mc.precharge++; /* increment precharge temporarily */ in mem_cgroup_count_precharge_pte_range()
5819 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_count_precharge_pte_range()
5831 unsigned long precharge; in mem_cgroup_count_precharge() local
5834 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); in mem_cgroup_count_precharge()
5837 precharge = mc.precharge; in mem_cgroup_count_precharge()
5838 mc.precharge = 0; in mem_cgroup_count_precharge()
5840 return precharge; in mem_cgroup_count_precharge()
5845 unsigned long precharge = mem_cgroup_count_precharge(mm); in mem_cgroup_precharge_mc() local
5848 mc.moving_task = current; in mem_cgroup_precharge_mc()
5849 return mem_cgroup_do_precharge(precharge); in mem_cgroup_precharge_mc()
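The two functions above form a two-pass scheme: one page-table walk only counts what would move, then the whole sum is charged up front, so the second (moving) walk never has to reclaim while holding page-table locks. A compact model of that shape, with obviously fake walk/charge helpers:

        #include <stdio.h>

        /* hypothetical stand-ins for the counting walk and the charge */
        static unsigned long count_movable_pages(void) { return 42; }
        static int do_precharge(unsigned long n)
        {
                printf("precharge %lu pages\n", n);
                return 0;
        }

        int main(void)
        {
                /* pass 1: count; pass 2: charge the total before moving */
                unsigned long precharge = count_movable_pages();

                return do_precharge(precharge);
        }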
5859 if (mc.precharge) { in __mem_cgroup_clear_mc()
5860 cancel_charge(mc.to, mc.precharge); in __mem_cgroup_clear_mc()
5861 mc.precharge = 0; in __mem_cgroup_clear_mc()
5875 page_counter_uncharge(&mc.from->memsw, mc.moved_swap); in __mem_cgroup_clear_mc()
5880 * we charged both to->memory and to->memsw, so we in __mem_cgroup_clear_mc()
5881 * should uncharge to->memory. in __mem_cgroup_clear_mc()
5884 page_counter_uncharge(&mc.to->memory, mc.moved_swap); in __mem_cgroup_clear_mc()
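The invariant behind the moved_swap handling above: memsw counts memory plus swap, while memory counts resident pages only, and a swapped-out entry consumes memsw alone. Since precharge charged both to->memory and to->memsw, the memory half must be given back for entries that are in swap. A tiny two-counter model (it ignores the v1/v2 do_memsw_account() distinction):

        #include <stdio.h>

        /* Sketch (not kernel code): per-cgroup "memory" and "memsw"
         * (memory+swap) counters around a moved batch of swap entries.
         */
        struct counters { long memory; long memsw; };

        int main(void)
        {
                struct counters from = { .memory = 0, .memsw = 5 }; /* 5 in swap */
                struct counters to   = { .memory = 5, .memsw = 5 }; /* precharged both */
                long moved_swap = 5;

                from.memsw -= moved_swap;  /* entries left 'from' */
                to.memory  -= moved_swap;  /* not resident: give memory back */
                /* to.memsw keeps the charge: that is what a swap entry costs */

                printf("from: mem=%ld memsw=%ld, to: mem=%ld memsw=%ld\n",
                       from.memory, from.memsw, to.memory, to.memsw);
                return 0;
        }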
5927 * Multi-process migrations only happen on the default hierarchy in mem_cgroup_can_attach()
5943 * tunable will only affect upcoming migrations, not the current one. in mem_cgroup_can_attach()
5946 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
5958 if (mm->owner == p) { in mem_cgroup_can_attach()
5961 VM_BUG_ON(mc.precharge); in mem_cgroup_can_attach()
5993 struct vm_area_struct *vma = walk->vma; in mem_cgroup_move_charge_pte_range()
6002 if (mc.precharge < HPAGE_PMD_NR) { in mem_cgroup_move_charge_pte_range()
6012 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6022 mc.precharge -= HPAGE_PMD_NR; in mem_cgroup_move_charge_pte_range()
6034 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_move_charge_pte_range()
6040 if (!mc.precharge) in mem_cgroup_move_charge_pte_range()
6061 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6073 mc.precharge--; in mem_cgroup_move_charge_pte_range()
6083 pte_unmap_unlock(pte - 1, ptl); in mem_cgroup_move_charge_pte_range()
6111 * for already started RCU-only updates to finish. in mem_cgroup_move_charge()
6113 atomic_inc(&mc.from->moving_account); in mem_cgroup_move_charge()
6121 * to move enough charges, but moving charge is a best-effort in mem_cgroup_move_charge()
6132 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, in mem_cgroup_move_charge()
6136 atomic_dec(&mc.from->moving_account); in mem_cgroup_move_charge()
6174 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
6180 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); in memory_min_show()
6195 page_counter_set_min(&memcg->memory, min); in memory_min_write()
6203 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); in memory_low_show()
6218 page_counter_set_low(&memcg->memory, low); in memory_low_write()
6226 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); in memory_high_show()
6243 page_counter_set_high(&memcg->memory, high); in memory_high_write()
6246 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
6252 if (signal_pending(current)) in memory_high_write()
6261 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
6264 if (!reclaimed && !nr_retries--) in memory_high_write()
6275 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); in memory_max_show()
6292 xchg(&memcg->memory.max, max); in memory_max_write()
6295 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
6300 if (signal_pending(current)) in memory_max_write()
6310 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
6312 nr_reclaims--; in memory_max_write()
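The memory.high and memory.max writers above share one shape: publish the new limit first, then loop reclaiming the excess until usage fits or a retry budget runs out (memory.max escalates to the OOM killer at that point, memory.high simply gives up); both also bail on a pending signal, which this model omits. A runnable sketch of the loop, with a deliberately fake reclaimer:

        #include <stdbool.h>
        #include <stdio.h>

        static unsigned long usage = 1000;

        /* hypothetical stand-in for try_to_free_mem_cgroup_pages() */
        static unsigned long reclaim(unsigned long want)
        {
                unsigned long got = want / 2;  /* pretend reclaim is half effective */

                usage -= got;
                return got;
        }

        static bool shrink_to(unsigned long limit)
        {
                int nr_retries = 5;            /* models the retry budget */

                while (usage > limit) {
                        /* burn a retry only when reclaim made no progress */
                        if (!reclaim(usage - limit) && !nr_retries--)
                                return false;  /* caller may escalate (OOM kill) */
                }
                return true;
        }

        int main(void)
        {
                printf("fit: %d, usage now %lu\n", shrink_to(100), usage);
                return 0;
        }

Note the short-circuit in the retry test: a reclaim pass that frees anything does not consume a retry, matching the `!reclaimed && !nr_retries--` pattern in the listing.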
6339 __memory_events_show(m, memcg->memory_events); in memory_events_show()
6347 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
6358 return -ENOMEM; in memory_stat_show()
6376 cgroup_rstat_flush(memcg->css.cgroup); in memory_numa_stat_show()
6405 seq_printf(m, "%d\n", memcg->oom_group); in memory_oom_group_show()
6418 return -EINVAL; in memory_oom_group_write()
6425 return -EINVAL; in memory_oom_group_write()
6427 memcg->oom_group = oom_group; in memory_oom_group_write()
6434 .name = "current",
6530 * This makes distribution proportional, but also work-conserving:
6541 * of the ancestor's claim to protection, any unutilized -
6542 * "floating" - protection from up the tree is distributed in
6568 * claimed protection in order to be work-conserving: claimed in effective_protection()
6606 * aren't read atomically - make sure the division is sane. in effective_protection()
6615 unclaimed = parent_effective - siblings_protected; in effective_protection()
6616 unclaimed *= usage - protected; in effective_protection()
6617 unclaimed /= parent_usage - siblings_protected; in effective_protection()
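The proportional share above is easiest to see with numbers. A standalone model of just this division, with invented values; the kernel additionally clamps the starting protection against usage and the parent's effective protection before this step:

        #include <stdio.h>

        /* Sketch (not kernel code): distributing a parent's unclaimed
         * protection proportionally to a child's unprotected usage, as in
         * effective_protection().
         */
        int main(void)
        {
                unsigned long usage = 300, protected = 100;   /* this memcg */
                unsigned long parent_usage = 1000;
                unsigned long parent_effective = 500;
                unsigned long siblings_protected = 400;       /* sum of claims */
                unsigned long ep = protected;

                if (parent_effective > siblings_protected &&
                    parent_usage > siblings_protected &&
                    usage > protected) {
                        unsigned long unclaimed;

                        unclaimed = parent_effective - siblings_protected;
                        unclaimed *= usage - protected;
                        unclaimed /= parent_usage - siblings_protected;
                        ep += unclaimed;      /* 100 + 100*200/600 = 133 */
                }
                printf("effective protection: %lu pages\n", ep);
                return 0;
        }

So of the parent's 100 unclaimed pages of protection, this child gets a share in proportion to its 200 unprotected pages out of the siblings' 600: the "floating" protection is work-conserving rather than wasted.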
6626 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6627 * @root: the top ancestor of the sub-tree being checked
6631 * of a top-down tree iteration, not for isolated queries.
6655 usage = page_counter_read(&memcg->memory); in mem_cgroup_calculate_protection()
6660 /* No parent means a non-hierarchical mode on v1 memcg */ in mem_cgroup_calculate_protection()
6665 memcg->memory.emin = READ_ONCE(memcg->memory.min); in mem_cgroup_calculate_protection()
6666 memcg->memory.elow = READ_ONCE(memcg->memory.low); in mem_cgroup_calculate_protection()
6670 parent_usage = page_counter_read(&parent->memory); in mem_cgroup_calculate_protection()
6672 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6673 READ_ONCE(memcg->memory.min), in mem_cgroup_calculate_protection()
6674 READ_ONCE(parent->memory.emin), in mem_cgroup_calculate_protection()
6675 atomic_long_read(&parent->memory.children_min_usage))); in mem_cgroup_calculate_protection()
6677 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6678 READ_ONCE(memcg->memory.low), in mem_cgroup_calculate_protection()
6679 READ_ONCE(parent->memory.elow), in mem_cgroup_calculate_protection()
6680 atomic_long_read(&parent->memory.children_low_usage))); in mem_cgroup_calculate_protection()
6692 css_get(&memcg->css); in charge_memcg()
6704 * __mem_cgroup_charge - charge a newly allocated page to a cgroup
6725 css_put(&memcg->css); in __mem_cgroup_charge()
6731 * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6755 if (!memcg || !css_tryget_online(&memcg->css)) in mem_cgroup_swapin_charge_page()
6761 css_put(&memcg->css); in mem_cgroup_swapin_charge_page()
6766 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6784 * so this is a non-issue here. Memory and swap charge lifetimes in mem_cgroup_swapin_uncharge_swap()
6815 if (ug->nr_memory) { in uncharge_batch()
6816 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); in uncharge_batch()
6818 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); in uncharge_batch()
6819 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem) in uncharge_batch()
6820 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); in uncharge_batch()
6821 memcg_oom_recover(ug->memcg); in uncharge_batch()
6825 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); in uncharge_batch()
6826 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory); in uncharge_batch()
6827 memcg_check_events(ug->memcg, ug->dummy_page); in uncharge_batch()
6831 css_put(&ug->memcg->css); in uncharge_batch()
6862 if (ug->memcg != memcg) { in uncharge_page()
6863 if (ug->memcg) { in uncharge_page()
6867 ug->memcg = memcg; in uncharge_page()
6868 ug->dummy_page = page; in uncharge_page()
6871 css_get(&memcg->css); in uncharge_page()
6877 ug->nr_memory += nr_pages; in uncharge_page()
6878 ug->nr_kmem += nr_pages; in uncharge_page()
6880 page->memcg_data = 0; in uncharge_page()
6885 ug->nr_memory += nr_pages; in uncharge_page()
6886 ug->pgpgout++; in uncharge_page()
6888 page->memcg_data = 0; in uncharge_page()
6891 css_put(&memcg->css); in uncharge_page()
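The uncharge path above batches consecutive pages that belong to the same memcg and flushes the accumulated totals whenever the owner changes. A model of that gather/flush pattern; the void * owner and printf stand in for struct mem_cgroup and the real counter updates:

        #include <stdio.h>

        /* Sketch (not kernel code): batching consecutive uncharges per
         * owner, as uncharge_page()/uncharge_batch() do.
         */
        struct gather { void *memcg; unsigned long nr; };

        static void flush_batch(struct gather *ug)
        {
                if (ug->memcg)
                        printf("uncharge %lu pages from owner %p\n",
                               ug->nr, ug->memcg);
                ug->nr = 0;
        }

        static void gather_page(struct gather *ug, void *memcg, unsigned long nr)
        {
                if (ug->memcg != memcg) {  /* owner changed: flush old batch */
                        flush_batch(ug);
                        ug->memcg = memcg;
                }
                ug->nr += nr;
        }

        int main(void)
        {
                struct gather ug = { 0 };
                int a, b;                  /* two fake memcgs */

                gather_page(&ug, &a, 1);
                gather_page(&ug, &a, 4);   /* same owner: batched */
                gather_page(&ug, &b, 2);   /* new owner: first batch flushed */
                flush_batch(&ug);          /* final flush */
                return 0;
        }

Batching matters because page lists freed together usually share an owner, so the page_counter and event updates are paid once per run instead of once per page.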
6895 * __mem_cgroup_uncharge - uncharge a page
6904 /* Don't touch page->lru of any random page, pre-check: */ in __mem_cgroup_uncharge()
6914 * __mem_cgroup_uncharge_list - uncharge a list of pages in __mem_cgroup_uncharge_list()
6933 * mem_cgroup_migrate - charge a page's replacement
6940 * Both pages must be locked, @newpage->mapping must be set up.
6966 /* Force-charge the new page. The old one will be freed soon */ in mem_cgroup_migrate()
6970 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_migrate()
6972 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_migrate()
6975 css_get(&memcg->css); in mem_cgroup_migrate()
6999 memcg = mem_cgroup_from_task(current); in mem_cgroup_sk_alloc()
7002 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) in mem_cgroup_sk_alloc()
7004 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
7005 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
7012 if (sk->sk_memcg) in mem_cgroup_sk_free()
7013 css_put(&sk->sk_memcg->css); in mem_cgroup_sk_free()
7017 * mem_cgroup_charge_skmem - charge socket memory
7031 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
7032 memcg->tcpmem_pressure = 0; in mem_cgroup_charge_skmem()
7035 memcg->tcpmem_pressure = 1; in mem_cgroup_charge_skmem()
7037 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
7052 * mem_cgroup_uncharge_skmem - uncharge socket memory
7059 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
7063 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
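On the v1 "tcpmem" path above, the charge never fails outright: when the counter is over its limit, the pressure flag is raised to tell the network stack to back off, but the charge is forced through so the socket keeps working. A standalone model of that flag dance, with plain longs in place of page_counters:

        #include <stdbool.h>
        #include <stdio.h>

        static long tcpmem, tcpmem_limit = 4;
        static bool tcpmem_pressure;

        static bool charge_skmem(long nr_pages)
        {
                if (tcpmem + nr_pages <= tcpmem_limit) {
                        tcpmem += nr_pages;
                        tcpmem_pressure = false;
                        return true;       /* charged within limit */
                }
                tcpmem_pressure = true;    /* tell the net stack to back off */
                tcpmem += nr_pages;        /* ...but force the charge anyway */
                return false;
        }

        static void uncharge_skmem(long nr_pages)
        {
                tcpmem -= nr_pages;
        }

        int main(void)
        {
                printf("%d %d\n", charge_skmem(3), charge_skmem(3)); /* 1 0 */
                uncharge_skmem(6);
                return 0;
        }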
7088 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7098 * used for per-memcg-per-cpu caching of per-node statistics. In order in mem_cgroup_init()
7108 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
7117 rtpn->rb_root = RB_ROOT; in mem_cgroup_init()
7118 rtpn->rb_rightmost = NULL; in mem_cgroup_init()
7119 spin_lock_init(&rtpn->lock); in mem_cgroup_init()
7130 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
7147 * mem_cgroup_swapout - transfer a memsw charge to swap
7183 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); in mem_cgroup_swapout()
7189 page->memcg_data = 0; in mem_cgroup_swapout()
7192 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
7196 page_counter_charge(&swap_memcg->memsw, nr_entries); in mem_cgroup_swapout()
7197 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
7202 * i_pages lock which is taken with interrupts-off. It is in mem_cgroup_swapout()
7204 * only synchronisation we have for updating the per-CPU variables. in mem_cgroup_swapout()
7207 mem_cgroup_charge_statistics(memcg, page, -nr_entries); in mem_cgroup_swapout()
7210 css_put(&memcg->css); in mem_cgroup_swapout()
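The counter motion in mem_cgroup_swapout() above is a hand-off: the page's memory charge is released, while the memsw charge follows the swap entry, possibly to an ancestor when the entry is accounted there. A two-cgroup model of just the counters; when swap_memcg is the same cgroup, the two memsw updates cancel and the kernel skips them:

        #include <stdio.h>

        /* Sketch (not kernel code): counter hand-off on swapout when the
         * swap entry is accounted to a different (ancestor) cgroup.
         */
        struct counters { long memory; long memsw; };

        int main(void)
        {
                struct counters memcg      = { .memory = 8, .memsw = 8 };
                struct counters swap_memcg = { .memory = 0, .memsw = 0 };
                long nr_entries = 1;

                memcg.memory     -= nr_entries;  /* the page leaves memory... */
                swap_memcg.memsw += nr_entries;  /* ...but its memsw charge   */
                memcg.memsw      -= nr_entries;  /* follows the swap entry    */

                printf("memcg %ld/%ld, swap_memcg %ld/%ld\n",
                       memcg.memory, memcg.memsw,
                       swap_memcg.memory, swap_memcg.memsw);
                return 0;
        }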
7214 * __mem_cgroup_try_charge_swap - try charging swap space for a page
7220 * Returns 0 on success, -ENOMEM on failure.
7246 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
7250 return -ENOMEM; in __mem_cgroup_try_charge_swap()
7255 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
7264 * __mem_cgroup_uncharge_swap - uncharge swap space
7279 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
7281 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
7283 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
7297 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
7298 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
7318 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
7320 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
7321 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
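The test above deliberately fires early: with the `usage * 2` comparison, a memcg's swap counts as "full" once it is halfway to either swap.high or swap.max, in the same spirit as the global vm_swap_full() check. As a standalone predicate:

        #include <stdbool.h>
        #include <stdio.h>

        /* Sketch (not kernel code): the half-full test from
         * mem_cgroup_swap_full().
         */
        static bool swap_full(unsigned long usage, unsigned long high,
                              unsigned long max)
        {
                return usage * 2 >= high || usage * 2 >= max;
        }

        int main(void)
        {
                /* max = 100: full from 50 pages of swap onward */
                printf("%d %d\n", swap_full(49, 1000, 100),
                       swap_full(50, 1000, 100));  /* 0 1 */
                return 0;
        }

Treating half-used swap as full leaves headroom so swap readahead and clustering still have free slots to work with.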
7343 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
7349 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); in swap_high_show()
7364 page_counter_set_high(&memcg->swap, high); in swap_high_write()
7372 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); in swap_max_show()
7387 xchg(&memcg->swap.max, max); in swap_max_write()
7397 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
7399 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
7401 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()
7408 .name = "swap.current",
7469 /* No memory control -> no swap control */ in mem_cgroup_swap_init()