/Linux-v5.4/mm/

memcontrol.c
     143  struct mem_cgroup *memcg;    member
     157  int (*register_event)(struct mem_cgroup *memcg,
     164  void (*unregister_event)(struct mem_cgroup *memcg,
     176  static void mem_cgroup_threshold(struct mem_cgroup *memcg);
     177  static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
     256  struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)    in memcg_to_vmpressure() argument
     258  if (!memcg)    in memcg_to_vmpressure()
     259  memcg = root_mem_cgroup;    in memcg_to_vmpressure()
     260  return &memcg->vmpressure;    in memcg_to_vmpressure()
     331  static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,    in memcg_expand_one_shrinker_map() argument
     [all …]

vmpressure.c
      78  struct mem_cgroup *memcg = mem_cgroup_from_css(css);    in vmpressure_parent() local
      80  memcg = parent_mem_cgroup(memcg);    in vmpressure_parent()
      81  if (!memcg)    in vmpressure_parent()
      83  return memcg_to_vmpressure(memcg);    in vmpressure_parent()
     240  void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,    in vmpressure() argument
     243  struct vmpressure *vmpr = memcg_to_vmpressure(memcg);    in vmpressure()
     283  if (!memcg || memcg == root_mem_cgroup)    in vmpressure()
     307  memcg->socket_pressure = jiffies + HZ;    in vmpressure()
     323  void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)    in vmpressure_prio() argument
     339  vmpressure(gfp, memcg, true, vmpressure_win, 0);    in vmpressure_prio()
     [all …]

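The vmpressure() and vmpressure_prio() hits above are the producer side of the memory-pressure notification machinery: reclaim hands over raw scanned/reclaimed counts and vmpressure() does the windowing and ratio math internally. A minimal sketch of the calling convention, assuming a reclaim-style caller with a struct scan_control (report_pressure and its threshold check are illustrative, not verbatim vmscan.c):

    /*
     * Illustrative sketch, not verbatim kernel code. tree == true
     * attributes the pressure to the whole memcg subtree;
     * vmpressure_prio() is the shortcut reclaim uses once its
     * priority becomes aggressive.
     */
    static void report_pressure(struct scan_control *sc,
                                unsigned long scanned,
                                unsigned long reclaimed)
    {
            vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
                       scanned, reclaimed);
            if (sc->priority <= DEF_PRIORITY / 2)   /* hypothetical cutoff */
                    vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
                                    sc->priority);
    }

Userspace listeners attach to this via vmpressure_register_event() (see vmpressure.h below), which backs cgroup v1's memory.pressure_level eventfd interface.
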
slab.h
      83  struct mem_cgroup *memcg;    member
     338  return READ_ONCE(s->memcg_params.memcg);    in memcg_from_slab_page()
     351  struct mem_cgroup *memcg;    in memcg_charge_slab() local
     356  memcg = READ_ONCE(s->memcg_params.memcg);    in memcg_charge_slab()
     357  while (memcg && !css_tryget_online(&memcg->css))    in memcg_charge_slab()
     358  memcg = parent_mem_cgroup(memcg);    in memcg_charge_slab()
     361  if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {    in memcg_charge_slab()
     368  ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);    in memcg_charge_slab()
     372  lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);    in memcg_charge_slab()
     377  css_put_many(&memcg->css, 1 << order);    in memcg_charge_slab()
     [all …]

workingset.c
     226  struct mem_cgroup *memcg = page_memcg(page);    in workingset_eviction() local
     227  int memcgid = mem_cgroup_id(memcg);    in workingset_eviction()
     236  lruvec = mem_cgroup_lruvec(pgdat, memcg);    in workingset_eviction()
     254  struct mem_cgroup *memcg;    in workingset_refault() local
     280  memcg = mem_cgroup_from_id(memcgid);    in workingset_refault()
     281  if (!mem_cgroup_disabled() && !memcg)    in workingset_refault()
     283  lruvec = mem_cgroup_lruvec(pgdat, memcg);    in workingset_refault()
     334  struct mem_cgroup *memcg;    in workingset_activation() local
     345  memcg = page_memcg_rcu(page);    in workingset_activation()
     346  if (!mem_cgroup_disabled() && !memcg)    in workingset_activation()
     [all …]

vmscan.c
     262  struct mem_cgroup *memcg = sc->target_mem_cgroup;    in sane_reclaim() local
     264  if (!memcg)    in sane_reclaim()
     274  struct mem_cgroup *memcg,    in set_memcg_congestion() argument
     279  if (!memcg)    in set_memcg_congestion()
     282  mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);    in set_memcg_congestion()
     287  struct mem_cgroup *memcg)    in memcg_congested() argument
     291  mn = mem_cgroup_nodeinfo(memcg, pgdat->node_id);    in memcg_congested()
     316  struct mem_cgroup *memcg, bool congested)    in set_memcg_congestion() argument
     321  struct mem_cgroup *memcg)    in memcg_congested() argument
     594  struct mem_cgroup *memcg, int priority)    in shrink_slab_memcg() argument
     [all …]

list_lru.c
      75  struct mem_cgroup *memcg = NULL;    in list_lru_from_kmem() local
      80  memcg = mem_cgroup_from_kmem(ptr);    in list_lru_from_kmem()
      81  if (!memcg)    in list_lru_from_kmem()
      84  l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));    in list_lru_from_kmem()
      87  *memcg_ptr = memcg;    in list_lru_from_kmem()
     129  struct mem_cgroup *memcg;    in list_lru_add() local
     134  l = list_lru_from_kmem(nlru, item, &memcg);    in list_lru_add()
     138  memcg_set_shrinker_bit(memcg, nid,    in list_lru_add()
     185  int nid, struct mem_cgroup *memcg)    in list_lru_count_one() argument
     192  l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));    in list_lru_count_one()
     [all …]

slab_common.c
     184  mem_cgroup_put(s->memcg_params.memcg);    in destroy_memcg_params()
     185  WRITE_ONCE(s->memcg_params.memcg, NULL);    in destroy_memcg_params()
     238  void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)    in memcg_link_cache() argument
     243  css_get(&memcg->css);    in memcg_link_cache()
     244  s->memcg_params.memcg = memcg;    in memcg_link_cache()
     248  &s->memcg_params.memcg->kmem_caches);    in memcg_link_cache()
     383  struct mem_cgroup *memcg, struct kmem_cache *root_cache)    in create_cache() argument
     413  memcg_link_cache(s, memcg);    in create_cache()
     640  void memcg_create_kmem_cache(struct mem_cgroup *memcg,    in memcg_create_kmem_cache() argument
     644  struct cgroup_subsys_state *css = &memcg->css;    in memcg_create_kmem_cache()
     [all …]

oom_kill.c
      69  return oc->memcg != NULL;    in is_memcg_oom()
     261  oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;    in constrained_alloc()
     368  mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);    in select_bad_process()
     429  mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);    in dump_tasks()
     447  mem_cgroup_print_oom_context(oc->memcg, victim);    in dump_oom_summary()
     462  mem_cgroup_print_oom_meminfo(oc->memcg);    in dump_header()
     981  oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);    in oom_kill_process()
    1126  .memcg = NULL,    in pagefault_out_of_memory()

huge_memory.c
     502  struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;    in get_deferred_split_queue() local
     505  if (memcg)    in get_deferred_split_queue()
     506  return &memcg->deferred_split_queue;    in get_deferred_split_queue()
     577  struct mem_cgroup *memcg;    in __do_huge_pmd_anonymous_page() local
     584  if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {    in __do_huge_pmd_anonymous_page()
     619  mem_cgroup_cancel_charge(page, memcg, true);    in __do_huge_pmd_anonymous_page()
     630  mem_cgroup_commit_charge(page, memcg, false, true);    in __do_huge_pmd_anonymous_page()
     638  count_memcg_events(memcg, THP_FAULT_ALLOC, 1);    in __do_huge_pmd_anonymous_page()
     647  mem_cgroup_cancel_charge(page, memcg, true);    in __do_huge_pmd_anonymous_page()
    1201  struct mem_cgroup *memcg;    in do_huge_pmd_wp_page_fallback() local
     [all …]

khugepaged.c
     957  struct mem_cgroup *memcg;    in collapse_huge_page() local
     980  if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {    in collapse_huge_page()
     988  mem_cgroup_cancel_charge(new_page, memcg, true);    in collapse_huge_page()
     996  mem_cgroup_cancel_charge(new_page, memcg, true);    in collapse_huge_page()
    1007  mem_cgroup_cancel_charge(new_page, memcg, true);    in collapse_huge_page()
    1093  mem_cgroup_commit_charge(new_page, memcg, false, true);    in collapse_huge_page()
    1094  count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);    in collapse_huge_page()
    1111  mem_cgroup_cancel_charge(new_page, memcg, true);    in collapse_huge_page()
    1499  struct mem_cgroup *memcg;    in collapse_file() local
    1518  if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {    in collapse_file()
     [all …]

userfaultfd.c
      28  struct mem_cgroup *memcg;    in mcopy_atomic_pte() local
      69  if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))    in mcopy_atomic_pte()
      92  mem_cgroup_commit_charge(page, memcg, false, false);    in mcopy_atomic_pte()
     106  mem_cgroup_cancel_charge(page, memcg, false);    in mcopy_atomic_pte()

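The mcopy_atomic_pte() hits above are the most compact instance of the three-step charge protocol that also shows up in memory.c, huge_memory.c, and khugepaged.c in this list: try-charge before the page is wired up, commit once the mapping is visible, cancel on any failure path. A condensed sketch of that protocol under v5.4's API (charge_and_map and install_pte are hypothetical stand-ins for the real fault-path code):

    /* Sketch of the v5.4 page-charge protocol, modeled on mcopy_atomic_pte(). */
    static int charge_and_map(struct page *page, struct mm_struct *mm)
    {
            struct mem_cgroup *memcg;
            int err;

            if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
                    return -ENOMEM;                 /* charge refused */

            err = install_pte(page);                /* placeholder for the real
                                                       page-table installation */
            if (err) {
                    /* back out: the charge was never made visible */
                    mem_cgroup_cancel_charge(page, memcg, false);
                    return err;
            }

            /* page is mapped; tie it to the memcg for accounting/LRU */
            mem_cgroup_commit_charge(page, memcg, false, false);
            return 0;
    }

The final bool in each call is the "compound" flag; the THP paths above pass true and charge the whole huge page at once.
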
rmap.c
     749  struct mem_cgroup *memcg;    member
     819  struct mem_cgroup *memcg = pra->memcg;    in invalid_page_referenced_vma() local
     821  if (!mm_match_cgroup(vma->vm_mm, memcg))    in invalid_page_referenced_vma()
     839  struct mem_cgroup *memcg,    in page_referenced() argument
     845  .memcg = memcg,    in page_referenced()
     871  if (memcg) {    in page_referenced()

memory.c
    2314  struct mem_cgroup *memcg;    in wp_page_copy() local
    2333  if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))    in wp_page_copy()
    2368  mem_cgroup_commit_charge(new_page, memcg, false, false);    in wp_page_copy()
    2407  mem_cgroup_cancel_charge(new_page, memcg, false);    in wp_page_copy()
    2739  struct mem_cgroup *memcg;    in do_swap_page() local
    2845  &memcg, false)) {    in do_swap_page()
    2892  mem_cgroup_commit_charge(page, memcg, false, false);    in do_swap_page()
    2896  mem_cgroup_commit_charge(page, memcg, true, false);    in do_swap_page()
    2932  mem_cgroup_cancel_charge(page, memcg, false);    in do_swap_page()
    2953  struct mem_cgroup *memcg;    in do_anonymous_page() local
     [all …]

backing-dev.c
     529  struct mem_cgroup *memcg;    in cgwb_create() local
     537  memcg = mem_cgroup_from_css(memcg_css);    in cgwb_create()
     540  memcg_cgwb_list = &memcg->cgwb_list;    in cgwb_create()
     743  void wb_memcg_offline(struct mem_cgroup *memcg)    in wb_memcg_offline() argument
     745  struct list_head *memcg_cgwb_list = &memcg->cgwb_list;    in wb_memcg_offline()

/Linux-v5.4/include/linux/

memcontrol.h
     140  struct mem_cgroup *memcg; /* Back pointer, we cannot */    member
     349  static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)    in mem_cgroup_is_root() argument
     351  return (memcg == root_mem_cgroup);    in mem_cgroup_is_root()
     359  static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,    in mem_cgroup_protection() argument
     366  return READ_ONCE(memcg->memory.emin);    in mem_cgroup_protection()
     368  return max(READ_ONCE(memcg->memory.emin),    in mem_cgroup_protection()
     369  READ_ONCE(memcg->memory.elow));    in mem_cgroup_protection()
     373  struct mem_cgroup *memcg);
     381  void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
     383  void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
     [all …]

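mem_cgroup_protection() at line 359 above folds memory.min and memory.low into one effective floor (emin/elow), with the second argument selecting whether low-protection may be breached. A simplified sketch of the consumer side, assuming a reclaim-style caller with scan_control's memcg_low_reclaim bit (the real v5.4 logic in get_scan_count() scales the scan target proportionally rather than skipping outright):

    /* Simplified, illustrative reclaim-side check; not verbatim vmscan.c. */
    unsigned long protection, usage;

    protection = mem_cgroup_protection(memcg, sc->memcg_low_reclaim);
    usage = page_counter_read(&memcg->memory);
    if (usage <= protection)
            return 0;       /* group is under its floor: leave it alone */
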
vmpressure.h
      33  extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
      35  extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
      39  extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
      41  extern int vmpressure_register_event(struct mem_cgroup *memcg,
      44  extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
      47  static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,    in vmpressure() argument
      49  static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,    in vmpressure_prio() argument

list_lru.h
     117  int nid, struct mem_cgroup *memcg);
     123  return list_lru_count_one(lru, sc->nid, sc->memcg);    in list_lru_shrink_count()
     167  int nid, struct mem_cgroup *memcg,
     184  int nid, struct mem_cgroup *memcg,
     195  return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,    in list_lru_shrink_walk()
     203  return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,    in list_lru_shrink_walk_irq()

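list_lru_shrink_count() and list_lru_shrink_walk() above pull the node id and memcg straight out of struct shrink_control, which is what makes a list_lru-backed shrinker memcg-aware essentially for free. A sketch of that wiring (my_lru, my_isolate, my_count, and my_scan are hypothetical names; the shrinker fields and list_lru calls are the real v5.4 API; the lru would be set up with list_lru_init_memcg(&my_lru, &my_shrinker) and the shrinker registered with register_shrinker()):

    static struct list_lru my_lru;

    static enum lru_status my_isolate(struct list_head *item,
                                      struct list_lru_one *lru,
                                      spinlock_t *lock, void *arg)
    {
            /* a real shrinker would also free the containing object */
            list_lru_isolate(lru, item);
            return LRU_REMOVED;
    }

    static unsigned long my_count(struct shrinker *sh,
                                  struct shrink_control *sc)
    {
            /* counts only objects charged to sc->memcg on node sc->nid */
            return list_lru_shrink_count(&my_lru, sc);
    }

    static unsigned long my_scan(struct shrinker *sh,
                                 struct shrink_control *sc)
    {
            return list_lru_shrink_walk(&my_lru, sc, my_isolate, NULL);
    }

    static struct shrinker my_shrinker = {
            .count_objects  = my_count,
            .scan_objects   = my_scan,
            .seeks          = DEFAULT_SEEKS,
            .flags          = SHRINKER_MEMCG_AWARE,
    };
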
swap.h
     355  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
     629  static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)    in mem_cgroup_swappiness() argument
     636  if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))    in mem_cgroup_swappiness()
     639  return memcg->swappiness;    in mem_cgroup_swappiness()
     649  extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
     652  static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,    in mem_cgroup_throttle_swaprate() argument
     662  extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
     680  static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)    in mem_cgroup_get_nr_swap_pages() argument

rmap.h
     196  struct mem_cgroup *memcg, unsigned long *vm_flags);
     285  struct mem_cgroup *memcg,    in page_referenced() argument

shrinker.h
      33  struct mem_cgroup *memcg;    member

oom.h
      37  struct mem_cgroup *memcg;    member

/Linux-v5.4/tools/testing/selftests/cgroup/

test_memcontrol.c
     161  char *memcg;    in test_memcg_current() local
     163  memcg = cg_name(root, "memcg_test");    in test_memcg_current()
     164  if (!memcg)    in test_memcg_current()
     167  if (cg_create(memcg))    in test_memcg_current()
     170  current = cg_read_long(memcg, "memory.current");    in test_memcg_current()
     174  if (cg_run(memcg, alloc_anon_50M_check, NULL))    in test_memcg_current()
     177  if (cg_run(memcg, alloc_pagecache_50M_check, NULL))    in test_memcg_current()
     183  cg_destroy(memcg);    in test_memcg_current()
     184  free(memcg);    in test_memcg_current()
     590  char *memcg;    in test_memcg_high() local
     [all …]

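test_memcg_current() above is the canonical shape of these selftests: build a child cgroup under the v2 hierarchy, exercise it, read the counters back, tear it down. A trimmed-down sketch of the same pattern, assuming the cg_name()/cg_create()/cg_read_long()/cg_destroy() helpers from the suite's cgroup_util.h and the KSFT_* result codes from kselftest.h:

    /* Sketch: "root" is the cgroup v2 mount path supplied by the harness. */
    static int test_memcg_current_sketch(const char *root)
    {
            int ret = KSFT_FAIL;
            char *memcg;

            memcg = cg_name(root, "memcg_test");    /* "<root>/memcg_test" */
            if (!memcg)
                    return ret;
            if (cg_create(memcg))
                    goto cleanup;

            /* a fresh, empty cgroup should have charged nothing yet */
            if (cg_read_long(memcg, "memory.current") != 0)
                    goto cleanup;

            ret = KSFT_PASS;
    cleanup:
            cg_destroy(memcg);
            free(memcg);
            return ret;
    }
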
/Linux-v5.4/Documentation/admin-guide/cgroup-v1/

memcg_test.rst
       9  Because VM is getting complex (one of reasons is memcg...), memcg's behavior
      10  is complex. This is a document for memcg's internal behavior.
      61  At commit(), the page is associated with the memcg.
     114  But brief explanation of the behavior of memcg around shmem will be
     136  Each memcg has its own private LRU. Now, its handling is under global
     138  Almost all routines around memcg's LRU is called by global LRU's
     142  memcg's private LRU and call __isolate_lru_page() to extract a page
     154  9.1 Small limit to memcg.
     157  When you do test to do racy case, it's good test to set memcg's limit
     167  Historically, memcg's shmem handling was poor and we saw some amount
     [all …]

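Section 9.1 quoted above recommends a deliberately small limit to provoke racy charge/reclaim paths. Under cgroup v1 that amounts to one write to memory.limit_in_bytes; a sketch in C for consistency with the rest of this page (the mount point and the "test" group name are assumptions, not from the document):

    #include <fcntl.h>
    #include <unistd.h>

    /* Assumes the v1 memory controller is mounted at /sys/fs/cgroup/memory
     * and a group named "test" already exists. */
    static int set_small_limit(void)
    {
            int fd = open("/sys/fs/cgroup/memory/test/memory.limit_in_bytes",
                          O_WRONLY);
            if (fd < 0)
                    return -1;
            /* a 1M limit makes charge failures and reclaim races frequent */
            if (write(fd, "1M", 2) != 2) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }
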
/Linux-v5.4/include/linux/sched/

mm.h
     319  static inline void memalloc_use_memcg(struct mem_cgroup *memcg)    in memalloc_use_memcg() argument
     322  current->active_memcg = memcg;    in memalloc_use_memcg()
     336  static inline void memalloc_use_memcg(struct mem_cgroup *memcg)    in memalloc_use_memcg() argument

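memalloc_use_memcg() at line 319 installs current->active_memcg, so subsequent __GFP_ACCOUNT allocations are charged to that memcg instead of the caller's own; fs/notify (group.c below) relies on this to charge event allocations to the listener's cgroup. A sketch of the remote-charging pattern (charged_alloc and target are illustrative names; target stands for a memcg reference the caller already holds):

    /* Charge an allocation to a memcg the caller holds a reference on. */
    static void *charged_alloc(struct mem_cgroup *target, size_t size)
    {
            void *p;

            memalloc_use_memcg(target);     /* charge target, not current */
            p = kmalloc(size, GFP_KERNEL_ACCOUNT);
            memalloc_unuse_memcg();         /* restore normal charging */
            return p;
    }
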
/Linux-v5.4/fs/notify/

group.c
      27  mem_cgroup_put(group->memcg);    in fsnotify_final_destroy_group()