/Linux-v5.15/mm/

D  memcontrol.c
      144  struct mem_cgroup *memcg;    member
      158  int (*register_event)(struct mem_cgroup *memcg,
      165  void (*unregister_event)(struct mem_cgroup *memcg,
      177  static void mem_cgroup_threshold(struct mem_cgroup *memcg);
      178  static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
      249  struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)    in memcg_to_vmpressure()  argument
      251  if (!memcg)    in memcg_to_vmpressure()
      252  memcg = root_mem_cgroup;    in memcg_to_vmpressure()
      253  return &memcg->vmpressure;    in memcg_to_vmpressure()
      333  static void memcg_reparent_objcgs(struct mem_cgroup *memcg,    in memcg_reparent_objcgs()  argument
      [all …]

D  vmpressure.c
       77  struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr);    in vmpressure_parent()  local
       79  memcg = parent_mem_cgroup(memcg);    in vmpressure_parent()
       80  if (!memcg)    in vmpressure_parent()
       82  return memcg_to_vmpressure(memcg);    in vmpressure_parent()
      239  void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,    in vmpressure()  argument
      247  vmpr = memcg_to_vmpressure(memcg);    in vmpressure()
      287  if (!memcg || mem_cgroup_is_root(memcg))    in vmpressure()
      311  memcg->socket_pressure = jiffies + HZ;    in vmpressure()
      327  void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)    in vmpressure_prio()  argument
      343  vmpressure(gfp, memcg, true, vmpressure_win, 0);    in vmpressure_prio()
      [all …]

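The "tree" flag in the vmpressure() calls above decides whether an event stays with one memcg or is propagated to its ancestors. A minimal hedged sketch using only the signature shown above; the wrapper name is illustrative, not kernel code:

    #include <linux/vmpressure.h>
    #include <linux/memcontrol.h>

    /* Illustrative wrapper: documents what the "tree" flag selects. */
    static void report_pressure_sketch(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
                                       unsigned long scanned, unsigned long reclaimed)
    {
            /*
             * tree == true: the event is accounted to this memcg and then walked
             * up the hierarchy via vmpressure_parent(), which is how
             * vmpressure_prio() reports a whole-subtree event.
             *
             * tree == false: non-hierarchical accounting for this memcg only,
             * also used to raise memcg->socket_pressure under severe pressure.
             */
            vmpressure(gfp, memcg, tree, scanned, reclaimed);
    }
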
D  vmscan.c
      208  static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,    in shrinker_info_protected()  argument
      211  return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,    in shrinker_info_protected()
      215  static int expand_one_shrinker_info(struct mem_cgroup *memcg,    in expand_one_shrinker_info()  argument
      225  pn = memcg->nodeinfo[nid];    in expand_one_shrinker_info()
      226  old = shrinker_info_protected(memcg, nid);    in expand_one_shrinker_info()
      253  void free_shrinker_info(struct mem_cgroup *memcg)    in free_shrinker_info()  argument
      260  pn = memcg->nodeinfo[nid];    in free_shrinker_info()
      267  int alloc_shrinker_info(struct mem_cgroup *memcg)    in alloc_shrinker_info()  argument
      280  free_shrinker_info(memcg);    in alloc_shrinker_info()
      286  rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);    in alloc_shrinker_info()
      [all …]

D  list_lru.c
       65  struct mem_cgroup *memcg = NULL;    in list_lru_from_kmem()  local
       70  memcg = mem_cgroup_from_obj(ptr);    in list_lru_from_kmem()
       71  if (!memcg)    in list_lru_from_kmem()
       74  l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));    in list_lru_from_kmem()
       77  *memcg_ptr = memcg;    in list_lru_from_kmem()
      119  struct mem_cgroup *memcg;    in list_lru_add()  local
      124  l = list_lru_from_kmem(nlru, item, &memcg);    in list_lru_add()
      128  set_shrinker_bit(memcg, nid,    in list_lru_add()
      175  int nid, struct mem_cgroup *memcg)    in list_lru_count_one()  argument
      182  l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));    in list_lru_count_one()
      [all …]

D  workingset.c
      292  struct mem_cgroup *memcg;    in workingset_refault()  local
      350  memcg = page_memcg(page);    in workingset_refault()
      351  lruvec = mem_cgroup_lruvec(memcg, pgdat);    in workingset_refault()
      368  if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {    in workingset_refault()
      400  struct mem_cgroup *memcg;    in workingset_activation()  local
      411  memcg = page_memcg_rcu(page);    in workingset_activation()
      412  if (!mem_cgroup_disabled() && !memcg)    in workingset_activation()
      493  if (sc->memcg) {    in count_shadow_nodes()
      497  lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));    in count_shadow_nodes()

D  mmap_lock.c
      202  struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);    in get_mm_memcg_path()  local
      204  if (memcg == NULL)    in get_mm_memcg_path()
      206  if (unlikely(memcg->css.cgroup == NULL))    in get_mm_memcg_path()
      213  cgroup_path(memcg->css.cgroup, buf, MEMCG_PATH_BUF_SIZE);    in get_mm_memcg_path()
      216  css_put(&memcg->css);    in get_mm_memcg_path()

D  oom_kill.c
       73  return oc->memcg != NULL;    in is_memcg_oom()
      263  oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;    in constrained_alloc()
      372  mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);    in select_bad_process()
      432  mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);    in dump_tasks()
      450  mem_cgroup_print_oom_context(oc->memcg, victim);    in dump_oom_summary()
      465  mem_cgroup_print_oom_meminfo(oc->memcg);    in dump_header()
      987  oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);    in oom_kill_process()
     1132  .memcg = NULL,    in pagefault_out_of_memory()

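A non-NULL oc->memcg is what makes is_memcg_oom() true and confines the victim scan above to one cgroup via mem_cgroup_scan_tasks(). A hedged sketch of how such an OOM is requested, roughly what mem_cgroup_out_of_memory() sets up in v5.15 with its locking and dying-task checks omitted; the helper name is illustrative:

    #include <linux/oom.h>
    #include <linux/memcontrol.h>

    static bool memcg_oom_sketch(struct mem_cgroup *memcg, gfp_t gfp_mask, int order)
    {
            struct oom_control oc = {
                    .zonelist = NULL,    /* no zonelist: not a global allocation failure */
                    .memcg    = memcg,   /* non-NULL memcg => memcg-constrained OOM */
                    .gfp_mask = gfp_mask,
                    .order    = order,
            };

            return out_of_memory(&oc);
    }
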
D  page_io.c
      270  struct mem_cgroup *memcg;    in bio_associate_blkg_from_page()  local
      272  memcg = page_memcg(page);    in bio_associate_blkg_from_page()
      273  if (!memcg)    in bio_associate_blkg_from_page()
      277  css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);    in bio_associate_blkg_from_page()

D  rmap.c
      767  struct mem_cgroup *memcg;    member
      837  struct mem_cgroup *memcg = pra->memcg;    in invalid_page_referenced_vma()  local
      839  if (!mm_match_cgroup(vma->vm_mm, memcg))    in invalid_page_referenced_vma()
      857  struct mem_cgroup *memcg,    in page_referenced()  argument
      863  .memcg = memcg,    in page_referenced()
      889  if (memcg) {    in page_referenced()

D  backing-dev.c
      451  struct mem_cgroup *memcg;    in cgwb_create()  local
      459  memcg = mem_cgroup_from_css(memcg_css);    in cgwb_create()
      462  memcg_cgwb_list = &memcg->cgwb_list;    in cgwb_create()
      713  void wb_memcg_offline(struct mem_cgroup *memcg)    in wb_memcg_offline()  argument
      715  struct list_head *memcg_cgwb_list = &memcg->cgwb_list;    in wb_memcg_offline()

/Linux-v5.15/include/linux/

D  memcontrol.h
      153  struct mem_cgroup *memcg; /* Back pointer, we cannot */    member
      223  struct mem_cgroup *memcg;    member
      383  return READ_ONCE(objcg->memcg);    in obj_cgroup_memcg()
      596  static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)    in mem_cgroup_is_root()  argument
      598  return (memcg == root_mem_cgroup);    in mem_cgroup_is_root()
      607  struct mem_cgroup *memcg,    in mem_cgroup_protection()  argument
      649  if (root == memcg)    in mem_cgroup_protection()
      652  *min = READ_ONCE(memcg->memory.emin);    in mem_cgroup_protection()
      653  *low = READ_ONCE(memcg->memory.elow);    in mem_cgroup_protection()
      657  struct mem_cgroup *memcg);
      [all …]

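mem_cgroup_protection() above only exports the effective memory.min/memory.low floors (emin/elow); what to do with them is left to reclaim. A simplified, hedged sketch of a consumer; memcg_is_protected() is an illustrative helper, and the real get_scan_count() scales the scan target rather than skipping a protected memcg outright:

    #include <linux/memcontrol.h>
    #include <linux/minmax.h>

    static bool memcg_is_protected(struct mem_cgroup *root, struct mem_cgroup *memcg,
                                   bool honor_low)
    {
            unsigned long min, low;

            mem_cgroup_protection(root, memcg, &min, &low);

            /* usage still at or below the effective floor: leave this memcg alone */
            return mem_cgroup_size(memcg) <= (honor_low ? max(min, low) : min);
    }
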
D  vmpressure.h
       33  extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
       35  extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
       39  extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
       41  extern int vmpressure_register_event(struct mem_cgroup *memcg,
       44  extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
       47  static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,    in vmpressure()  argument
       49  static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,    in vmpressure_prio()  argument

D  list_lru.h
      117  int nid, struct mem_cgroup *memcg);
      123  return list_lru_count_one(lru, sc->nid, sc->memcg);    in list_lru_shrink_count()
      167  int nid, struct mem_cgroup *memcg,
      184  int nid, struct mem_cgroup *memcg,
      195  return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,    in list_lru_shrink_walk()
      203  return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,    in list_lru_shrink_walk_irq()

D  swap.h
      382  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
      704  static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)    in mem_cgroup_swappiness()  argument
      711  if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))    in mem_cgroup_swappiness()
      714  return memcg->swappiness;    in mem_cgroup_swappiness()
      755  extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
      773  static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)    in mem_cgroup_get_nr_swap_pages()  argument

D  rmap.h
      192  struct mem_cgroup *memcg, unsigned long *vm_flags);
      287  struct mem_cgroup *memcg,    in page_referenced()  argument

D  shrinker.h
       33  struct mem_cgroup *memcg;    member

D  oom.h
       37  struct mem_cgroup *memcg;    member

D  backing-dev.h
      177  void wb_memcg_offline(struct mem_cgroup *memcg);
      399  static inline void wb_memcg_offline(struct mem_cgroup *memcg)    in wb_memcg_offline()  argument

/Linux-v5.15/tools/testing/selftests/cgroup/

D  test_memcontrol.c
      161  char *memcg;    in test_memcg_current()  local
      163  memcg = cg_name(root, "memcg_test");    in test_memcg_current()
      164  if (!memcg)    in test_memcg_current()
      167  if (cg_create(memcg))    in test_memcg_current()
      170  current = cg_read_long(memcg, "memory.current");    in test_memcg_current()
      174  if (cg_run(memcg, alloc_anon_50M_check, NULL))    in test_memcg_current()
      177  if (cg_run(memcg, alloc_pagecache_50M_check, NULL))    in test_memcg_current()
      183  cg_destroy(memcg);    in test_memcg_current()
      184  free(memcg);    in test_memcg_current()
      590  char *memcg;    in test_memcg_high()  local
      [all …]

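The lines above come from test_memcg_current(); a compressed sketch of the same create/check/destroy flow, assuming the cg_* helpers from cgroup_util.h and the KSFT_* codes from kselftest.h. The cgroup name and the single check are illustrative, not the actual test:

    #include <stdlib.h>

    #include "../kselftest.h"
    #include "cgroup_util.h"

    static int test_memcg_sketch(const char *root)
    {
            int ret = KSFT_FAIL;
            char *memcg;

            memcg = cg_name(root, "memcg_sketch");    /* illustrative child cgroup name */
            if (!memcg)
                    return KSFT_FAIL;

            if (cg_create(memcg))
                    goto cleanup;

            /* a freshly created memcg should start with no charged memory */
            if (cg_read_long(memcg, "memory.current") != 0)
                    goto cleanup;

            ret = KSFT_PASS;
    cleanup:
            cg_destroy(memcg);
            free(memcg);
            return ret;
    }
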
/Linux-v5.15/tools/cgroup/

D  memcg_slabinfo.py
       42  memcg = container_of(css, 'struct mem_cgroup', 'css')
       43  MEMCGS[css.cgroup.kn.id.value_()] = memcg
      171  memcg = MEMCGS[cgroup_id]
      187  obj_cgroups.add(memcg.objcg.value_())
      189  memcg.objcg_list.address_of_(),
      221  memcg.kmem_caches.address_of_(),

/Linux-v5.15/Documentation/admin-guide/cgroup-v1/

D  memcg_test.rst
        9  Because VM is getting complex (one of reasons is memcg...), memcg's behavior
       10  is complex. This is a document for memcg's internal behavior.
       61  At commit(), the page is associated with the memcg.
      114  But brief explanation of the behavior of memcg around shmem will be
      136  Each memcg has its own vector of LRUs (inactive anon, active anon,
      138  each LRU handled under a single lru_lock for that memcg and node.
      145  9.1 Small limit to memcg.
      148  When you do test to do racy case, it's good test to set memcg's limit
      158  Historically, memcg's shmem handling was poor and we saw some amount
      248  Besides management of swap is one of complicated parts of memcg,
      [all …]

D  memory.rst
       19  see patch's title and function names tend to use "memcg".
       47  - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
      207  Since page's memcg recorded into swap whatever memsw enabled, the page will
      294  lock_page_memcg (memcg->move_lock)
      336  from inside the memcg. The creation is done lazily, so some objects can still be
      338  belong to the same memcg. This only fails to hold when a task is migrated to a
      339  different memcg during the page allocation by the cache.
      358  This is the standard memcg limitation mechanism already present before kmem
      517  Though rmdir() offlines memcg, but the memcg may still stay there due to
      607  there is a swap storage available. This might lead to memcg OOM killer
      [all …]

/Linux-v5.15/include/linux/sched/

D  mm.h
      305  set_active_memcg(struct mem_cgroup *memcg)    in set_active_memcg()  argument
      311  this_cpu_write(int_active_memcg, memcg);    in set_active_memcg()
      314  current->active_memcg = memcg;    in set_active_memcg()
      321  set_active_memcg(struct mem_cgroup *memcg)    in set_active_memcg()  argument

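set_active_memcg() above overrides which memcg __GFP_ACCOUNT allocations are charged to (per task, or per CPU when not in task context). A hedged sketch of the usual save/restore pattern; the helper name and the kmalloc() call are illustrative, and in v5.15 set_active_memcg() returns the previously active memcg so it can be restored:

    #include <linux/sched/mm.h>
    #include <linux/memcontrol.h>
    #include <linux/slab.h>

    static void *alloc_charged_to(struct mem_cgroup *memcg, size_t size)
    {
            struct mem_cgroup *old_memcg;
            void *ptr;

            old_memcg = set_active_memcg(memcg);        /* remember the previous override */
            ptr = kmalloc(size, GFP_KERNEL_ACCOUNT);    /* accounted to "memcg", not the caller's */
            set_active_memcg(old_memcg);                /* always restore */

            return ptr;
    }
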
/Linux-v5.15/fs/notify/

D  group.c
       27  mem_cgroup_put(group->memcg);    in fsnotify_final_destroy_group()

/Linux-v5.15/Documentation/vm/

D  hwpoison.rst
      136  corrupt-filter-memcg
      138  number of the memcg.
      148  echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg