
Searched refs:memcg (Results 1 – 25 of 38) sorted by relevance


/Linux-v5.10/mm/
memcontrol.c
135 struct mem_cgroup *memcg; member
149 int (*register_event)(struct mem_cgroup *memcg,
156 void (*unregister_event)(struct mem_cgroup *memcg,
168 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
169 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
240 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
242 if (!memcg) in memcg_to_vmpressure()
243 memcg = root_mem_cgroup; in memcg_to_vmpressure()
244 return &memcg->vmpressure; in memcg_to_vmpressure()
258 struct mem_cgroup *memcg; in obj_cgroup_release() local
[all …]
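The matches at memcontrol.c:240-244 show memcg_to_vmpressure() in full: a NULL memcg is treated as the root cgroup. A minimal userspace sketch of that fallback pattern; the struct layouts below are stand-ins, not the kernel's definitions:

```c
#include <stdio.h>

struct vmpressure { int level; };
struct mem_cgroup { struct vmpressure vmpressure; };

static struct mem_cgroup root_group;
static struct mem_cgroup *root_mem_cgroup = &root_group;

static struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;	/* NULL means "the root cgroup" */
	return &memcg->vmpressure;
}

int main(void)
{
	/* A NULL memcg resolves to the root group's vmpressure state. */
	printf("%d\n", memcg_to_vmpressure(NULL) == &root_group.vmpressure);
	return 0;
}
```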
vmpressure.c
78 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in vmpressure_parent() local
80 memcg = parent_mem_cgroup(memcg); in vmpressure_parent()
81 if (!memcg) in vmpressure_parent()
83 return memcg_to_vmpressure(memcg); in vmpressure_parent()
240 void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure() argument
243 struct vmpressure *vmpr = memcg_to_vmpressure(memcg); in vmpressure()
283 if (!memcg || mem_cgroup_is_root(memcg)) in vmpressure()
307 memcg->socket_pressure = jiffies + HZ; in vmpressure()
323 void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio) in vmpressure_prio() argument
339 vmpressure(gfp, memcg, true, vmpressure_win, 0); in vmpressure_prio()
[all …]
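The vmpressure_parent() matches (vmpressure.c:78-83) show how pressure events propagate: step one level up the hierarchy and stop at the root, whose parent is NULL. A sketch under the assumption of a plain parent pointer; the kernel goes through parent_mem_cgroup() and css internals instead:

```c
#include <stddef.h>
#include <stdio.h>

struct vmpressure { const char *name; };
struct mem_cgroup {
	struct mem_cgroup *parent;
	struct vmpressure vmpressure;
};

static struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	return &memcg->vmpressure;
}

static struct vmpressure *vmpressure_parent(struct mem_cgroup *memcg)
{
	memcg = memcg->parent;	/* parent_mem_cgroup() in the kernel */
	if (!memcg)
		return NULL;	/* reached the root: nothing to propagate to */
	return memcg_to_vmpressure(memcg);
}

int main(void)
{
	struct mem_cgroup root = { .parent = NULL, .vmpressure = { "root" } };
	struct mem_cgroup child = { .parent = &root };

	printf("%s\n", vmpressure_parent(&child)->name);	/* "root" */
	printf("%d\n", vmpressure_parent(&root) == NULL);	/* 1 */
	return 0;
}
```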
workingset.c
290 struct mem_cgroup *memcg; in workingset_refault() local
348 memcg = page_memcg(page); in workingset_refault()
349 lruvec = mem_cgroup_lruvec(memcg, pgdat); in workingset_refault()
365 if (mem_cgroup_get_nr_swap_pages(memcg) > 0) { in workingset_refault()
399 struct mem_cgroup *memcg; in workingset_activation() local
410 memcg = page_memcg_rcu(page); in workingset_activation()
411 if (!mem_cgroup_disabled() && !memcg) in workingset_activation()
490 if (sc->memcg) { in count_shadow_nodes()
494 lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid)); in count_shadow_nodes()
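The workingset_refault() matches (workingset.c:348-349) show the lookup chain used for refault accounting: the page's memcg plus the page's node select a lruvec. A sketch with stand-in types; page_memcg() and mem_cgroup_lruvec() are modeled as plain lookups:

```c
#include <stdio.h>

#define MAX_NODES 2

struct lruvec { unsigned long refaults; };
struct mem_cgroup { struct lruvec nodeinfo[MAX_NODES]; };
struct page { struct mem_cgroup *memcg; int nid; };

static struct mem_cgroup *page_memcg(struct page *page)
{
	return page->memcg;
}

static struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, int nid)
{
	return &memcg->nodeinfo[nid];
}

static void workingset_refault(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(page);
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, page->nid);

	lruvec->refaults++;	/* stats land in the per-memcg, per-node lruvec */
}

int main(void)
{
	static struct mem_cgroup cg;	/* zero-initialized */
	struct page p = { .memcg = &cg, .nid = 1 };

	workingset_refault(&p);
	printf("node 1 refaults: %lu\n", cg.nodeinfo[1].refaults);
	return 0;
}
```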
list_lru.c
65 struct mem_cgroup *memcg = NULL; in list_lru_from_kmem() local
70 memcg = mem_cgroup_from_obj(ptr); in list_lru_from_kmem()
71 if (!memcg) in list_lru_from_kmem()
74 l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); in list_lru_from_kmem()
77 *memcg_ptr = memcg; in list_lru_from_kmem()
119 struct mem_cgroup *memcg; in list_lru_add() local
124 l = list_lru_from_kmem(nlru, item, &memcg); in list_lru_add()
128 memcg_set_shrinker_bit(memcg, nid, in list_lru_add()
175 int nid, struct mem_cgroup *memcg) in list_lru_count_one() argument
182 l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg)); in list_lru_count_one()
[all …]
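list_lru_from_kmem() (list_lru.c:65-77) derives the owning memcg from the object and uses its cache id to pick a per-memcg list, falling back to the shared list when there is no memcg. A sketch with illustrative ids and list types; mem_cgroup_from_obj() is modeled as a field read:

```c
#include <stdio.h>

#define NR_MEMCGS 4

struct list_lru_one { unsigned long nr_items; };

struct list_lru_node {
	struct list_lru_one global;	/* for non-memcg objects */
	struct list_lru_one per_memcg[NR_MEMCGS];
};

struct mem_cgroup { int cache_id; };
struct object { struct mem_cgroup *memcg; };

static int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg->cache_id;
}

static struct list_lru_one *list_lru_from_kmem(struct list_lru_node *nlru,
					       struct object *obj)
{
	struct mem_cgroup *memcg = obj->memcg;	/* mem_cgroup_from_obj() */

	if (!memcg)
		return &nlru->global;
	return &nlru->per_memcg[memcg_cache_id(memcg)];
}

int main(void)
{
	static struct list_lru_node nlru;
	struct mem_cgroup cg = { .cache_id = 2 };
	struct object a = { .memcg = &cg }, b = { .memcg = NULL };

	list_lru_from_kmem(&nlru, &a)->nr_items++;
	list_lru_from_kmem(&nlru, &b)->nr_items++;
	printf("memcg[2]=%lu global=%lu\n",
	       nlru.per_memcg[2].nr_items, nlru.global.nr_items);
	return 0;
}
```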
vmscan.c
546 struct mem_cgroup *memcg, int priority) in shrink_slab_memcg() argument
552 if (!mem_cgroup_online(memcg)) in shrink_slab_memcg()
558 map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map, in shrink_slab_memcg()
567 .memcg = memcg, in shrink_slab_memcg()
606 memcg_set_shrinker_bit(memcg, nid, i); in shrink_slab_memcg()
621 struct mem_cgroup *memcg, int priority) in shrink_slab_memcg() argument
648 struct mem_cgroup *memcg, in shrink_slab() argument
661 if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg)) in shrink_slab()
662 return shrink_slab_memcg(gfp_mask, nid, memcg, priority); in shrink_slab()
671 .memcg = memcg, in shrink_slab()
[all …]
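The dispatch at vmscan.c:661-662 routes slab shrinking to the memcg-aware path for every non-root memcg when memcg is enabled; only the root (or a memcg-disabled kernel) takes the global path. A sketch of just that routing decision, with the helper bodies reduced to stubs:

```c
#include <stdbool.h>
#include <stdio.h>

struct mem_cgroup { bool is_root; };

static bool mem_cgroup_disabled(void) { return false; }	/* assume enabled */

static bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return memcg->is_root;
}

static unsigned long shrink_slab_memcg(struct mem_cgroup *memcg)
{
	(void)memcg;
	printf("memcg-aware shrink\n");
	return 0;
}

static unsigned long shrink_slab(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
		return shrink_slab_memcg(memcg);
	printf("global shrink\n");
	return 0;
}

int main(void)
{
	struct mem_cgroup root = { .is_root = true };
	struct mem_cgroup child = { .is_root = false };

	shrink_slab(&child);	/* memcg-aware shrink */
	shrink_slab(&root);	/* global shrink */
	return 0;
}
```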
oom_kill.c
72 return oc->memcg != NULL; in is_memcg_oom()
260 oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1; in constrained_alloc()
369 mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); in select_bad_process()
430 mem_cgroup_scan_tasks(oc->memcg, dump_task, oc); in dump_tasks()
448 mem_cgroup_print_oom_context(oc->memcg, victim); in dump_oom_summary()
463 mem_cgroup_print_oom_meminfo(oc->memcg); in dump_header()
985 oom_group = mem_cgroup_get_oom_group(victim, oc->memcg); in oom_kill_process()
1130 .memcg = NULL, in pagefault_out_of_memory()
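oom_kill.c:72 shows the discriminator for cgroup-local OOMs: a non-NULL oc->memcg, while pagefault_out_of_memory() (line 1130) passes NULL for the global case. A sketch of that convention with stand-in structs:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mem_cgroup { int id; };
struct oom_control { struct mem_cgroup *memcg; };

static bool is_memcg_oom(struct oom_control *oc)
{
	/* A memcg OOM always carries the cgroup being reclaimed against. */
	return oc->memcg != NULL;
}

int main(void)
{
	struct mem_cgroup cg = { .id = 1 };
	struct oom_control global_oom = { .memcg = NULL };
	struct oom_control memcg_oom = { .memcg = &cg };

	printf("global: %d, memcg: %d\n",
	       is_memcg_oom(&global_oom), is_memcg_oom(&memcg_oom));
	return 0;
}
```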
rmap.c
762 struct mem_cgroup *memcg; member
832 struct mem_cgroup *memcg = pra->memcg; in invalid_page_referenced_vma() local
834 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
852 struct mem_cgroup *memcg, in page_referenced() argument
858 .memcg = memcg, in page_referenced()
884 if (memcg) { in page_referenced()
backing-dev.c
430 struct mem_cgroup *memcg; in cgwb_create() local
438 memcg = mem_cgroup_from_css(memcg_css); in cgwb_create()
441 memcg_cgwb_list = &memcg->cgwb_list; in cgwb_create()
643 void wb_memcg_offline(struct mem_cgroup *memcg) in wb_memcg_offline() argument
645 struct list_head *memcg_cgwb_list = &memcg->cgwb_list; in wb_memcg_offline()
slab.h
309 struct mem_cgroup *memcg; in mod_objcg_state() local
313 memcg = obj_cgroup_memcg(objcg); in mod_objcg_state()
314 lruvec = mem_cgroup_lruvec(memcg, pgdat); in mod_objcg_state()
huge_memory.c
473 struct mem_cgroup *memcg = compound_head(page)->mem_cgroup; in get_deferred_split_queue() local
476 if (memcg) in get_deferred_split_queue()
477 return &memcg->deferred_split_queue; in get_deferred_split_queue()
2767 struct mem_cgroup *memcg = compound_head(page)->mem_cgroup; in deferred_split_huge_page() local
2792 if (memcg) in deferred_split_huge_page()
2793 memcg_set_shrinker_bit(memcg, page_to_nid(page), in deferred_split_huge_page()
2807 if (sc->memcg) in deferred_split_count()
2808 ds_queue = &sc->memcg->deferred_split_queue; in deferred_split_count()
2824 if (sc->memcg) in deferred_split_scan()
2825 ds_queue = &sc->memcg->deferred_split_queue; in deferred_split_scan()
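get_deferred_split_queue() (huge_memory.c:473-477) returns a memcg's own deferred_split_queue when the THP is charged to one, and presumably falls back to the node's queue otherwise (the fallback branch is not visible above). A sketch under that assumption, with stand-in types:

```c
#include <stddef.h>
#include <stdio.h>

struct deferred_split { const char *owner; };
struct mem_cgroup { struct deferred_split deferred_split_queue; };
struct pglist_data { struct deferred_split deferred_split_queue; };
struct page { struct mem_cgroup *mem_cgroup; struct pglist_data *pgdat; };

static struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (memcg)
		return &memcg->deferred_split_queue;	/* charged THP */
	return &page->pgdat->deferred_split_queue;	/* assumed fallback */
}

int main(void)
{
	struct mem_cgroup cg = { { "memcg" } };
	struct pglist_data node = { { "node" } };
	struct page charged = { &cg, &node }, bare = { NULL, &node };

	printf("%s %s\n", get_deferred_split_queue(&charged)->owner,
	       get_deferred_split_queue(&bare)->owner);
	return 0;
}
```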
/Linux-v5.10/include/linux/
memcontrol.h
127 struct mem_cgroup *memcg; /* Back pointer, we cannot */ member
197 struct mem_cgroup *memcg; member
353 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) in mem_cgroup_is_root() argument
355 return (memcg == root_mem_cgroup); in mem_cgroup_is_root()
364 struct mem_cgroup *memcg, in mem_cgroup_protection() argument
403 if (root == memcg) in mem_cgroup_protection()
407 return READ_ONCE(memcg->memory.emin); in mem_cgroup_protection()
409 return max(READ_ONCE(memcg->memory.emin), in mem_cgroup_protection()
410 READ_ONCE(memcg->memory.elow)); in mem_cgroup_protection()
414 struct mem_cgroup *memcg);
[all …]
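mem_cgroup_protection() (memcontrol.h lines 403-410) grants the reclaim root no protection, counts only the hard minimum emin during low reclaim, and otherwise takes the larger of emin and elow. A sketch with plain fields standing in for READ_ONCE() and the page_counter layout:

```c
#include <stdbool.h>
#include <stdio.h>

struct mem_cgroup { unsigned long emin, elow; };

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static unsigned long mem_cgroup_protection(struct mem_cgroup *root,
					   struct mem_cgroup *memcg,
					   bool in_low_reclaim)
{
	if (root == memcg)
		return 0;	/* the tree being reclaimed gets no protection */
	if (in_low_reclaim)
		return memcg->emin;
	return max_ul(memcg->emin, memcg->elow);
}

int main(void)
{
	struct mem_cgroup root = { 0, 0 };
	struct mem_cgroup cg = { .emin = 100, .elow = 300 };

	printf("%lu %lu\n",
	       mem_cgroup_protection(&root, &cg, true),		/* 100 */
	       mem_cgroup_protection(&root, &cg, false));	/* 300 */
	return 0;
}
```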
vmpressure.h
33 extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
35 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
39 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
41 extern int vmpressure_register_event(struct mem_cgroup *memcg,
44 extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
47 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure() argument
49 static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure_prio() argument
list_lru.h
117 int nid, struct mem_cgroup *memcg);
123 return list_lru_count_one(lru, sc->nid, sc->memcg); in list_lru_shrink_count()
167 int nid, struct mem_cgroup *memcg,
184 int nid, struct mem_cgroup *memcg,
195 return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg, in list_lru_shrink_walk()
203 return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg, in list_lru_shrink_walk_irq()
swap.h
362 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
656 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) in mem_cgroup_swappiness() argument
663 if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg)) in mem_cgroup_swappiness()
666 return memcg->swappiness; in mem_cgroup_swappiness()
687 extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
705 static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) in mem_cgroup_get_nr_swap_pages() argument
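mem_cgroup_swappiness() (swap.h:656-666) falls back to the global vm_swappiness sysctl for the root memcg or a memcg-disabled kernel, and uses the per-memcg knob otherwise. A sketch with stubbed predicates and an illustrative sysctl value:

```c
#include <stdbool.h>
#include <stdio.h>

static int vm_swappiness = 60;	/* global sysctl default */

struct mem_cgroup { bool is_root; int swappiness; };

static bool mem_cgroup_disabled(void) { return false; }

static bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return memcg->is_root;
}

static int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;
	return memcg->swappiness;
}

int main(void)
{
	struct mem_cgroup root = { .is_root = true };
	struct mem_cgroup cg = { .is_root = false, .swappiness = 10 };

	printf("%d %d\n", mem_cgroup_swappiness(&root),	/* 60 */
	       mem_cgroup_swappiness(&cg));		/* 10 */
	return 0;
}
```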
rmap.h
196 struct mem_cgroup *memcg, unsigned long *vm_flags);
285 struct mem_cgroup *memcg, in page_referenced() argument
shrinker.h
33 struct mem_cgroup *memcg; member
oom.h
37 struct mem_cgroup *memcg; member
backing-dev.h
177 void wb_memcg_offline(struct mem_cgroup *memcg);
380 static inline void wb_memcg_offline(struct mem_cgroup *memcg) in wb_memcg_offline() argument
/Linux-v5.10/tools/testing/selftests/cgroup/
test_memcontrol.c
161 char *memcg; in test_memcg_current() local
163 memcg = cg_name(root, "memcg_test"); in test_memcg_current()
164 if (!memcg) in test_memcg_current()
167 if (cg_create(memcg)) in test_memcg_current()
170 current = cg_read_long(memcg, "memory.current"); in test_memcg_current()
174 if (cg_run(memcg, alloc_anon_50M_check, NULL)) in test_memcg_current()
177 if (cg_run(memcg, alloc_pagecache_50M_check, NULL)) in test_memcg_current()
183 cg_destroy(memcg); in test_memcg_current()
184 free(memcg); in test_memcg_current()
590 char *memcg; in test_memcg_high() local
[all …]
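test_memcg_current() (test_memcontrol.c:161-184) follows the selftests' usual shape: build a cgroup path, create it, check memory.current, run workloads inside it, then tear everything down through a single cleanup label. A skeleton of that flow; the cg_*() helpers belong to the selftest harness and are reduced to no-op stubs here:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* No-op stubs standing in for the selftest harness helpers. */
static char *cg_name(const char *root, const char *name)
{
	char *buf = malloc(strlen(root) + strlen(name) + 2);

	if (buf)
		sprintf(buf, "%s/%s", root, name);
	return buf;
}
static int cg_create(const char *cg) { (void)cg; return 0; }
static long cg_read_long(const char *cg, const char *f)
{
	(void)cg; (void)f;
	return 0;
}
static int cg_destroy(const char *cg) { (void)cg; return 0; }

static int test_memcg_current(const char *root)
{
	int ret = -1;	/* fail until every step passes */
	char *memcg;

	memcg = cg_name(root, "memcg_test");
	if (!memcg)
		goto cleanup;
	if (cg_create(memcg))
		goto cleanup;
	if (cg_read_long(memcg, "memory.current") != 0)
		goto cleanup;	/* a fresh cgroup should charge nothing */
	ret = 0;
cleanup:
	cg_destroy(memcg);
	free(memcg);
	return ret;
}

int main(void)
{
	return test_memcg_current("/sys/fs/cgroup");
}
```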
/Linux-v5.10/tools/cgroup/
memcg_slabinfo.py
42 memcg = container_of(css, 'struct mem_cgroup', 'css')
43 MEMCGS[css.cgroup.kn.id.value_()] = memcg
171 memcg = MEMCGS[cgroup_id]
187 obj_cgroups.add(memcg.objcg.value_())
189 memcg.objcg_list.address_of_(),
221 memcg.kmem_caches.address_of_(),
/Linux-v5.10/Documentation/admin-guide/cgroup-v1/
memcg_test.rst
9 Because the VM is getting complex (one of the reasons is memcg...), memcg's behavior
10 is complex. This is a document about memcg's internal behavior.
61 At commit(), the page is associated with the memcg.
114 But a brief explanation of the behavior of memcg around shmem will be
136 Each memcg has its own private LRU. Now, its handling is under global
138 Almost all routines around memcg's LRU are called by global LRU's
142 memcg's private LRU and call __isolate_lru_page() to extract a page
154 9.1 Small limit to memcg.
157 When testing a racy case, it's a good idea to set memcg's limit
167 Historically, memcg's shmem handling was poor and we saw some amount
[all …]
memory.rst
19 see that patch titles and function names tend to use "memcg".
47 - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
205 Since the page's memcg is recorded into swap whether or not memsw is enabled, the page will
337 from inside the memcg. The creation is done lazily, so some objects can still be
339 belong to the same memcg. This only fails to hold when a task is migrated to a
340 different memcg during the page allocation by the cache.
359 This is the standard memcg limitation mechanism already present before kmem
521 Though rmdir() offlines the memcg, the memcg may still stay there due to
613 there is swap storage available. This might lead to the memcg OOM killer
641 This is similar to numa_maps but operates on a per-memcg basis. This is
[all …]
/Linux-v5.10/include/linux/sched/
mm.h
295 set_active_memcg(struct mem_cgroup *memcg) in set_active_memcg() argument
301 this_cpu_write(int_active_memcg, memcg); in set_active_memcg()
304 current->active_memcg = memcg; in set_active_memcg()
311 set_active_memcg(struct mem_cgroup *memcg) in set_active_memcg() argument
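set_active_memcg() (sched/mm.h:295-304) stores the remote-charging target per CPU when in interrupt context and on the task otherwise. A sketch that models in_interrupt(), this_cpu_write(), and current with plain variables; returning the previous value for later restore is an assumption based on the typical save/restore usage, since the return type is not visible above:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mem_cgroup { const char *name; };

static bool in_interrupt_flag;			/* in_interrupt() */
static struct mem_cgroup *int_active_memcg;	/* per-CPU slot */
static struct {
	struct mem_cgroup *active_memcg;
} current_task;					/* current */

static struct mem_cgroup *set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (in_interrupt_flag) {
		old = int_active_memcg;
		int_active_memcg = memcg;	/* this_cpu_write() */
	} else {
		old = current_task.active_memcg;
		current_task.active_memcg = memcg;
	}
	return old;	/* callers restore this when they are done */
}

int main(void)
{
	struct mem_cgroup cg = { "cg" };
	struct mem_cgroup *old = set_active_memcg(&cg);

	set_active_memcg(old);	/* typical save/restore pairing */
	printf("restored: %d\n", current_task.active_memcg == NULL);
	return 0;
}
```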
/Linux-v5.10/fs/notify/
group.c
27 mem_cgroup_put(group->memcg); in fsnotify_final_destroy_group()
/Linux-v5.10/Documentation/vm/
hwpoison.rst
136 corrupt-filter-memcg
138 number of the memcg.
148 echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
