
Searched refs:lruvec (Results 1 – 16 of 16) sorted by relevance

/Linux-v5.15/mm/
swap.c
83 struct lruvec *lruvec; in __page_cache_release() local
86 lruvec = lock_page_lruvec_irqsave(page, &flags); in __page_cache_release()
87 del_page_from_lru_list(page, lruvec); in __page_cache_release()
89 unlock_page_lruvec_irqrestore(lruvec, flags); in __page_cache_release()
183 void (*move_fn)(struct page *page, struct lruvec *lruvec)) in pagevec_lru_move_fn() argument
186 struct lruvec *lruvec = NULL; in pagevec_lru_move_fn() local
196 lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags); in pagevec_lru_move_fn()
197 (*move_fn)(page, lruvec); in pagevec_lru_move_fn()
201 if (lruvec) in pagevec_lru_move_fn()
202 unlock_page_lruvec_irqrestore(lruvec, flags); in pagevec_lru_move_fn()
[all …]
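
The swap.c matches show the basic single-page pattern: look up and lock the page's lruvec, unlink the page from its LRU list, then drop the lock. A minimal sketch of that pattern, assuming kernel-internal context (the wrapper name release_one_from_lru() is made up; the three lruvec helpers are the ones appearing in the matches above):

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/memcontrol.h>

static void release_one_from_lru(struct page *page)
{
        struct lruvec *lruvec;
        unsigned long flags;

        if (!PageLRU(page))
                return;

        /* Resolve the page's lruvec and take lruvec->lru_lock with IRQs off. */
        lruvec = lock_page_lruvec_irqsave(page, &flags);
        del_page_from_lru_list(page, lruvec);
        /* The real __page_cache_release() also clears the page's LRU flags here. */
        unlock_page_lruvec_irqrestore(lruvec, flags);
}
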
workingset.c
229 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages) in workingset_age_nonresident() argument
243 atomic_long_add(nr_pages, &lruvec->nonresident_age); in workingset_age_nonresident()
244 } while ((lruvec = parent_lruvec(lruvec))); in workingset_age_nonresident()
259 struct lruvec *lruvec; in workingset_eviction() local
267 lruvec = mem_cgroup_lruvec(target_memcg, pgdat); in workingset_eviction()
269 memcgid = mem_cgroup_id(lruvec_memcg(lruvec)); in workingset_eviction()
270 eviction = atomic_long_read(&lruvec->nonresident_age); in workingset_eviction()
271 workingset_age_nonresident(lruvec, thp_nr_pages(page)); in workingset_eviction()
288 struct lruvec *eviction_lruvec; in workingset_refault()
294 struct lruvec *lruvec; in workingset_refault() local
[all …]
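
workingset.c uses the lruvec as the anchor for the per-cgroup "nonresident age" clock: eviction snapshots the counter, and workingset_age_nonresident() then advances it all the way up the cgroup hierarchy via parent_lruvec(). A hedged sketch of the eviction-side bookkeeping (record_eviction() is a hypothetical wrapper; the helpers and the nonresident_age field are taken from the matches above):

#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/swap.h>

static unsigned long record_eviction(struct page *page, struct mem_cgroup *memcg)
{
        struct pglist_data *pgdat = page_pgdat(page);
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
        unsigned long eviction;

        /* Snapshot the nonresident "clock" for this cgroup/node pair... */
        eviction = atomic_long_read(&lruvec->nonresident_age);
        /* ...then advance it by the pages leaving the LRU; the helper walks
         * parent_lruvec() so every ancestor cgroup ages as well. */
        workingset_age_nonresident(lruvec, thp_nr_pages(page));
        return eviction;
}
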
vmscan.c
591 static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, in lruvec_lru_size() argument
598 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
604 size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid); in lruvec_lru_size()
1935 static __always_inline void update_lru_sizes(struct lruvec *lruvec, in update_lru_sizes() argument
1944 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
1971 struct lruvec *lruvec, struct list_head *dst, in isolate_lru_pages() argument
1975 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_pages()
2061 update_lru_sizes(lruvec, lru, nr_zone_taken); in isolate_lru_pages()
2099 struct lruvec *lruvec; in isolate_lru_page() local
2102 lruvec = lock_page_lruvec_irq(page); in isolate_lru_page()
[all …]
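
In vmscan.c the lruvec is the unit reclaim operates on; lruvec_lru_size() adds up one LRU list zone by zone, asking the memcg layer for the per-zone counts. A sketch of that summation, assuming CONFIG_MEMCG is enabled (count_lru_up_to_zone() is a made-up name; lruvec_pgdat() and mem_cgroup_get_zone_lru_size() appear in the results):

#include <linux/memcontrol.h>
#include <linux/mmzone.h>

static unsigned long count_lru_up_to_zone(struct lruvec *lruvec,
                                          enum lru_list lru, int zone_idx)
{
        unsigned long size = 0;
        int zid;

        for (zid = 0; zid <= zone_idx; zid++) {
                struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];

                /* Skip zones with no managed pages on this node. */
                if (!managed_zone(zone))
                        continue;
                size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
        }
        return size;
}
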
mmzone.c
75 void lruvec_init(struct lruvec *lruvec) in lruvec_init() argument
79 memset(lruvec, 0, sizeof(struct lruvec)); in lruvec_init()
80 spin_lock_init(&lruvec->lru_lock); in lruvec_init()
83 INIT_LIST_HEAD(&lruvec->lists[lru]); in lruvec_init()
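
Taken together, the three mmzone.c matches describe the whole initializer: zero the structure, set up lruvec->lru_lock, and give every LRU list an empty head. A sketch of how those pieces fit, not a verbatim copy of the source (the for_each_lru() enumeration macro from mmzone.h is assumed):

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/string.h>

void lruvec_init_sketch(struct lruvec *lruvec)
{
        enum lru_list lru;

        memset(lruvec, 0, sizeof(struct lruvec));
        spin_lock_init(&lruvec->lru_lock);

        /* One list head per LRU list (inactive/active x anon/file, unevictable). */
        for_each_lru(lru)
                INIT_LIST_HEAD(&lruvec->lists[lru]);
}
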
memcontrol.c
668 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, in __mod_memcg_lruvec_state() argument
674 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); in __mod_memcg_lruvec_state()
694 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, in __mod_lruvec_state() argument
698 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); in __mod_lruvec_state()
702 __mod_memcg_lruvec_state(lruvec, idx, val); in __mod_lruvec_state()
711 struct lruvec *lruvec; in __mod_lruvec_page_state() local
722 lruvec = mem_cgroup_lruvec(memcg, pgdat); in __mod_lruvec_page_state()
723 __mod_lruvec_state(lruvec, idx, val); in __mod_lruvec_page_state()
732 struct lruvec *lruvec; in __mod_lruvec_kmem_state() local
746 lruvec = mem_cgroup_lruvec(memcg, pgdat); in __mod_lruvec_kmem_state()
[all …]
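
memcontrol.c layers the accounting: __mod_lruvec_state() updates the per-node counter and, when memcg is active, the per-lruvec/memcg counter on top of it. A sketch of the page-based path suggested by __mod_lruvec_page_state() above (charge_page_stat() is a hypothetical wrapper, callers of the __-prefixed helpers are assumed to have IRQs disabled, and the real code also takes care of RCU around the memcg lookup):

#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/vmstat.h>

static void charge_page_stat(struct page *page, enum node_stat_item idx, int val)
{
        struct pglist_data *pgdat = page_pgdat(page);
        struct mem_cgroup *memcg = page_memcg(page);
        struct lruvec *lruvec;

        if (!memcg) {
                /* Uncharged page: only the per-node counter is touched. */
                __mod_node_page_state(pgdat, idx, val);
                return;
        }

        /* Charged page: resolve its lruvec and update node + memcg counters. */
        lruvec = mem_cgroup_lruvec(memcg, pgdat);
        __mod_lruvec_state(lruvec, idx, val);
}
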
mlock.c
266 struct lruvec *lruvec = NULL; in __munlock_pagevec() local
281 lruvec = relock_page_lruvec_irq(page, lruvec); in __munlock_pagevec()
282 del_page_from_lru_list(page, lruvec); in __munlock_pagevec()
299 if (lruvec) { in __munlock_pagevec()
301 unlock_page_lruvec_irq(lruvec); in __munlock_pagevec()
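
mlock.c shows the batched variant of the locking pattern: when walking a pagevec, relock_page_lruvec_irq() only drops and retakes lruvec->lru_lock when the next page belongs to a different lruvec. A sketch of that shape (drop_batch_from_lru() is a made-up name):

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/memcontrol.h>
#include <linux/pagevec.h>

static void drop_batch_from_lru(struct pagevec *pvec)
{
        struct lruvec *lruvec = NULL;
        int i;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                /* Keeps the current lock when page maps to the same lruvec. */
                lruvec = relock_page_lruvec_irq(page, lruvec);
                if (TestClearPageLRU(page))
                        del_page_from_lru_list(page, lruvec);
        }
        if (lruvec)
                unlock_page_lruvec_irq(lruvec);
}
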
compaction.c
799 struct lruvec *lruvec; in isolate_migratepages_block() local
801 struct lruvec *locked = NULL; in isolate_migratepages_block()
1025 lruvec = mem_cgroup_page_lruvec(page); in isolate_migratepages_block()
1028 if (lruvec != locked) { in isolate_migratepages_block()
1032 compact_lock_irqsave(&lruvec->lru_lock, &flags, cc); in isolate_migratepages_block()
1033 locked = lruvec; in isolate_migratepages_block()
1035 lruvec_memcg_debug(lruvec, page); in isolate_migratepages_block()
1061 del_page_from_lru_list(page, lruvec); in isolate_migratepages_block()
huge_memory.c
2320 struct lruvec *lruvec, struct list_head *list) in lru_add_page_tail() argument
2325 lockdep_assert_held(&lruvec->lru_lock); in lru_add_page_tail()
2341 struct lruvec *lruvec, struct list_head *list) in __split_huge_page_tail() argument
2402 lru_add_page_tail(head, page_tail, lruvec, list); in __split_huge_page_tail()
2409 struct lruvec *lruvec; in __split_huge_page() local
2427 lruvec = lock_page_lruvec(head); in __split_huge_page()
2432 __split_huge_page_tail(head, i, lruvec, list); in __split_huge_page()
2450 unlock_page_lruvec(lruvec); in __split_huge_page()
migrate.c
472 struct lruvec *old_lruvec, *new_lruvec; in migrate_page_move_mapping()
/Linux-v5.15/include/linux/
memcontrol.h
138 struct lruvec lruvec; member
728 static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg, in mem_cgroup_lruvec()
732 struct lruvec *lruvec; in mem_cgroup_lruvec() local
735 lruvec = &pgdat->__lruvec; in mem_cgroup_lruvec()
743 lruvec = &mz->lruvec; in mem_cgroup_lruvec()
750 if (unlikely(lruvec->pgdat != pgdat)) in mem_cgroup_lruvec()
751 lruvec->pgdat = pgdat; in mem_cgroup_lruvec()
752 return lruvec; in mem_cgroup_lruvec()
761 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page) in mem_cgroup_page_lruvec()
774 struct lruvec *lock_page_lruvec(struct page *page);
[all …]
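
memcontrol.h is where the lookup lives: mem_cgroup_lruvec() returns the cgroup's per-node lruvec, falling back to the node's own __lruvec when memcg is disabled or the cgroup is NULL, and the lock_page_lruvec*() family locks the lruvec a given page belongs to. A small usage sketch (anon_lru_pages_on_node() is hypothetical, and lruvec_page_state() is a memcontrol.h helper that does not itself appear in the matches above):

#include <linux/memcontrol.h>
#include <linux/mmzone.h>

static unsigned long anon_lru_pages_on_node(struct mem_cgroup *memcg, int nid)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);

        /* Per-lruvec counters cover just this cgroup's pages on this node. */
        return lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
               lruvec_page_state(lruvec, NR_INACTIVE_ANON);
}
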
mm_inline.h
27 static __always_inline void update_lru_size(struct lruvec *lruvec, in update_lru_size() argument
31 struct pglist_data *pgdat = lruvec_pgdat(lruvec); in update_lru_size()
33 __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); in update_lru_size()
37 mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); in update_lru_size()
83 struct lruvec *lruvec) in add_page_to_lru_list() argument
87 update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); in add_page_to_lru_list()
88 list_add(&page->lru, &lruvec->lists[lru]); in add_page_to_lru_list()
92 struct lruvec *lruvec) in add_page_to_lru_list_tail() argument
96 update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); in add_page_to_lru_list_tail()
97 list_add_tail(&page->lru, &lruvec->lists[lru]); in add_page_to_lru_list_tail()
[all …]
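
mm_inline.h provides the list plumbing: add_page_to_lru_list(), add_page_to_lru_list_tail() and del_page_from_lru_list() splice the page in or out of lruvec->lists[lru] and keep the per-zone LRU sizes in sync via update_lru_size(). A tiny sketch, assuming the caller already holds lruvec->lru_lock (rotate_to_lru_tail() is a made-up name):

#include <linux/mm.h>
#include <linux/mm_inline.h>

/* Move a page to the tail of whichever LRU list it is currently on. */
static void rotate_to_lru_tail(struct page *page, struct lruvec *lruvec)
{
        del_page_from_lru_list(page, lruvec);
        add_page_to_lru_list_tail(page, lruvec);
}
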
vmstat.h
474 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
477 static inline void mod_lruvec_state(struct lruvec *lruvec, in mod_lruvec_state() argument
483 __mod_lruvec_state(lruvec, idx, val); in mod_lruvec_state()
502 static inline void __mod_lruvec_state(struct lruvec *lruvec, in __mod_lruvec_state() argument
505 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); in __mod_lruvec_state()
508 static inline void mod_lruvec_state(struct lruvec *lruvec, in mod_lruvec_state() argument
511 mod_node_page_state(lruvec_pgdat(lruvec), idx, val); in mod_lruvec_state()
528 static inline void inc_lruvec_state(struct lruvec *lruvec, in inc_lruvec_state() argument
531 mod_lruvec_state(lruvec, idx, 1); in inc_lruvec_state()
mmzone.h
297 struct lruvec { struct
894 struct lruvec __lruvec;
952 extern void lruvec_init(struct lruvec *lruvec);
954 static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) in lruvec_pgdat() argument
957 return lruvec->pgdat; in lruvec_pgdat()
959 return container_of(lruvec, struct pglist_data, __lruvec); in lruvec_pgdat()
swap.h
324 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
345 extern void lru_note_cost(struct lruvec *lruvec, bool file,
/Linux-v5.15/Documentation/trace/
events-kmem.rst
72 contention on the lruvec->lru_lock.
/Linux-v5.15/Documentation/admin-guide/cgroup-v1/
memory.rst
296 lruvec->lru_lock.
299 lruvec->lru_lock; PG_lru bit of page->flags is cleared before
300 isolating a page from its LRU under lruvec->lru_lock.