/Linux-v6.1/mm/ |
D | swap.c |
      80  static void __page_cache_release(struct folio *folio)    [in __page_cache_release() argument]
      82  if (folio_test_lru(folio)) {    [in __page_cache_release()]
      86  lruvec = folio_lruvec_lock_irqsave(folio, &flags);    [in __page_cache_release()]
      87  lruvec_del_folio(lruvec, folio);    [in __page_cache_release()]
      88  __folio_clear_lru_flags(folio);    [in __page_cache_release()]
      92  if (unlikely(folio_test_mlocked(folio))) {    [in __page_cache_release()]
      93  long nr_pages = folio_nr_pages(folio);    [in __page_cache_release()]
      95  __folio_clear_mlocked(folio);    [in __page_cache_release()]
      96  zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);    [in __page_cache_release()]
     101  static void __folio_put_small(struct folio *folio)    [in __folio_put_small() argument]
     [all …]
D | filemap.c |
     125  struct folio *folio, void *shadow)    [in page_cache_delete() argument]
     127  XA_STATE(xas, &mapping->i_pages, folio->index);    [in page_cache_delete()]
     133  if (!folio_test_hugetlb(folio)) {    [in page_cache_delete()]
     134  xas_set_order(&xas, folio->index, folio_order(folio));    [in page_cache_delete()]
     135  nr = folio_nr_pages(folio);    [in page_cache_delete()]
     138  VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);    [in page_cache_delete()]
     143  folio->mapping = NULL;    [in page_cache_delete()]
     149  struct folio *folio)    [in filemap_unaccount_folio() argument]
     153  VM_BUG_ON_FOLIO(folio_mapped(folio), folio);    [in filemap_unaccount_folio()]
     154  if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {    [in filemap_unaccount_folio()]
     [all …]
D | truncate.c |
      84  struct folio *folio = fbatch->folios[i];    [in truncate_folio_batch_exceptionals() local]
      87  if (!xa_is_value(folio)) {    [in truncate_folio_batch_exceptionals()]
      88  fbatch->folios[j++] = folio;    [in truncate_folio_batch_exceptionals()]
      97  __clear_shadow_entry(mapping, index, folio);    [in truncate_folio_batch_exceptionals()]
     154  void folio_invalidate(struct folio *folio, size_t offset, size_t length)    [in folio_invalidate() argument]
     156  const struct address_space_operations *aops = folio->mapping->a_ops;    [in folio_invalidate()]
     159  aops->invalidate_folio(folio, offset, length);    [in folio_invalidate()]
     173  static void truncate_cleanup_folio(struct folio *folio)    [in truncate_cleanup_folio() argument]
     175  if (folio_mapped(folio))    [in truncate_cleanup_folio()]
     176  unmap_mapping_folio(folio);    [in truncate_cleanup_folio()]
     [all …]
D | rmap.c |
     492  struct anon_vma *folio_get_anon_vma(struct folio *folio)    [in folio_get_anon_vma() argument]
     498  anon_mapping = (unsigned long)READ_ONCE(folio->mapping);    [in folio_get_anon_vma()]
     501  if (!folio_mapped(folio))    [in folio_get_anon_vma()]
     517  if (!folio_mapped(folio)) {    [in folio_get_anon_vma()]
     536  struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,    [in folio_lock_anon_vma_read() argument]
     544  anon_mapping = (unsigned long)READ_ONCE(folio->mapping);    [in folio_lock_anon_vma_read()]
     547  if (!folio_mapped(folio))    [in folio_lock_anon_vma_read()]
     558  if (!folio_mapped(folio)) {    [in folio_lock_anon_vma_read()]
     577  if (!folio_mapped(folio)) {    [in folio_lock_anon_vma_read()]
     749  struct folio *folio = page_folio(page);    [in page_address_in_vma() local]
     [all …]
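In the folio_get_anon_vma() and folio_lock_anon_vma_read() excerpts above, folio->mapping is read as an unsigned long because, for anonymous folios, that field holds an anon_vma pointer tagged in its low bit (PAGE_MAPPING_ANON) rather than an address_space pointer. The sketch below is a minimal userspace model of that tagged-pointer convention only; the fake_* names are invented for illustration and this is not the kernel code path.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Low bit of ->mapping marks "this pointer is an anon_vma, not an
     * address_space" -- modelled on PAGE_MAPPING_ANON in the kernel. */
    #define MAPPING_ANON 0x1UL

    struct fake_anon_vma { int dummy; };

    struct fake_folio {
            void *mapping;  /* tagged pointer: address_space, or anon_vma | MAPPING_ANON */
    };

    static void set_anon(struct fake_folio *folio, struct fake_anon_vma *av)
    {
            folio->mapping = (void *)((uintptr_t)av | MAPPING_ANON);
    }

    static struct fake_anon_vma *get_anon_vma(struct fake_folio *folio)
    {
            uintptr_t val = (uintptr_t)folio->mapping;

            if (!(val & MAPPING_ANON))
                    return NULL;            /* file-backed: not an anon_vma */
            return (struct fake_anon_vma *)(val & ~MAPPING_ANON);
    }

    int main(void)
    {
            struct fake_anon_vma *av = malloc(sizeof(*av));
            struct fake_folio folio = { 0 };

            set_anon(&folio, av);
            printf("recovered anon_vma: %p (stored %p)\n",
                   (void *)get_anon_vma(&folio), (void *)av);
            free(av);
            return 0;
    }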
D | swap_state.c |
      88  int add_to_swap_cache(struct folio *folio, swp_entry_t entry,    [in add_to_swap_cache() argument]
      93  XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));    [in add_to_swap_cache()]
      94  unsigned long i, nr = folio_nr_pages(folio);    [in add_to_swap_cache()]
      97  VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);    [in add_to_swap_cache()]
      98  VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);    [in add_to_swap_cache()]
      99  VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);    [in add_to_swap_cache()]
     101  folio_ref_add(folio, nr);    [in add_to_swap_cache()]
     102  folio_set_swapcache(folio);    [in add_to_swap_cache()]
     110  VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);    [in add_to_swap_cache()]
     116  set_page_private(folio_page(folio, i), entry.val + i);    [in add_to_swap_cache()]
     [all …]
D | swap.h |
      33  bool add_to_swap(struct folio *folio);
      35  int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
      37  void __delete_from_swap_cache(struct folio *folio,
      39  void delete_from_swap_cache(struct folio *folio);
      42  struct folio *swap_cache_get_folio(swp_entry_t entry,
      60  static inline unsigned int folio_swap_flags(struct folio *folio)    [in folio_swap_flags() argument]
      62  return page_swap_info(&folio->page)->flags;    [in folio_swap_flags()]
     101  static inline struct folio *swap_cache_get_folio(swp_entry_t entry,    [in swap_cache_get_folio()]
     113  static inline bool add_to_swap(struct folio *folio)    [in add_to_swap() argument]
     123  static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,    [in add_to_swap_cache() argument]
     [all …]
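The hits above come from two halves of the same private header: plain prototypes for the swap-enabled build, and later static inline versions (lines 101 onwards) that appear to be the stubs compiled in when swap support is not configured. The sketch below illustrates that general "prototype or inline stub behind a config symbol" idiom; HAVE_MYFEATURE and the function names are invented stand-ins, not the real CONFIG_SWAP machinery.

    /* feature.h -- hedged sketch of the config-stub idiom.
     * HAVE_MYFEATURE stands in for a Kconfig symbol such as CONFIG_SWAP. */
    #ifndef FEATURE_H
    #define FEATURE_H

    #include <stdbool.h>

    struct folio_like { int nr_pages; };

    #ifdef HAVE_MYFEATURE
    /* Real implementations live in a .c file built only for this config. */
    bool add_to_feature(struct folio_like *f);
    void remove_from_feature(struct folio_like *f);
    #else
    /* Stubs: callers compile and behave sanely with the feature disabled,
     * so call sites stay free of #ifdefs. */
    static inline bool add_to_feature(struct folio_like *f) { (void)f; return false; }
    static inline void remove_from_feature(struct folio_like *f) { (void)f; }
    #endif

    #endif /* FEATURE_H */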
D | workingset.c |
     220  static void *lru_gen_eviction(struct folio *folio)    [in lru_gen_eviction() argument]
     227  int type = folio_is_file_lru(folio);    [in lru_gen_eviction()]
     228  int delta = folio_nr_pages(folio);    [in lru_gen_eviction()]
     229  int refs = folio_lru_refs(folio);    [in lru_gen_eviction()]
     231  struct mem_cgroup *memcg = folio_memcg(folio);    [in lru_gen_eviction()]
     232  struct pglist_data *pgdat = folio_pgdat(folio);    [in lru_gen_eviction()]
     247  static void lru_gen_refault(struct folio *folio, void *shadow)    [in lru_gen_refault() argument]
     258  int type = folio_is_file_lru(folio);    [in lru_gen_refault()]
     259  int delta = folio_nr_pages(folio);    [in lru_gen_refault()]
     263  if (pgdat != folio_pgdat(folio))    [in lru_gen_refault()]
     [all …]
D | shmem.c |
     140  struct folio **foliop, enum sgp_type sgp,
     549  struct folio *folio;    [in shmem_unused_huge_shrink() local]
     602  folio = filemap_get_folio(inode->i_mapping, index);    [in shmem_unused_huge_shrink()]
     603  if (!folio)    [in shmem_unused_huge_shrink()]
     607  if (!folio_test_large(folio)) {    [in shmem_unused_huge_shrink()]
     608  folio_put(folio);    [in shmem_unused_huge_shrink()]
     619  if (!folio_trylock(folio)) {    [in shmem_unused_huge_shrink()]
     620  folio_put(folio);    [in shmem_unused_huge_shrink()]
     624  ret = split_folio(folio);    [in shmem_unused_huge_shrink()]
     625  folio_unlock(folio);    [in shmem_unused_huge_shrink()]
     [all …]
D | vmscan.c |
     179  struct folio *prev; \
    1050  static inline int is_page_cache_freeable(struct folio *folio)    [in is_page_cache_freeable() argument]
    1057  return folio_ref_count(folio) - folio_test_private(folio) ==    [in is_page_cache_freeable()]
    1058  1 + folio_nr_pages(folio);    [in is_page_cache_freeable()]
    1074  struct folio *folio, int error)    [in handle_write_error() argument]
    1076  folio_lock(folio);    [in handle_write_error()]
    1077  if (folio_mapping(folio) == mapping)    [in handle_write_error()]
    1079  folio_unlock(folio);    [in handle_write_error()]
    1189  void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,    [in __acct_reclaim_writeback() argument]
    1194  node_stat_add_folio(folio, NR_THROTTLED_WRITTEN);    [in __acct_reclaim_writeback()]
     [all …]
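The is_page_cache_freeable() lines above encode a small piece of refcount arithmetic: reclaim may free a folio only when the remaining references are the caller's single reference plus one page-cache reference per base page, after discounting a private (buffer) reference. A minimal userspace sketch of that comparison, using a stand-in struct rather than a real folio:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the values the kernel reads off a real folio. */
    struct fake_folio {
            int ref_count;     /* folio_ref_count()     */
            bool has_private;  /* folio_test_private()  */
            int nr_pages;      /* folio_nr_pages()      */
    };

    /* Mirrors the check shown above: the caller holds one reference,
     * the page cache holds one per base page, and a private (e.g.
     * buffer head) reference is subtracted before comparing. */
    static bool is_page_cache_freeable(const struct fake_folio *f)
    {
            return f->ref_count - (f->has_private ? 1 : 0) == 1 + f->nr_pages;
    }

    int main(void)
    {
            struct fake_folio idle = { .ref_count = 5, .has_private = false, .nr_pages = 4 };
            struct fake_folio busy = { .ref_count = 7, .has_private = true,  .nr_pages = 4 };

            printf("idle order-2 folio freeable: %d\n", is_page_cache_freeable(&idle)); /* 1 */
            printf("busy folio freeable:         %d\n", is_page_cache_freeable(&busy)); /* 0 */
            return 0;
    }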
/Linux-v6.1/include/linux/ |
D | pagemap.h |
     349  struct address_space *folio_mapping(struct folio *);
     350  struct address_space *swapcache_mapping(struct folio *);
     364  static inline struct address_space *folio_file_mapping(struct folio *folio)    [in folio_file_mapping() argument]
     366  if (unlikely(folio_test_swapcache(folio)))    [in folio_file_mapping()]
     367  return swapcache_mapping(folio);    [in folio_file_mapping()]
     369  return folio->mapping;    [in folio_file_mapping()]
     382  struct folio *folio = page_folio(page);    [in page_mapping_file() local]
     384  if (unlikely(folio_test_swapcache(folio)))    [in page_mapping_file()]
     386  return folio_mapping(folio);    [in page_mapping_file()]
     398  static inline struct inode *folio_inode(struct folio *folio)    [in folio_inode() argument]
     [all …]
D | mm_inline.h |
      27  static inline int folio_is_file_lru(struct folio *folio)    [in folio_is_file_lru() argument]
      29  return !folio_test_swapbacked(folio);    [in folio_is_file_lru()]
      65  static __always_inline void __folio_clear_lru_flags(struct folio *folio)    [in __folio_clear_lru_flags() argument]
      67  VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);    [in __folio_clear_lru_flags()]
      69  __folio_clear_lru(folio);    [in __folio_clear_lru_flags()]
      72  if (folio_test_active(folio) && folio_test_unevictable(folio))    [in __folio_clear_lru_flags()]
      75  __folio_clear_active(folio);    [in __folio_clear_lru_flags()]
      76  __folio_clear_unevictable(folio);    [in __folio_clear_lru_flags()]
      86  static __always_inline enum lru_list folio_lru_list(struct folio *folio)    [in folio_lru_list() argument]
      90  VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);    [in folio_lru_list()]
     [all …]
D | page_ref.h |
      87  static inline int folio_ref_count(const struct folio *folio)    [in folio_ref_count() argument]
      89  return page_ref_count(&folio->page);    [in folio_ref_count()]
     104  static inline void folio_set_count(struct folio *folio, int v)    [in folio_set_count() argument]
     106  set_page_count(&folio->page, v);    [in folio_set_count()]
     125  static inline void folio_ref_add(struct folio *folio, int nr)    [in folio_ref_add() argument]
     127  page_ref_add(&folio->page, nr);    [in folio_ref_add()]
     137  static inline void folio_ref_sub(struct folio *folio, int nr)    [in folio_ref_sub() argument]
     139  page_ref_sub(&folio->page, nr);    [in folio_ref_sub()]
     151  static inline int folio_ref_sub_return(struct folio *folio, int nr)    [in folio_ref_sub_return() argument]
     153  return page_ref_sub_return(&folio->page, nr);    [in folio_ref_sub_return()]
     [all …]
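Every folio_ref_* helper above is a thin wrapper that forwards to the corresponding page_ref_* call on &folio->page, i.e. the reference count lives in the folio's head page. A small compilable model of that delegation pattern (the fake_* types are stand-ins for illustration, not kernel structures):

    #include <stdio.h>

    /* Minimal model: a "folio" embeds its head page, and the folio
     * refcount helpers just forward to the page-level ones, as the
     * page_ref.h wrappers above do with &folio->page. */
    struct fake_page  { int _refcount; };
    struct fake_folio { struct fake_page page; };

    static int  page_ref_count(const struct fake_page *p) { return p->_refcount; }
    static void page_ref_add(struct fake_page *p, int nr)  { p->_refcount += nr; }
    static void page_ref_sub(struct fake_page *p, int nr)  { p->_refcount -= nr; }

    static int  folio_ref_count(const struct fake_folio *f) { return page_ref_count(&f->page); }
    static void folio_ref_add(struct fake_folio *f, int nr) { page_ref_add(&f->page, nr); }
    static void folio_ref_sub(struct fake_folio *f, int nr) { page_ref_sub(&f->page, nr); }

    int main(void)
    {
            struct fake_folio f = { .page = { ._refcount = 1 } };

            folio_ref_add(&f, 4);   /* e.g. one ref per base page of an order-2 folio */
            folio_ref_sub(&f, 4);
            printf("refcount back to %d\n", folio_ref_count(&f));
            return 0;
    }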
D | page_idle.h |
      16  static inline bool folio_test_young(struct folio *folio)    [in folio_test_young() argument]
      18  struct page_ext *page_ext = page_ext_get(&folio->page);    [in folio_test_young()]
      30  static inline void folio_set_young(struct folio *folio)    [in folio_set_young() argument]
      32  struct page_ext *page_ext = page_ext_get(&folio->page);    [in folio_set_young()]
      41  static inline bool folio_test_clear_young(struct folio *folio)    [in folio_test_clear_young() argument]
      43  struct page_ext *page_ext = page_ext_get(&folio->page);    [in folio_test_clear_young()]
      55  static inline bool folio_test_idle(struct folio *folio)    [in folio_test_idle() argument]
      57  struct page_ext *page_ext = page_ext_get(&folio->page);    [in folio_test_idle()]
      69  static inline void folio_set_idle(struct folio *folio)    [in folio_set_idle() argument]
      71  struct page_ext *page_ext = page_ext_get(&folio->page);    [in folio_set_idle()]
     [all …]
D | memcontrol.h |
     351  static inline bool folio_memcg_kmem(struct folio *folio);
     375  static inline struct mem_cgroup *__folio_memcg(struct folio *folio)    [in __folio_memcg() argument]
     377  unsigned long memcg_data = folio->memcg_data;    [in __folio_memcg()]
     379  VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);    [in __folio_memcg()]
     380  VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);    [in __folio_memcg()]
     381  VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);    [in __folio_memcg()]
     396  static inline struct obj_cgroup *__folio_objcg(struct folio *folio)    [in __folio_objcg() argument]
     398  unsigned long memcg_data = folio->memcg_data;    [in __folio_objcg()]
     400  VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);    [in __folio_objcg()]
     401  VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);    [in __folio_objcg()]
     [all …]
D | vmstat.h |
     412  static inline void __zone_stat_mod_folio(struct folio *folio,    [in __zone_stat_mod_folio() argument]
     415  __mod_zone_page_state(folio_zone(folio), item, nr);    [in __zone_stat_mod_folio()]
     418  static inline void __zone_stat_add_folio(struct folio *folio,    [in __zone_stat_add_folio() argument]
     421  __mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));    [in __zone_stat_add_folio()]
     424  static inline void __zone_stat_sub_folio(struct folio *folio,    [in __zone_stat_sub_folio() argument]
     427  __mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));    [in __zone_stat_sub_folio()]
     430  static inline void zone_stat_mod_folio(struct folio *folio,    [in zone_stat_mod_folio() argument]
     433  mod_zone_page_state(folio_zone(folio), item, nr);    [in zone_stat_mod_folio()]
     436  static inline void zone_stat_add_folio(struct folio *folio,    [in zone_stat_add_folio() argument]
     439  mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));    [in zone_stat_add_folio()]
     [all …]
D | page-flags.h |
     276  const struct page *: (const struct folio *)_compound_head(p), \
     277  struct page *: (struct folio *)_compound_head(p)))
     288  #define folio_page(folio, n) nth_page(&(folio)->page, n)    [argument]
     315  static unsigned long *folio_flags(struct folio *folio, unsigned n)    [in folio_flags() argument]
     317  struct page *page = &folio->page;    [in folio_flags()]
     380  static __always_inline bool folio_test_##lname(struct folio *folio) \
     381  { return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
     387  void folio_set_##lname(struct folio *folio) \
     388  { set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
     394  void folio_clear_##lname(struct folio *folio) \
     [all …]
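The folio_test_##lname / folio_set_##lname / folio_clear_##lname fragments above are bodies of the flag-accessor generator macros in page-flags.h: one macro invocation per flag stamps out the whole test/set/clear family via token pasting. A compiling sketch of the same pattern over a plain bitmask (the kernel versions additionally go through folio_flags() and a placement policy, which is omitted here):

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_folio { unsigned long flags; };

    /* One invocation of DEFINE_FLAG(foo, bit) expands into
     * folio_test_foo(), folio_set_foo() and folio_clear_foo(),
     * mirroring how page-flags.h stamps out its accessors. */
    #define DEFINE_FLAG(lname, bit)                                          \
    static inline bool folio_test_##lname(const struct fake_folio *folio)   \
    { return folio->flags & (1UL << (bit)); }                                \
    static inline void folio_set_##lname(struct fake_folio *folio)          \
    { folio->flags |= (1UL << (bit)); }                                      \
    static inline void folio_clear_##lname(struct fake_folio *folio)        \
    { folio->flags &= ~(1UL << (bit)); }

    DEFINE_FLAG(dirty, 0)
    DEFINE_FLAG(uptodate, 1)

    int main(void)
    {
            struct fake_folio folio = { 0 };

            folio_set_dirty(&folio);
            folio_set_uptodate(&folio);
            folio_clear_dirty(&folio);
            printf("dirty=%d uptodate=%d\n",
                   folio_test_dirty(&folio), folio_test_uptodate(&folio));
            return 0;
    }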
D | swap.h |
     352  static inline swp_entry_t folio_swap_entry(struct folio *folio)    [in folio_swap_entry() argument]
     354  swp_entry_t entry = { .val = page_private(&folio->page) };    [in folio_swap_entry()]
     358  static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)    [in folio_set_swap_entry() argument]
     360  folio->private = (void *)entry.val;    [in folio_set_swap_entry()]
     365  void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
     366  void workingset_refault(struct folio *folio, void *shadow);
     367  void workingset_activation(struct folio *folio);
     388  void lru_note_cost_folio(struct folio *);
     389  void folio_add_lru(struct folio *);
     390  void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
     [all …]
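folio_swap_entry() and folio_set_swap_entry() above show that, for a folio in the swap cache, the swp_entry_t is simply an unsigned long value parked in the folio's private field. A userspace sketch of that round trip, with swp_entry_t modelled as a bare value and the folio reduced to a stand-in struct:

    #include <stdio.h>

    typedef struct { unsigned long val; } swp_entry_t;

    struct fake_folio {
            void *private;  /* doubles as storage for the swap entry value */
    };

    static void folio_set_swap_entry(struct fake_folio *folio, swp_entry_t entry)
    {
            folio->private = (void *)entry.val;
    }

    static swp_entry_t folio_swap_entry(const struct fake_folio *folio)
    {
            swp_entry_t entry = { .val = (unsigned long)folio->private };
            return entry;
    }

    int main(void)
    {
            struct fake_folio folio = { 0 };
            swp_entry_t entry = { .val = 0x2a };  /* encodes swap type + offset in the kernel */

            folio_set_swap_entry(&folio, entry);
            printf("entry.val = %#lx\n", folio_swap_entry(&folio).val);
            return 0;
    }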
D | rmap.h |
     169  struct anon_vma *folio_get_anon_vma(struct folio *folio);
     357  int folio_referenced(struct folio *, int is_locked,
     360  void try_to_migrate(struct folio *folio, enum ttu_flags flags);
     361  void try_to_unmap(struct folio *, enum ttu_flags flags);
     426  int folio_mkclean(struct folio *);
     431  void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
     454  bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
     456  int (*done)(struct folio *folio);
     457  struct anon_vma *(*anon_lock)(struct folio *folio,
     462  void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
     [all …]
D | migrate.h |
      65  int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
      66  struct folio *src, enum migrate_mode mode, int extra_count);
      67  int migrate_folio(struct address_space *mapping, struct folio *dst,
      68  struct folio *src, enum migrate_mode mode);
      76  struct folio *dst, struct folio *src);
      79  void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
      80  void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
      82  struct folio *newfolio, struct folio *folio, int extra_count);
      98  struct folio *dst, struct folio *src)    [in migrate_huge_page_move_mapping()]
     120  static inline bool folio_test_movable(struct folio *folio)    [in folio_test_movable() argument]
     [all …]
/Linux-v6.1/fs/afs/ |
D | write.c |
      25  bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)    [in afs_dirty_folio() argument]
      27  return fscache_dirty_folio(mapping, folio,    [in afs_dirty_folio()]
      30  static void afs_folio_start_fscache(bool caching, struct folio *folio)    [in afs_folio_start_fscache() argument]
      33  folio_start_fscache(folio);    [in afs_folio_start_fscache()]
      36  static void afs_folio_start_fscache(bool caching, struct folio *folio)    [in afs_folio_start_fscache() argument]
      49  struct folio *folio;    [in afs_write_begin() local]
      63  ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);    [in afs_write_begin()]
      67  index = folio_index(folio);    [in afs_write_begin()]
      75  if (folio_test_private(folio)) {    [in afs_write_begin()]
      76  priv = (unsigned long)folio_get_private(folio);    [in afs_write_begin()]
     [all …]
/Linux-v6.1/include/trace/events/ |
D | pagemap.h |
      19  #define trace_pagemap_flags(folio) ( \    [argument]
      20  (folio_test_anon(folio) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
      21  (folio_mapped(folio) ? PAGEMAP_MAPPED : 0) | \
      22  (folio_test_swapcache(folio) ? PAGEMAP_SWAPCACHE : 0) | \
      23  (folio_test_swapbacked(folio) ? PAGEMAP_SWAPBACKED : 0) | \
      24  (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
      25  (folio_test_private(folio) ? PAGEMAP_BUFFERS : 0) \
      30  TP_PROTO(struct folio *folio),
      32  TP_ARGS(folio),
      35  __field(struct folio *, folio )
     [all …]
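trace_pagemap_flags() above folds several folio state tests into a single bitmask that the tracepoint records. A small sketch of the same predicate-to-bitmask construction; the PAGEMAP_* values below are illustrative only, the real constants live in this trace header:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative bit values; the real PAGEMAP_* constants are defined
     * in include/trace/events/pagemap.h. */
    #define PAGEMAP_ANONYMOUS  0x01
    #define PAGEMAP_FILE       0x02
    #define PAGEMAP_MAPPED     0x04
    #define PAGEMAP_SWAPCACHE  0x08

    struct fake_folio_state {
            bool anon;
            bool mapped;
            bool swapcache;
    };

    /* Same shape as the trace_pagemap_flags() macro: one OR-term per test. */
    static unsigned int pagemap_flags(const struct fake_folio_state *s)
    {
            return (s->anon      ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) |
                   (s->mapped    ? PAGEMAP_MAPPED    : 0)            |
                   (s->swapcache ? PAGEMAP_SWAPCACHE : 0);
    }

    int main(void)
    {
            struct fake_folio_state s = { .anon = true, .mapped = true, .swapcache = false };

            printf("flags = %#x\n", pagemap_flags(&s));  /* 0x05 */
            return 0;
    }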
/Linux-v6.1/fs/iomap/ |
D | buffered-io.c |
      37  static inline struct iomap_page *to_iomap_page(struct folio *folio)    [in to_iomap_page() argument]
      39  if (folio_test_private(folio))    [in to_iomap_page()]
      40  return folio_get_private(folio);    [in to_iomap_page()]
      47  iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)    [in iomap_page_create() argument]
      49  struct iomap_page *iop = to_iomap_page(folio);    [in iomap_page_create()]
      50  unsigned int nr_blocks = i_blocks_per_folio(inode, folio);    [in iomap_page_create()]
      65  if (folio_test_uptodate(folio))    [in iomap_page_create()]
      67  folio_attach_private(folio, iop);    [in iomap_page_create()]
      72  static void iomap_page_release(struct folio *folio)    [in iomap_page_release() argument]
      74  struct iomap_page *iop = folio_detach_private(folio);    [in iomap_page_release()]
     [all …]
/Linux-v6.1/fs/9p/ |
D | vfs_addr.c |
     122  static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)    [in v9fs_release_folio() argument]
     124  struct inode *inode = folio_inode(folio);    [in v9fs_release_folio()]
     126  if (folio_test_private(folio))    [in v9fs_release_folio()]
     129  if (folio_test_fscache(folio)) {    [in v9fs_release_folio()]
     132  folio_wait_fscache(folio);    [in v9fs_release_folio()]
     139  static void v9fs_invalidate_folio(struct folio *folio, size_t offset,    [in v9fs_invalidate_folio() argument]
     142  folio_wait_fscache(folio);    [in v9fs_invalidate_folio()]
     159  static int v9fs_vfs_write_folio_locked(struct folio *folio)    [in v9fs_vfs_write_folio_locked() argument]
     161  struct inode *inode = folio_inode(folio);    [in v9fs_vfs_write_folio_locked()]
     164  loff_t start = folio_pos(folio);    [in v9fs_vfs_write_folio_locked()]
     [all …]
/Linux-v6.1/fs/netfs/ |
D | buffered_read.c |
      19  struct folio *folio;    [in netfs_rreq_unlock_folios() local]
      47  xas_for_each(&xas, folio, last_page) {    [in netfs_rreq_unlock_folios()]
      51  if (xas_retry(&xas, folio))    [in netfs_rreq_unlock_folios()]
      54  pg_end = folio_pos(folio) + folio_size(folio) - 1;    [in netfs_rreq_unlock_folios()]
      64  folio_start_fscache(folio);    [in netfs_rreq_unlock_folios()]
      84  flush_dcache_folio(folio);    [in netfs_rreq_unlock_folios()]
      85  folio_mark_uptodate(folio);    [in netfs_rreq_unlock_folios()]
      89  if (folio_index(folio) == rreq->no_unlock_folio &&    [in netfs_rreq_unlock_folios()]
      93  folio_unlock(folio);    [in netfs_rreq_unlock_folios()]
     220  int netfs_read_folio(struct file *file, struct folio *folio)    [in netfs_read_folio() argument]
     [all …]
/Linux-v6.1/mm/damon/ |
D | paddr.c |
      19  static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,    [in __damon_pa_mkold() argument]
      22  DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);    [in __damon_pa_mkold()]
      36  struct folio *folio;    [in damon_pa_mkold() local]
      46  folio = page_folio(page);    [in damon_pa_mkold()]
      48  if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {    [in damon_pa_mkold()]
      49  folio_set_idle(folio);    [in damon_pa_mkold()]
      53  need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);    [in damon_pa_mkold()]
      54  if (need_lock && !folio_trylock(folio))    [in damon_pa_mkold()]
      57  rmap_walk(folio, &rwc);    [in damon_pa_mkold()]
      60  folio_unlock(folio);    [in damon_pa_mkold()]
     [all …]