Searched refs:folios (Results 1 – 25 of 41) sorted by relevance


/Linux-v6.6/fs/ramfs/
file-nommu.c  235 ret = (unsigned long) folio_address(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
236 pfn = folio_pfn(fbatch.folios[0]); in ramfs_nommu_get_unmapped_area()
240 if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) { in ramfs_nommu_get_unmapped_area()
244 nr_pages += folio_nr_pages(fbatch.folios[loop]); in ramfs_nommu_get_unmapped_area()
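A minimal sketch of the contiguity check these ramfs hits implement: walk the batch and require each folio's first page frame to start exactly where the previous folio ended. folios_are_contiguous() is an illustrative name, not a kernel function; only the folio accessors are real API.

    /* True iff the batched folios cover one physically contiguous range. */
    static bool folios_are_contiguous(struct folio_batch *fbatch)
    {
            unsigned long pfn = folio_pfn(fbatch->folios[0]);
            unsigned long nr_pages = 0;
            unsigned int i;

            for (i = 0; i < folio_batch_count(fbatch); i++) {
                    /* Each folio must begin where the previous one ended. */
                    if (pfn + nr_pages != folio_pfn(fbatch->folios[i]))
                            return false;
                    nr_pages += folio_nr_pages(fbatch->folios[i]);
            }
            return true;
    }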
/Linux-v6.6/include/linux/
pagevec.h  31 struct folio *folios[PAGEVEC_SIZE]; member
74 fbatch->folios[fbatch->nr++] = folio; in folio_batch_add()
mm_inline.h  259 list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in lru_gen_add_folio()
261 list_add(&folio->lru, &lrugen->folios[gen][type][zone]); in lru_gen_add_folio()
mm.h  1511 struct folio **folios; member
1529 static inline void folios_put(struct folio **folios, unsigned int nr) in folios_put() argument
1531 release_pages(folios, nr); in folios_put()
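Together, the pagevec.h and mm.h hits define the folio_batch API the rest of these results revolve around: a fixed-size array of folio pointers plus a count, released in one call. A minimal usage sketch, assuming the caller already holds a reference on folio; process_batch() is an illustrative placeholder:

    struct folio_batch fbatch;

    folio_batch_init(&fbatch);
    /* folio_batch_add() returns the space left; 0 means the batch is full. */
    if (!folio_batch_add(&fbatch, folio)) {
            process_batch(&fbatch);         /* illustrative, not kernel API */
            /* folios_put() drops one reference per folio via release_pages(). */
            folios_put(fbatch.folios, folio_batch_count(&fbatch));
            folio_batch_init(&fbatch);      /* start an empty batch again */
    }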
/Linux-v6.6/mm/
truncate.c  70 if (xa_is_value(fbatch->folios[j])) in truncate_folio_batch_exceptionals()
83 struct folio *folio = fbatch->folios[i]; in truncate_folio_batch_exceptionals()
87 fbatch->folios[j++] = folio; in truncate_folio_batch_exceptionals()
367 truncate_cleanup_folio(fbatch.folios[i]); in truncate_inode_pages_range()
370 folio_unlock(fbatch.folios[i]); in truncate_inode_pages_range()
414 struct folio *folio = fbatch.folios[i]; in truncate_inode_pages_range()
510 struct folio *folio = fbatch.folios[i]; in mapping_try_invalidate()
636 struct folio *folio = fbatch.folios[i]; in invalidate_inode_pages2_range()
swap.c  210 struct folio *folio = fbatch->folios[i]; in folio_batch_move_lru()
224 folios_put(fbatch->folios, folio_batch_count(fbatch)); in folio_batch_move_lru()
403 struct folio *batch_folio = fbatch->folios[i]; in __lru_cache_activate_folio()
1062 release_pages(fbatch->folios, folio_batch_count(fbatch)); in __folio_batch_release()
1081 struct folio *folio = fbatch->folios[i]; in folio_batch_remove_exceptionals()
1083 fbatch->folios[j++] = folio; in folio_batch_remove_exceptionals()
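The truncate.c and swap.c hits share one idiom: a batch filled from the page cache can carry value entries (shadow or swap entries) rather than folio pointers, and those are squeezed out in place before the folios are used. A sketch of that compaction, mirroring the shape of folio_batch_remove_exceptionals() above; drop_exceptionals() is an illustrative name:

    /* Drop non-folio (exceptional) entries, keeping folios in order. */
    static void drop_exceptionals(struct folio_batch *fbatch)
    {
            unsigned int i, j;

            for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
                    struct folio *folio = fbatch->folios[i];

                    /* xa_is_value() flags a shadow entry, not a real folio. */
                    if (!xa_is_value(folio))
                            fbatch->folios[j++] = folio;
            }
            fbatch->nr = j;
    }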
migrate.c  1820 LIST_HEAD(folios); in migrate_pages_sync()
1826 reason, &folios, split_folios, &astats, in migrate_pages_sync()
1834 list_splice_tail(&folios, ret_folios); in migrate_pages_sync()
1844 list_splice_tail_init(&folios, from); in migrate_pages_sync()
1846 list_move(from->next, &folios); in migrate_pages_sync()
1847 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, in migrate_pages_sync()
1850 list_splice_tail_init(&folios, ret_folios); in migrate_pages_sync()
1892 LIST_HEAD(folios); in migrate_pages()
1920 list_cut_before(&folios, from, &folio2->lru); in migrate_pages()
1922 list_splice_init(from, &folios); in migrate_pages()
[all …]
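The migrate.c hits are plain list_head batching: carve entries off the input list, attempt a batch, and splice whatever remains onto a result list. A hedged sketch of that shape only; migrate_one_batch() is illustrative, not the kernel function:

    LIST_HEAD(batch);

    /* Move all entries from the input list onto a private batch list. */
    list_splice_init(from, &batch);
    if (migrate_one_batch(&batch))
            /* Return unprocessed folios where the caller can see them. */
            list_splice_tail_init(&batch, ret_folios);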
filemap.c  282 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
302 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
304 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
331 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch()
343 filemap_free_folio(mapping, fbatch->folios[i]); in delete_from_page_cache_batch()
523 struct folio *folio = fbatch.folios[i]; in __filemap_fdatawait_range()
2042 folio = fbatch->folios[idx]; in find_get_entries()
2106 folio = fbatch->folios[idx]; in find_lock_entries()
2234 folio = fbatch->folios[nr - 1]; in filemap_get_folios_contig()
2570 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; in filemap_get_pages()
[all …]
mlock.c  194 folio = fbatch->folios[i]; in mlock_folio_batch()
197 fbatch->folios[i] = folio; in mlock_folio_batch()
209 folios_put(fbatch->folios, folio_batch_count(fbatch)); in mlock_folio_batch()
vmscan.c  4371 struct list_head *head = &lrugen->folios[old_gen][type][zone]; in inc_min_seq()
4382 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); in inc_min_seq()
4410 if (!list_empty(&lrugen->folios[gen][type][zone])) in try_to_inc_min_seq()
4931 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4940 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4950 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4958 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
5023 struct list_head *head = &lrugen->folios[gen][type][zone]; in scan_folios()
5618 if (!list_empty(&lrugen->folios[gen][type][zone])) in state_is_valid()
5663 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; in drain_evictable()
[all …]
/Linux-v6.6/Documentation/mm/
unevictable-lru.rst  13 folios.
28 folios and to hide these folios from vmscan. This mechanism is based on a patch
72 The Unevictable LRU infrastructure maintains unevictable folios as if they were
75 (1) We get to "treat unevictable folios just like we treat other folios in the
80 (2) We want to be able to migrate unevictable folios between nodes for memory
82 can only migrate folios that it can successfully isolate from the LRU
84 maintain folios elsewhere than on an LRU-like list, where they can be
88 anonymous, swap-backed folios. This differentiation is only important
89 while the folios are, in fact, evictable.
171 list. Instead, vmscan will do this if and when it encounters the folios during
[all …]
multigen_lru.rst  92 truncated generation number is an index to ``lrugen->folios[]``. The
96 ``lrugen->folios[]``; otherwise it stores zero.
100 generations, tiers do not have dedicated ``lrugen->folios[]``. In
131 increments ``min_seq`` when ``lrugen->folios[]`` indexed by
226 since each node and memcg combination has an LRU of folios (see
232 the active/inactive LRU (of folios):
255 The multi-gen LRU (of folios) can be disassembled into the following
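The multigen_lru.rst hits explain the indexing visible in the mm_inline.h and vmscan.c results above: a folio's sequence counter is truncated to pick one of the generation lists. A sketch of that indexing, assuming the v6.6 helper in mm_inline.h and its constant (MAX_NR_GENS is 4):

    /* Truncate a sequence number to an lrugen->folios[] index. */
    static inline int lru_gen_from_seq(unsigned long seq)
    {
            return seq % MAX_NR_GENS;
    }

    head = &lrugen->folios[lru_gen_from_seq(seq)][type][zone];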
/Linux-v6.6/fs/nilfs2/
page.c  255 struct folio *folio = fbatch.folios[i], *dfolio; in nilfs_copy_dirty_pages()
309 struct folio *folio = fbatch.folios[i], *dfolio; in nilfs_copy_back_pages()
370 struct folio *folio = fbatch.folios[i]; in nilfs_clear_dirty_pages()
513 folio = fbatch.folios[i]; in nilfs_find_uncommitted_extent()
/Linux-v6.6/fs/smb/client/
cifsencrypt.c  106 struct folio *folios[16], *folio; in cifs_shash_xarray() local
119 nr = xa_extract(iter->xarray, (void **)folios, index, last, in cifs_shash_xarray()
120 ARRAY_SIZE(folios), XA_PRESENT); in cifs_shash_xarray()
125 folio = folios[i]; in cifs_shash_xarray()
144 } while (nr == ARRAY_SIZE(folios)); in cifs_shash_xarray()
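The cifs hit is the standard xa_extract() pagination idiom: pull up to ARRAY_SIZE(folios) present entries per call and keep looping while the array comes back full. A minimal sketch, where xarray, index and last (the range being walked) are assumed from context and the per-folio work is elided:

    struct folio *folios[16];
    unsigned int nr, i;

    do {
            nr = xa_extract(xarray, (void **)folios, index, last,
                            ARRAY_SIZE(folios), XA_PRESENT);
            for (i = 0; i < nr; i++) {
                    /* ... consume folios[i], e.g. feed it to the hash ... */
                    index = folios[i]->index + folio_nr_pages(folios[i]);
            }
    } while (nr == ARRAY_SIZE(folios));     /* a short batch means we are done */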
/Linux-v6.6/Documentation/filesystems/
netfs_library.rst  104 * Handle folios that span multiple pages.
109 don't match folio sizes or folio alignments and that may cross folios.
363 it transferred. The filesystem also should not deal with setting folios
367 Note that the helpers have the folios locked, but not pinned. It is
391 [Optional] This is called after the folios in the request have all been
438 * Once the data is read, the folios that have been fully read/cleared:
446 * Any folios that need writing to the cache will then have DIO writes issued.
450 * Writes to the cache will proceed asynchronously and the folios will have the
fsverity.rst  633 ``->readahead()`` methods must be modified to verify folios before
689 verification. Finally, folios where no decryption or verity error
690 occurred are marked Uptodate, and the folios are unlocked.
856 - To prevent bypassing verification, folios must not be marked
858 filesystem is responsible for marking folios Uptodate via
vfs.rst  702 on dirty pages, and ->release_folio on clean folios with the private
859 If the mapping does not support large folios, the folio will
869 In normal operation, folios are read through the ->readahead()
976 release_folio is called on folios with private data to tell the
986 some or all folios in an address_space. This can happen
991 and needs to be certain that all folios are invalidated, then
1031 some filesystems have more complex state (unstable folios in NFS
/Linux-v6.6/Documentation/core-api/
pin_user_pages.rst  58 For large folios, the GUP_PIN_COUNTING_BIAS scheme is not used. Instead,
62 This approach for large folios avoids the counting upper limit problems
68 This also means that huge pages and large folios do not suffer
273 fields, and to better report on large folios in general. Specifically,
274 for large folios, the exact pincount is reported.
/Linux-v6.6/fs/ceph/
addr.c  1035 page = &fbatch.folios[i]->page; in ceph_writepages_start()
1175 fbatch.folios[i] = NULL; in ceph_writepages_start()
1186 if (!fbatch.folios[j]) in ceph_writepages_start()
1189 fbatch.folios[n] = fbatch.folios[j]; in ceph_writepages_start()
1349 fbatch.nr ? fbatch.folios[0] : NULL); in ceph_writepages_start()
1372 page = &fbatch.folios[i]->page; in ceph_writepages_start()
/Linux-v6.6/fs/gfs2/
aops.c  221 nr_pages += folio_nr_pages(fbatch->folios[i]); in gfs2_write_jdata_batch()
229 struct folio *folio = fbatch->folios[i]; in gfs2_write_jdata_batch()
/Linux-v6.6/fs/afs/
write.c  557 folio = fbatch.folios[i]; in afs_extend_writeback()
726 folio = fbatch.folios[i]; in afs_writepages_region()
/Linux-v6.6/fs/btrfs/tests/
extent-io-tests.c  37 struct folio *folio = fbatch.folios[i]; in process_page_range()
/Linux-v6.6/fs/f2fs/
node.c  1546 struct page *page = &fbatch.folios[i]->page; in last_fsync_dnode()
1772 struct page *page = &fbatch.folios[i]->page; in f2fs_fsync_node_pages()
1919 struct page *page = &fbatch.folios[i]->page; in f2fs_flush_inline_data()
1973 struct page *page = &fbatch.folios[i]->page; in f2fs_sync_node_pages()
checkpoint.c  428 struct folio *folio = fbatch.folios[i]; in f2fs_sync_meta_pages()
432 folio_nr_pages(fbatch.folios[i-1])) { in f2fs_sync_meta_pages()
/Linux-v6.6/fs/btrfs/
extent_io.c  224 struct folio *folio = fbatch.folios[i]; in __process_pages_contig()
275 struct page *page = &fbatch.folios[i]->page; in lock_delalloc_pages()
1909 struct folio *folio = fbatch.folios[i]; in btree_write_cache_pages()
2066 struct folio *folio = fbatch.folios[i]; in extent_write_cache_pages()
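Most of the filesystem hits above (ceph, gfs2, afs, f2fs, btrfs, nilfs2) are instances of a single writeback pattern: walk a mapping's dirty folios in batches with filemap_get_folios_tag(), which advances the index for the caller. A hedged sketch, with start assumed from context and the per-folio writeback elided:

    struct folio_batch fbatch;
    pgoff_t index = start, end = (pgoff_t)-1;
    unsigned int i, nr;

    folio_batch_init(&fbatch);
    while ((nr = filemap_get_folios_tag(mapping, &index, end,
                                        PAGECACHE_TAG_DIRTY, &fbatch))) {
            for (i = 0; i < nr; i++) {
                    struct folio *folio = fbatch.folios[i];
                    /* ... lock, revalidate and write back this folio ... */
            }
            /* Drop the references the lookup took; the batch is reusable. */
            folio_batch_release(&fbatch);
            cond_resched();
    }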
