Lines matching refs:folio in mm/filemap.c (cross-reference listing: each entry shows the source line number, the matched line, and the enclosing function)
125 struct folio *folio, void *shadow) in page_cache_delete() argument
127 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
133 if (!folio_test_hugetlb(folio)) { in page_cache_delete()
134 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
135 nr = folio_nr_pages(folio); in page_cache_delete()
138 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in page_cache_delete()
143 folio->mapping = NULL; in page_cache_delete()
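For orientation, a sketch of page_cache_delete() as it reads in kernels of roughly this vintage (~v6.1), assembled from the matched lines above. The unmatched lines (mapping_set_update(), the xas_store()/xas_init_marks() pair, the nrpages update) are filled in from context and are assumptions, not the authoritative source:

static void page_cache_delete(struct address_space *mapping,
                              struct folio *folio, void *shadow)
{
        XA_STATE(xas, &mapping->i_pages, folio->index);
        long nr = 1;

        mapping_set_update(&xas, mapping);      /* assumed */

        /* hugetlb pages are represented by a single entry in the xarray */
        if (!folio_test_hugetlb(folio)) {
                xas_set_order(&xas, folio->index, folio_order(folio));
                nr = folio_nr_pages(folio);
        }

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

        /* replace the folio (or its whole multi-index range) with the
         * shadow entry, which may be NULL */
        xas_store(&xas, shadow);
        xas_init_marks(&xas);

        folio->mapping = NULL;
        /* leave folio->index set: truncation lookup relies on it */
        mapping->nrpages -= nr;
}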
149 struct folio *folio) in filemap_unaccount_folio() argument
153 VM_BUG_ON_FOLIO(folio_mapped(folio), folio); in filemap_unaccount_folio()
154 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) { in filemap_unaccount_folio()
156 current->comm, folio_pfn(folio)); in filemap_unaccount_folio()
157 dump_page(&folio->page, "still mapped when deleted"); in filemap_unaccount_folio()
161 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
162 int mapcount = page_mapcount(&folio->page); in filemap_unaccount_folio()
164 if (folio_ref_count(folio) >= mapcount + 2) { in filemap_unaccount_folio()
171 page_mapcount_reset(&folio->page); in filemap_unaccount_folio()
172 folio_ref_sub(folio, mapcount); in filemap_unaccount_folio()
178 if (folio_test_hugetlb(folio)) in filemap_unaccount_folio()
181 nr = folio_nr_pages(folio); in filemap_unaccount_folio()
183 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); in filemap_unaccount_folio()
184 if (folio_test_swapbacked(folio)) { in filemap_unaccount_folio()
185 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); in filemap_unaccount_folio()
186 if (folio_test_pmd_mappable(folio)) in filemap_unaccount_folio()
187 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); in filemap_unaccount_folio()
188 } else if (folio_test_pmd_mappable(folio)) { in filemap_unaccount_folio()
189 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); in filemap_unaccount_folio()
207 if (WARN_ON_ONCE(folio_test_dirty(folio) && in filemap_unaccount_folio()
209 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
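The accounting tail of filemap_unaccount_folio() can be reassembled from the matched lines: hugetlb folios are skipped, then the per-node/memcg counters are decremented by the folio's page count. A sketch; the early return, the mapping_can_writeback() condition, and the filemap_nr_thps_dec() call are assumptions filled in from context:

        /* hugetlb folios do not participate in page cache accounting */
        if (folio_test_hugetlb(folio))
                return;

        nr = folio_nr_pages(folio);

        __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        if (folio_test_swapbacked(folio)) {
                __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
                if (folio_test_pmd_mappable(folio))
                        __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
        } else if (folio_test_pmd_mappable(folio)) {
                __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
                filemap_nr_thps_dec(mapping);   /* assumed from context */
        }

        /* a dirty folio at this point indicates an fs or truncation bug */
        if (WARN_ON_ONCE(folio_test_dirty(folio) &&
                         mapping_can_writeback(mapping)))
                folio_account_cleaned(folio, inode_to_wb(mapping->host));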
217 void __filemap_remove_folio(struct folio *folio, void *shadow) in __filemap_remove_folio() argument
219 struct address_space *mapping = folio->mapping; in __filemap_remove_folio()
221 trace_mm_filemap_delete_from_page_cache(folio); in __filemap_remove_folio()
222 filemap_unaccount_folio(mapping, folio); in __filemap_remove_folio()
223 page_cache_delete(mapping, folio, shadow); in __filemap_remove_folio()
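The matched lines happen to cover the whole of __filemap_remove_folio(); put back together it reads:

void __filemap_remove_folio(struct folio *folio, void *shadow)
{
        struct address_space *mapping = folio->mapping;

        trace_mm_filemap_delete_from_page_cache(folio);
        filemap_unaccount_folio(mapping, folio);
        page_cache_delete(mapping, folio, shadow);
}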
226 void filemap_free_folio(struct address_space *mapping, struct folio *folio) in filemap_free_folio() argument
228 void (*free_folio)(struct folio *); in filemap_free_folio()
233 free_folio(folio); in filemap_free_folio()
235 if (folio_test_large(folio) && !folio_test_hugetlb(folio)) in filemap_free_folio()
236 refs = folio_nr_pages(folio); in filemap_free_folio()
237 folio_put_refs(folio, refs); in filemap_free_folio()
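filemap_free_folio() also reconstructs cleanly; only the a_ops lookup and the refs initialisation are unmatched, and both follow from the fragments (wording assumed):

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
        void (*free_folio)(struct folio *);
        int refs = 1;

        free_folio = mapping->a_ops->free_folio;
        if (free_folio)
                free_folio(folio);

        /* a large non-hugetlb folio holds one page cache ref per page */
        if (folio_test_large(folio) && !folio_test_hugetlb(folio))
                refs = folio_nr_pages(folio);
        folio_put_refs(folio, refs);
}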
248 void filemap_remove_folio(struct folio *folio) in filemap_remove_folio() argument
250 struct address_space *mapping = folio->mapping; in filemap_remove_folio()
252 BUG_ON(!folio_test_locked(folio)); in filemap_remove_folio()
255 __filemap_remove_folio(folio, NULL); in filemap_remove_folio()
261 filemap_free_folio(mapping, folio); in filemap_remove_folio()
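filemap_remove_folio() wraps __filemap_remove_folio() in the i_pages/i_lock locking. The lock and unlock lines below name no folio and so were not matched; they are recalled from kernels of this vintage and should be treated as assumptions:

void filemap_remove_folio(struct folio *folio)
{
        struct address_space *mapping = folio->mapping;

        BUG_ON(!folio_test_locked(folio));
        spin_lock(&mapping->host->i_lock);              /* assumed */
        xa_lock_irq(&mapping->i_pages);                 /* assumed */
        __filemap_remove_folio(folio, NULL);
        xa_unlock_irq(&mapping->i_pages);               /* assumed */
        if (mapping_shrinkable(mapping))                /* assumed */
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);            /* assumed */

        filemap_free_folio(mapping, folio);
}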
283 struct folio *folio; in page_cache_delete_batch() local
286 xas_for_each(&xas, folio, ULONG_MAX) { in page_cache_delete_batch()
291 if (xa_is_value(folio)) in page_cache_delete_batch()
300 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
301 VM_BUG_ON_FOLIO(folio->index > in page_cache_delete_batch()
302 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
306 WARN_ON_ONCE(!folio_test_locked(folio)); in page_cache_delete_batch()
308 folio->mapping = NULL; in page_cache_delete_batch()
313 total_pages += folio_nr_pages(folio); in page_cache_delete_batch()
329 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch() local
331 trace_mm_filemap_delete_from_page_cache(folio); in delete_from_page_cache_batch()
332 filemap_unaccount_folio(mapping, folio); in delete_from_page_cache_batch()
635 struct folio *folio; in filemap_range_has_writeback() local
641 xas_for_each(&xas, folio, max) { in filemap_range_has_writeback()
642 if (xas_retry(&xas, folio)) in filemap_range_has_writeback()
644 if (xa_is_value(folio)) in filemap_range_has_writeback()
646 if (folio_test_dirty(folio) || folio_test_locked(folio) || in filemap_range_has_writeback()
647 folio_test_writeback(folio)) in filemap_range_has_writeback()
651 return folio != NULL; in filemap_range_has_writeback()
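filemap_range_has_writeback() walks the XArray under RCU and stops at the first folio that is dirty, locked, or under writeback. A sketch with the unmatched setup lines (XA_STATE, range check, RCU bracketing) filled in as assumptions:

static bool filemap_range_has_writeback(struct address_space *mapping,
                                        loff_t start_byte, loff_t end_byte)
{
        XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
        pgoff_t max = end_byte >> PAGE_SHIFT;
        struct folio *folio;

        if (end_byte < start_byte)
                return false;

        rcu_read_lock();
        xas_for_each(&xas, folio, max) {
                if (xas_retry(&xas, folio))
                        continue;
                if (xa_is_value(folio))         /* shadow/swap entry */
                        continue;
                if (folio_test_dirty(folio) || folio_test_locked(folio) ||
                                folio_test_writeback(folio))
                        break;
        }
        rcu_read_unlock();
        /* non-NULL means the loop broke on a busy folio */
        return folio != NULL;
}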
802 struct folio *fold = page_folio(old); in replace_page_cache_page()
803 struct folio *fnew = page_folio(new); in replace_page_cache_page()
805 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; in replace_page_cache_page()
840 struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp) in __filemap_add_folio() argument
843 int huge = folio_test_hugetlb(folio); in __filemap_add_folio()
847 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in __filemap_add_folio()
848 VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio); in __filemap_add_folio()
852 int error = mem_cgroup_charge(folio, NULL, gfp); in __filemap_add_folio()
853 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); in __filemap_add_folio()
857 xas_set_order(&xas, index, folio_order(folio)); in __filemap_add_folio()
858 nr = folio_nr_pages(folio); in __filemap_add_folio()
862 folio_ref_add(folio, nr); in __filemap_add_folio()
863 folio->mapping = mapping; in __filemap_add_folio()
864 folio->index = xas.xa_index; in __filemap_add_folio()
870 if (order > folio_order(folio)) in __filemap_add_folio()
887 if (order > folio_order(folio)) { in __filemap_add_folio()
895 xas_store(&xas, folio); in __filemap_add_folio()
903 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); in __filemap_add_folio()
904 if (folio_test_pmd_mappable(folio)) in __filemap_add_folio()
905 __lruvec_stat_mod_folio(folio, in __filemap_add_folio()
915 trace_mm_filemap_add_to_page_cache(folio); in __filemap_add_folio()
919 mem_cgroup_uncharge(folio); in __filemap_add_folio()
920 folio->mapping = NULL; in __filemap_add_folio()
922 folio_put_refs(folio, nr); in __filemap_add_folio()
927 int filemap_add_folio(struct address_space *mapping, struct folio *folio, in filemap_add_folio() argument
933 __folio_set_locked(folio); in filemap_add_folio()
934 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); in filemap_add_folio()
936 __folio_clear_locked(folio); in filemap_add_folio()
946 WARN_ON_ONCE(folio_test_active(folio)); in filemap_add_folio()
948 workingset_refault(folio, shadow); in filemap_add_folio()
949 folio_add_lru(folio); in filemap_add_folio()
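The exported wrapper, filemap_add_folio(), locks the folio, adds it, and on success re-activates a refaulting folio before putting it on the LRU. A sketch; the shadow plumbing and the __GFP_WRITE test are assumptions from context:

int filemap_add_folio(struct address_space *mapping, struct folio *folio,
                                pgoff_t index, gfp_t gfp)
{
        void *shadow = NULL;
        int ret;

        __folio_set_locked(folio);
        ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
        if (unlikely(ret))
                __folio_clear_locked(folio);
        else {
                /*
                 * The folio might have been evicted from cache only
                 * recently; if a shadow entry was left behind, treat
                 * the add as a refault.
                 */
                WARN_ON_ONCE(folio_test_active(folio));
                if (!(gfp & __GFP_WRITE) && shadow)     /* assumed */
                        workingset_refault(folio, shadow);
                folio_add_lru(folio);
        }
        return ret;
}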
956 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) in filemap_alloc_folio()
959 struct folio *folio; in filemap_alloc_folio() local
966 folio = __folio_alloc_node(gfp, order, n); in filemap_alloc_folio()
967 } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); in filemap_alloc_folio()
969 return folio; in filemap_alloc_folio()
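filemap_alloc_folio() only has a non-trivial body under CONFIG_NUMA, where it spreads page cache allocations across cpuset memories; a sketch, with the cpuset branch structure recalled as an assumption:

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
        int n;
        struct folio *folio;

        if (cpuset_do_page_mem_spread()) {              /* assumed */
                unsigned int cpuset_mems_cookie;
                do {
                        /* retry if the allowed mems changed underneath us */
                        cpuset_mems_cookie = read_mems_allowed_begin();
                        n = cpuset_mem_spread_node();
                        folio = __folio_alloc_node(gfp, order, n);
                } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));

                return folio;
        }
        return folio_alloc(gfp, order);
}
#endif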
1028 static wait_queue_head_t *folio_waitqueue(struct folio *folio) in folio_waitqueue() argument
1030 return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)]; in folio_waitqueue()
1093 if (test_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1096 if (test_and_set_bit(key->bit_nr, &key->folio->flags)) in wake_page_function()
1128 static void folio_wake_bit(struct folio *folio, int bit_nr) in folio_wake_bit() argument
1130 wait_queue_head_t *q = folio_waitqueue(folio); in folio_wake_bit()
1135 key.folio = folio; in folio_wake_bit()
1170 folio_clear_waiters(folio); in folio_wake_bit()
1175 static void folio_wake(struct folio *folio, int bit) in folio_wake() argument
1177 if (!folio_test_waiters(folio)) in folio_wake()
1179 folio_wake_bit(folio, bit); in folio_wake()
1201 static inline bool folio_trylock_flag(struct folio *folio, int bit_nr, in folio_trylock_flag() argument
1205 if (test_and_set_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1207 } else if (test_bit(bit_nr, &folio->flags)) in folio_trylock_flag()
1217 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr, in folio_wait_bit_common() argument
1220 wait_queue_head_t *q = folio_waitqueue(folio); in folio_wait_bit_common()
1229 !folio_test_uptodate(folio) && folio_test_workingset(folio)) { in folio_wait_bit_common()
1237 wait_page.folio = folio; in folio_wait_bit_common()
1263 folio_set_waiters(folio); in folio_wait_bit_common()
1264 if (!folio_trylock_flag(folio, bit_nr, wait)) in folio_wait_bit_common()
1277 folio_put(folio); in folio_wait_bit_common()
1314 if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0)))) in folio_wait_bit_common()
1381 struct folio *folio = page_folio(pfn_swap_entry_to_page(entry)); in migration_entry_wait_on_locked() local
1383 q = folio_waitqueue(folio); in migration_entry_wait_on_locked()
1384 if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) { in migration_entry_wait_on_locked()
1392 wait_page.folio = folio; in migration_entry_wait_on_locked()
1397 folio_set_waiters(folio); in migration_entry_wait_on_locked()
1398 if (!folio_trylock_flag(folio, PG_locked, wait)) in migration_entry_wait_on_locked()
1438 void folio_wait_bit(struct folio *folio, int bit_nr) in folio_wait_bit() argument
1440 folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); in folio_wait_bit()
1444 int folio_wait_bit_killable(struct folio *folio, int bit_nr) in folio_wait_bit_killable() argument
1446 return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED); in folio_wait_bit_killable()
1463 static int folio_put_wait_locked(struct folio *folio, int state) in folio_put_wait_locked() argument
1465 return folio_wait_bit_common(folio, PG_locked, state, DROP); in folio_put_wait_locked()
1475 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) in folio_add_wait_queue() argument
1477 wait_queue_head_t *q = folio_waitqueue(folio); in folio_add_wait_queue()
1482 folio_set_waiters(folio); in folio_add_wait_queue()
1519 void folio_unlock(struct folio *folio) in folio_unlock() argument
1524 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in folio_unlock()
1525 if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0))) in folio_unlock()
1526 folio_wake_bit(folio, PG_locked); in folio_unlock()
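folio_unlock() is short enough to reconstruct outright. The clear-and-test of the lock byte also observes PG_waiters, so the wake-up path is only taken when someone is actually waiting; the BUILD_BUG_ONs are unmatched and assumed:

void folio_unlock(struct folio *folio)
{
        /* bit 7 allows x86 to check the byte's sign bit */
        BUILD_BUG_ON(PG_waiters != 7);          /* assumed */
        BUILD_BUG_ON(PG_locked > 7);            /* assumed */
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
                folio_wake_bit(folio, PG_locked);
}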
1541 void folio_end_private_2(struct folio *folio) in folio_end_private_2() argument
1543 VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio); in folio_end_private_2()
1544 clear_bit_unlock(PG_private_2, folio_flags(folio, 0)); in folio_end_private_2()
1545 folio_wake_bit(folio, PG_private_2); in folio_end_private_2()
1546 folio_put(folio); in folio_end_private_2()
1556 void folio_wait_private_2(struct folio *folio) in folio_wait_private_2() argument
1558 while (folio_test_private_2(folio)) in folio_wait_private_2()
1559 folio_wait_bit(folio, PG_private_2); in folio_wait_private_2()
1574 int folio_wait_private_2_killable(struct folio *folio) in folio_wait_private_2_killable() argument
1578 while (folio_test_private_2(folio)) { in folio_wait_private_2_killable()
1579 ret = folio_wait_bit_killable(folio, PG_private_2); in folio_wait_private_2_killable()
1592 void folio_end_writeback(struct folio *folio) in folio_end_writeback() argument
1601 if (folio_test_reclaim(folio)) { in folio_end_writeback()
1602 folio_clear_reclaim(folio); in folio_end_writeback()
1603 folio_rotate_reclaimable(folio); in folio_end_writeback()
1612 folio_get(folio); in folio_end_writeback()
1613 if (!__folio_end_writeback(folio)) in folio_end_writeback()
1617 folio_wake(folio, PG_writeback); in folio_end_writeback()
1618 acct_reclaim_writeback(folio); in folio_end_writeback()
1619 folio_put(folio); in folio_end_writeback()
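folio_end_writeback() rotates reclaim-marked folios to the LRU tail, clears PG_writeback, then wakes waiters; the BUG() and the memory barrier are unmatched lines recalled as assumptions:

void folio_end_writeback(struct folio *folio)
{
        /*
         * folio_test_clear_reclaim() would work here but is an atomic
         * op; a plain test-then-clear is cheap and a missed rotation
         * is harmless.
         */
        if (folio_test_reclaim(folio)) {
                folio_clear_reclaim(folio);
                folio_rotate_reclaimable(folio);
        }

        /*
         * Writeback holds no folio reference of its own, so take one
         * across the wake-up to keep the folio alive.
         */
        folio_get(folio);
        if (!__folio_end_writeback(folio))      /* assumed */
                BUG();

        smp_mb__after_atomic();                 /* assumed */
        folio_wake(folio, PG_writeback);
        acct_reclaim_writeback(folio);
        folio_put(folio);
}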
1629 struct folio *folio = page_folio(page); in page_endio() local
1633 folio_mark_uptodate(folio); in page_endio()
1635 folio_clear_uptodate(folio); in page_endio()
1636 folio_set_error(folio); in page_endio()
1638 folio_unlock(folio); in page_endio()
1643 folio_set_error(folio); in page_endio()
1644 mapping = folio_mapping(folio); in page_endio()
1648 folio_end_writeback(folio); in page_endio()
1657 void __folio_lock(struct folio *folio) in __folio_lock() argument
1659 folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, in __folio_lock()
1664 int __folio_lock_killable(struct folio *folio) in __folio_lock_killable() argument
1666 return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE, in __folio_lock_killable()
1671 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) in __folio_lock_async() argument
1673 struct wait_queue_head *q = folio_waitqueue(folio); in __folio_lock_async()
1676 wait->folio = folio; in __folio_lock_async()
1681 folio_set_waiters(folio); in __folio_lock_async()
1682 ret = !folio_trylock(folio); in __folio_lock_async()
1708 bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, in __folio_lock_or_retry() argument
1721 folio_wait_locked_killable(folio); in __folio_lock_or_retry()
1723 folio_wait_locked(folio); in __folio_lock_or_retry()
1729 ret = __folio_lock_killable(folio); in __folio_lock_or_retry()
1735 __folio_lock(folio); in __folio_lock_or_retry()
1848 struct folio *folio; in mapping_get_entry() local
1853 folio = xas_load(&xas); in mapping_get_entry()
1854 if (xas_retry(&xas, folio)) in mapping_get_entry()
1860 if (!folio || xa_is_value(folio)) in mapping_get_entry()
1863 if (!folio_try_get_rcu(folio)) in mapping_get_entry()
1866 if (unlikely(folio != xas_reload(&xas))) { in mapping_get_entry()
1867 folio_put(folio); in mapping_get_entry()
1873 return folio; in mapping_get_entry()
1909 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, in __filemap_get_folio()
1912 struct folio *folio; in __filemap_get_folio() local
1915 folio = mapping_get_entry(mapping, index); in __filemap_get_folio()
1916 if (xa_is_value(folio)) { in __filemap_get_folio()
1918 return folio; in __filemap_get_folio()
1919 folio = NULL; in __filemap_get_folio()
1921 if (!folio) in __filemap_get_folio()
1926 if (!folio_trylock(folio)) { in __filemap_get_folio()
1927 folio_put(folio); in __filemap_get_folio()
1931 folio_lock(folio); in __filemap_get_folio()
1935 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
1936 folio_unlock(folio); in __filemap_get_folio()
1937 folio_put(folio); in __filemap_get_folio()
1940 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); in __filemap_get_folio()
1944 folio_mark_accessed(folio); in __filemap_get_folio()
1947 if (folio_test_idle(folio)) in __filemap_get_folio()
1948 folio_clear_idle(folio); in __filemap_get_folio()
1952 folio_wait_stable(folio); in __filemap_get_folio()
1954 if (!folio && (fgp_flags & FGP_CREAT)) { in __filemap_get_folio()
1965 folio = filemap_alloc_folio(gfp, 0); in __filemap_get_folio()
1966 if (!folio) in __filemap_get_folio()
1974 __folio_set_referenced(folio); in __filemap_get_folio()
1976 err = filemap_add_folio(mapping, folio, index, gfp); in __filemap_get_folio()
1978 folio_put(folio); in __filemap_get_folio()
1979 folio = NULL; in __filemap_get_folio()
1988 if (folio && (fgp_flags & FGP_FOR_MMAP)) in __filemap_get_folio()
1989 folio_unlock(folio); in __filemap_get_folio()
1992 return folio; in __filemap_get_folio()
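As a usage note, a hypothetical caller of __filemap_get_folio() (the names mapping and index are illustrative, not from the listing). With FGP_LOCK | FGP_CREAT the folio comes back locked and referenced, or NULL on failure in kernels of this vintage:

        struct folio *folio;

        /* look up, or allocate, add and lock, the folio at 'index' */
        folio = __filemap_get_folio(mapping, index,
                        FGP_LOCK | FGP_CREAT, mapping_gfp_mask(mapping));
        if (folio) {
                /* ... operate on the locked folio ... */
                folio_unlock(folio);
                folio_put(folio);
        }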
1996 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, in find_get_entry()
1999 struct folio *folio; in find_get_entry() local
2003 folio = xas_find(xas, max); in find_get_entry()
2005 folio = xas_find_marked(xas, max, mark); in find_get_entry()
2007 if (xas_retry(xas, folio)) in find_get_entry()
2014 if (!folio || xa_is_value(folio)) in find_get_entry()
2015 return folio; in find_get_entry()
2017 if (!folio_try_get_rcu(folio)) in find_get_entry()
2020 if (unlikely(folio != xas_reload(xas))) { in find_get_entry()
2021 folio_put(folio); in find_get_entry()
2025 return folio; in find_get_entry()
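find_get_entry() is the common RCU lookup step used by the batched walkers below: find the entry, take a speculative reference, and re-check that the entry did not change underneath us. A sketch; the retry/reset labels are unmatched and assumed:

static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
                xa_mark_t mark)
{
        struct folio *folio;

retry:
        if (mark == XA_PRESENT)
                folio = xas_find(xas, max);
        else
                folio = xas_find_marked(xas, max, mark);

        if (xas_retry(xas, folio))
                goto retry;
        /*
         * A shadow entry of a recently evicted folio, a swap entry
         * from shmem/tmpfs or a DAX entry: return it without trying
         * to raise the refcount.
         */
        if (!folio || xa_is_value(folio))
                return folio;

        if (!folio_try_get_rcu(folio))
                goto reset;

        /* the folio may have been freed and reused while unreferenced */
        if (unlikely(folio != xas_reload(xas))) {
                folio_put(folio);
                goto reset;
        }

        return folio;
reset:
        xas_reset(xas);
        goto retry;
}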
2055 struct folio *folio; in find_get_entries() local
2058 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in find_get_entries()
2060 if (!folio_batch_add(fbatch, folio)) in find_get_entries()
2092 struct folio *folio; in find_lock_entries() local
2095 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { in find_lock_entries()
2096 if (!xa_is_value(folio)) { in find_lock_entries()
2097 if (folio->index < start) in find_lock_entries()
2099 if (folio->index + folio_nr_pages(folio) - 1 > end) in find_lock_entries()
2101 if (!folio_trylock(folio)) in find_lock_entries()
2103 if (folio->mapping != mapping || in find_lock_entries()
2104 folio_test_writeback(folio)) in find_lock_entries()
2106 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), in find_lock_entries()
2107 folio); in find_lock_entries()
2110 if (!folio_batch_add(fbatch, folio)) in find_lock_entries()
2114 folio_unlock(folio); in find_lock_entries()
2116 folio_put(folio); in find_lock_entries()
2148 struct folio *folio; in filemap_get_folios() local
2151 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in filemap_get_folios()
2153 if (xa_is_value(folio)) in filemap_get_folios()
2155 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios()
2156 unsigned long nr = folio_nr_pages(folio); in filemap_get_folios()
2158 if (folio_test_hugetlb(folio)) in filemap_get_folios()
2160 *start = folio->index + nr; in filemap_get_folios()
2183 bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max) in folio_more_pages() argument
2185 if (!folio_test_large(folio) || folio_test_hugetlb(folio)) in folio_more_pages()
2189 return index < folio->index + folio_nr_pages(folio) - 1; in folio_more_pages()
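folio_more_pages() reconstructs almost entirely from the matched lines; only the "index >= max" guard names no folio and is an assumption:

static inline
bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
{
        if (!folio_test_large(folio) || folio_test_hugetlb(folio))
                return false;
        if (index >= max)                       /* assumed */
                return false;
        /* true while there are further pages of this folio below max */
        return index < folio->index + folio_nr_pages(folio) - 1;
}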
2212 struct folio *folio; in filemap_get_folios_contig() local
2216 for (folio = xas_load(&xas); folio && xas.xa_index <= end; in filemap_get_folios_contig()
2217 folio = xas_next(&xas)) { in filemap_get_folios_contig()
2218 if (xas_retry(&xas, folio)) in filemap_get_folios_contig()
2224 if (xa_is_value(folio)) in filemap_get_folios_contig()
2227 if (!folio_try_get_rcu(folio)) in filemap_get_folios_contig()
2230 if (unlikely(folio != xas_reload(&xas))) in filemap_get_folios_contig()
2233 if (!folio_batch_add(fbatch, folio)) { in filemap_get_folios_contig()
2234 nr = folio_nr_pages(folio); in filemap_get_folios_contig()
2236 if (folio_test_hugetlb(folio)) in filemap_get_folios_contig()
2238 *start = folio->index + nr; in filemap_get_folios_contig()
2243 folio_put(folio); in filemap_get_folios_contig()
2253 folio = fbatch->folios[nr - 1]; in filemap_get_folios_contig()
2254 if (folio_test_hugetlb(folio)) in filemap_get_folios_contig()
2255 *start = folio->index + 1; in filemap_get_folios_contig()
2257 *start = folio->index + folio_nr_pages(folio); in filemap_get_folios_contig()
2285 struct folio *folio; in find_get_pages_range_tag() local
2292 while ((folio = find_get_entry(&xas, end, tag))) { in find_get_pages_range_tag()
2298 if (xa_is_value(folio)) in find_get_pages_range_tag()
2301 pages[ret] = &folio->page; in find_get_pages_range_tag()
2303 *index = folio->index + folio_nr_pages(folio); in find_get_pages_range_tag()
2358 struct folio *folio; in filemap_get_read_batch() local
2361 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { in filemap_get_read_batch()
2362 if (xas_retry(&xas, folio)) in filemap_get_read_batch()
2364 if (xas.xa_index > max || xa_is_value(folio)) in filemap_get_read_batch()
2366 if (xa_is_sibling(folio)) in filemap_get_read_batch()
2368 if (!folio_try_get_rcu(folio)) in filemap_get_read_batch()
2371 if (unlikely(folio != xas_reload(&xas))) in filemap_get_read_batch()
2374 if (!folio_batch_add(fbatch, folio)) in filemap_get_read_batch()
2376 if (!folio_test_uptodate(folio)) in filemap_get_read_batch()
2378 if (folio_test_readahead(folio)) in filemap_get_read_batch()
2380 xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1); in filemap_get_read_batch()
2383 folio_put(folio); in filemap_get_read_batch()
2391 struct folio *folio) in filemap_read_folio() argument
2393 bool workingset = folio_test_workingset(folio); in filemap_read_folio()
2402 folio_clear_error(folio); in filemap_read_folio()
2407 error = filler(file, folio); in filemap_read_folio()
2413 error = folio_wait_locked_killable(folio); in filemap_read_folio()
2416 if (folio_test_uptodate(folio)) in filemap_read_folio()
2424 loff_t pos, struct iov_iter *iter, struct folio *folio) in filemap_range_uptodate() argument
2428 if (folio_test_uptodate(folio)) in filemap_range_uptodate()
2435 if (mapping->host->i_blkbits >= folio_shift(folio)) in filemap_range_uptodate()
2439 if (folio_pos(folio) > pos) { in filemap_range_uptodate()
2440 count -= folio_pos(folio) - pos; in filemap_range_uptodate()
2443 pos -= folio_pos(folio); in filemap_range_uptodate()
2446 return mapping->a_ops->is_partially_uptodate(folio, pos, count); in filemap_range_uptodate()
2451 struct folio *folio) in filemap_update_page() argument
2462 if (!folio_trylock(folio)) { in filemap_update_page()
2472 folio_put_wait_locked(folio, TASK_KILLABLE); in filemap_update_page()
2475 error = __folio_lock_async(folio, iocb->ki_waitq); in filemap_update_page()
2481 if (!folio->mapping) in filemap_update_page()
2485 if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, folio)) in filemap_update_page()
2493 folio); in filemap_update_page()
2496 folio_unlock(folio); in filemap_update_page()
2500 folio_put(folio); in filemap_update_page()
2508 struct folio *folio; in filemap_create_folio() local
2511 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); in filemap_create_folio()
2512 if (!folio) in filemap_create_folio()
2529 error = filemap_add_folio(mapping, folio, index, in filemap_create_folio()
2536 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_create_folio()
2541 folio_batch_add(fbatch, folio); in filemap_create_folio()
2545 folio_put(folio); in filemap_create_folio()
2550 struct address_space *mapping, struct folio *folio, in filemap_readahead() argument
2553 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); in filemap_readahead()
2557 page_cache_async_ra(&ractl, folio, last_index - folio->index); in filemap_readahead()
2569 struct folio *folio; in filemap_get_pages() local
2595 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; in filemap_get_pages()
2596 if (folio_test_readahead(folio)) { in filemap_get_pages()
2597 err = filemap_readahead(iocb, filp, mapping, folio, last_index); in filemap_get_pages()
2601 if (!folio_test_uptodate(folio)) { in filemap_get_pages()
2605 err = filemap_update_page(iocb, mapping, iter, folio); in filemap_get_pages()
2613 folio_put(folio); in filemap_get_pages()
2621 static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) in pos_same_folio() argument
2623 unsigned int shift = folio_shift(folio); in pos_same_folio()
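pos_same_folio() reduces two file positions to folio granularity and compares them; the comparison line itself is unmatched and assumed:

static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
{
        unsigned int shift = folio_shift(folio);

        /* same folio iff the positions agree above the folio-size bits */
        return (pos1 >> shift == pos2 >> shift);
}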
2707 struct folio *folio = fbatch.folios[i]; in filemap_read() local
2708 size_t fsize = folio_size(folio); in filemap_read()
2714 if (end_offset < folio_pos(folio)) in filemap_read()
2717 folio_mark_accessed(folio); in filemap_read()
2724 flush_dcache_folio(folio); in filemap_read()
2726 copied = copy_folio_to_iter(folio, offset, bytes, iter); in filemap_read()
2826 struct address_space *mapping, struct folio *folio, in folio_seek_hole_data() argument
2832 if (xa_is_value(folio) || folio_test_uptodate(folio)) in folio_seek_hole_data()
2839 folio_lock(folio); in folio_seek_hole_data()
2840 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
2843 offset = offset_in_folio(folio, start) & ~(bsz - 1); in folio_seek_hole_data()
2846 if (ops->is_partially_uptodate(folio, offset, bsz) == in folio_seek_hole_data()
2851 } while (offset < folio_size(folio)); in folio_seek_hole_data()
2853 folio_unlock(folio); in folio_seek_hole_data()
2858 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) in seek_folio_size() argument
2860 if (xa_is_value(folio)) in seek_folio_size()
2862 return folio_size(folio); in seek_folio_size()
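In seek_folio_size(), the shadow-entry branch (unmatched, assumed) derives the size from the XArray entry's order rather than from a folio:

static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
{
        if (xa_is_value(folio))
                return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
        return folio_size(folio);
}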
2889 struct folio *folio; in mapping_seek_hole_data() local
2895 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { in mapping_seek_hole_data()
2905 seek_size = seek_folio_size(&xas, folio); in mapping_seek_hole_data()
2907 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, in mapping_seek_hole_data()
2915 if (!xa_is_value(folio)) in mapping_seek_hole_data()
2916 folio_put(folio); in mapping_seek_hole_data()
2922 if (folio && !xa_is_value(folio)) in mapping_seek_hole_data()
2923 folio_put(folio); in mapping_seek_hole_data()
2943 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio, in lock_folio_maybe_drop_mmap() argument
2946 if (folio_trylock(folio)) in lock_folio_maybe_drop_mmap()
2959 if (__folio_lock_killable(folio)) { in lock_folio_maybe_drop_mmap()
2971 __folio_lock(folio); in lock_folio_maybe_drop_mmap()
3053 struct folio *folio) in do_async_mmap_readahead() argument
3069 if (folio_test_readahead(folio)) { in do_async_mmap_readahead()
3071 page_cache_async_ra(&ractl, folio, ra->ra_pages); in do_async_mmap_readahead()
3107 struct folio *folio; in filemap_fault() local
3118 folio = filemap_get_folio(mapping, index); in filemap_fault()
3119 if (likely(folio)) { in filemap_fault()
3125 fpin = do_async_mmap_readahead(vmf, folio); in filemap_fault()
3126 if (unlikely(!folio_test_uptodate(folio))) { in filemap_fault()
3145 folio = __filemap_get_folio(mapping, index, in filemap_fault()
3148 if (!folio) { in filemap_fault()
3156 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) in filemap_fault()
3160 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
3161 folio_unlock(folio); in filemap_fault()
3162 folio_put(folio); in filemap_fault()
3165 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio); in filemap_fault()
3171 if (unlikely(!folio_test_uptodate(folio))) { in filemap_fault()
3179 folio_unlock(folio); in filemap_fault()
3180 folio_put(folio); in filemap_fault()
3192 folio_unlock(folio); in filemap_fault()
3204 folio_unlock(folio); in filemap_fault()
3205 folio_put(folio); in filemap_fault()
3209 vmf->page = folio_file_page(folio, index); in filemap_fault()
3220 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_fault()
3223 folio_put(folio); in filemap_fault()
3237 if (folio) in filemap_fault()
3238 folio_put(folio); in filemap_fault()
3280 static struct folio *next_uptodate_page(struct folio *folio, in next_uptodate_page() argument
3287 if (!folio) in next_uptodate_page()
3289 if (xas_retry(xas, folio)) in next_uptodate_page()
3291 if (xa_is_value(folio)) in next_uptodate_page()
3293 if (folio_test_locked(folio)) in next_uptodate_page()
3295 if (!folio_try_get_rcu(folio)) in next_uptodate_page()
3298 if (unlikely(folio != xas_reload(xas))) in next_uptodate_page()
3300 if (!folio_test_uptodate(folio) || folio_test_readahead(folio)) in next_uptodate_page()
3302 if (!folio_trylock(folio)) in next_uptodate_page()
3304 if (folio->mapping != mapping) in next_uptodate_page()
3306 if (!folio_test_uptodate(folio)) in next_uptodate_page()
3311 return folio; in next_uptodate_page()
3313 folio_unlock(folio); in next_uptodate_page()
3315 folio_put(folio); in next_uptodate_page()
3316 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_page()
3321 static inline struct folio *first_map_page(struct address_space *mapping, in first_map_page()
3329 static inline struct folio *next_map_page(struct address_space *mapping, in next_map_page()
3346 struct folio *folio; in filemap_map_pages() local
3352 folio = first_map_page(mapping, &xas, end_pgoff); in filemap_map_pages()
3353 if (!folio) in filemap_map_pages()
3356 if (filemap_map_pmd(vmf, &folio->page)) { in filemap_map_pages()
3365 page = folio_file_page(folio, xas.xa_index); in filemap_map_pages()
3391 if (folio_more_pages(folio, xas.xa_index, end_pgoff)) { in filemap_map_pages()
3393 folio_ref_inc(folio); in filemap_map_pages()
3396 folio_unlock(folio); in filemap_map_pages()
3399 if (folio_more_pages(folio, xas.xa_index, end_pgoff)) { in filemap_map_pages()
3403 folio_unlock(folio); in filemap_map_pages()
3404 folio_put(folio); in filemap_map_pages()
3405 } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL); in filemap_map_pages()
3417 struct folio *folio = page_folio(vmf->page); in filemap_page_mkwrite() local
3422 folio_lock(folio); in filemap_page_mkwrite()
3423 if (folio->mapping != mapping) { in filemap_page_mkwrite()
3424 folio_unlock(folio); in filemap_page_mkwrite()
3433 folio_mark_dirty(folio); in filemap_page_mkwrite()
3434 folio_wait_stable(folio); in filemap_page_mkwrite()
3487 static struct folio *do_read_cache_folio(struct address_space *mapping, in do_read_cache_folio()
3490 struct folio *folio; in do_read_cache_folio() local
3496 folio = filemap_get_folio(mapping, index); in do_read_cache_folio()
3497 if (!folio) { in do_read_cache_folio()
3498 folio = filemap_alloc_folio(gfp, 0); in do_read_cache_folio()
3499 if (!folio) in do_read_cache_folio()
3501 err = filemap_add_folio(mapping, folio, index, gfp); in do_read_cache_folio()
3503 folio_put(folio); in do_read_cache_folio()
3512 if (folio_test_uptodate(folio)) in do_read_cache_folio()
3515 if (!folio_trylock(folio)) { in do_read_cache_folio()
3516 folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE); in do_read_cache_folio()
3521 if (!folio->mapping) { in do_read_cache_folio()
3522 folio_unlock(folio); in do_read_cache_folio()
3523 folio_put(folio); in do_read_cache_folio()
3528 if (folio_test_uptodate(folio)) { in do_read_cache_folio()
3529 folio_unlock(folio); in do_read_cache_folio()
3534 err = filemap_read_folio(file, filler, folio); in do_read_cache_folio()
3536 folio_put(folio); in do_read_cache_folio()
3543 folio_mark_accessed(folio); in do_read_cache_folio()
3544 return folio; in do_read_cache_folio()
3563 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, in read_cache_folio()
3574 struct folio *folio; in do_read_cache_page() local
3576 folio = do_read_cache_folio(mapping, index, filler, file, gfp); in do_read_cache_page()
3577 if (IS_ERR(folio)) in do_read_cache_page()
3578 return &folio->page; in do_read_cache_page()
3579 return folio_file_page(folio, index); in do_read_cache_page()
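do_read_cache_page() is a thin page-returning wrapper over do_read_cache_folio(); only the signature line is unmatched and recalled as an assumption:

static struct page *do_read_cache_page(struct address_space *mapping,
                pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
{
        struct folio *folio;

        folio = do_read_cache_folio(mapping, index, filler, file, gfp);
        if (IS_ERR(folio))
                return &folio->page;    /* propagate the ERR_PTR */
        return folio_file_page(folio, index);
}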
3939 bool filemap_release_folio(struct folio *folio, gfp_t gfp) in filemap_release_folio() argument
3941 struct address_space * const mapping = folio->mapping; in filemap_release_folio()
3943 BUG_ON(!folio_test_locked(folio)); in filemap_release_folio()
3944 if (folio_test_writeback(folio)) in filemap_release_folio()
3948 return mapping->a_ops->release_folio(folio, gfp); in filemap_release_folio()
3949 return try_to_free_buffers(folio); in filemap_release_folio()
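Finally, filemap_release_folio() reassembles as below; the "return false" on writeback and the release_folio guard are unmatched lines filled in from context as assumptions:

bool filemap_release_folio(struct folio *folio, gfp_t gfp)
{
        struct address_space * const mapping = folio->mapping;

        BUG_ON(!folio_test_locked(folio));
        /* cannot strip private state while writeback is in flight */
        if (folio_test_writeback(folio))
                return false;                   /* assumed */

        if (mapping && mapping->a_ops->release_folio)   /* assumed */
                return mapping->a_ops->release_folio(folio, gfp);
        return try_to_free_buffers(folio);
}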