Lines Matching full:page (mm/filemap.c)

65  * finished 'unifying' the page and buffer cache and SMP-threaded the
66 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
125 struct page *page, void *shadow) in page_cache_delete() argument
127 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
133 if (!PageHuge(page)) { in page_cache_delete()
134 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
135 nr = compound_nr(page); in page_cache_delete()
138 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_delete()
139 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_delete()
140 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
145 page->mapping = NULL; in page_cache_delete()
146 /* Leave page->index set: truncation lookup relies upon it */ in page_cache_delete()
151 struct page *page) in unaccount_page_cache_page() argument
158 * stale data around in the cleancache once our page is gone in unaccount_page_cache_page()
160 if (PageUptodate(page) && PageMappedToDisk(page)) in unaccount_page_cache_page()
161 cleancache_put_page(page); in unaccount_page_cache_page()
163 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
165 VM_BUG_ON_PAGE(PageTail(page), page); in unaccount_page_cache_page()
166 VM_BUG_ON_PAGE(page_mapped(page), page); in unaccount_page_cache_page()
167 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { in unaccount_page_cache_page()
170 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", in unaccount_page_cache_page()
171 current->comm, page_to_pfn(page)); in unaccount_page_cache_page()
172 dump_page(page, "still mapped when deleted"); in unaccount_page_cache_page()
176 mapcount = page_mapcount(page); in unaccount_page_cache_page()
178 page_count(page) >= mapcount + 2) { in unaccount_page_cache_page()
181 * a good bet that actually the page is unmapped, in unaccount_page_cache_page()
183 * some other bad page check should catch it later. in unaccount_page_cache_page()
185 page_mapcount_reset(page); in unaccount_page_cache_page()
186 page_ref_sub(page, mapcount); in unaccount_page_cache_page()
190 /* hugetlb pages do not participate in page cache accounting. */ in unaccount_page_cache_page()
191 if (PageHuge(page)) in unaccount_page_cache_page()
194 nr = thp_nr_pages(page); in unaccount_page_cache_page()
196 __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr); in unaccount_page_cache_page()
197 if (PageSwapBacked(page)) { in unaccount_page_cache_page()
198 __mod_lruvec_page_state(page, NR_SHMEM, -nr); in unaccount_page_cache_page()
199 if (PageTransHuge(page)) in unaccount_page_cache_page()
200 __mod_lruvec_page_state(page, NR_SHMEM_THPS, -nr); in unaccount_page_cache_page()
201 } else if (PageTransHuge(page)) { in unaccount_page_cache_page()
202 __mod_lruvec_page_state(page, NR_FILE_THPS, -nr); in unaccount_page_cache_page()
207 * At this point page must be either written or cleaned by in unaccount_page_cache_page()
208 * truncate. Dirty page here signals a bug and loss of in unaccount_page_cache_page()
211 * This fixes dirty accounting after removing the page entirely in unaccount_page_cache_page()
213 * page and anyway will be cleared before returning page into in unaccount_page_cache_page()
216 if (WARN_ON_ONCE(PageDirty(page))) in unaccount_page_cache_page()
217 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
221 * Delete a page from the page cache and free it. Caller has to make
222 * sure the page is locked and that nobody else uses it - or that usage
225 void __delete_from_page_cache(struct page *page, void *shadow) in __delete_from_page_cache() argument
227 struct address_space *mapping = page->mapping; in __delete_from_page_cache()
229 trace_mm_filemap_delete_from_page_cache(page); in __delete_from_page_cache()
231 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
232 page_cache_delete(mapping, page, shadow); in __delete_from_page_cache()
236 struct page *page) in page_cache_free_page() argument
238 void (*freepage)(struct page *); in page_cache_free_page()
242 freepage(page); in page_cache_free_page()
244 if (PageTransHuge(page) && !PageHuge(page)) { in page_cache_free_page()
245 page_ref_sub(page, thp_nr_pages(page)); in page_cache_free_page()
246 VM_BUG_ON_PAGE(page_count(page) <= 0, page); in page_cache_free_page()
248 put_page(page); in page_cache_free_page()
253 * delete_from_page_cache - delete page from page cache
254 * @page: the page which the kernel is trying to remove from page cache
256 * This must be called only on pages that have been verified to be in the page
257 * cache and locked. It will never put the page into the free list, the caller
258 * has a reference on the page.
260 void delete_from_page_cache(struct page *page) in delete_from_page_cache() argument
262 struct address_space *mapping = page_mapping(page); in delete_from_page_cache()
264 BUG_ON(!PageLocked(page)); in delete_from_page_cache()
266 __delete_from_page_cache(page, NULL); in delete_from_page_cache()
269 page_cache_free_page(mapping, page); in delete_from_page_cache()
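The kernel-doc fragment above states the contract of delete_from_page_cache(): the page must already be verified to be in the cache and locked, and the caller keeps its own reference. A minimal sketch of a caller honouring that contract (the helper name is hypothetical, not from this file):

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical helper: remove a page we looked up and locked ourselves. */
static void example_remove_cached_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	delete_from_page_cache(page);	/* drops the page cache's reference */
	unlock_page(page);
	put_page(page);			/* drop the reference the caller still holds */
}
```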
274 * page_cache_delete_batch - delete several pages from page cache
279 * from the mapping. The function expects @pvec to be sorted by page index
293 struct page *page; in page_cache_delete_batch() local
296 xas_for_each(&xas, page, ULONG_MAX) { in page_cache_delete_batch()
301 if (xa_is_value(page)) in page_cache_delete_batch()
304 * A page got inserted in our range? Skip it. We have our in page_cache_delete_batch()
306 * If we see a page whose index is higher than ours, it in page_cache_delete_batch()
307 * means our page has been removed, which shouldn't be in page_cache_delete_batch()
310 if (page != pvec->pages[i]) { in page_cache_delete_batch()
311 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, in page_cache_delete_batch()
312 page); in page_cache_delete_batch()
316 WARN_ON_ONCE(!PageLocked(page)); in page_cache_delete_batch()
318 if (page->index == xas.xa_index) in page_cache_delete_batch()
319 page->mapping = NULL; in page_cache_delete_batch()
320 /* Leave page->index set: truncation lookup relies on it */ in page_cache_delete_batch()
323 * Move to the next page in the vector if this is a regular in page_cache_delete_batch()
324 * page or the index is of the last sub-page of this compound in page_cache_delete_batch()
325 * page. in page_cache_delete_batch()
327 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
418 * these two operations is that if a dirty page/buffer is encountered, it must
471 * filemap_range_has_page - check if a page exists in range.
476 * Find at least one page in the range supplied, usually used to check if
479 * Return: %true if at least one page exists in the specified range,
485 struct page *page; in filemap_range_has_page() local
494 page = xas_find(&xas, max); in filemap_range_has_page()
495 if (xas_retry(&xas, page)) in filemap_range_has_page()
498 if (xa_is_value(page)) in filemap_range_has_page()
501 * We don't need to try to pin this page; we're about to in filemap_range_has_page()
503 * there was a page here recently. in filemap_range_has_page()
509 return page != NULL; in filemap_range_has_page()
534 struct page *page = pvec.pages[i]; in __filemap_fdatawait_range() local
536 wait_on_page_writeback(page); in __filemap_fdatawait_range()
537 ClearPageError(page); in __filemap_fdatawait_range()
648 * Find at least one page in the range supplied, usually used to check if
654 * doing O_DIRECT to a page in this range, %false otherwise.
661 struct page *page; in filemap_range_needs_writeback() local
672 xas_for_each(&xas, page, max) { in filemap_range_needs_writeback()
673 if (xas_retry(&xas, page)) in filemap_range_needs_writeback()
675 if (xa_is_value(page)) in filemap_range_needs_writeback()
677 if (PageDirty(page) || PageLocked(page) || PageWriteback(page)) in filemap_range_needs_writeback()
681 return page != NULL; in filemap_range_needs_writeback()
824 * replace_page_cache_page - replace a pagecache page with a new one
825 * @old: page to be replaced
826 * @new: page to replace with
828 * This function replaces a page in the pagecache with a new one. On
829 * success it acquires the pagecache reference for the new page and
830 * drops it for the old page. Both the old and new pages must be
831 * locked. This function does not add the new page to the LRU, the
836 void replace_page_cache_page(struct page *old, struct page *new) in replace_page_cache_page()
839 void (*freepage)(struct page *) = mapping->a_ops->freepage; in replace_page_cache_page()
857 /* hugetlb pages do not participate in page cache accounting. */ in replace_page_cache_page()
873 noinline int __add_to_page_cache_locked(struct page *page, in __add_to_page_cache_locked() argument
879 int huge = PageHuge(page); in __add_to_page_cache_locked()
883 VM_BUG_ON_PAGE(!PageLocked(page), page); in __add_to_page_cache_locked()
884 VM_BUG_ON_PAGE(PageSwapBacked(page), page); in __add_to_page_cache_locked()
887 get_page(page); in __add_to_page_cache_locked()
888 page->mapping = mapping; in __add_to_page_cache_locked()
889 page->index = offset; in __add_to_page_cache_locked()
892 error = mem_cgroup_charge(page, NULL, gfp); in __add_to_page_cache_locked()
904 if (order > thp_order(page)) in __add_to_page_cache_locked()
921 if (order > thp_order(page)) { in __add_to_page_cache_locked()
927 xas_store(&xas, page); in __add_to_page_cache_locked()
933 /* hugetlb pages do not participate in page cache accounting */ in __add_to_page_cache_locked()
935 __inc_lruvec_page_state(page, NR_FILE_PAGES); in __add_to_page_cache_locked()
943 mem_cgroup_uncharge(page); in __add_to_page_cache_locked()
947 trace_mm_filemap_add_to_page_cache(page); in __add_to_page_cache_locked()
950 page->mapping = NULL; in __add_to_page_cache_locked()
951 /* Leave page->index set: truncation relies upon it */ in __add_to_page_cache_locked()
952 put_page(page); in __add_to_page_cache_locked()
958 * add_to_page_cache_locked - add a locked page to the pagecache
959 * @page: page to add
960 * @mapping: the page's address_space
961 * @offset: page index
962 * @gfp_mask: page allocation mode
964 * This function is used to add a page to the pagecache. It must be locked.
965 * This function does not add the page to the LRU. The caller must do that.
969 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
972 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
977 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
983 __SetPageLocked(page); in add_to_page_cache_lru()
984 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
987 __ClearPageLocked(page); in add_to_page_cache_lru()
990 * The page might have been evicted from cache only in add_to_page_cache_lru()
992 * any other repeatedly accessed page. in add_to_page_cache_lru()
997 WARN_ON_ONCE(PageActive(page)); in add_to_page_cache_lru()
999 workingset_refault(page, shadow); in add_to_page_cache_lru()
1000 lru_cache_add(page); in add_to_page_cache_lru()
1007 struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc()
1010 struct page *page; in __page_cache_alloc() local
1017 page = __alloc_pages_node(n, gfp, 0); in __page_cache_alloc()
1018 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); in __page_cache_alloc()
1020 return page; in __page_cache_alloc()
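__page_cache_alloc() and add_to_page_cache_lru() are the usual pair for populating a missing index: the latter locks the new page, charges and inserts it, and puts it on the LRU. A hedged sketch (helper name and error handling are illustrative):

```c
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

/*
 * Allocate a page and insert it at @index.  On success the page is
 * returned locked, on the LRU, and with a reference held for us.
 */
static struct page *example_add_new_page(struct address_space *mapping,
					 pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping);
	struct page *page = __page_cache_alloc(gfp);
	int err;

	if (!page)
		return ERR_PTR(-ENOMEM);

	err = add_to_page_cache_lru(page, mapping, index, gfp);
	if (err) {
		put_page(page);		/* e.g. -EEXIST: someone beat us to it */
		return ERR_PTR(err);
	}
	return page;
}
```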
1071 * sure the appropriate page became available, this saves space
1079 static wait_queue_head_t *page_waitqueue(struct page *page) in page_waitqueue() argument
1081 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)]; in page_waitqueue()
1095 * The page wait code treats the "wait->flags" somewhat unusually, because
1144 if (test_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1147 if (test_and_set_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1179 static void wake_up_page_bit(struct page *page, int bit_nr) in wake_up_page_bit() argument
1181 wait_queue_head_t *q = page_waitqueue(page); in wake_up_page_bit()
1186 key.page = page; in wake_up_page_bit()
1213 * hash, so in that case check for a page match. That prevents a long- in wake_up_page_bit()
1216 * It is still possible to miss a case here, when we woke page waiters in wake_up_page_bit()
1218 * page waiters. in wake_up_page_bit()
1221 ClearPageWaiters(page); in wake_up_page_bit()
1224 * our page waiters, but the hashed waitqueue has waiters for in wake_up_page_bit()
1233 static void wake_up_page(struct page *page, int bit) in wake_up_page() argument
1235 if (!PageWaiters(page)) in wake_up_page()
1237 wake_up_page_bit(page, bit); in wake_up_page()
1244 EXCLUSIVE, /* Hold ref to page and take the bit when woken, like
1247 SHARED, /* Hold ref to page and check the bit when woken, like
1250 DROP, /* Drop ref to page before wait, no check when woken,
1256 * Attempt to check (or get) the page bit, and mark us done
1259 static inline bool trylock_page_bit_common(struct page *page, int bit_nr, in trylock_page_bit_common() argument
1263 if (test_and_set_bit(bit_nr, &page->flags)) in trylock_page_bit_common()
1265 } else if (test_bit(bit_nr, &page->flags)) in trylock_page_bit_common()
1276 struct page *page, int bit_nr, int state, enum behavior behavior) in wait_on_page_bit_common() argument
1286 !PageUptodate(page) && PageWorkingset(page)) { in wait_on_page_bit_common()
1287 if (!PageSwapBacked(page)) { in wait_on_page_bit_common()
1297 wait_page.page = page; in wait_on_page_bit_common()
1310 * page bit synchronously. in wait_on_page_bit_common()
1316 * page queue), and add ourselves to the wait in wait_on_page_bit_common()
1323 SetPageWaiters(page); in wait_on_page_bit_common()
1324 if (!trylock_page_bit_common(page, bit_nr, wait)) in wait_on_page_bit_common()
1331 * see whether the page bit testing has already in wait_on_page_bit_common()
1334 * We can drop our reference to the page. in wait_on_page_bit_common()
1337 put_page(page); in wait_on_page_bit_common()
1374 if (unlikely(test_and_set_bit(bit_nr, &page->flags))) in wait_on_page_bit_common()
1414 void wait_on_page_bit(struct page *page, int bit_nr) in wait_on_page_bit() argument
1416 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit()
1417 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); in wait_on_page_bit()
1421 int wait_on_page_bit_killable(struct page *page, int bit_nr) in wait_on_page_bit_killable() argument
1423 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit_killable()
1424 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); in wait_on_page_bit_killable()
1430 * @page: The page to wait for.
1433 * The caller should hold a reference on @page. They expect the page to
1435 * (for example) by holding the reference while waiting for the page to
1437 * dereference @page.
1439 * Return: 0 if the page was unlocked or -EINTR if interrupted by a signal.
1441 int put_and_wait_on_page_locked(struct page *page, int state) in put_and_wait_on_page_locked() argument
1445 page = compound_head(page); in put_and_wait_on_page_locked()
1446 q = page_waitqueue(page); in put_and_wait_on_page_locked()
1447 return wait_on_page_bit_common(q, page, PG_locked, state, DROP); in put_and_wait_on_page_locked()
1451 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
1452 * @page: Page defining the wait queue of interest
1455 * Add an arbitrary @waiter to the wait queue for the nominated @page.
1457 void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) in add_page_wait_queue() argument
1459 wait_queue_head_t *q = page_waitqueue(page); in add_page_wait_queue()
1464 SetPageWaiters(page); in add_page_wait_queue()
1493 * unlock_page - unlock a locked page
1494 * @page: the page
1496 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
1507 void unlock_page(struct page *page) in unlock_page() argument
1510 page = compound_head(page); in unlock_page()
1511 VM_BUG_ON_PAGE(!PageLocked(page), page); in unlock_page()
1512 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) in unlock_page()
1513 wake_up_page_bit(page, PG_locked); in unlock_page()
1519 * @page: The page
1521 * Clear the PG_private_2 bit on a page and wake up any sleepers waiting for
1522 * this. The page ref held for PG_private_2 being set is released.
1524 * This is, for example, used when a netfs page is being written to a local
1525 * disk cache, thereby allowing writes to the cache for the same page to be
1528 void end_page_private_2(struct page *page) in end_page_private_2() argument
1530 page = compound_head(page); in end_page_private_2()
1531 VM_BUG_ON_PAGE(!PagePrivate2(page), page); in end_page_private_2()
1532 clear_bit_unlock(PG_private_2, &page->flags); in end_page_private_2()
1533 wake_up_page_bit(page, PG_private_2); in end_page_private_2()
1534 put_page(page); in end_page_private_2()
1539 * wait_on_page_private_2 - Wait for PG_private_2 to be cleared on a page
1540 * @page: The page to wait on
1542 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a page.
1544 void wait_on_page_private_2(struct page *page) in wait_on_page_private_2() argument
1546 page = compound_head(page); in wait_on_page_private_2()
1547 while (PagePrivate2(page)) in wait_on_page_private_2()
1548 wait_on_page_bit(page, PG_private_2); in wait_on_page_private_2()
1553 * wait_on_page_private_2_killable - Wait for PG_private_2 to be cleared on a page
1554 * @page: The page to wait on
1556 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a page or until a
1563 int wait_on_page_private_2_killable(struct page *page) in wait_on_page_private_2_killable() argument
1567 page = compound_head(page); in wait_on_page_private_2_killable()
1568 while (PagePrivate2(page)) { in wait_on_page_private_2_killable()
1569 ret = wait_on_page_bit_killable(page, PG_private_2); in wait_on_page_private_2_killable()
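end_page_private_2() and the two waiters above implement the PG_private_2 ("fscache") convention: a reference is held while the bit is set, and completion clears the bit, wakes waiters and drops that reference. A sketch of how a network filesystem might bracket a cache write with it (function names are hypothetical; this is roughly the pattern the netfs helpers wrap):

```c
#include <linux/pagemap.h>
#include <linux/page-flags.h>

/* Mark @page as being written to the local cache: take a ref, set PG_private_2. */
static void example_begin_cache_write(struct page *page)
{
	get_page(page);
	SetPagePrivate2(page);
	/* ...submit the asynchronous write to the cache backend here... */
}

/* Completion side: clears PG_private_2, wakes waiters, drops the ref. */
static void example_end_cache_write(struct page *page)
{
	end_page_private_2(page);
}
```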
1579 * end_page_writeback - end writeback against a page
1580 * @page: the page
1582 void end_page_writeback(struct page *page) in end_page_writeback() argument
1587 * shuffle a page marked for immediate reclaim is too mild to in end_page_writeback()
1589 * every page writeback. in end_page_writeback()
1591 if (PageReclaim(page)) { in end_page_writeback()
1592 ClearPageReclaim(page); in end_page_writeback()
1593 rotate_reclaimable_page(page); in end_page_writeback()
1597 * Writeback does not hold a page reference of its own, relying in end_page_writeback()
1599 * But here we must make sure that the page is not freed and in end_page_writeback()
1602 get_page(page); in end_page_writeback()
1603 if (!test_clear_page_writeback(page)) in end_page_writeback()
1607 wake_up_page(page, PG_writeback); in end_page_writeback()
1608 put_page(page); in end_page_writeback()
1613 * After completing I/O on a page, call this routine to update the page
1616 void page_endio(struct page *page, bool is_write, int err) in page_endio() argument
1620 SetPageUptodate(page); in page_endio()
1622 ClearPageUptodate(page); in page_endio()
1623 SetPageError(page); in page_endio()
1625 unlock_page(page); in page_endio()
1630 SetPageError(page); in page_endio()
1631 mapping = page_mapping(page); in page_endio()
1635 end_page_writeback(page); in page_endio()
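page_endio() is the per-page completion helper for simple block-backed I/O: it sets or clears Uptodate and unlocks on reads, or ends writeback on writes. A sketch of a bio end_io callback using it, loosely modelled on mpage_end_io() (the function name is illustrative):

```c
#include <linux/bio.h>
#include <linux/pagemap.h>

/* Complete every page attached to @bio, read or write. */
static void example_end_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;
	int err = blk_status_to_errno(bio->bi_status);
	bool is_write = op_is_write(bio_op(bio));

	bio_for_each_segment_all(bvec, bio, iter_all)
		page_endio(bvec->bv_page, is_write, err);

	bio_put(bio);
}
```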
1641 * __lock_page - get a lock on the page, assuming we need to sleep to get it
1642 * @__page: the page to lock
1644 void __lock_page(struct page *__page) in __lock_page()
1646 struct page *page = compound_head(__page); in __lock_page() local
1647 wait_queue_head_t *q = page_waitqueue(page); in __lock_page()
1648 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, in __lock_page()
1653 int __lock_page_killable(struct page *__page) in __lock_page_killable()
1655 struct page *page = compound_head(__page); in __lock_page_killable() local
1656 wait_queue_head_t *q = page_waitqueue(page); in __lock_page_killable()
1657 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, in __lock_page_killable()
1662 int __lock_page_async(struct page *page, struct wait_page_queue *wait) in __lock_page_async() argument
1664 struct wait_queue_head *q = page_waitqueue(page); in __lock_page_async()
1667 wait->page = page; in __lock_page_async()
1672 SetPageWaiters(page); in __lock_page_async()
1673 ret = !trylock_page(page); in __lock_page_async()
1690 * 1 - page is locked; mmap_lock is still held.
1691 * 0 - page is not locked.
1697 * with the page locked and the mmap_lock unperturbed.
1699 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, in __lock_page_or_retry() argument
1712 wait_on_page_locked_killable(page); in __lock_page_or_retry()
1714 wait_on_page_locked(page); in __lock_page_or_retry()
1720 ret = __lock_page_killable(page); in __lock_page_or_retry()
1726 __lock_page(page); in __lock_page_or_retry()
1733 * page_cache_next_miss() - Find the next gap in the page cache.
1769 * page_cache_prev_miss() - Find the previous gap in the page cache.
1805 * mapping_get_entry - Get a page cache entry.
1807 * @index: The page cache index.
1809 * Looks up the page cache slot at @mapping & @index. If there is a
1810 * page cache page, the head page is returned with an increased refcount.
1812 * If the slot holds a shadow entry of a previously evicted page, or a
1815 * Return: The head page or shadow entry, %NULL if nothing is found.
1817 static struct page *mapping_get_entry(struct address_space *mapping, in mapping_get_entry()
1821 struct page *page; in mapping_get_entry() local
1826 page = xas_load(&xas); in mapping_get_entry()
1827 if (xas_retry(&xas, page)) in mapping_get_entry()
1830 * A shadow entry of a recently evicted page, or a swap entry from in mapping_get_entry()
1831 * shmem/tmpfs. Return it without attempting to raise page count. in mapping_get_entry()
1833 if (!page || xa_is_value(page)) in mapping_get_entry()
1836 if (!page_cache_get_speculative(page)) in mapping_get_entry()
1840 * Has the page moved or been split? in mapping_get_entry()
1844 if (unlikely(page != xas_reload(&xas))) { in mapping_get_entry()
1845 put_page(page); in mapping_get_entry()
1851 return page; in mapping_get_entry()
1855 * pagecache_get_page - Find and get a reference to a page.
1857 * @index: The page index.
1858 * @fgp_flags: %FGP flags modify how the page is returned.
1861 * Looks up the page cache entry at @mapping & @index.
1865 * * %FGP_ACCESSED - The page will be marked accessed.
1866 * * %FGP_LOCK - The page is returned locked.
1867 * * %FGP_HEAD - If the page is present and a THP, return the head page
1868 * rather than the exact page specified by the index.
1870 * instead of allocating a new page to replace it.
1871 * * %FGP_CREAT - If no page is present then a new page is allocated using
1872 * @gfp_mask and added to the page cache and the VM's LRU list.
1873 * The page is returned locked and with an increased refcount.
1875 * page is already in cache. If the page was allocated, unlock it before
1877 * * %FGP_WRITE - The page will be written
1879 * * %FGP_NOWAIT - Don't get blocked by page lock
1884 * If there is a page cache page, it is returned with an increased refcount.
1886 * Return: The found page or %NULL otherwise.
1888 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, in pagecache_get_page()
1891 struct page *page; in pagecache_get_page() local
1894 page = mapping_get_entry(mapping, index); in pagecache_get_page()
1895 if (xa_is_value(page)) { in pagecache_get_page()
1897 return page; in pagecache_get_page()
1898 page = NULL; in pagecache_get_page()
1900 if (!page) in pagecache_get_page()
1905 if (!trylock_page(page)) { in pagecache_get_page()
1906 put_page(page); in pagecache_get_page()
1910 lock_page(page); in pagecache_get_page()
1913 /* Has the page been truncated? */ in pagecache_get_page()
1914 if (unlikely(page->mapping != mapping)) { in pagecache_get_page()
1915 unlock_page(page); in pagecache_get_page()
1916 put_page(page); in pagecache_get_page()
1919 VM_BUG_ON_PAGE(!thp_contains(page, index), page); in pagecache_get_page()
1923 mark_page_accessed(page); in pagecache_get_page()
1926 if (page_is_idle(page)) in pagecache_get_page()
1927 clear_page_idle(page); in pagecache_get_page()
1930 page = find_subpage(page, index); in pagecache_get_page()
1933 if (!page && (fgp_flags & FGP_CREAT)) { in pagecache_get_page()
1940 page = __page_cache_alloc(gfp_mask); in pagecache_get_page()
1941 if (!page) in pagecache_get_page()
1949 __SetPageReferenced(page); in pagecache_get_page()
1951 err = add_to_page_cache_lru(page, mapping, index, gfp_mask); in pagecache_get_page()
1953 put_page(page); in pagecache_get_page()
1954 page = NULL; in pagecache_get_page()
1960 * add_to_page_cache_lru locks the page, and for mmap we expect in pagecache_get_page()
1961 * an unlocked page. in pagecache_get_page()
1963 if (page && (fgp_flags & FGP_FOR_MMAP)) in pagecache_get_page()
1964 unlock_page(page); in pagecache_get_page()
1967 return page; in pagecache_get_page()
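The FGP_* flag description above is easiest to read next to a concrete call. A minimal sketch, equivalent to what find_or_create_page() does (helper name is hypothetical):

```c
#include <linux/pagemap.h>

/* Find the page at @index, or create one; returned locked and marked accessed. */
static struct page *example_get_locked_page(struct address_space *mapping,
					    pgoff_t index)
{
	return pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  mapping_gfp_mask(mapping));
}
```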
1971 static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max, in find_get_entry()
1974 struct page *page; in find_get_entry() local
1978 page = xas_find(xas, max); in find_get_entry()
1980 page = xas_find_marked(xas, max, mark); in find_get_entry()
1982 if (xas_retry(xas, page)) in find_get_entry()
1985 * A shadow entry of a recently evicted page, a swap in find_get_entry()
1987 * without attempting to raise page count. in find_get_entry()
1989 if (!page || xa_is_value(page)) in find_get_entry()
1990 return page; in find_get_entry()
1992 if (!page_cache_get_speculative(page)) in find_get_entry()
1995 /* Has the page moved or been split? */ in find_get_entry()
1996 if (unlikely(page != xas_reload(xas))) { in find_get_entry()
1997 put_page(page); in find_get_entry()
2001 return page; in find_get_entry()
2010 * @start: The starting page cache index
2011 * @end: The final page index (inclusive).
2019 * The search returns a group of mapping-contiguous page cache entries
2026 * If it finds a Transparent Huge Page, head or tail, find_get_entries()
2027 * stops at that page: the caller is likely to have a better way to handle
2028 * the compound page as a whole, and then skip its extent, than repeatedly
2037 struct page *page; in find_get_entries() local
2042 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { in find_get_entries()
2047 if (!xa_is_value(page) && PageTransHuge(page) && in find_get_entries()
2048 !PageHuge(page)) { in find_get_entries()
2049 page = find_subpage(page, xas.xa_index); in find_get_entries()
2054 pvec->pages[ret] = page; in find_get_entries()
2067 * @start: The starting page cache index.
2068 * @end: The final page index (inclusive).
2075 * somebody else or under writeback are skipped. Only the head page of
2089 struct page *page; in find_lock_entries() local
2092 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { in find_lock_entries()
2093 if (!xa_is_value(page)) { in find_lock_entries()
2094 if (page->index < start) in find_lock_entries()
2096 VM_BUG_ON_PAGE(page->index != xas.xa_index, page); in find_lock_entries()
2097 if (page->index + thp_nr_pages(page) - 1 > end) in find_lock_entries()
2099 if (!trylock_page(page)) in find_lock_entries()
2101 if (page->mapping != mapping || PageWriteback(page)) in find_lock_entries()
2103 VM_BUG_ON_PAGE(!thp_contains(page, xas.xa_index), in find_lock_entries()
2104 page); in find_lock_entries()
2107 if (!pagevec_add(pvec, page)) in find_lock_entries()
2111 unlock_page(page); in find_lock_entries()
2113 put_page(page); in find_lock_entries()
2115 if (!xa_is_value(page) && PageTransHuge(page)) { in find_lock_entries()
2116 unsigned int nr_pages = thp_nr_pages(page); in find_lock_entries()
2119 xas_set(&xas, page->index + nr_pages); in find_lock_entries()
2132 * @start: The starting page index
2133 * @end: The final page index (inclusive)
2144 * We also update @start to index the next page for the traversal.
2152 struct page **pages) in find_get_pages_range()
2155 struct page *page; in find_get_pages_range() local
2162 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { in find_get_pages_range()
2164 if (xa_is_value(page)) in find_get_pages_range()
2167 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range()
2175 * We come here when there is no page beyond @end. We take care to not in find_get_pages_range()
2177 * breaks the iteration when there is a page at index -1 but that is in find_get_pages_range()
2193 * @index: The starting page index
2203 unsigned int nr_pages, struct page **pages) in find_get_pages_contig()
2206 struct page *page; in find_get_pages_contig() local
2213 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in find_get_pages_contig()
2214 if (xas_retry(&xas, page)) in find_get_pages_contig()
2220 if (xa_is_value(page)) in find_get_pages_contig()
2223 if (!page_cache_get_speculative(page)) in find_get_pages_contig()
2226 /* Has the page moved or been split? */ in find_get_pages_contig()
2227 if (unlikely(page != xas_reload(&xas))) in find_get_pages_contig()
2230 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_contig()
2235 put_page(page); in find_get_pages_contig()
2247 * @index: the starting page index
2248 * @end: The final page index (inclusive)
2255 * page we return, ready for the next iteration.
2261 struct page **pages) in find_get_pages_range_tag()
2264 struct page *page; in find_get_pages_range_tag() local
2271 while ((page = find_get_entry(&xas, end, tag))) { in find_get_pages_range_tag()
2274 * is lockless so there is a window for page reclaim to evict in find_get_pages_range_tag()
2275 * a page we saw tagged. Skip over it. in find_get_pages_range_tag()
2277 if (xa_is_value(page)) in find_get_pages_range_tag()
2280 pages[ret] = page; in find_get_pages_range_tag()
2282 *index = page->index + thp_nr_pages(page); in find_get_pages_range_tag()
2290 * iteration when there is a page at index -1 but that is already in find_get_pages_range_tag()
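find_get_pages_range_tag() is the building block for tag-based walks such as writeback. A sketch of a dirty-page walk (example_write_one() is a placeholder supplied by the caller; real ->writepages() loops also handle locking and errors):

```c
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Visit every page tagged dirty in [start, end]. */
static void example_walk_dirty(struct address_space *mapping,
			       pgoff_t start, pgoff_t end,
			       void (*example_write_one)(struct page *))
{
	struct page *pages[PAGEVEC_SIZE];
	unsigned int i, nr;

	while ((nr = find_get_pages_range_tag(mapping, &start, end,
					      PAGECACHE_TAG_DIRTY,
					      PAGEVEC_SIZE, pages))) {
		for (i = 0; i < nr; i++) {
			example_write_one(pages[i]);
			put_page(pages[i]);	/* drop the ref the lookup took */
		}
		cond_resched();
	}
}
```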
2329 * middle of a THP, the entire THP will be returned. The last page in
2337 struct page *head; in filemap_get_read_batch()
2348 /* Has the page moved or been split? */ in filemap_get_read_batch()
2370 struct page *page) in filemap_read_page() argument
2379 ClearPageError(page); in filemap_read_page()
2380 /* Start the actual read. The read will unlock the page. */ in filemap_read_page()
2381 error = mapping->a_ops->readpage(file, page); in filemap_read_page()
2385 error = wait_on_page_locked_killable(page); in filemap_read_page()
2388 if (PageUptodate(page)) in filemap_read_page()
2395 loff_t pos, struct iov_iter *iter, struct page *page) in filemap_range_uptodate() argument
2399 if (PageUptodate(page)) in filemap_range_uptodate()
2406 if (mapping->host->i_blkbits >= (PAGE_SHIFT + thp_order(page))) in filemap_range_uptodate()
2410 if (page_offset(page) > pos) { in filemap_range_uptodate()
2411 count -= page_offset(page) - pos; in filemap_range_uptodate()
2414 pos -= page_offset(page); in filemap_range_uptodate()
2417 return mapping->a_ops->is_partially_uptodate(page, pos, count); in filemap_range_uptodate()
2422 struct page *page) in filemap_update_page() argument
2433 if (!trylock_page(page)) { in filemap_update_page()
2439 put_and_wait_on_page_locked(page, TASK_KILLABLE); in filemap_update_page()
2442 error = __lock_page_async(page, iocb->ki_waitq); in filemap_update_page()
2448 if (!page->mapping) in filemap_update_page()
2452 if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, page)) in filemap_update_page()
2459 error = filemap_read_page(iocb->ki_filp, mapping, page); in filemap_update_page()
2462 unlock_page(page); in filemap_update_page()
2466 put_page(page); in filemap_update_page()
2474 struct page *page; in filemap_create_page() local
2477 page = page_cache_alloc(mapping); in filemap_create_page()
2478 if (!page) in filemap_create_page()
2484 * after evicting page cache during truncate and before actually in filemap_create_page()
2486 * inserting the page into page cache as the locked page would then be in filemap_create_page()
2494 error = add_to_page_cache_lru(page, mapping, index, in filemap_create_page()
2501 error = filemap_read_page(file, mapping, page); in filemap_create_page()
2506 pagevec_add(pvec, page); in filemap_create_page()
2510 put_page(page); in filemap_create_page()
2515 struct address_space *mapping, struct page *page, in filemap_readahead() argument
2520 page_cache_async_readahead(mapping, &file->f_ra, file, page, in filemap_readahead()
2521 page->index, last_index - page->index); in filemap_readahead()
2533 struct page *page; in filemap_get_pages() local
2559 page = pvec->pages[pagevec_count(pvec) - 1]; in filemap_get_pages()
2560 if (PageReadahead(page)) { in filemap_get_pages()
2561 err = filemap_readahead(iocb, filp, mapping, page, last_index); in filemap_get_pages()
2565 if (!PageUptodate(page)) { in filemap_get_pages()
2568 err = filemap_update_page(iocb, mapping, iter, page); in filemap_get_pages()
2576 put_page(page); in filemap_get_pages()
2585 * filemap_read - Read data from the page cache.
2590 * Copies data from the page cache. If the data is not currently present,
2637 * part of the page is not copied back to userspace (unless in filemap_read()
2652 * When a sequential read accesses a page several times, only in filemap_read()
2660 struct page *page = pvec.pages[i]; in filemap_read() local
2661 size_t page_size = thp_size(page); in filemap_read()
2667 if (end_offset < page_offset(page)) in filemap_read()
2670 mark_page_accessed(page); in filemap_read()
2672 * If users can be writing to this page using arbitrary in filemap_read()
2674 * before reading the page on the kernel side. in filemap_read()
2679 for (j = 0; j < thp_nr_pages(page); j++) in filemap_read()
2680 flush_dcache_page(page + j); in filemap_read()
2683 copied = copy_page_to_iter(page, offset, bytes, iter); in filemap_read()
2712 * that can use the page cache directly.
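filemap_read() backs generic_file_read_iter(), so a filesystem "that can use the page cache directly" typically just wires the generic helpers into its file_operations. A sketch (structure name is illustrative):

```c
#include <linux/fs.h>

/* Read, write and mmap entirely through the page cache. */
static const struct file_operations example_file_ops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
};
```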
2784 struct address_space *mapping, struct page *page, in page_seek_hole_data() argument
2790 if (xa_is_value(page) || PageUptodate(page)) in page_seek_hole_data()
2797 lock_page(page); in page_seek_hole_data()
2798 if (unlikely(page->mapping != mapping)) in page_seek_hole_data()
2801 offset = offset_in_thp(page, start) & ~(bsz - 1); in page_seek_hole_data()
2804 if (ops->is_partially_uptodate(page, offset, bsz) == seek_data) in page_seek_hole_data()
2808 } while (offset < thp_size(page)); in page_seek_hole_data()
2810 unlock_page(page); in page_seek_hole_data()
2816 unsigned int seek_page_size(struct xa_state *xas, struct page *page) in seek_page_size() argument
2818 if (xa_is_value(page)) in seek_page_size()
2820 return thp_size(page); in seek_page_size()
2824 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
2830 * If the page cache knows which blocks contain holes and which blocks
2847 struct page *page; in mapping_seek_hole_data() local
2853 while ((page = find_get_entry(&xas, max, XA_PRESENT))) { in mapping_seek_hole_data()
2863 seek_size = seek_page_size(&xas, page); in mapping_seek_hole_data()
2865 start = page_seek_hole_data(&xas, mapping, page, start, pos, in mapping_seek_hole_data()
2873 if (!xa_is_value(page)) in mapping_seek_hole_data()
2874 put_page(page); in mapping_seek_hole_data()
2880 if (page && !xa_is_value(page)) in mapping_seek_hole_data()
2881 put_page(page); in mapping_seek_hole_data()
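mapping_seek_hole_data() lets an ->llseek() implementation answer SEEK_HOLE/SEEK_DATA straight from the page cache, the way shmem uses it. A hedged sketch covering only those two whence values (helper name is hypothetical):

```c
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Handle only SEEK_HOLE / SEEK_DATA; other whence values go elsewhere. */
static loff_t example_llseek_hole_data(struct file *file, loff_t offset,
				       int whence)
{
	struct address_space *mapping = file->f_mapping;
	loff_t size = i_size_read(file_inode(file));

	if (offset >= size)
		return -ENXIO;

	return mapping_seek_hole_data(mapping, offset, size, whence);
}
```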
2890 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
2892 * @page - the page to lock.
2896 * It differs in that it actually returns the page locked if it returns 1 and 0
2897 * if it couldn't lock the page. If we did have to drop the mmap_lock then fpin
2900 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, in lock_page_maybe_drop_mmap() argument
2903 if (trylock_page(page)) in lock_page_maybe_drop_mmap()
2916 if (__lock_page_killable(page)) { in lock_page_maybe_drop_mmap()
2928 __lock_page(page); in lock_page_maybe_drop_mmap()
2934 * Synchronous readahead happens when we don't even find a page in the page
2986 * Asynchronous readahead happens when we find the page and PG_readahead,
2991 struct page *page) in do_async_mmap_readahead() argument
3006 if (PageReadahead(page)) { in do_async_mmap_readahead()
3009 page, offset, ra->ra_pages); in do_async_mmap_readahead()
3015 * filemap_fault - read in file data for page fault handling
3019 * mapped memory region to read in file data during a page fault.
3022 * it in the page cache, and handles the special cases reasonably without
3046 struct page *page; in filemap_fault() local
3055 * Do we have something in the page cache already? in filemap_fault()
3057 page = find_get_page(mapping, offset); in filemap_fault()
3058 if (likely(page)) { in filemap_fault()
3060 * We found the page, so try async readahead before waiting for in filemap_fault()
3064 fpin = do_async_mmap_readahead(vmf, page); in filemap_fault()
3065 if (unlikely(!PageUptodate(page))) { in filemap_fault()
3070 /* No page in the page cache at all */ in filemap_fault()
3084 page = pagecache_get_page(mapping, offset, in filemap_fault()
3087 if (!page) { in filemap_fault()
3095 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) in filemap_fault()
3099 if (unlikely(compound_head(page)->mapping != mapping)) { in filemap_fault()
3100 unlock_page(page); in filemap_fault()
3101 put_page(page); in filemap_fault()
3104 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); in filemap_fault()
3107 * We have a locked page in the page cache, now we need to check in filemap_fault()
3110 if (unlikely(!PageUptodate(page))) { in filemap_fault()
3112 * The page was in cache and uptodate and now it is not. in filemap_fault()
3113 * Strange but possible since we didn't hold the page lock all in filemap_fault()
3118 unlock_page(page); in filemap_fault()
3119 put_page(page); in filemap_fault()
3131 unlock_page(page); in filemap_fault()
3138 * Found the page and have a reference on it. in filemap_fault()
3139 * We must recheck i_size under page lock. in filemap_fault()
3143 unlock_page(page); in filemap_fault()
3144 put_page(page); in filemap_fault()
3148 vmf->page = page; in filemap_fault()
3153 * Umm, take care of errors if the page isn't up-to-date. in filemap_fault()
3159 error = filemap_read_page(file, mapping, page); in filemap_fault()
3162 put_page(page); in filemap_fault()
3174 * page. in filemap_fault()
3176 if (page) in filemap_fault()
3177 put_page(page); in filemap_fault()
3186 static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page) in filemap_map_pmd() argument
3190 /* Huge page is mapped? No need to proceed. */ in filemap_map_pmd()
3192 unlock_page(page); in filemap_map_pmd()
3193 put_page(page); in filemap_map_pmd()
3197 if (pmd_none(*vmf->pmd) && PageTransHuge(page)) { in filemap_map_pmd()
3198 vm_fault_t ret = do_set_pmd(vmf, page); in filemap_map_pmd()
3200 /* The page is mapped successfully, reference consumed. */ in filemap_map_pmd()
3201 unlock_page(page); in filemap_map_pmd()
3218 unlock_page(page); in filemap_map_pmd()
3219 put_page(page); in filemap_map_pmd()
3226 static struct page *next_uptodate_page(struct page *page, in next_uptodate_page() argument
3233 if (!page) in next_uptodate_page()
3235 if (xas_retry(xas, page)) in next_uptodate_page()
3237 if (xa_is_value(page)) in next_uptodate_page()
3239 if (PageLocked(page)) in next_uptodate_page()
3241 if (!page_cache_get_speculative(page)) in next_uptodate_page()
3243 /* Has the page moved or been split? */ in next_uptodate_page()
3244 if (unlikely(page != xas_reload(xas))) in next_uptodate_page()
3246 if (!PageUptodate(page) || PageReadahead(page)) in next_uptodate_page()
3248 if (PageHWPoison(page)) in next_uptodate_page()
3250 if (!trylock_page(page)) in next_uptodate_page()
3252 if (page->mapping != mapping) in next_uptodate_page()
3254 if (!PageUptodate(page)) in next_uptodate_page()
3259 return page; in next_uptodate_page()
3261 unlock_page(page); in next_uptodate_page()
3263 put_page(page); in next_uptodate_page()
3264 } while ((page = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_page()
3269 static inline struct page *first_map_page(struct address_space *mapping, in first_map_page()
3277 static inline struct page *next_map_page(struct address_space *mapping, in next_map_page()
3294 struct page *head, *page; in filemap_map_pages() local
3311 page = find_subpage(head, xas.xa_index); in filemap_map_pages()
3312 if (PageHWPoison(page)) in filemap_map_pages()
3329 do_set_pte(vmf, page, addr); in filemap_map_pages()
3330 /* no need to invalidate: a not-present page won't be cached */ in filemap_map_pages()
3349 struct page *page = vmf->page; in filemap_page_mkwrite() local
3354 lock_page(page); in filemap_page_mkwrite()
3355 if (page->mapping != mapping) { in filemap_page_mkwrite()
3356 unlock_page(page); in filemap_page_mkwrite()
3361 * We mark the page dirty already here so that when freeze is in in filemap_page_mkwrite()
3363 * see the dirty page and writeprotect it again. in filemap_page_mkwrite()
3365 set_page_dirty(page); in filemap_page_mkwrite()
3366 wait_for_stable_page(page); in filemap_page_mkwrite()
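filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() are designed to be plugged straight into a vm_operations_struct, which is what generic_file_mmap() arranges via generic_file_vm_ops. A sketch of a filesystem's ->mmap doing the same (names are illustrative):

```c
#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

static int example_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &example_file_vm_ops;
	return 0;
}
```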
3419 static struct page *wait_on_page_read(struct page *page) in wait_on_page_read() argument
3421 if (!IS_ERR(page)) { in wait_on_page_read()
3422 wait_on_page_locked(page); in wait_on_page_read()
3423 if (!PageUptodate(page)) { in wait_on_page_read()
3424 put_page(page); in wait_on_page_read()
3425 page = ERR_PTR(-EIO); in wait_on_page_read()
3428 return page; in wait_on_page_read()
3431 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page()
3433 int (*filler)(void *, struct page *), in do_read_cache_page() argument
3437 struct page *page; in do_read_cache_page() local
3440 page = find_get_page(mapping, index); in do_read_cache_page()
3441 if (!page) { in do_read_cache_page()
3442 page = __page_cache_alloc(gfp); in do_read_cache_page()
3443 if (!page) in do_read_cache_page()
3445 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
3447 put_page(page); in do_read_cache_page()
3456 err = filler(data, page); in do_read_cache_page()
3458 err = mapping->a_ops->readpage(data, page); in do_read_cache_page()
3461 put_page(page); in do_read_cache_page()
3465 page = wait_on_page_read(page); in do_read_cache_page()
3466 if (IS_ERR(page)) in do_read_cache_page()
3467 return page; in do_read_cache_page()
3470 if (PageUptodate(page)) in do_read_cache_page()
3474 * Page is not up to date and may be locked due to one of the following in do_read_cache_page()
3475 * case a: Page is being filled and the page lock is held in do_read_cache_page()
3476 * case b: Read/write error clearing the page uptodate status in do_read_cache_page()
3477 * case c: Truncation in progress (page locked) in do_read_cache_page()
3480 * Case a, the page will be up to date when the page is unlocked. in do_read_cache_page()
3481 * There is no need to serialise on the page lock here as the page in do_read_cache_page()
3483 * page is truncated, the data is still valid if PageUptodate as in do_read_cache_page()
3485 * Case b, the page will not be up to date in do_read_cache_page()
3486 * Case c, the page may be truncated but in itself, the data may still in do_read_cache_page()
3488 * operation must restart if the page is not uptodate on unlock but in do_read_cache_page()
3489 * otherwise serialising on page lock to stabilise the mapping gives in do_read_cache_page()
3490 * no additional guarantees to the caller as the page lock is in do_read_cache_page()
3492 * Case d, similar to truncation. If reclaim holds the page lock, it in do_read_cache_page()
3495 * no need to serialise with page lock. in do_read_cache_page()
3497 * As the page lock gives no additional guarantee, we optimistically in do_read_cache_page()
3498 * wait on the page to be unlocked and check if it's up to date and in do_read_cache_page()
3499 * use the page if it is. Otherwise, the page lock is required to in do_read_cache_page()
3502 * wait on the same page for IO to complete. in do_read_cache_page()
3504 wait_on_page_locked(page); in do_read_cache_page()
3505 if (PageUptodate(page)) in do_read_cache_page()
3509 lock_page(page); in do_read_cache_page()
3512 if (!page->mapping) { in do_read_cache_page()
3513 unlock_page(page); in do_read_cache_page()
3514 put_page(page); in do_read_cache_page()
3518 /* Someone else locked and filled the page in a very small window */ in do_read_cache_page()
3519 if (PageUptodate(page)) { in do_read_cache_page()
3520 unlock_page(page); in do_read_cache_page()
3527 * Clear page error before actual read, PG_error will be in do_read_cache_page()
3528 * set again if read page fails. in do_read_cache_page()
3530 ClearPageError(page); in do_read_cache_page()
3534 mark_page_accessed(page); in do_read_cache_page()
3535 return page; in do_read_cache_page()
3539 * read_cache_page - read into page cache, fill it if needed
3540 * @mapping: the page's address_space
3541 * @index: the page index
3543 * @data: first arg to filler(data, page) function, often left as NULL
3545 * Read into the page cache. If a page already exists, and PageUptodate() is
3546 * not set, try to fill the page and wait for it to become unlocked.
3548 * If the page does not get brought uptodate, return -EIO.
3552 * Return: up to date page on success, ERR_PTR() on failure.
3554 struct page *read_cache_page(struct address_space *mapping, in read_cache_page()
3556 int (*filler)(void *, struct page *), in read_cache_page() argument
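read_cache_page() is the simplest "give me an uptodate page" interface described above. A sketch that reads the first byte of a mapping through it; passing a NULL filler falls back to ->readpage() with @data as its file argument (NULL here, which many but not all implementations accept), and the helper name is hypothetical:

```c
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Read page 0 of @mapping and copy out its first byte. */
static int example_read_first_byte(struct address_space *mapping, u8 *out)
{
	struct page *page = read_cache_page(mapping, 0, NULL, NULL);

	if (IS_ERR(page))
		return PTR_ERR(page);		/* typically -EIO or -ENOMEM */

	*out = *(u8 *)kmap(page);
	kunmap(page);
	put_page(page);
	return 0;
}
```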
3565 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3566 * @mapping: the page's address_space
3567 * @index: the page index
3568 * @gfp: the page allocator flags to use if allocating
3571 * any new page allocations done using the specified allocation flags.
3573 * If the page does not get brought uptodate, return -EIO.
3577 * Return: up to date page on success, ERR_PTR() on failure.
3579 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp()
3589 struct page **pagep, void **fsdata) in pagecache_write_begin()
3600 struct page *page, void *fsdata) in pagecache_write_end() argument
3604 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
3609 * Warn about a page cache invalidation failure during a direct I/O write.
3622 …pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision… in dio_warn_stale_pagecache()
3656 * the new data. We invalidate clean cached page from the region we're in generic_file_direct_write()
3663 * If a page can not be invalidated, return 0 to fall back in generic_file_direct_write()
3712 * Find or create a page at the given pagecache position. Return the locked
3713 * page. This function is specifically for buffered writes.
3715 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin()
3718 struct page *page; in grab_cache_page_write_begin() local
3724 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
3726 if (page) in grab_cache_page_write_begin()
3727 wait_for_stable_page(page); in grab_cache_page_write_begin()
3729 return page; in grab_cache_page_write_begin()
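grab_cache_page_write_begin() is the helper most ->write_begin() implementations use to obtain a locked, write-stable page. A stripped-down sketch (real implementations also handle block allocation, zeroing of partial pages, and so on):

```c
#include <linux/pagemap.h>

/* Minimal ->write_begin(): just pin a locked page covering the write. */
static int example_write_begin(struct file *file,
			       struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page = grab_cache_page_write_begin(mapping, index, flags);

	if (!page)
		return -ENOMEM;

	*pagep = page;		/* returned locked and stable */
	return 0;
}
```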
3743 struct page *page; in generic_perform_write() local
3744 unsigned long offset; /* Offset into pagecache page */ in generic_perform_write()
3745 unsigned long bytes; /* Bytes to write to page */ in generic_perform_write()
3755 * Bring in the user page that we will copy from _first_. in generic_perform_write()
3757 * same page as we're writing to, without it being marked in generic_perform_write()
3771 &page, &fsdata); in generic_perform_write()
3776 flush_dcache_page(page); in generic_perform_write()
3778 copied = copy_page_from_iter_atomic(page, offset, bytes, i); in generic_perform_write()
3779 flush_dcache_page(page); in generic_perform_write()
3782 page, fsdata); in generic_perform_write()
3841 /* We can write back this queue in page reclaim */ in __generic_file_write_iter()
3860 * page-cache pages correctly). in __generic_file_write_iter()
3878 * We need to ensure that the page cache pages are written to in __generic_file_write_iter()
3939 * try_to_release_page() - release old fs-specific metadata on a page
3941 * @page: the page which the kernel is trying to free
3944 * The address_space is to try to release any data against the page
3945 * (presumably at page->private).
3947 * This may also be called if PG_fscache is set on a page, indicating that the
3948 * page is known to the local caching routines.
3951 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
3955 int try_to_release_page(struct page *page, gfp_t gfp_mask) in try_to_release_page() argument
3957 struct address_space * const mapping = page->mapping; in try_to_release_page()
3959 BUG_ON(!PageLocked(page)); in try_to_release_page()
3960 if (PageWriteback(page)) in try_to_release_page()
3964 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()
3965 return try_to_free_buffers(page); in try_to_release_page()
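try_to_release_page() must be called with the page locked, as the BUG_ON above enforces. A sketch of the guard an invalidation path might wrap around it before freeing a clean page (helper name is illustrative):

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Try to strip fs-private data so a clean, locked page can be freed. */
static bool example_release_private(struct page *page, gfp_t gfp)
{
	if (!page_has_private(page))
		return true;			/* nothing to release */
	if (PageWriteback(page))
		return false;			/* I/O still in flight */

	return try_to_release_page(page, gfp) != 0;
}
```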