Lines matching refs: page (in mm/truncate.c)
83 struct page *page = pvec->pages[i]; in truncate_exceptional_pvec_entries() local
86 if (!xa_is_value(page)) { in truncate_exceptional_pvec_entries()
87 pvec->pages[j++] = page; in truncate_exceptional_pvec_entries()
99 __clear_shadow_entry(mapping, index, page); in truncate_exceptional_pvec_entries()
152 void do_invalidatepage(struct page *page, unsigned int offset, in do_invalidatepage() argument
155 void (*invalidatepage)(struct page *, unsigned int, unsigned int); in do_invalidatepage()
157 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage()
163 (*invalidatepage)(page, offset, length); in do_invalidatepage()
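The listing above only shows the lines of do_invalidatepage() that mention page. A rough reconstruction of the whole helper, with the CONFIG_BLOCK fallback filled in from memory of the pre-folio mm/truncate.c (an approximation, not a verbatim copy):

void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	/* fall back to the buffer-head helper when the fs has no hook */
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}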
177 truncate_cleanup_page(struct address_space *mapping, struct page *page) in truncate_cleanup_page() argument
179 if (page_mapped(page)) { in truncate_cleanup_page()
180 pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1; in truncate_cleanup_page()
181 unmap_mapping_pages(mapping, page->index, nr, false); in truncate_cleanup_page()
184 if (page_has_private(page)) in truncate_cleanup_page()
185 do_invalidatepage(page, 0, PAGE_SIZE); in truncate_cleanup_page()
192 cancel_dirty_page(page); in truncate_cleanup_page()
193 ClearPageMappedToDisk(page); in truncate_cleanup_page()
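For context, the matched lines above are nearly the whole of truncate_cleanup_page(); a sketch of the full helper (the explanatory comment is paraphrased, not copied):

static void truncate_cleanup_page(struct address_space *mapping,
				  struct page *page)
{
	if (page_mapped(page)) {
		/* a THP covers HPAGE_PMD_NR page-table entries */
		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
		unmap_mapping_pages(mapping, page->index, nr, false);
	}

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems re-dirty the page even after the VM has
	 * cancelled the dirty bit, so dirty accounting is cleaned up
	 * only after the invalidation above.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}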
205 invalidate_complete_page(struct address_space *mapping, struct page *page) in invalidate_complete_page() argument
209 if (page->mapping != mapping) in invalidate_complete_page()
212 if (page_has_private(page) && !try_to_release_page(page, 0)) in invalidate_complete_page()
215 ret = remove_mapping(mapping, page); in invalidate_complete_page()
220 int truncate_inode_page(struct address_space *mapping, struct page *page) in truncate_inode_page() argument
222 VM_BUG_ON_PAGE(PageTail(page), page); in truncate_inode_page()
224 if (page->mapping != mapping) in truncate_inode_page()
227 truncate_cleanup_page(mapping, page); in truncate_inode_page()
228 delete_from_page_cache(page); in truncate_inode_page()
235 int generic_error_remove_page(struct address_space *mapping, struct page *page) in generic_error_remove_page() argument
245 return truncate_inode_page(mapping, page); in generic_error_remove_page()
255 int invalidate_inode_page(struct page *page) in invalidate_inode_page() argument
257 struct address_space *mapping = page_mapping(page); in invalidate_inode_page()
260 if (PageDirty(page) || PageWriteback(page)) in invalidate_inode_page()
262 if (page_mapped(page)) in invalidate_inode_page()
264 return invalidate_complete_page(mapping, page); in invalidate_inode_page()
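The three checks listed for invalidate_inode_page() are the whole story: a page is only invalidated when it is clean, not under writeback and not mapped. A sketch of the function as it reads in the pre-folio tree (the early !mapping bail-out is filled in from memory):

int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}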
341 struct page *page = pvec.pages[i]; in truncate_inode_pages_range() local
348 if (xa_is_value(page)) in truncate_inode_pages_range()
351 if (!trylock_page(page)) in truncate_inode_pages_range()
353 WARN_ON(page_to_index(page) != index); in truncate_inode_pages_range()
354 if (PageWriteback(page)) { in truncate_inode_pages_range()
355 unlock_page(page); in truncate_inode_pages_range()
358 if (page->mapping != mapping) { in truncate_inode_pages_range()
359 unlock_page(page); in truncate_inode_pages_range()
362 pagevec_add(&locked_pvec, page); in truncate_inode_pages_range()
375 struct page *page = find_lock_page(mapping, start - 1); in truncate_inode_pages_range() local
376 if (page) { in truncate_inode_pages_range()
383 wait_on_page_writeback(page); in truncate_inode_pages_range()
384 zero_user_segment(page, partial_start, top); in truncate_inode_pages_range()
385 cleancache_invalidate_page(mapping, page); in truncate_inode_pages_range()
386 if (page_has_private(page)) in truncate_inode_pages_range()
387 do_invalidatepage(page, partial_start, in truncate_inode_pages_range()
389 unlock_page(page); in truncate_inode_pages_range()
390 put_page(page); in truncate_inode_pages_range()
394 struct page *page = find_lock_page(mapping, end); in truncate_inode_pages_range() local
395 if (page) { in truncate_inode_pages_range()
396 wait_on_page_writeback(page); in truncate_inode_pages_range()
397 zero_user_segment(page, 0, partial_end); in truncate_inode_pages_range()
398 cleancache_invalidate_page(mapping, page); in truncate_inode_pages_range()
399 if (page_has_private(page)) in truncate_inode_pages_range()
400 do_invalidatepage(page, 0, in truncate_inode_pages_range()
402 unlock_page(page); in truncate_inode_pages_range()
403 put_page(page); in truncate_inode_pages_range()
433 struct page *page = pvec.pages[i]; in truncate_inode_pages_range() local
443 if (xa_is_value(page)) in truncate_inode_pages_range()
446 lock_page(page); in truncate_inode_pages_range()
447 WARN_ON(page_to_index(page) != index); in truncate_inode_pages_range()
448 wait_on_page_writeback(page); in truncate_inode_pages_range()
449 truncate_inode_page(mapping, page); in truncate_inode_pages_range()
450 unlock_page(page); in truncate_inode_pages_range()
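The truncate_inode_pages_range() references above are usually reached from a filesystem's ->setattr() path. A hypothetical caller sketch (example_truncate() is illustrative only; truncate_setsize() and truncate_pagecache() are the real helpers that funnel into this code):

static int example_truncate(struct inode *inode, loff_t newsize)
{
	/* i_size_write() followed by truncate_pagecache() */
	truncate_setsize(inode, newsize);
	/*
	 * truncate_pagecache() unmaps the tail of the mapping and then
	 * calls truncate_inode_pages(), which is just
	 * truncate_inode_pages_range(mapping, newsize, (loff_t)-1).
	 */
	return 0;
}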
561 struct page *page = pvec.pages[i]; in invalidate_mapping_pages() local
568 if (xa_is_value(page)) { in invalidate_mapping_pages()
570 page); in invalidate_mapping_pages()
574 if (!trylock_page(page)) in invalidate_mapping_pages()
577 WARN_ON(page_to_index(page) != index); in invalidate_mapping_pages()
580 if (PageTransTail(page)) { in invalidate_mapping_pages()
581 unlock_page(page); in invalidate_mapping_pages()
583 } else if (PageTransHuge(page)) { in invalidate_mapping_pages()
592 unlock_page(page); in invalidate_mapping_pages()
597 get_page(page); in invalidate_mapping_pages()
607 ret = invalidate_inode_page(page); in invalidate_mapping_pages()
608 unlock_page(page); in invalidate_mapping_pages()
614 deactivate_file_page(page); in invalidate_mapping_pages()
615 if (PageTransHuge(page)) in invalidate_mapping_pages()
616 put_page(page); in invalidate_mapping_pages()
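invalidate_mapping_pages() only drops pages that are clean, unmapped and not under writeback; everything else is skipped (or, for a clean page it could not drop, merely deactivated via deactivate_file_page() above). A usage sketch, roughly what drop_caches does per inode (example_drop_clean_cache() is illustrative):

static void example_drop_clean_cache(struct inode *inode)
{
	/* returns how many pages were actually invalidated */
	unsigned long nr = invalidate_mapping_pages(inode->i_mapping, 0, -1);

	pr_debug("dropped %lu clean pages\n", nr);
}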
636 invalidate_complete_page2(struct address_space *mapping, struct page *page) in invalidate_complete_page2() argument
640 if (page->mapping != mapping) in invalidate_complete_page2()
643 if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) in invalidate_complete_page2()
647 if (PageDirty(page)) in invalidate_complete_page2()
650 BUG_ON(page_has_private(page)); in invalidate_complete_page2()
651 __delete_from_page_cache(page, NULL); in invalidate_complete_page2()
655 mapping->a_ops->freepage(page); in invalidate_complete_page2()
657 put_page(page); /* pagecache ref */ in invalidate_complete_page2()
664 static int do_launder_page(struct address_space *mapping, struct page *page) in do_launder_page() argument
666 if (!PageDirty(page)) in do_launder_page()
668 if (page->mapping != mapping || mapping->a_ops->launder_page == NULL) in do_launder_page()
670 return mapping->a_ops->launder_page(page); in do_launder_page()
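do_launder_page() is short enough that the matched lines are essentially the whole function; reconstructed with the return statements filled in:

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}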
704 struct page *page = pvec.pages[i]; in invalidate_inode_pages2_range() local
711 if (xa_is_value(page)) { in invalidate_inode_pages2_range()
713 index, page)) in invalidate_inode_pages2_range()
718 lock_page(page); in invalidate_inode_pages2_range()
719 WARN_ON(page_to_index(page) != index); in invalidate_inode_pages2_range()
720 if (page->mapping != mapping) { in invalidate_inode_pages2_range()
721 unlock_page(page); in invalidate_inode_pages2_range()
724 wait_on_page_writeback(page); in invalidate_inode_pages2_range()
725 if (page_mapped(page)) { in invalidate_inode_pages2_range()
741 BUG_ON(page_mapped(page)); in invalidate_inode_pages2_range()
742 ret2 = do_launder_page(mapping, page); in invalidate_inode_pages2_range()
744 if (!invalidate_complete_page2(mapping, page)) in invalidate_inode_pages2_range()
749 unlock_page(page); in invalidate_inode_pages2_range()
869 struct page *page; in pagecache_isize_extended() local
882 page = find_lock_page(inode->i_mapping, index); in pagecache_isize_extended()
884 if (!page) in pagecache_isize_extended()
890 if (page_mkclean(page)) in pagecache_isize_extended()
891 set_page_dirty(page); in pagecache_isize_extended()
892 unlock_page(page); in pagecache_isize_extended()
893 put_page(page); in pagecache_isize_extended()
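pagecache_isize_extended() matters for filesystems whose block size is smaller than PAGE_SIZE: after i_size grows, the page straddling the old EOF is write-protected again (the page_mkclean() call above) so the next write fault goes through ->page_mkwrite() and the newly exposed blocks get allocated and zeroed. A hypothetical caller (example_extend() is illustrative; the helper and its argument order are real):

static void example_extend(struct inode *inode, loff_t oldsize, loff_t newsize)
{
	i_size_write(inode, newsize);
	pagecache_isize_extended(inode, oldsize, newsize);
}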