Lines matching full:mapping in mm/truncate.c (identifier search; only lines containing the identifier are shown, each prefixed with its line number in the file)
 34  static inline void __clear_shadow_entry(struct address_space *mapping,
 37          XA_STATE(xas, &mapping->i_pages, index);

 45  static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
 48          xa_lock_irq(&mapping->i_pages);
 49          __clear_shadow_entry(mapping, index, entry);
 50          xa_unlock_irq(&mapping->i_pages);
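Only lines containing the matched identifier survive the filter, so the function bodies above are mostly elided. As a rough sketch of the pattern behind the first helper (reconstructed from the visible XA_STATE line and the usual XArray idiom, assuming normal kernel headers; not the verbatim kernel body): the inner function revalidates the entry and erases it, while the wrapper holds the i_pages lock around it.

        static inline void __clear_shadow_entry(struct address_space *mapping,
                                                pgoff_t index, void *entry)
        {
                XA_STATE(xas, &mapping->i_pages, index);

                /* The slot may have been replaced or cleared concurrently. */
                if (xas_load(&xas) != entry)
                        return;
                /* Storing NULL erases the shadow entry from i_pages. */
                xas_store(&xas, NULL);
        }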
 58  static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 65          if (shmem_mapping(mapping))
 75          dax = dax_mapping(mapping);
 77                  xa_lock_irq(&mapping->i_pages);
 89                          dax_delete_mapping_entry(mapping, index);
 93                  __clear_shadow_entry(mapping, index, page);
 97                  xa_unlock_irq(&mapping->i_pages);
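A detail the match filter hides here: the i_pages lock is plausibly taken only in the non-DAX case, since dax_delete_mapping_entry() does its own entry locking. A simplified skeleton of the middle of the function (assumed from the visible calls; the real code also compacts the pagevec as it goes):

        bool dax = dax_mapping(mapping);
        int i;

        if (!dax)
                xa_lock_irq(&mapping->i_pages);
        for (i = 0; i < pagevec_count(pvec); i++) {
                if (!xa_is_value(pvec->pages[i]))
                        continue;               /* a real page, not an entry */
                if (dax)
                        dax_delete_mapping_entry(mapping, indices[i]);
                else
                        __clear_shadow_entry(mapping, indices[i],
                                             pvec->pages[i]);
        }
        if (!dax)
                xa_unlock_irq(&mapping->i_pages);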
105  static int invalidate_exceptional_entry(struct address_space *mapping,
109          if (shmem_mapping(mapping) || dax_mapping(mapping))
111          clear_shadow_entry(mapping, index, entry);

119  static int invalidate_exceptional_entry2(struct address_space *mapping,
123          if (shmem_mapping(mapping))
125          if (dax_mapping(mapping))
126                  return dax_invalidate_mapping_entry_sync(mapping, index);
127          clear_shadow_entry(mapping, index, entry);
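Read together, the two helpers dispatch on the mapping type: shmem invalidates its own swap entries, DAX entries need the dax helpers, and everything else is a workingset shadow entry that can simply be dropped. The second helper, with the elided lines filled in as an assumption from the visible structure (return values inferred, not quoted):

        static int invalidate_exceptional_entry2(struct address_space *mapping,
                                                 pgoff_t index, void *entry)
        {
                /* shmem removes its own exceptional entries. */
                if (shmem_mapping(mapping))
                        return 1;
                /* DAX entries pin storage; use the synchronous helper. */
                if (dax_mapping(mapping))
                        return dax_invalidate_mapping_entry_sync(mapping, index);
                /* Plain shadow (workingset) entry: just drop it. */
                clear_shadow_entry(mapping, index, entry);
                return 1;
        }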
151          invalidatepage = page->mapping->a_ops->invalidatepage;

165   * We need to bail out if page->mapping is no longer equal to the original
166   * mapping. This happens a) when the VM reclaimed the page while we waited on
196  invalidate_complete_page(struct address_space *mapping, struct page *page)
200          if (page->mapping != mapping)
206          ret = remove_mapping(mapping, page);
211  int truncate_inode_page(struct address_space *mapping, struct page *page)
215          if (page->mapping != mapping)

226  int generic_error_remove_page(struct address_space *mapping, struct page *page)
228          if (!mapping)
234          if (!S_ISREG(mapping->host->i_mode))
236          return truncate_inode_page(mapping, page);

241   * Safely invalidate one page from its pagecache mapping.
248          struct address_space *mapping = page_mapping(page);
249          if (!mapping)
255          return invalidate_complete_page(mapping, page);
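invalidate_inode_page() only drops pages whose loss is harmless, and the predicates between lines 249 and 255 fall outside the match. A plausible reconstruction (the dirty/writeback/mapped checks are assumptions based on what safe invalidation requires):

        int invalidate_inode_page(struct page *page)
        {
                struct address_space *mapping = page_mapping(page);

                if (!mapping)
                        return 0;       /* no longer in any page cache */
                if (PageDirty(page) || PageWriteback(page))
                        return 0;       /* data loss, or I/O is in flight */
                if (page_mapped(page))
                        return 0;       /* still mapped into some process */
                return invalidate_complete_page(mapping, page);
        }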
260   * @mapping: mapping to truncate
275   * mapping is large, it is probably the case that the final pages are the most
282  void truncate_inode_pages_range(struct address_space *mapping,
294          if (mapping_empty(mapping))
320          while (index < end && find_lock_entries(mapping, index, end - 1,
323                  truncate_exceptional_pvec_entries(mapping, &pvec, indices);
326                  delete_from_page_cache_batch(mapping, &pvec);
334                  struct page *page = find_lock_page(mapping, start - 1);
344                          cleancache_invalidate_page(mapping, page);
353                  struct page *page = find_lock_page(mapping, end);
357                          cleancache_invalidate_page(mapping, page);
375                  if (!find_get_entries(mapping, index, end - 1, &pvec,
397                                  truncate_inode_page(mapping, page);
400                  truncate_exceptional_pvec_entries(mapping, &pvec, indices);
406          cleancache_invalidate_inode(mapping);
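The calls above trace the familiar two-pass shape of range truncation: a fast batched pass over pages that can be locked without blocking, zeroing (rather than removal) of partial first and last pages via find_lock_page(), then a slow per-page pass that waits on page locks and writeback. A condensed sketch of the first pass (locals declared here for completeness; the per-page cleanup between the two batch calls is elided, not verbatim):

        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index = start;
        int i;

        pagevec_init(&pvec);
        while (index < end && find_lock_entries(mapping, index, end - 1,
                                                &pvec, indices)) {
                index = indices[pagevec_count(&pvec) - 1] + 1;
                /* DAX and shadow entries first ... */
                truncate_exceptional_pvec_entries(mapping, &pvec, indices);
                /* ... then every remaining page in one batch. */
                delete_from_page_cache_batch(mapping, &pvec);
                for (i = 0; i < pagevec_count(&pvec); i++)
                        unlock_page(pvec.pages[i]);
                pagevec_release(&pvec);
                cond_resched();
        }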
412   * @mapping: mapping to truncate
416   * mapping->invalidate_lock.
420   * mapping->nrpages can be non-zero when this function returns even after
421   * truncation of the whole mapping.
423  void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
425          truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
431   * @mapping: mapping to truncate
438  void truncate_inode_pages_final(struct address_space *mapping)
447          mapping_set_exiting(mapping);
449          if (!mapping_empty(mapping)) {
456                  xa_lock_irq(&mapping->i_pages);
457                  xa_unlock_irq(&mapping->i_pages);
464          truncate_inode_pages(mapping, 0);
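truncate_inode_pages_final() is the variant for a dying inode: it marks the mapping as exiting, and the paired lock/unlock of i_pages at lines 456-457 serializes against any page-cache lookup still in flight before the final purge. The conventional call site is a filesystem's ->evict_inode; a minimal sketch (myfs_evict_inode is an invented name):

        static void myfs_evict_inode(struct inode *inode)
        {
                /* Drop every page and shadow entry of the dying inode. */
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
        }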
468  static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
479          while (find_lock_entries(mapping, index, end, &pvec, indices)) {
487                                  count += invalidate_exceptional_entry(mapping,

518   * @mapping: the address_space which holds the cache to invalidate
530  unsigned long invalidate_mapping_pages(struct address_space *mapping,
533          return __invalidate_mapping_pages(mapping, start, end, NULL);

539   * @mapping: the address_space which holds the pages to invalidate
548  void invalidate_mapping_pagevec(struct address_space *mapping,
551          __invalidate_mapping_pages(mapping, start, end, nr_pagevec);
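Both wrappers are best-effort: dirty, locked, or mapped pages are skipped rather than written back or unmapped. A sketch of a typical caller, modelled on POSIX_FADV_DONTNEED-style cache dropping (drop_clean_cache is an invented helper):

        static void drop_clean_cache(struct address_space *mapping,
                                     loff_t start, loff_t len)
        {
                pgoff_t first = start >> PAGE_SHIFT;
                pgoff_t last = (start + len - 1) >> PAGE_SHIFT;
                unsigned long nr;

                /* Returns the number of pages actually freed. */
                nr = invalidate_mapping_pages(mapping, first, last);
                pr_debug("invalidated %lu pages\n", nr);
        }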
562  invalidate_complete_page2(struct address_space *mapping, struct page *page)
564          if (page->mapping != mapping)
570          xa_lock_irq(&mapping->i_pages);
576          xa_unlock_irq(&mapping->i_pages);
578          if (mapping->a_ops->freepage)
579                  mapping->a_ops->freepage(page);
584          xa_unlock_irq(&mapping->i_pages);

588  static int do_launder_page(struct address_space *mapping, struct page *page)
592          if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
594          return mapping->a_ops->launder_page(page);
599   * @mapping: the address_space
608  int invalidate_inode_pages2_range(struct address_space *mapping,
619          if (mapping_empty(mapping))
624          while (find_get_entries(mapping, index, end, &pvec, indices)) {
632                                  if (!invalidate_exceptional_entry2(mapping,
643                                          unmap_mapping_pages(mapping, index,
650                          if (page->mapping != mapping) {
660                          ret2 = do_launder_page(mapping, page);
662                                  if (!invalidate_complete_page2(mapping, page))
681          if (dax_mapping(mapping)) {
682                  unmap_mapping_pages(mapping, start, end - start + 1, false);
685          cleancache_invalidate_inode(mapping);
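The pages2 variants are the strict ones: dirty pages are laundered through ->launder_page, and a page that cannot be removed makes the call fail with -EBUSY instead of being silently left behind. That guarantee is what direct I/O relies on; roughly the pattern of generic_file_direct_write(), condensed into an invented helper (myfs_dio_prep; the preceding writeback flush and error paths are elided):

        static int myfs_dio_prep(struct address_space *mapping,
                                 loff_t pos, size_t count)
        {
                int ret;

                /* Evict cached pages over the I/O range so the direct write
                 * cannot race against stale page-cache copies. */
                ret = invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
                                (pos + count - 1) >> PAGE_SHIFT);
                if (ret == -EBUSY)
                        return 0;       /* pinned page: fall back to buffered */
                return ret;
        }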
692   * @mapping: the address_space
699  int invalidate_inode_pages2(struct address_space *mapping)
701          return invalidate_inode_pages2_range(mapping, 0, -1);

722          struct address_space *mapping = inode->i_mapping;
734          unmap_mapping_range(mapping, holebegin, 0, 1);
735          truncate_inode_pages(mapping, newsize);
736          unmap_mapping_range(mapping, holebegin, 0, 1);
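truncate_pagecache() unmaps user mappings beyond the new size twice: once before the purge, so truncate_inode_pages() sees fewer mapped pages, and once after, to close the window in which a concurrent fault could have re-instantiated one. Filesystems rarely call it directly; the usual entry point is truncate_setsize(), which pairs it with i_size_write(). A sketch of a size-change path (myfs_handle_size_change is an invented name):

        static void myfs_handle_size_change(struct inode *inode, loff_t newsize)
        {
                /* truncate_setsize() = i_size_write() + truncate_pagecache(). */
                truncate_setsize(inode, newsize);

                /* Filesystem-specific freeing of on-disk blocks follows here. */
        }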
830          struct address_space *mapping = inode->i_mapping;
847                  unmap_mapping_range(mapping, unmap_start,
849          truncate_inode_pages_range(mapping, lstart, lend);
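truncate_pagecache_range() is the hole-punching counterpart: only pages lying wholly inside [lstart, lend] are unmapped (hence the rounded unmap_start), while truncate_inode_pages_range() zeroes the partial edge pages. A sketch of how a fallocate(PUNCH_HOLE) implementation typically drives it (myfs_punch_hole is an invented name; real callers also hold the appropriate inode and mapping locks):

        static int myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        {
                int err;

                /* Flush dirty data over the hole before dropping the cache. */
                err = filemap_write_and_wait_range(inode->i_mapping,
                                                   offset, offset + len - 1);
                if (err)
                        return err;

                /* Unmap and discard the cached pages inside the hole. */
                truncate_pagecache_range(inode, offset, offset + len - 1);

                /* Filesystem-specific extent/block removal goes here. */
                return 0;
        }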