Lines Matching full:mapping (all hits below are in mm/truncate.c)
32 static inline void __clear_shadow_entry(struct address_space *mapping, in __clear_shadow_entry() argument
35 XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
43 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, in clear_shadow_entry() argument
46 spin_lock(&mapping->host->i_lock); in clear_shadow_entry()
47 xa_lock_irq(&mapping->i_pages); in clear_shadow_entry()
48 __clear_shadow_entry(mapping, index, entry); in clear_shadow_entry()
49 xa_unlock_irq(&mapping->i_pages); in clear_shadow_entry()
50 if (mapping_shrinkable(mapping)) in clear_shadow_entry()
51 inode_add_lru(mapping->host); in clear_shadow_entry()
52 spin_unlock(&mapping->host->i_lock); in clear_shadow_entry()
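
The fragments above outline the shadow-entry teardown path: take the inode's i_lock, then the i_pages xarray lock with interrupts disabled, erase the entry, and put the inode back on the inode LRU if only shadow entries remain. A minimal sketch of that locking order follows; it is not the verbatim mm/truncate.c code (the workingset xas update callback and some checks are simplified), and names ending in _sketch are mine.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Simplified reconstruction of the clear_shadow_entry() pattern above. */
static void clear_shadow_entry_sketch(struct address_space *mapping,
                                      pgoff_t index, void *entry)
{
        XA_STATE(xas, &mapping->i_pages, index);

        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
        if (xas_load(&xas) == entry)            /* still the entry we expect? */
                xas_store(&xas, NULL);          /* erase the shadow entry */
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))        /* nothing but shadows left */
                inode_add_lru(mapping->host);   /* let inode reclaim see it */
        spin_unlock(&mapping->host->i_lock);
}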
60 static void truncate_folio_batch_exceptionals(struct address_space *mapping, in truncate_folio_batch_exceptionals() argument
67 if (shmem_mapping(mapping)) in truncate_folio_batch_exceptionals()
77 dax = dax_mapping(mapping); in truncate_folio_batch_exceptionals()
79 spin_lock(&mapping->host->i_lock); in truncate_folio_batch_exceptionals()
80 xa_lock_irq(&mapping->i_pages); in truncate_folio_batch_exceptionals()
93 dax_delete_mapping_entry(mapping, index); in truncate_folio_batch_exceptionals()
97 __clear_shadow_entry(mapping, index, folio); in truncate_folio_batch_exceptionals()
101 xa_unlock_irq(&mapping->i_pages); in truncate_folio_batch_exceptionals()
102 if (mapping_shrinkable(mapping)) in truncate_folio_batch_exceptionals()
103 inode_add_lru(mapping->host); in truncate_folio_batch_exceptionals()
104 spin_unlock(&mapping->host->i_lock); in truncate_folio_batch_exceptionals()
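
truncate_folio_batch_exceptionals() is the batched flavour of the same idea: the locks are taken once for a whole folio batch, DAX entries are removed via dax_delete_mapping_entry(), and everything else is treated as a workingset shadow entry. A condensed sketch, assuming the indices array filled in by find_lock_entries()/find_get_entries() and the includes from the previous sketch plus <linux/dax.h> and <linux/shmem_fs.h>; the real function also compacts the batch so only real folios remain for the caller, which this sketch skips.

/* Condensed sketch of the batched exceptional-entry handling above. */
static void truncate_batch_exceptionals_sketch(struct address_space *mapping,
                                               struct folio_batch *fbatch,
                                               pgoff_t *indices)
{
        bool dax = dax_mapping(mapping);
        int i;

        if (shmem_mapping(mapping))     /* shmem manages its own swap entries */
                return;

        if (!dax) {                     /* DAX takes its own locks per entry */
                spin_lock(&mapping->host->i_lock);
                xa_lock_irq(&mapping->i_pages);
        }

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                void *entry = fbatch->folios[i];

                if (!xa_is_value(entry))        /* a real folio; not handled here */
                        continue;
                if (dax)
                        dax_delete_mapping_entry(mapping, indices[i]);
                else
                        __clear_shadow_entry(mapping, indices[i], entry);
        }

        if (!dax) {
                xa_unlock_irq(&mapping->i_pages);
                if (mapping_shrinkable(mapping))
                        inode_add_lru(mapping->host);
                spin_unlock(&mapping->host->i_lock);
        }
}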
113 static int invalidate_exceptional_entry(struct address_space *mapping, in invalidate_exceptional_entry() argument
117 if (shmem_mapping(mapping) || dax_mapping(mapping)) in invalidate_exceptional_entry()
119 clear_shadow_entry(mapping, index, entry); in invalidate_exceptional_entry()
127 static int invalidate_exceptional_entry2(struct address_space *mapping, in invalidate_exceptional_entry2() argument
131 if (shmem_mapping(mapping)) in invalidate_exceptional_entry2()
133 if (dax_mapping(mapping)) in invalidate_exceptional_entry2()
134 return dax_invalidate_mapping_entry_sync(mapping, index); in invalidate_exceptional_entry2()
135 clear_shadow_entry(mapping, index, entry); in invalidate_exceptional_entry2()
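
The two invalidate_exceptional_entry variants differ only in how far they may go: the plain one leaves shmem and DAX entries alone, while the *2 variant used by invalidate_inode_pages2_range() must remove DAX entries synchronously. A sketch of the dispatch the fragments show; the return convention (1 = handled, 0 or an error = busy) is my reading, so treat the exact values as an assumption.

/* Sketch: plain invalidation only clears ordinary shadow entries. */
static int invalidate_exceptional_entry_sketch(struct address_space *mapping,
                                               pgoff_t index, void *entry)
{
        /* shmem and DAX entries need their owners' cooperation; skip them */
        if (shmem_mapping(mapping) || dax_mapping(mapping))
                return 1;
        clear_shadow_entry(mapping, index, entry);
        return 1;
}

/* Sketch: the "2" variant must not leave stale DAX entries behind. */
static int invalidate_exceptional_entry2_sketch(struct address_space *mapping,
                                                pgoff_t index, void *entry)
{
        if (shmem_mapping(mapping))     /* handled by shmem itself */
                return 1;
        if (dax_mapping(mapping))       /* synchronous DAX entry removal */
                return dax_invalidate_mapping_entry_sync(mapping, index);
        clear_shadow_entry(mapping, index, entry);
        return 1;
}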
156 const struct address_space_operations *aops = folio->mapping->a_ops; in folio_invalidate()
168 * We need to bail out if page->mapping is no longer equal to the original
169 * mapping. This happens a) when the VM reclaimed the page while we waited on
190 int truncate_inode_folio(struct address_space *mapping, struct folio *folio) in truncate_inode_folio() argument
192 if (folio->mapping != mapping) in truncate_inode_folio()
228 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
247 truncate_inode_folio(folio->mapping, folio); in truncate_inode_partial_folio()
254 int generic_error_remove_page(struct address_space *mapping, struct page *page) in generic_error_remove_page() argument
258 if (!mapping) in generic_error_remove_page()
264 if (!S_ISREG(mapping->host->i_mode)) in generic_error_remove_page()
266 return truncate_inode_folio(mapping, page_folio(page)); in generic_error_remove_page()
270 static long mapping_evict_folio(struct address_space *mapping, in mapping_evict_folio() argument
282 return remove_mapping(mapping, folio); in mapping_evict_folio()
289 * Safely invalidate one page from its pagecache mapping.
298 struct address_space *mapping = folio_mapping(folio); in invalidate_inode_page() local
301 if (!mapping) in invalidate_inode_page()
303 return mapping_evict_folio(mapping, folio); in invalidate_inode_page()
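
mapping_evict_folio() is the gentle path behind invalidate_inode_page(): it only drops folios that can disappear without losing data. A simplified sketch of the policy these fragments imply; the real function also rejects folios with extra references or unreleased private filesystem data, which is elided here.

/* Sketch of the "evict only if it is clean and idle" policy above. */
static long mapping_evict_folio_sketch(struct address_space *mapping,
                                       struct folio *folio)
{
        if (!mapping)                   /* anonymous or already truncated */
                return 0;
        if (folio_test_dirty(folio) || folio_test_writeback(folio))
                return 0;               /* dropping it would lose data */
        /* extra-reference and private-data checks omitted in this sketch */
        return remove_mapping(mapping, folio);  /* detach from the page cache */
}

/* Caller side: the folio must be locked so folio_mapping() stays stable
 * and remove_mapping() can safely detach it. */
static long invalidate_inode_page_sketch(struct page *page)
{
        struct folio *folio = page_folio(page);

        return mapping_evict_folio_sketch(folio_mapping(folio), folio);
}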
308 * @mapping: mapping to truncate
323 * mapping is large, it is probably the case that the final pages are the most
330 void truncate_inode_pages_range(struct address_space *mapping, in truncate_inode_pages_range() argument
342 if (mapping_empty(mapping)) in truncate_inode_pages_range()
364 while (index < end && find_lock_entries(mapping, index, end - 1, in truncate_inode_pages_range()
367 truncate_folio_batch_exceptionals(mapping, &fbatch, indices); in truncate_inode_pages_range()
370 delete_from_page_cache_batch(mapping, &fbatch); in truncate_inode_pages_range()
378 folio = __filemap_get_folio(mapping, lstart >> PAGE_SHIFT, FGP_LOCK, 0); in truncate_inode_pages_range()
392 folio = __filemap_get_folio(mapping, lend >> PAGE_SHIFT, in truncate_inode_pages_range()
404 if (!find_get_entries(mapping, index, end - 1, &fbatch, in truncate_inode_pages_range()
426 truncate_inode_folio(mapping, folio); in truncate_inode_pages_range()
430 truncate_folio_batch_exceptionals(mapping, &fbatch, indices); in truncate_inode_pages_range()
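
Read together, the truncate_inode_pages_range() fragments describe a two-pass design: a fast pass that batch-deletes every folio it can lock without sleeping, then a slow pass that waits for whatever was busy (locked, under writeback) the first time. Below is a heavily condensed skeleton of that structure, in the same kernel-internal context as the earlier sketches; the partial first/last folio handling (the __filemap_get_folio(..., FGP_LOCK, ...) lookups above), the per-folio cleanup before batch deletion, and several restart corner cases are omitted, and the _sketch name is mine.

/* Condensed two-pass skeleton of the range truncation shown above. */
static void truncate_range_sketch(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        struct folio_batch fbatch;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index = start;
        int i;

        if (mapping_empty(mapping))
                return;

        folio_batch_init(&fbatch);

        /* Pass 1: grab whatever locks trivially, delete it in batches. */
        while (index < end && find_lock_entries(mapping, index, end - 1,
                                                &fbatch, indices)) {
                index = indices[folio_batch_count(&fbatch) - 1] + 1;
                truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
                /* per-folio unmap/cleanup omitted in this sketch */
                delete_from_page_cache_batch(mapping, &fbatch);
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        folio_unlock(fbatch.folios[i]);
                folio_batch_release(&fbatch);
                cond_resched();
        }

        /* Pass 2: whatever is left was busy; this time we may sleep. */
        index = start;
        while (find_get_entries(mapping, index, end - 1, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        index = indices[i] + 1;
                        if (xa_is_value(folio))         /* shadow/DAX entry */
                                continue;
                        folio_lock(folio);
                        folio_wait_writeback(folio);
                        truncate_inode_folio(mapping, folio);
                        folio_unlock(folio);
                }
                truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
                folio_batch_release(&fbatch);
                cond_resched();
        }
}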
439 * @mapping: mapping to truncate
443 * mapping->invalidate_lock.
447 * mapping->nrpages can be non-zero when this function returns even after
448 * truncation of the whole mapping.
450 void truncate_inode_pages(struct address_space *mapping, loff_t lstart) in truncate_inode_pages() argument
452 truncate_inode_pages_range(mapping, lstart, (loff_t)-1); in truncate_inode_pages()
458 * @mapping: mapping to truncate
465 void truncate_inode_pages_final(struct address_space *mapping) in truncate_inode_pages_final() argument
474 mapping_set_exiting(mapping); in truncate_inode_pages_final()
476 if (!mapping_empty(mapping)) { in truncate_inode_pages_final()
483 xa_lock_irq(&mapping->i_pages); in truncate_inode_pages_final()
484 xa_unlock_irq(&mapping->i_pages); in truncate_inode_pages_final()
487 truncate_inode_pages(mapping, 0); in truncate_inode_pages_final()
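
truncate_inode_pages_final() runs when the inode is being evicted for good. The otherwise empty xa_lock_irq()/xa_unlock_irq() pair is a barrier: page reclaim inserts shadow entries under that same lock, so cycling it guarantees that anyone who did not yet see the AS_EXITING flag set by mapping_set_exiting() has finished before the final truncate starts. A sketch of that ordering, using only what the fragments above show:

/* Sketch of the final-teardown ordering shown above. */
static void truncate_pages_final_sketch(struct address_space *mapping)
{
        /* Flag the mapping as dying; reclaim checks this flag before it
         * installs new shadow entries. */
        mapping_set_exiting(mapping);

        if (!mapping_empty(mapping)) {
                /* Empty critical section: any insertion that raced with the
                 * flag above has dropped the lock by the time we get it, so
                 * the truncate below cannot miss it. */
                xa_lock_irq(&mapping->i_pages);
                xa_unlock_irq(&mapping->i_pages);
        }

        /* Nothing new can appear any more; drop every page and entry. */
        truncate_inode_pages(mapping, 0);
}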
493 * @mapping: the address_space which holds the pages to invalidate
502 unsigned long invalidate_mapping_pagevec(struct address_space *mapping, in invalidate_mapping_pagevec() argument
513 while (find_lock_entries(mapping, index, end, &fbatch, indices)) { in invalidate_mapping_pagevec()
521 count += invalidate_exceptional_entry(mapping, in invalidate_mapping_pagevec()
528 ret = mapping_evict_folio(mapping, folio); in invalidate_mapping_pagevec()
552 * @mapping: the address_space which holds the cache to invalidate
564 unsigned long invalidate_mapping_pages(struct address_space *mapping, in invalidate_mapping_pages() argument
567 return invalidate_mapping_pagevec(mapping, start, end, NULL); in invalidate_mapping_pages()
578 static int invalidate_complete_folio2(struct address_space *mapping, in invalidate_complete_folio2() argument
581 if (folio->mapping != mapping) in invalidate_complete_folio2()
588 spin_lock(&mapping->host->i_lock); in invalidate_complete_folio2()
589 xa_lock_irq(&mapping->i_pages); in invalidate_complete_folio2()
595 xa_unlock_irq(&mapping->i_pages); in invalidate_complete_folio2()
596 if (mapping_shrinkable(mapping)) in invalidate_complete_folio2()
597 inode_add_lru(mapping->host); in invalidate_complete_folio2()
598 spin_unlock(&mapping->host->i_lock); in invalidate_complete_folio2()
600 filemap_free_folio(mapping, folio); in invalidate_complete_folio2()
603 xa_unlock_irq(&mapping->i_pages); in invalidate_complete_folio2()
604 spin_unlock(&mapping->host->i_lock); in invalidate_complete_folio2()
608 static int folio_launder(struct address_space *mapping, struct folio *folio) in folio_launder() argument
612 if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) in folio_launder()
614 return mapping->a_ops->launder_folio(folio); in folio_launder()
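
folio_launder() gives the filesystem one chance to write out a dirty folio that is about to be forcibly invalidated (NFS is the classic ->launder_folio user). The fragments show everything except the dirty check; a sketch of the whole helper:

/* Sketch: only call into the filesystem for dirty folios that still
 * belong to this mapping and whose aops provide ->launder_folio. */
static int folio_launder_sketch(struct address_space *mapping,
                                struct folio *folio)
{
        if (!folio_test_dirty(folio))
                return 0;
        if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
                return 0;
        return mapping->a_ops->launder_folio(folio);    /* may write and sleep */
}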
619 * @mapping: the address_space
628 int invalidate_inode_pages2_range(struct address_space *mapping, in invalidate_inode_pages2_range() argument
639 if (mapping_empty(mapping)) in invalidate_inode_pages2_range()
644 while (find_get_entries(mapping, index, end, &fbatch, indices)) { in invalidate_inode_pages2_range()
652 if (!invalidate_exceptional_entry2(mapping, in invalidate_inode_pages2_range()
663 unmap_mapping_pages(mapping, index, in invalidate_inode_pages2_range()
670 if (folio->mapping != mapping) { in invalidate_inode_pages2_range()
680 ret2 = folio_launder(mapping, folio); in invalidate_inode_pages2_range()
682 if (!invalidate_complete_folio2(mapping, folio)) in invalidate_inode_pages2_range()
701 if (dax_mapping(mapping)) { in invalidate_inode_pages2_range()
702 unmap_mapping_pages(mapping, start, end - start + 1, false); in invalidate_inode_pages2_range()
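
Within invalidate_inode_pages2_range(), each real folio goes through the sequence the fragments hint at: lock it, re-check that it still belongs to this mapping, wait out writeback, unmap it from page tables, let the filesystem launder dirty data, then try invalidate_complete_folio2(); any folio that cannot be freed turns the call into -EBUSY. A per-folio sketch of that sequence (the batching loop, the one-shot range unmap optimisation, and the trailing DAX-wide unmap shown above are left out, and the _sketch name is mine):

/* Per-folio sketch of the forced-invalidation path shown above. */
static int invalidate_one_folio_sketch(struct address_space *mapping,
                                       struct folio *folio)
{
        int ret;

        folio_lock(folio);
        if (folio->mapping != mapping) {        /* truncated or reclaimed meanwhile */
                folio_unlock(folio);
                return 0;
        }
        folio_wait_writeback(folio);            /* stable contents before judging */

        if (folio_mapped(folio))                /* still in someone's page tables */
                unmap_mapping_pages(mapping, folio->index,
                                    folio_nr_pages(folio), false);

        ret = folio_launder(mapping, folio);    /* let the fs write dirty data */
        if (ret == 0 && !invalidate_complete_folio2(mapping, folio))
                ret = -EBUSY;                   /* still dirty or pinned: busy */

        folio_unlock(folio);
        return ret;
}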
710 * @mapping: the address_space
717 int invalidate_inode_pages2(struct address_space *mapping) in invalidate_inode_pages2() argument
719 return invalidate_inode_pages2_range(mapping, 0, -1); in invalidate_inode_pages2()
740 struct address_space *mapping = inode->i_mapping; in truncate_pagecache() local
752 unmap_mapping_range(mapping, holebegin, 0, 1); in truncate_pagecache()
753 truncate_inode_pages(mapping, newsize); in truncate_pagecache()
754 unmap_mapping_range(mapping, holebegin, 0, 1); in truncate_pagecache()
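
truncate_pagecache() calls unmap_mapping_range() on both sides of truncate_inode_pages() on purpose: the first unmap is an efficiency measure (bulk-unmapping up front saves truncate from doing it page by page), but a concurrent fault can COW private pages while truncate_inode_pages() sleeps, so a second unmap is needed for correctness. A sketch of that ordering as the fragments show it:

/* Sketch of the unmap / truncate / unmap ordering shown above. */
static void truncate_pagecache_sketch(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;
        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        unmap_mapping_range(mapping, holebegin, 0, 1);  /* bulk unmap (speed) */
        truncate_inode_pages(mapping, newsize);         /* may sleep; faults race */
        unmap_mapping_range(mapping, holebegin, 0, 1);  /* catch COWed stragglers */
}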
848 struct address_space *mapping = inode->i_mapping; in truncate_pagecache_range() local
865 unmap_mapping_range(mapping, unmap_start, in truncate_pagecache_range()
867 truncate_inode_pages_range(mapping, lstart, lend); in truncate_pagecache_range()