Lines matching full:mapping (identifier search hits; all results below are from the kernel's fs/dax.c)

191  * @entry may no longer be the entry at the index in the mapping.
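
For context: this hit is from the comment of the wait-for-unlocked-entry helper, and it states the retry obligation after any sleep. A condensed sketch of that loop, simplified from get_unlocked_entry() in fs/dax.c (wait-queue plumbing elided; dax_is_locked() is a real fs/dax.c helper):

/* Sketch: loop until the entry at xas->xa_index is unlocked. After
 * sleeping, the lookup starts over, because @entry may have been
 * freed or replaced while we slept.
 */
static void *sketch_get_unlocked_entry(struct xa_state *xas)
{
        void *entry;

        for (;;) {
                entry = xas_find_conflict(xas);
                if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
                    !dax_is_locked(entry))
                        return entry;
                /* queue on the entry's waitqueue, drop xa_lock,
                 * schedule(), retake the lock, and retry */
        }
}
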
337 static inline bool dax_mapping_is_cow(struct address_space *mapping) in dax_mapping_is_cow() argument
339 return (unsigned long)mapping == PAGE_MAPPING_DAX_COW; in dax_mapping_is_cow()
343  * Set page->mapping to the PAGE_MAPPING_DAX_COW sentinel and increase the share count kept in page->index.
347 if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) { in dax_mapping_set_cow()
352 if (page->mapping) in dax_mapping_set_cow()
354 page->mapping = (void *)PAGE_MAPPING_DAX_COW; in dax_mapping_set_cow()
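
The three hits above implement a small tagging scheme: a page shared by several files cannot point at a single address_space, so page->mapping holds the PAGE_MAPPING_DAX_COW sentinel instead. A commented sketch, assuming the 5.19-6.1 semantics where page->index doubles as the share count (the function name is mine):

/* Sketch: mark a DAX page as CoW-shared. ->mapping becomes a sentinel
 * rather than a real address_space, and ->index is repurposed as a
 * count of sharers (assumption: 5.19-6.1 behavior; later kernels
 * renamed this machinery to "shared").
 */
static void sketch_mark_page_shared(struct page *page)
{
        if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
                if (page->mapping)              /* owned by one file before */
                        page->index = 1;        /* count that owner */
                page->mapping = (void *)PAGE_MAPPING_DAX_COW;
        }
        page->index++;                          /* one more sharer */
}
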
361 * whether this entry is shared by multiple files. If so, set the page->mapping
364 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
380 WARN_ON_ONCE(page->mapping); in dax_associate_entry()
381 page->mapping = mapping; in dax_associate_entry()
387 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
399 if (dax_mapping_is_cow(page->mapping)) { in dax_disassociate_entry()
404 WARN_ON_ONCE(page->mapping && page->mapping != mapping); in dax_disassociate_entry()
405 page->mapping = NULL; in dax_disassociate_entry()
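
How the associate/disassociate pair fits together, sketched from the associate side (for_each_mapped_pfn(), dax_entry_size() and dax_mapping_set_cow() are real fs/dax.c internals of this era; the rest is simplified):

/* Sketch: stamp every page backing a DAX entry with its owner, or
 * with the CoW sentinel when the extent is reflink-shared.
 * dax_disassociate_entry() undoes this, decrementing the share count
 * for CoW pages and clearing ->mapping once no sharer remains.
 */
static void sketch_associate(void *entry, struct address_space *mapping,
                             struct vm_area_struct *vma,
                             unsigned long address, bool shared)
{
        unsigned long size = dax_entry_size(entry), pfn;
        pgoff_t index = linear_page_index(vma, address & ~(size - 1));
        int i = 0;

        for_each_mapped_pfn(entry, pfn) {
                struct page *page = pfn_to_page(pfn);

                if (shared) {
                        dax_mapping_set_cow(page);
                } else {
                        WARN_ON_ONCE(page->mapping); /* must be unowned */
                        page->mapping = mapping;
                        page->index = index + i++;
                }
        }
}
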
436 /* Ensure page->mapping isn't freed while we look at it */ in dax_lock_page()
439 struct address_space *mapping = READ_ONCE(page->mapping); in dax_lock_page() local
442 if (!mapping || !dax_mapping(mapping)) in dax_lock_page()
453 if (S_ISCHR(mapping->host->i_mode)) in dax_lock_page()
456 xas.xa = &mapping->i_pages; in dax_lock_page()
458 if (mapping != page->mapping) { in dax_lock_page()
480 struct address_space *mapping = page->mapping; in dax_unlock_page() local
481 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
483 if (S_ISCHR(mapping->host->i_mode)) in dax_unlock_page()
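
These page-based lock helpers are consumed by the memory-failure path: the cookie pins down page->mapping while processes mapping a poisoned pmem page are dealt with. A minimal caller sketch, modeled on mm/memory-failure.c (error handling trimmed, function name mine):

/* Sketch: take the DAX entry lock through the page before acting on
 * a poisoned fsdax page. dax_lock_page() returns an opaque cookie,
 * 0 on failure, to hand back to dax_unlock_page().
 */
static int sketch_handle_fsdax_poison(struct page *page)
{
        dax_entry_t cookie;

        cookie = dax_lock_page(page);   /* holds off truncate/reclaim */
        if (!cookie)
                return -EBUSY;          /* mapping went away under us */

        /* ... unmap the page and signal its users ... */

        dax_unlock_page(page, cookie);
        return 0;
}
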
490 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
491 * @mapping: the file's mapping whose entry we want to lock
498 dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index, in dax_lock_mapping_entry() argument
507 if (!dax_mapping(mapping)) in dax_lock_mapping_entry()
510 xas.xa = &mapping->i_pages; in dax_lock_mapping_entry()
523  * Because we look the entry up via the file's mapping in dax_lock_mapping_entry()
541 void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index, in dax_unlock_mapping_entry() argument
544 XA_STATE(xas, &mapping->i_pages, index); in dax_unlock_mapping_entry()
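
The (mapping, index) variants exist for reflink: a CoW-shared page carries the sentinel in page->mapping, so it cannot name its owner and must be locked through a specific file instead. A caller sketch, loosely modeled on mf_dax_kill_procs() in mm/memory-failure.c:

/* Sketch: walk a poisoned range of one file, locking each DAX entry
 * by (mapping, index) rather than through page->mapping.
 */
static int sketch_kill_range(struct address_space *mapping,
                             pgoff_t index, unsigned long count)
{
        pgoff_t end = index + count;
        struct page *page;
        dax_entry_t cookie;

        for (; index < end; index++) {
                page = NULL;
                cookie = dax_lock_mapping_entry(mapping, index, &page);
                if (!cookie)
                        return -EBUSY;
                /* ... unmap @page and signal the processes using it ... */
                dax_unlock_mapping_entry(mapping, index, cookie);
        }
        return 0;
}
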
582 struct address_space *mapping, unsigned int order) in grab_mapping_entry() argument
624 unmap_mapping_pages(mapping, in grab_mapping_entry()
631 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
634 mapping->nrpages -= PG_PMD_NR; in grab_mapping_entry()
650 mapping->nrpages += 1UL << order; in grab_mapping_entry()
655 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
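
grab_mapping_entry() leans on the convention that a DAX entry is "locked" by setting a bit inside the xarray value while holding xa_lock, not by taking a sleeping lock. A sketch of that convention, using the fs/dax.c-internal DAX_LOCKED flag (retry and wait logic omitted):

/* Sketch: mark the entry at xas->xa_index as owned by this thread.
 * Waiters block on a hashed waitqueue until the bit is cleared.
 * Caller holds xas_lock_irq(); simplified from dax_lock_entry().
 */
static void *sketch_lock_entry(struct xa_state *xas, void *entry)
{
        unsigned long v = xa_to_value(entry);

        return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
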
668 * dax_layout_busy_page_range - find first pinned page in @mapping
669 * @mapping: address space to scan for a page with ref count > 1
677  * any page in the mapping is busy, e.g. for DMA, or other
685 struct page *dax_layout_busy_page_range(struct address_space *mapping, in dax_layout_busy_page_range() argument
693 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
701 if (!dax_mapping(mapping) || !mapping_mapped(mapping)) in dax_layout_busy_page_range()
721 unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0); in dax_layout_busy_page_range()
747 struct page *dax_layout_busy_page(struct address_space *mapping) in dax_layout_busy_page() argument
749 return dax_layout_busy_page_range(mapping, 0, LLONG_MAX); in dax_layout_busy_page()
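
Filesystems call these two before truncating or hole-punching a DAX file, because a page pinned by get_user_pages() must not have its blocks freed mid-DMA. A caller sketch modeled on xfs_break_dax_layouts(); schedule() stands in for the filesystem's unlock-sleep-relock helper:

/* Sketch: find a pinned page in the mapping and sleep until its extra
 * reference is dropped; the caller loops while *retry is set.
 */
static int sketch_break_dax_layouts(struct inode *inode, bool *retry)
{
        struct page *page;

        page = dax_layout_busy_page(inode->i_mapping);
        if (!page)
                return 0;               /* nothing pinned, safe to free */

        *retry = true;                  /* rescan after waking up */
        return ___wait_var_event(&page->_refcount,
                                 atomic_read(&page->_refcount) == 1,
                                 TASK_INTERRUPTIBLE, 0, 0, schedule());
}
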
753 static int __dax_invalidate_entry(struct address_space *mapping, in __dax_invalidate_entry() argument
756 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
768 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
770 mapping->nrpages -= 1UL << dax_entry_order(entry); in __dax_invalidate_entry()
779 * Delete DAX entry at @index from @mapping. Wait for it
782 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) in dax_delete_mapping_entry() argument
784 int ret = __dax_invalidate_entry(mapping, index, true); in dax_delete_mapping_entry()
800 int dax_invalidate_mapping_entry_sync(struct address_space *mapping, in dax_invalidate_mapping_entry_sync() argument
803 return __dax_invalidate_entry(mapping, index, false); in dax_invalidate_mapping_entry_sync()
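
The two exported wrappers differ only in the trunc flag passed to __dax_invalidate_entry(): truncation removes the entry unconditionally, while the invalidate path must refuse entries that are still dirty or under writeback. A caller-side sketch (modeled on the mm/truncate.c call sites, heavily simplified):

/* Sketch: both helpers return 1 if the entry was removed, 0 if not.
 * Only the non-truncating form can fail on a dirty entry.
 */
static int sketch_remove_dax_entry(struct address_space *mapping,
                                   pgoff_t index, bool truncating)
{
        if (truncating)
                return dax_delete_mapping_entry(mapping, index);
        /* returns 0 if the entry is dirty or tagged for writeback */
        return dax_invalidate_mapping_entry_sync(mapping, index);
}
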
833 * MAP_SYNC on a dax mapping guarantees dirty metadata is
860 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_entry() local
866 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in dax_insert_entry()
870 /* we are replacing a zero page with a block mapping */ in dax_insert_entry()
872 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, in dax_insert_entry()
875 unmap_mapping_pages(mapping, index, 1, false); in dax_insert_entry()
883 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
884 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address, in dax_insert_entry()
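
The dirty-tag bookkeeping above is what backs MAP_SYNC: as long as a writably mapped page has durable metadata, userspace may persist data with CPU cache flushes alone. A runnable userspace sketch of that contract; the file path is hypothetical and the filesystem must be mounted with DAX enabled (MAP_SYNC needs glibc 2.28+ or <linux/mman.h>):

#define _GNU_SOURCE             /* for MAP_SYNC, MAP_SHARED_VALIDATE */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/pmem/file", O_RDWR); /* hypothetical path */
        char *p;

        if (fd < 0)
                return 1;
        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                 MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
        if (p == MAP_FAILED)
                return 1;       /* non-DAX mounts reject MAP_SYNC */
        memcpy(p, "hello", 5);
        /* flush CPU caches (e.g. via libpmem) and the data is durable;
         * no fsync() is needed for already-allocated blocks */
        munmap(p, 4096);
        close(fd);
        return 0;
}
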
913 struct address_space *mapping, void *entry) in dax_writeback_one() argument
920 * A page got tagged dirty in DAX mapping? Something is seriously in dax_writeback_one()
978 i_mmap_lock_read(mapping); in dax_writeback_one()
979 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) { in dax_writeback_one()
983 i_mmap_unlock_read(mapping); in dax_writeback_one()
989 * the pfn mappings are writeprotected and fault waits for mapping in dax_writeback_one()
998 trace_dax_writeback_one(mapping->host, index, count); in dax_writeback_one()
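
The hits at lines 978-983 are the write-protect half of flushing a single entry; the whole sequence is "clean the PTEs, then flush the caches". A condensed sketch (pfn_mkclean_range() and dax_flush() are real APIs of this era; the framing function is mine):

/* Sketch: persist one dirty DAX entry. Write-protecting every user
 * mapping first guarantees that a later store re-dirties the entry,
 * so no flush is ever lost; then the backing memory is flushed.
 */
static void sketch_flush_entry(struct address_space *mapping,
                               struct dax_device *dax_dev,
                               unsigned long pfn, pgoff_t index,
                               unsigned long count)
{
        struct vm_area_struct *vma;
        pgoff_t end = index + count - 1;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end)
                pfn_mkclean_range(pfn, count, index, vma); /* wp PTEs */
        i_mmap_unlock_read(mapping);

        dax_flush(dax_dev, page_address(pfn_to_page(pfn)),
                  count * PAGE_SIZE);                      /* cache flush */
}
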
1007 * Flush the mapping to the persistent domain within the byte range of [start,
1011 int dax_writeback_mapping_range(struct address_space *mapping, in dax_writeback_mapping_range() argument
1014 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
1015 struct inode *inode = mapping->host; in dax_writeback_mapping_range()
1024 if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL) in dax_writeback_mapping_range()
1029 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
1033 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
1035 mapping_set_error(mapping, ret); in dax_writeback_mapping_range()
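
A filesystem reaches this through its ->writepages method; the call is a one-liner. A wiring sketch modeled on ext4_dax_writepages(), with hypothetical myfs_* names:

/* Sketch: forward writeback of a DAX mapping to the generic helper,
 * supplying the filesystem's dax_device. struct myfs_sb_info and its
 * s_daxdev field are hypothetical stand-ins for the fs's own types.
 */
static int myfs_dax_writepages(struct address_space *mapping,
                               struct writeback_control *wbc)
{
        struct myfs_sb_info *sbi = mapping->host->i_sb->s_fs_info;

        return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}
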
1145 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1146 * If this page is ever written to, we will re-fault and change the mapping to
1168 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole() local
1171 struct inode *inode = mapping->host; in dax_pmd_load_hole()
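
The PTE-sized counterpart of this hole handler is much smaller: map the shared zero page read-only and upgrade on a later write fault. A sketch of its core, assuming this era's dax_load_hole() shape (the xarray entry bookkeeping is elided):

/* Sketch: service a read fault over a hole without allocating pmem.
 * The zero pfn is inserted read-only; a write re-faults and replaces
 * the mapping with real storage.
 */
static vm_fault_t sketch_load_hole(struct vm_fault *vmf)
{
        pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vmf->address));

        /* a DAX_ZERO_PAGE entry is also recorded in the xarray here */
        return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}
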
1526 * @xas: the dax mapping tree of a file
1589 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pte_fault() local
1590 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1592 .inode = mapping->host, in dax_iomap_pte_fault()
1615 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1699 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pmd_fault() local
1700 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1702 .inode = mapping->host, in dax_iomap_pmd_fault()
1737 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
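
Filesystems never invoke the PTE/PMD handlers directly; both are dispatched from dax_iomap_fault(), which ->fault and ->huge_fault route through with the filesystem's iomap_ops. A wiring sketch modeled on ext4/xfs, with a hypothetical myfs_iomap_ops:

/* Sketch: route mmap faults on a DAX file into fs/dax.c. The PMD
 * case is identical with PE_SIZE_PMD via ->huge_fault.
 */
static vm_fault_t myfs_dax_fault(struct vm_fault *vmf)
{
        pfn_t pfn;

        return dax_iomap_fault(vmf, PE_SIZE_PTE, &pfn, NULL,
                               &myfs_iomap_ops);
}

static const struct vm_operations_struct myfs_dax_vm_ops = {
        .fault          = myfs_dax_fault,
        .page_mkwrite   = myfs_dax_fault,
        .pfn_mkwrite    = myfs_dax_fault,
};
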
1823 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite() local
1824 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1835 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1851 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
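
dax_insert_pfn_mkwrite() is the tail of a synchronous (MAP_SYNC) fault: the PTE/PMD handler returns VM_FAULT_NEEDDSYNC instead of installing the entry, and dax_finish_sync_fault() then fsyncs the faulted range and calls the function above. A caller sketch modeled on ext4, with a hypothetical myfs_iomap_ops:

/* Sketch: complete a MAP_SYNC write fault only after the metadata
 * for the faulted extent is durable.
 */
static vm_fault_t myfs_dax_sync_fault(struct vm_fault *vmf,
                                      enum page_entry_size pe_size)
{
        pfn_t pfn;
        vm_fault_t ret;

        ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &myfs_iomap_ops);
        if (ret & VM_FAULT_NEEDDSYNC)
                /* fsyncs the range, then dax_insert_pfn_mkwrite() */
                ret = dax_finish_sync_fault(vmf, pe_size, pfn);
        return ret;
}
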