Lines in fs/dax.c matching the full identifier "mapping" (query full:mapping):
191 * @entry may no longer be the entry at the index in the mapping.
342 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
355 WARN_ON_ONCE(page->mapping); in dax_associate_entry()
356 page->mapping = mapping; in dax_associate_entry()
361 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
373 WARN_ON_ONCE(page->mapping && page->mapping != mapping); in dax_disassociate_entry()
374 page->mapping = NULL; in dax_disassociate_entry()
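The hits at 342-374 are the two helpers that tie a DAX entry's backing pages to the file: dax_associate_entry() points each struct page's ->mapping at the address_space (warning at 355 if the page was already owned), and dax_disassociate_entry() clears it again at 373-374. A hedged reconstruction of the association loop follows; dax_entry_size() and for_each_mapped_pfn() are fs/dax.c-internal helpers of this era, and the exact bookkeeping is a sketch, not a verbatim copy.

static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);	/* the hit at line 355 */
		page->mapping = mapping;	/* the hit at line 356 */
		page->index = index + i++;	/* lets memory-failure find the file offset */
	}
}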
405 /* Ensure page->mapping isn't freed while we look at it */ in dax_lock_page()
408 struct address_space *mapping = READ_ONCE(page->mapping); in dax_lock_page() local
411 if (!mapping || !dax_mapping(mapping)) in dax_lock_page()
422 if (S_ISCHR(mapping->host->i_mode)) in dax_lock_page()
425 xas.xa = &mapping->i_pages; in dax_lock_page()
427 if (mapping != page->mapping) { in dax_lock_page()
449 struct address_space *mapping = page->mapping; in dax_unlock_page() local
450 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
452 if (S_ISCHR(mapping->host->i_mode)) in dax_unlock_page()
488 struct address_space *mapping, unsigned int order) in grab_mapping_entry() argument
530 unmap_mapping_pages(mapping, in grab_mapping_entry()
537 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
540 mapping->nrpages -= PG_PMD_NR; in grab_mapping_entry()
556 mapping->nrpages += 1UL << order; in grab_mapping_entry()
561 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
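grab_mapping_entry() (hits 488-561) finds or creates the locked XArray entry for a fault; when the caller needs a smaller entry where a PMD entry sits, it unmaps the range and drops the PMD entry first (530-540), adjusting nrpages as at 540/556. The retry at 561 is the standard XArray idiom: when a store fails for lack of memory, xas_nomem() preallocates outside the lock and asks the caller to loop. A generic, self-contained sketch of that idiom; the stored value and gfp handling are illustrative.

#include <linux/pagemap.h>
#include <linux/xarray.h>

static int store_shadow_entry(struct address_space *mapping, pgoff_t index,
			      void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, entry);
		xas_unlock_irq(&xas);
		/* returns true when memory was allocated: retry the store */
	} while (xas_nomem(&xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM));

	return xas_error(&xas);
}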
574 * dax_layout_busy_page_range - find first pinned page in @mapping
575 * @mapping: address space to scan for a page with ref count > 1
583 * any page in the mapping is busy, i.e. for DMA, or other
591 struct page *dax_layout_busy_page_range(struct address_space *mapping, in dax_layout_busy_page_range() argument
599 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
607 if (!dax_mapping(mapping) || !mapping_mapped(mapping)) in dax_layout_busy_page_range()
627 unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0); in dax_layout_busy_page_range()
653 struct page *dax_layout_busy_page(struct address_space *mapping) in dax_layout_busy_page() argument
655 return dax_layout_busy_page_range(mapping, 0, LLONG_MAX); in dax_layout_busy_page()
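dax_layout_busy_page_range() (hits 574-627) first unmaps the range (627) so any new access has to fault and take a visible reference, then scans for a page whose refcount is above one, typically pinned for DMA; dax_layout_busy_page() is the whole-file wrapper. Filesystems call it before truncate or hole punch and wait for the pin to drop. A sketch of that caller loop, modeled on the ext4_break_layouts()/xfs_break_dax_layouts() style; myfs_wait_dax_page() is a hypothetical stand-in for the real helpers, which drop the filesystem's mapping/invalidation lock around the sleep.

#include <linux/dax.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

static void myfs_wait_dax_page(struct inode *inode)
{
	/* hypothetical: a real filesystem releases its invalidation lock
	 * here so the DMA user can finish, then retakes it */
	schedule();
}

static int myfs_break_dax_layouts(struct inode *inode)
{
	struct page *page;
	int error;

	do {
		page = dax_layout_busy_page(inode->i_mapping);
		if (!page)
			return 0;	/* nothing is pinned any more */

		error = ___wait_var_event(&page->_refcount,
				atomic_read(&page->_refcount) == 1,
				TASK_INTERRUPTIBLE, 0, 0,
				myfs_wait_dax_page(inode));
	} while (error == 0);

	return error;
}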
659 static int __dax_invalidate_entry(struct address_space *mapping, in __dax_invalidate_entry() argument
662 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
674 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
676 mapping->nrpages -= 1UL << dax_entry_order(entry); in __dax_invalidate_entry()
685 * Delete DAX entry at @index from @mapping. Wait for it
688 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) in dax_delete_mapping_entry() argument
690 int ret = __dax_invalidate_entry(mapping, index, true); in dax_delete_mapping_entry()
706 int dax_invalidate_mapping_entry_sync(struct address_space *mapping, in dax_invalidate_mapping_entry_sync() argument
709 return __dax_invalidate_entry(mapping, index, false); in dax_invalidate_mapping_entry_sync()
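Both exported entry points at 688 and 706 funnel into __dax_invalidate_entry() (hits 659-676); the difference is the trunc flag. dax_delete_mapping_entry() is the truncate path and removes the entry even if it is still tagged dirty or towrite, while dax_invalidate_mapping_entry_sync() is the invalidate_inode_pages2() path and leaves a dirty entry alone. A small illustrative wrapper showing how a caller picks between them; the enum and function name are hypothetical.

#include <linux/dax.h>
#include <linux/fs.h>

enum dax_drop_reason { DAX_DROP_TRUNCATE, DAX_DROP_INVALIDATE };

static int drop_dax_entry(struct address_space *mapping, pgoff_t index,
			  enum dax_drop_reason reason)
{
	if (reason == DAX_DROP_TRUNCATE)
		/* truncate: the blocks are going away, dirty or not */
		return dax_delete_mapping_entry(mapping, index);

	/* invalidation: a dirty entry is kept (returns 0) so its data
	 * still reaches media through dax_writeback_mapping_range() */
	return dax_invalidate_mapping_entry_sync(mapping, index);
}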
745 struct address_space *mapping, struct vm_fault *vmf, in dax_insert_entry() argument
751 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in dax_insert_entry()
755 /* we are replacing a zero page with block mapping */ in dax_insert_entry()
757 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, in dax_insert_entry()
760 unmap_mapping_pages(mapping, index, 1, false); in dax_insert_entry()
768 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
769 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); in dax_insert_entry()
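dax_insert_entry() (hits 745-769) is the fault-side update of the tree: it dirties the inode for writeback (751), and when a read fault previously installed the zero page it unmaps the stale user mappings (755-760) so the process refaults and picks up the real block mapping, then re-points the entry's pages at the file (768-769). The teardown uses unmap_mapping_pages(); a small illustrative wrapper, where MY_PMD_NR/MY_PMD_COLOUR mirror the fs/dax.c-internal PG_PMD_NR/PG_PMD_COLOUR macros seen at 540 and 757.

#include <linux/mm.h>

/* pages covered by one PMD entry, and the index mask to align to it */
#define MY_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
#define MY_PMD_COLOUR	(MY_PMD_NR - 1)

static void zap_old_zero_page_mappings(struct address_space *mapping,
					pgoff_t index, bool pmd_entry)
{
	if (pmd_entry)
		/* zap the whole PMD-aligned range the old entry covered */
		unmap_mapping_pages(mapping, index & ~MY_PMD_COLOUR,
				    MY_PMD_NR, false);
	else
		unmap_mapping_pages(mapping, index, 1, false);
}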
804 static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, in dax_entry_mkclean() argument
812 i_mmap_lock_read(mapping); in dax_entry_mkclean()
813 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { in dax_entry_mkclean()
874 i_mmap_unlock_read(mapping); in dax_entry_mkclean()
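dax_entry_mkclean() (hits 804-874) is called from the writeback path to write-protect every user PTE/PMD that maps a given file page, so the next write refaults and re-dirties the entry. Its skeleton is the standard reverse-mapping walk over the file's interval tree, bracketed by the locking seen at 812 and 874. A self-contained sketch of just that walk; the body only counts mappings, whereas the real function clears the dirty and writable bits under the page table lock.

#include <linux/fs.h>
#include <linux/mm.h>

static int count_user_mappings(struct address_space *mapping, pgoff_t index)
{
	struct vm_area_struct *vma;
	int mappings = 0;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		/* translate the file offset into this VMA's virtual address */
		unsigned long address = vma->vm_start +
			((index - vma->vm_pgoff) << PAGE_SHIFT);

		if (address < vma->vm_start || address >= vma->vm_end)
			continue;
		mappings++;
	}
	i_mmap_unlock_read(mapping);

	return mappings;
}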
878 struct address_space *mapping, void *entry) in dax_writeback_one() argument
884 * A page got tagged dirty in DAX mapping? Something is seriously in dax_writeback_one()
940 dax_entry_mkclean(mapping, index, pfn); in dax_writeback_one()
945 * the pfn mappings are writeprotected and fault waits for mapping in dax_writeback_one()
954 trace_dax_writeback_one(mapping->host, index, count); in dax_writeback_one()
963 * Flush the mapping to the persistent domain within the byte range of [start,
967 int dax_writeback_mapping_range(struct address_space *mapping, in dax_writeback_mapping_range() argument
970 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
971 struct inode *inode = mapping->host; in dax_writeback_mapping_range()
980 if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL) in dax_writeback_mapping_range()
985 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
989 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
991 mapping_set_error(mapping, ret); in dax_writeback_mapping_range()
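dax_writeback_mapping_range() (hits 963-991) is the exported writeback entry point: it tags the range for writeback (985), calls dax_writeback_one() for each entry (989), and records errors on the mapping (991). A DAX-aware filesystem wires it into ->writepages; a minimal sketch in the ext2/xfs style, where myfs_sb_info and its s_daxdev field are hypothetical stand-ins for however the filesystem tracks its struct dax_device.

#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/writeback.h>

/* hypothetical per-superblock info holding the backing dax_device */
struct myfs_sb_info {
	struct dax_device *s_daxdev;
};

static int myfs_dax_writepages(struct address_space *mapping,
			       struct writeback_control *wbc)
{
	struct myfs_sb_info *sbi = mapping->host->i_sb->s_fs_info;

	return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}

static const struct address_space_operations myfs_dax_aops = {
	.writepages	= myfs_dax_writepages,
	/* other operations omitted */
};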
1048 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1049 * If this page is ever written to we will re-fault and change the mapping to
1053 struct address_space *mapping, void **entry, in dax_load_hole() argument
1056 struct inode *inode = mapping->host; in dax_load_hole()
1061 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1073 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole() local
1076 struct inode *inode = mapping->host; in dax_pmd_load_hole()
1089 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
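dax_load_hole() and dax_pmd_load_hole() (hits 1048-1089) handle read faults over holes in sparse files: rather than allocating media, they record a zero-page entry in the tree (1061/1089) and map the shared zero page (or a zeroed PMD-sized page) read-only, so a later write refaults and upgrades to a real block mapping. A sketch of the PTE-sized core using only generic helpers; the tree bookkeeping done by dax_insert_entry() and the tracepoints are omitted.

#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/pgtable.h>

static vm_fault_t map_zero_page_readonly(struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	/* the shared zero page for this address */
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));

	/* write-protected mapping: a write will fault again and be upgraded */
	return vmf_insert_mixed(vmf->vma, vaddr, pfn);
}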
1308 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1370 * @xas: the dax mapping tree of a file
1378 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_fault_iter() local
1395 return dax_load_hole(xas, mapping, entry, vmf); in dax_fault_iter()
1408 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags, in dax_fault_iter()
1427 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pte_fault() local
1428 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1430 .inode = mapping->host, in dax_iomap_pte_fault()
1453 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1537 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_iomap_pmd_fault() local
1538 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1540 .inode = mapping->host, in dax_iomap_pmd_fault()
1575 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1661 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite() local
1662 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1673 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1689 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()