Lines matching refs:mapping

118 	struct address_space *mapping;  member
127 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping, in dax_entry_waitqueue() argument
140 key->mapping = mapping; in dax_entry_waitqueue()
143 hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
154 if (key->mapping != ewait->key.mapping || in wake_exceptional_entry_func()
165 static void dax_wake_mapping_entry_waiter(struct address_space *mapping, in dax_wake_mapping_entry_waiter() argument
171 wq = dax_entry_waitqueue(mapping, index, entry, &key); in dax_wake_mapping_entry_waiter()
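The group above covers dax_entry_waitqueue(), wake_exceptional_entry_func() and dax_wake_mapping_entry_waiter(); these references appear to come from the Linux DAX code (fs/dax.c in its radix-tree era), where waiters for a locked entry are parked on one of a small fixed array of wait queues chosen by hashing the (mapping, index) pair, so waiters on unrelated entries rarely collide. Below is a minimal user-space sketch of that hashing idea only; WAIT_TABLE_BITS, struct bucket and hash_long_sketch() are invented stand-ins, not the kernel's DAX_WAIT_TABLE_BITS, wait_queue_head_t or hash_long().

/*
 * User-space sketch of hashed waitqueue selection.  All names here are
 * stand-ins for illustration, not the kernel code.
 */
#include <stdio.h>

#define WAIT_TABLE_BITS 6
#define WAIT_TABLE_SIZE (1u << WAIT_TABLE_BITS)

struct bucket { int unused; } wait_table[WAIT_TABLE_SIZE];

/* cheap multiplicative hash of a word down to WAIT_TABLE_BITS bits */
static unsigned int hash_long_sketch(unsigned long val, unsigned int bits)
{
	return (unsigned int)((val * 0x9E3779B97F4A7C15ULL) >> (64 - bits));
}

static struct bucket *entry_waitqueue(const void *mapping, unsigned long index)
{
	/* same key construction as dax_entry_waitqueue(): mapping ^ index */
	return &wait_table[hash_long_sketch((unsigned long)mapping ^ index,
					    WAIT_TABLE_BITS)];
}

int main(void)
{
	int m1, m2;	/* two fake "mappings"; only their addresses matter */

	printf("bucket for (m1, 0): %td\n", entry_waitqueue(&m1, 0) - wait_table);
	printf("bucket for (m2, 7): %td\n", entry_waitqueue(&m2, 7) - wait_table);
	return 0;
}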
187 static inline int slot_locked(struct address_space *mapping, void **slot) in slot_locked() argument
190 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); in slot_locked()
197 static inline void *lock_slot(struct address_space *mapping, void **slot) in lock_slot() argument
200 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); in lock_slot()
203 radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry); in lock_slot()
210 static inline void *unlock_slot(struct address_space *mapping, void **slot) in unlock_slot() argument
213 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock); in unlock_slot()
216 radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry); in unlock_slot()
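slot_locked(), lock_slot() and unlock_slot() above keep a lock bit inside the value stored in the radix-tree slot itself, so an entry can be marked locked without any per-entry lock structure. A standalone model of that tagged-word trick, assuming an invented ENTRY_LOCK bit rather than the kernel's lock flag:

/*
 * Standalone model of the lock-bit-in-the-entry trick.  ENTRY_LOCK is an
 * invented bit; the kernel keeps an equivalent flag in the low bits of
 * the word held in the radix-tree slot.
 */
#include <assert.h>
#include <stdbool.h>

#define ENTRY_LOCK 0x1UL

static bool slot_locked(unsigned long entry)
{
	return entry & ENTRY_LOCK;
}

static unsigned long lock_slot(unsigned long entry)
{
	return entry | ENTRY_LOCK;
}

static unsigned long unlock_slot(unsigned long entry)
{
	return entry & ~ENTRY_LOCK;
}

int main(void)
{
	unsigned long slot = 0x1000;	/* pretend this encodes a pfn/sector */

	assert(!slot_locked(slot));
	slot = lock_slot(slot);
	assert(slot_locked(slot));
	slot = unlock_slot(slot);
	assert(slot == 0x1000);
	return 0;
}

The payoff of this encoding is that the lock state travels with the entry and is read and updated under the same i_pages lock that protects the slot itself.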
229 static void *__get_unlocked_mapping_entry(struct address_space *mapping, in __get_unlocked_mapping_entry() argument
242 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, in __get_unlocked_mapping_entry()
246 !slot_locked(mapping, slot)) { in __get_unlocked_mapping_entry()
252 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key); in __get_unlocked_mapping_entry()
255 xa_unlock_irq(&mapping->i_pages); in __get_unlocked_mapping_entry()
258 xa_lock_irq(&mapping->i_pages); in __get_unlocked_mapping_entry()
274 static void *get_unlocked_mapping_entry(struct address_space *mapping, in get_unlocked_mapping_entry() argument
277 return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait); in get_unlocked_mapping_entry()
280 static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index) in unlock_mapping_entry() argument
284 xa_lock_irq(&mapping->i_pages); in unlock_mapping_entry()
285 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot); in unlock_mapping_entry()
287 !slot_locked(mapping, slot))) { in unlock_mapping_entry()
288 xa_unlock_irq(&mapping->i_pages); in unlock_mapping_entry()
291 unlock_slot(mapping, slot); in unlock_mapping_entry()
292 xa_unlock_irq(&mapping->i_pages); in unlock_mapping_entry()
293 dax_wake_mapping_entry_waiter(mapping, index, entry, false); in unlock_mapping_entry()
296 static void put_locked_mapping_entry(struct address_space *mapping, in put_locked_mapping_entry() argument
299 unlock_mapping_entry(mapping, index); in put_locked_mapping_entry()
306 static void put_unlocked_mapping_entry(struct address_space *mapping, in put_unlocked_mapping_entry() argument
313 dax_wake_mapping_entry_waiter(mapping, index, entry, false); in put_unlocked_mapping_entry()
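The __get_unlocked_mapping_entry()/unlock_mapping_entry()/put_unlocked_mapping_entry() group above implements a sleep-and-retry protocol: the caller holds the i_pages lock, and if the entry is locked it drops that lock, sleeps on the hashed waitqueue, reacquires the lock and retries; whoever unlocks the entry wakes the queue. A rough user-space model of that protocol follows, with a pthread mutex standing in for xa_lock_irq() and a condition variable for the DAX waitqueue; it is illustrative only, not the kernel implementation (which uses exclusive wakeups and per-entry wait keys).

/*
 * Rough model of the wait/wake protocol for one entry.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t entry_waitq = PTHREAD_COND_INITIALIZER;
static bool entry_locked;

/* Called with tree_lock held; returns with tree_lock held, entry unlocked. */
static void get_unlocked_entry(void)
{
	while (entry_locked)
		pthread_cond_wait(&entry_waitq, &tree_lock); /* drops + retakes lock */
}

/* Counterpart of unlock_mapping_entry() + dax_wake_mapping_entry_waiter(). */
static void put_locked_entry(void)
{
	pthread_mutex_lock(&tree_lock);
	entry_locked = false;
	pthread_mutex_unlock(&tree_lock);
	pthread_cond_broadcast(&entry_waitq);
}

int main(void)
{
	pthread_mutex_lock(&tree_lock);
	get_unlocked_entry();		/* entry free: returns immediately */
	entry_locked = true;		/* lock_slot() */
	pthread_mutex_unlock(&tree_lock);

	put_locked_entry();		/* unlock and wake any waiters */
	puts("entry cycled through locked and unlocked");
	return 0;
}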
346 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
359 WARN_ON_ONCE(page->mapping); in dax_associate_entry()
360 page->mapping = mapping; in dax_associate_entry()
365 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
377 WARN_ON_ONCE(page->mapping && page->mapping != mapping); in dax_disassociate_entry()
378 page->mapping = NULL; in dax_disassociate_entry()
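dax_associate_entry() and dax_disassociate_entry() above point every page backing an entry at its owning address_space (page->mapping, page->index) and clear that back-pointer on teardown, warning if a page is ever claimed twice. A small sketch of that bookkeeping, with struct fake_page and ENTRY_PAGES as invented stand-ins for the real struct pages behind the entry:

/*
 * Sketch of the page <-> mapping bookkeeping.  Types and sizes are
 * invented for illustration.
 */
#include <assert.h>
#include <stddef.h>

struct fake_page {
	void *mapping;
	unsigned long index;
};

#define ENTRY_PAGES 8	/* e.g. one PMD-sized entry worth of pages */

static struct fake_page entry_pages[ENTRY_PAGES];

static void associate_entry(void *mapping, unsigned long first_index)
{
	for (size_t i = 0; i < ENTRY_PAGES; i++) {
		assert(entry_pages[i].mapping == NULL);	/* WARN_ON_ONCE(page->mapping) */
		entry_pages[i].mapping = mapping;
		entry_pages[i].index = first_index + i;
	}
}

static void disassociate_entry(void *mapping)
{
	for (size_t i = 0; i < ENTRY_PAGES; i++) {
		assert(entry_pages[i].mapping == mapping);
		entry_pages[i].mapping = NULL;
		entry_pages[i].index = 0;
	}
}

int main(void)
{
	int owner;	/* stands in for an address_space */

	associate_entry(&owner, 100);
	disassociate_entry(&owner);
	return 0;
}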
415 struct address_space *mapping; in dax_lock_mapping_entry() local
419 mapping = READ_ONCE(page->mapping); in dax_lock_mapping_entry()
421 if (!dax_mapping(mapping)) in dax_lock_mapping_entry()
431 inode = mapping->host; in dax_lock_mapping_entry()
437 xa_lock_irq(&mapping->i_pages); in dax_lock_mapping_entry()
438 if (mapping != page->mapping) { in dax_lock_mapping_entry()
439 xa_unlock_irq(&mapping->i_pages); in dax_lock_mapping_entry()
444 entry = __get_unlocked_mapping_entry(mapping, index, &slot, in dax_lock_mapping_entry()
447 xa_unlock_irq(&mapping->i_pages); in dax_lock_mapping_entry()
450 xa_unlock_irq(&mapping->i_pages); in dax_lock_mapping_entry()
454 lock_slot(mapping, slot); in dax_lock_mapping_entry()
456 xa_unlock_irq(&mapping->i_pages); in dax_lock_mapping_entry()
466 struct address_space *mapping = page->mapping; in dax_unlock_mapping_entry() local
467 struct inode *inode = mapping->host; in dax_unlock_mapping_entry()
472 unlock_mapping_entry(mapping, page->index); in dax_unlock_mapping_entry()
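dax_lock_mapping_entry() above reads page->mapping without any lock, takes the i_pages lock, and then re-checks the pointer, retrying if truncate changed it in the meantime. A standalone illustration of that recheck-under-lock pattern, using a plain mutex and a fake page; the actual entry lookup and locking are elided:

/*
 * Recheck-under-lock pattern, user-space sketch.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_page {
	void *mapping;
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static void *lock_entry_of_page(struct fake_page *page)
{
	for (;;) {
		void *mapping = page->mapping;	/* READ_ONCE(page->mapping) */

		if (!mapping)
			return NULL;		/* page no longer belongs to a file */

		pthread_mutex_lock(&tree_lock);
		if (mapping != page->mapping) {	/* raced with truncate: retry */
			pthread_mutex_unlock(&tree_lock);
			continue;
		}
		/* ...look up and lock the entry here, waiting if needed... */
		pthread_mutex_unlock(&tree_lock);
		return mapping;
	}
}

int main(void)
{
	int as;				/* stands in for an address_space */
	struct fake_page page = { .mapping = &as };

	printf("locked entry under mapping %p\n", lock_entry_of_page(&page));
	return 0;
}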
501 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index, in grab_mapping_entry() argument
508 xa_lock_irq(&mapping->i_pages); in grab_mapping_entry()
509 entry = get_unlocked_mapping_entry(mapping, index, &slot); in grab_mapping_entry()
519 put_unlocked_mapping_entry(mapping, index, in grab_mapping_entry()
542 entry = lock_slot(mapping, slot); in grab_mapping_entry()
545 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
552 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, in grab_mapping_entry()
556 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM); in grab_mapping_entry()
559 put_locked_mapping_entry(mapping, index); in grab_mapping_entry()
562 xa_lock_irq(&mapping->i_pages); in grab_mapping_entry()
571 entry = __radix_tree_lookup(&mapping->i_pages, index, in grab_mapping_entry()
575 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
581 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
582 radix_tree_delete(&mapping->i_pages, index); in grab_mapping_entry()
583 mapping->nrexceptional--; in grab_mapping_entry()
584 dax_wake_mapping_entry_waiter(mapping, index, entry, in grab_mapping_entry()
590 err = __radix_tree_insert(&mapping->i_pages, index, in grab_mapping_entry()
594 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
606 mapping->nrexceptional++; in grab_mapping_entry()
607 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
610 entry = lock_slot(mapping, slot); in grab_mapping_entry()
612 xa_unlock_irq(&mapping->i_pages); in grab_mapping_entry()
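grab_mapping_entry() above returns the entry for an index in locked state, creating a locked empty placeholder (and bumping mapping->nrexceptional) when nothing is there yet, and resolving PMD/PTE size conflicts by deleting and re-trying. The following is a deliberately minimal model of just the find-or-insert-locked flow; the "tree" is a flat array, ENTRY_EMPTY and ENTRY_LOCK are invented bits, and the waiting and size-conflict handling are omitted.

/*
 * Minimal find-or-insert-locked model.
 */
#include <pthread.h>
#include <stdio.h>

#define NINDEX		16
#define ENTRY_LOCK	0x1UL
#define ENTRY_EMPTY	0x2UL

static unsigned long tree[NINDEX];	/* 0 == no entry at that index */
static unsigned long nrexceptional;
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long grab_entry(unsigned long index)
{
	unsigned long entry;

	pthread_mutex_lock(&tree_lock);
	if (!tree[index]) {
		/* no entry yet: insert a locked empty placeholder */
		tree[index] = ENTRY_EMPTY | ENTRY_LOCK;
		nrexceptional++;
	} else {
		/* entry exists: lock it (the real code may sleep here first) */
		tree[index] |= ENTRY_LOCK;
	}
	entry = tree[index];
	pthread_mutex_unlock(&tree_lock);
	return entry;		/* always returned locked */
}

int main(void)
{
	unsigned long entry = grab_entry(3);

	printf("entry %#lx, nrexceptional %lu\n", entry, nrexceptional);
	return 0;
}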
631 struct page *dax_layout_busy_page(struct address_space *mapping) in dax_layout_busy_page() argument
645 if (!dax_mapping(mapping) || !mapping_mapped(mapping)) in dax_layout_busy_page()
664 unmap_mapping_range(mapping, 0, 0, 1); in dax_layout_busy_page()
666 while (index < end && pagevec_lookup_entries(&pvec, mapping, index, in dax_layout_busy_page()
683 xa_lock_irq(&mapping->i_pages); in dax_layout_busy_page()
684 entry = get_unlocked_mapping_entry(mapping, index, NULL); in dax_layout_busy_page()
694 put_unlocked_mapping_entry(mapping, index, entry); in dax_layout_busy_page()
695 xa_unlock_irq(&mapping->i_pages); in dax_layout_busy_page()
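dax_layout_busy_page() above scans the mapping for any DAX page that still has an elevated reference count (for example from get_user_pages/DMA), after unmapping the range so that new references must go through a fault. A sketch of just the per-page busy check, with a plain integer standing in for page_ref_count():

/*
 * Per-page busy check, user-space sketch.
 */
#include <stdio.h>

struct fake_page {
	int refcount;
};

static struct fake_page pages[4] = { {1}, {1}, {3}, {1} };

static struct fake_page *layout_busy_page(void)
{
	for (int i = 0; i < 4; i++)
		if (pages[i].refcount > 1)	/* extra ref: get_user_pages, DMA, ... */
			return &pages[i];
	return NULL;				/* layout may be changed safely */
}

int main(void)
{
	struct fake_page *busy = layout_busy_page();

	if (busy)
		printf("busy page index %td\n", busy - pages);
	else
		printf("no busy pages\n");
	return 0;
}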
717 static int __dax_invalidate_mapping_entry(struct address_space *mapping, in __dax_invalidate_mapping_entry() argument
722 struct radix_tree_root *pages = &mapping->i_pages; in __dax_invalidate_mapping_entry()
725 entry = get_unlocked_mapping_entry(mapping, index, NULL); in __dax_invalidate_mapping_entry()
732 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_mapping_entry()
734 mapping->nrexceptional--; in __dax_invalidate_mapping_entry()
737 put_unlocked_mapping_entry(mapping, index, entry); in __dax_invalidate_mapping_entry()
745 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) in dax_delete_mapping_entry() argument
747 int ret = __dax_invalidate_mapping_entry(mapping, index, true); in dax_delete_mapping_entry()
763 int dax_invalidate_mapping_entry_sync(struct address_space *mapping, in dax_invalidate_mapping_entry_sync() argument
766 return __dax_invalidate_mapping_entry(mapping, index, false); in dax_invalidate_mapping_entry_sync()
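__dax_invalidate_mapping_entry() and its two wrappers above differ only in policy: truncate (dax_delete_mapping_entry) removes the entry unconditionally, while a plain invalidate (dax_invalidate_mapping_entry_sync) refuses to drop an entry still tagged dirty or towrite. A compact model of that decision, with booleans standing in for the radix-tree tags:

/*
 * Invalidate-vs-truncate decision, simplified.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_entry {
	bool present;
	bool dirty;
	bool towrite;
};

static unsigned long nrexceptional = 1;

static int invalidate_entry(struct fake_entry *e, bool trunc)
{
	if (!e->present)
		return 0;
	if (!trunc && (e->dirty || e->towrite))
		return 0;		/* still needs writeback: keep it */
	e->present = false;		/* radix_tree_delete() */
	nrexceptional--;
	return 1;
}

int main(void)
{
	struct fake_entry e = { .present = true, .dirty = true };

	printf("invalidate: %d\n", invalidate_entry(&e, false));	/* kept: dirty */
	printf("truncate:   %d\n", invalidate_entry(&e, true));	/* removed */
	return 0;
}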
802 static void *dax_insert_mapping_entry(struct address_space *mapping, in dax_insert_mapping_entry() argument
807 struct radix_tree_root *pages = &mapping->i_pages; in dax_insert_mapping_entry()
813 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); in dax_insert_mapping_entry()
818 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, in dax_insert_mapping_entry()
821 unmap_mapping_pages(mapping, vmf->pgoff, 1, false); in dax_insert_mapping_entry()
827 dax_disassociate_entry(entry, mapping, false); in dax_insert_mapping_entry()
828 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); in dax_insert_mapping_entry()
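dax_insert_mapping_entry() above installs the entry for a freshly mapped block: a zero-page/hole entry being replaced is unmapped first, page ownership moves from the old entry to the new one, and the inode/slot are marked dirty on a write fault. A simplified single-slot model follows; ZERO_PAGE_ENTRY is an invented marker, the model always replaces the slot, and the unmap/associate steps are only comments.

/*
 * Single-slot model of installing a new entry.
 */
#include <stdbool.h>
#include <stdio.h>

#define ZERO_PAGE_ENTRY	0x2UL

static unsigned long slot;	/* the one "radix tree" slot */
static bool slot_dirty;

static void insert_entry(unsigned long new_entry, bool write_fault)
{
	if (slot == ZERO_PAGE_ENTRY) {
		/* unmap_mapping_pages(): tear down mappings of the hole first */
	}
	/* dax_disassociate_entry(old) + dax_associate_entry(new) go here */
	slot = new_entry;		/* radix_tree_replace_slot() */
	if (write_fault)
		slot_dirty = true;	/* radix_tree_tag_set(..., DIRTY) */
}

int main(void)
{
	slot = ZERO_PAGE_ENTRY;		/* a read fault mapped the zero page */
	insert_entry(0x1000, true);	/* a write fault now maps real storage */
	printf("slot %#lx, dirty %d\n", slot, slot_dirty);
	return 0;
}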
869 static void dax_mapping_entry_mkclean(struct address_space *mapping, in dax_mapping_entry_mkclean() argument
877 i_mmap_lock_read(mapping); in dax_mapping_entry_mkclean()
878 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { in dax_mapping_entry_mkclean()
937 i_mmap_unlock_read(mapping); in dax_mapping_entry_mkclean()
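dax_mapping_entry_mkclean() above walks every VMA that maps the index (via the i_mmap interval tree) and write-protects and cleans the corresponding PTE/PMD, so the next write must fault and re-dirty the entry before the data can change again. A toy version of that effect over an array of fake PTEs; the real code uses page_vma_mapped_walk(), pte_wrprotect()/pte_mkclean() and TLB flushes:

/*
 * Clean-and-write-protect pass, toy version.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_pte {
	bool writable;
	bool dirty;
};

static struct fake_pte ptes[3] = { {true, true}, {false, false}, {true, true} };

static void entry_mkclean(void)
{
	for (int i = 0; i < 3; i++) {		/* vma_interval_tree_foreach() */
		if (!ptes[i].writable && !ptes[i].dirty)
			continue;		/* already clean and read-only */
		ptes[i].writable = false;	/* pte_wrprotect() */
		ptes[i].dirty = false;		/* pte_mkclean() */
	}
}

int main(void)
{
	entry_mkclean();
	puts("all mappings write-protected and clean");
	return 0;
}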
941 struct address_space *mapping, pgoff_t index, void *entry) in dax_writeback_one() argument
943 struct radix_tree_root *pages = &mapping->i_pages; in dax_writeback_one()
957 entry2 = get_unlocked_mapping_entry(mapping, index, &slot); in dax_writeback_one()
978 entry = lock_slot(mapping, slot); in dax_writeback_one()
999 dax_mapping_entry_mkclean(mapping, index, pfn); in dax_writeback_one()
1010 trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT); in dax_writeback_one()
1011 put_locked_mapping_entry(mapping, index); in dax_writeback_one()
1015 put_unlocked_mapping_entry(mapping, index, entry2); in dax_writeback_one()
1025 int dax_writeback_mapping_range(struct address_space *mapping, in dax_writeback_mapping_range() argument
1028 struct inode *inode = mapping->host; in dax_writeback_mapping_range()
1039 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) in dax_writeback_mapping_range()
1051 tag_pages_for_writeback(mapping, start_index, end_index); in dax_writeback_mapping_range()
1055 pvec.nr = find_get_entries_tag(mapping, start_index, in dax_writeback_mapping_range()
1068 ret = dax_writeback_one(dax_dev, mapping, indices[i], in dax_writeback_mapping_range()
1071 mapping_set_error(mapping, ret); in dax_writeback_mapping_range()
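dax_writeback_one() and dax_writeback_mapping_range() above implement tag-driven writeback: dirty entries in the range are first tagged towrite, then found by tag, flushed from the CPU caches, write-protected via dax_mapping_entry_mkclean(), and untagged. The loop below models only the tag-snapshot-then-flush structure; booleans replace the radix-tree tags, a printf replaces the cache flush, and the exact order of tag clearing in the kernel is not reproduced.

/*
 * Tag-driven writeback, structural sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define NINDEX 8

static bool dirty[NINDEX] = { false, true, false, true };
static bool towrite[NINDEX];

static void writeback_range(unsigned long start, unsigned long end)
{
	/* tag_pages_for_writeback(): mark what this pass will write */
	for (unsigned long i = start; i <= end && i < NINDEX; i++)
		towrite[i] = dirty[i];

	/* find_get_entries_tag() + dax_writeback_one() for each hit */
	for (unsigned long i = start; i <= end && i < NINDEX; i++) {
		if (!towrite[i])
			continue;
		printf("flushing entry %lu\n", i);	/* flush CPU caches */
		towrite[i] = false;
		dirty[i] = false;			/* clear the tags */
	}
}

int main(void)
{
	writeback_range(0, NINDEX - 1);
	return 0;
}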
1128 static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry, in dax_load_hole() argument
1131 struct inode *inode = mapping->host; in dax_load_hole()
1136 dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, in dax_load_hole()
1294 struct address_space *mapping = iocb->ki_filp->f_mapping; in dax_iomap_rw() local
1295 struct inode *inode = mapping->host; in dax_iomap_rw()
1344 struct address_space *mapping = vma->vm_file->f_mapping; in dax_iomap_pte_fault() local
1345 struct inode *inode = mapping->host; in dax_iomap_pte_fault()
1371 entry = grab_mapping_entry(mapping, vmf->pgoff, 0); in dax_iomap_pte_fault()
1446 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1474 ret = dax_load_hole(mapping, entry, vmf); in dax_iomap_pte_fault()
1501 put_locked_mapping_entry(mapping, vmf->pgoff); in dax_iomap_pte_fault()
1511 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_pmd_load_hole() local
1513 struct inode *inode = mapping->host; in dax_pmd_load_hole()
1526 ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn, in dax_pmd_load_hole()
1551 struct address_space *mapping = vma->vm_file->f_mapping; in dax_iomap_pmd_fault() local
1556 struct inode *inode = mapping->host; in dax_iomap_pmd_fault()
1610 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); in dax_iomap_pmd_fault()
1647 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
1695 put_locked_mapping_entry(mapping, pgoff); in dax_iomap_pmd_fault()
1754 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in dax_insert_pfn_mkwrite() local
1759 xa_lock_irq(&mapping->i_pages); in dax_insert_pfn_mkwrite()
1760 entry = get_unlocked_mapping_entry(mapping, index, &slot); in dax_insert_pfn_mkwrite()
1765 put_unlocked_mapping_entry(mapping, index, entry); in dax_insert_pfn_mkwrite()
1766 xa_unlock_irq(&mapping->i_pages); in dax_insert_pfn_mkwrite()
1767 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, in dax_insert_pfn_mkwrite()
1771 radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY); in dax_insert_pfn_mkwrite()
1772 entry = lock_slot(mapping, slot); in dax_insert_pfn_mkwrite()
1773 xa_unlock_irq(&mapping->i_pages); in dax_insert_pfn_mkwrite()
1787 put_locked_mapping_entry(mapping, index); in dax_insert_pfn_mkwrite()
1788 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); in dax_insert_pfn_mkwrite()
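dax_insert_pfn_mkwrite() above handles a write fault on an entry that was previously mapped read-only for a synchronous fault: if the entry is still present it is re-tagged dirty and the pfn is reinserted writably, otherwise the fault is simply retried. A minimal model of that decision, with booleans for the tag and the entry state and the PTE insertion left as a comment:

/*
 * mkwrite decision, minimal model.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_entry {
	bool present;
	bool dirty;
};

static int pfn_mkwrite(struct fake_entry *e)
{
	if (!e->present)
		return -1;	/* like VM_FAULT_NOPAGE: let the fault be retried */
	e->dirty = true;	/* radix_tree_tag_set(..., PAGECACHE_TAG_DIRTY) */
	/* vmf_insert_mixed_mkwrite() would map the pfn writably here */
	return 0;
}

int main(void)
{
	struct fake_entry e = { .present = true };
	int ret = pfn_mkwrite(&e);

	printf("mkwrite %d, dirty %d\n", ret, e.dirty);
	return 0;
}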