Lines matching refs: xas (fs/dax.c)
157 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas, in dax_entry_waitqueue() argument
161 unsigned long index = xas->xa_index; in dax_entry_waitqueue()
170 key->xa = xas->xa; in dax_entry_waitqueue()
173 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
195 static void dax_wake_entry(struct xa_state *xas, void *entry, in dax_wake_entry() argument
201 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
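The two functions above pick a per-entry wait queue by hashing the XArray pointer against the entry's index. A minimal sketch of that hashed-waitqueue idea follows; the table name, its size and the helper are hypothetical, and the real key structure lives in fs/dax.c.

#include <linux/hash.h>
#include <linux/wait.h>
#include <linux/xarray.h>

#define DEMO_WAIT_TABLE_BITS	6
#define DEMO_WAIT_TABLE_ENTRIES	(1 << DEMO_WAIT_TABLE_BITS)

/* Each head must be set up with init_waitqueue_head() at init time. */
static wait_queue_head_t demo_wait_table[DEMO_WAIT_TABLE_ENTRIES];

/* Pick a wait queue by hashing the XArray pointer against the entry index. */
static wait_queue_head_t *demo_entry_waitqueue(struct xa_state *xas)
{
	unsigned long hash = hash_long((unsigned long)xas->xa ^ xas->xa_index,
				       DEMO_WAIT_TABLE_BITS);

	return demo_wait_table + hash;
}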
223 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) in get_unlocked_entry() argument
233 entry = xas_find_conflict(xas); in get_unlocked_entry()
241 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_unlocked_entry()
244 xas_unlock_irq(xas); in get_unlocked_entry()
245 xas_reset(xas); in get_unlocked_entry()
248 xas_lock_irq(xas); in get_unlocked_entry()
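get_unlocked_entry() sleeps until a locked entry is released, then re-walks the tree. A sketch of the drop-lock/sleep/relock/reset step visible above, assuming the caller already chose a wait queue head; the name is hypothetical and the real code uses its own wait-queue entry type.

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/xarray.h>

/* Sleep until the current slot's holder wakes us, then restart the walk. */
static void demo_wait_for_slot(struct xa_state *xas, wait_queue_head_t *wq)
{
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(wq, &wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);		/* never sleep with the xa_lock held */
	schedule();			/* woken by whoever unlocks the entry */
	finish_wait(wq, &wait);
	xas_lock_irq(xas);
	xas_reset(xas);			/* the tree may have changed; re-walk */
}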
257 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
265 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
273 xas_unlock_irq(xas); in wait_entry_unlocked()
278 static void put_unlocked_entry(struct xa_state *xas, void *entry, in put_unlocked_entry() argument
282 dax_wake_entry(xas, entry, mode); in put_unlocked_entry()
290 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
295 xas_reset(xas); in dax_unlock_entry()
296 xas_lock_irq(xas); in dax_unlock_entry()
297 old = xas_store(xas, entry); in dax_unlock_entry()
298 xas_unlock_irq(xas); in dax_unlock_entry()
300 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_unlock_entry()
306 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
309 return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); in dax_lock_entry()
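dax_lock_entry() and dax_unlock_entry() encode the lock state inside the value entry itself. A sketch of that pattern with a hypothetical DEMO_LOCKED bit (the actual flag layout is defined in fs/dax.c); the real unlock path also wakes waiters via dax_wake_entry().

#include <linux/xarray.h>

#define DEMO_LOCKED	(1UL << 0)	/* hypothetical lock bit in the value */

/* Re-store the same value entry with the lock bit set; caller holds xa_lock. */
static void *demo_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);

	return xas_store(xas, xa_mk_value(v | DEMO_LOCKED));
}

/* Store the unlocked value back, which clears the lock bit again. */
static void demo_unlock_entry(struct xa_state *xas, void *entry)
{
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_unlock_irq(xas);
}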
433 XA_STATE(xas, NULL, 0); in dax_lock_page()
456 xas.xa = &mapping->i_pages; in dax_lock_page()
457 xas_lock_irq(&xas); in dax_lock_page()
459 xas_unlock_irq(&xas); in dax_lock_page()
462 xas_set(&xas, page->index); in dax_lock_page()
463 entry = xas_load(&xas); in dax_lock_page()
466 wait_entry_unlocked(&xas, entry); in dax_lock_page()
470 dax_lock_entry(&xas, entry); in dax_lock_page()
471 xas_unlock_irq(&xas); in dax_lock_page()
481 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
486 dax_unlock_entry(&xas, (void *)cookie); in dax_unlock_page()
501 XA_STATE(xas, NULL, 0); in dax_lock_mapping_entry()
510 xas.xa = &mapping->i_pages; in dax_lock_mapping_entry()
511 xas_lock_irq(&xas); in dax_lock_mapping_entry()
512 xas_set(&xas, index); in dax_lock_mapping_entry()
513 entry = xas_load(&xas); in dax_lock_mapping_entry()
516 wait_entry_unlocked(&xas, entry); in dax_lock_mapping_entry()
532 dax_lock_entry(&xas, entry); in dax_lock_mapping_entry()
534 xas_unlock_irq(&xas); in dax_lock_mapping_entry()
544 XA_STATE(xas, &mapping->i_pages, index); in dax_unlock_mapping_entry()
549 dax_unlock_entry(&xas, (void *)cookie); in dax_unlock_mapping_entry()
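dax_lock_page() and dax_lock_mapping_entry() both declare the xa_state before the target mapping is known, then bind it later with xas.xa and xas_set(). A sketch of that late-binding shape, with a hypothetical name and the entry locking reduced to a plain load.

#include <linux/xarray.h>

/* Bind an xa_state to an array and index discovered after declaration. */
static void *demo_lookup_late_bound(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	xas.xa = xa;			/* point the state at the real array */
	xas_lock_irq(&xas);
	xas_set(&xas, index);		/* and at the index we care about */
	entry = xas_load(&xas);
	xas_unlock_irq(&xas);

	return entry;
}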
581 static void *grab_mapping_entry(struct xa_state *xas, in grab_mapping_entry() argument
584 unsigned long index = xas->xa_index; in grab_mapping_entry()
590 xas_lock_irq(xas); in grab_mapping_entry()
591 entry = get_unlocked_entry(xas, order); in grab_mapping_entry()
597 xas_set_err(xas, -EIO); in grab_mapping_entry()
615 dax_lock_entry(xas, entry); in grab_mapping_entry()
623 xas_unlock_irq(xas); in grab_mapping_entry()
625 xas->xa_index & ~PG_PMD_COLOUR, in grab_mapping_entry()
627 xas_reset(xas); in grab_mapping_entry()
628 xas_lock_irq(xas); in grab_mapping_entry()
632 xas_store(xas, NULL); /* undo the PMD join */ in grab_mapping_entry()
633 dax_wake_entry(xas, entry, WAKE_ALL); in grab_mapping_entry()
636 xas_set(xas, index); in grab_mapping_entry()
640 dax_lock_entry(xas, entry); in grab_mapping_entry()
647 dax_lock_entry(xas, entry); in grab_mapping_entry()
648 if (xas_error(xas)) in grab_mapping_entry()
654 xas_unlock_irq(xas); in grab_mapping_entry()
655 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
657 if (xas->xa_node == XA_ERROR(-ENOMEM)) in grab_mapping_entry()
659 if (xas_error(xas)) in grab_mapping_entry()
663 xas_unlock_irq(xas); in grab_mapping_entry()
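grab_mapping_entry() ends with the usual XArray allocate-and-retry idiom: if the store failed for lack of memory, xas_nomem() allocates outside the lock and asks for another pass. A sketch with hypothetical names and GFP_NOIO standing in for the mapping's gfp mask.

#include <linux/gfp.h>
#include <linux/xarray.h>

/* Store a small value at @index, retrying if a node had to be allocated. */
static int demo_store_value(struct xarray *xa, unsigned long index,
			    unsigned long val)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, xa_mk_value(val));
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_NOIO));

	return xas_error(&xas);
}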
693 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
723 xas_lock_irq(&xas); in dax_layout_busy_page_range()
724 xas_for_each(&xas, entry, end_idx) { in dax_layout_busy_page_range()
728 entry = get_unlocked_entry(&xas, 0); in dax_layout_busy_page_range()
731 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_layout_busy_page_range()
737 xas_pause(&xas); in dax_layout_busy_page_range()
738 xas_unlock_irq(&xas); in dax_layout_busy_page_range()
740 xas_lock_irq(&xas); in dax_layout_busy_page_range()
742 xas_unlock_irq(&xas); in dax_layout_busy_page_range()
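dax_layout_busy_page_range() walks a range with xas_for_each() and periodically pauses the iteration so the lock can be dropped. A sketch of that loop shape; the batch size and the counting are placeholders.

#include <linux/sched.h>
#include <linux/xarray.h>

/* Count entries in [start, end], dropping the lock every so often. */
static unsigned long demo_count_entries(struct xarray *xa,
					unsigned long start, unsigned long end)
{
	XA_STATE(xas, xa, start);
	unsigned long count = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end) {
		count++;
		if (count % 64 == 0) {		/* arbitrary batch size */
			xas_pause(&xas);	/* make the iteration resumable */
			xas_unlock_irq(&xas);
			cond_resched();
			xas_lock_irq(&xas);
		}
	}
	xas_unlock_irq(&xas);

	return count;
}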
756 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
760 xas_lock_irq(&xas); in __dax_invalidate_entry()
761 entry = get_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
765 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || in __dax_invalidate_entry()
766 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) in __dax_invalidate_entry()
769 xas_store(&xas, NULL); in __dax_invalidate_entry()
773 put_unlocked_entry(&xas, entry, WAKE_ALL); in __dax_invalidate_entry()
774 xas_unlock_irq(&xas); in __dax_invalidate_entry()
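__dax_invalidate_entry() refuses to drop an entry that is still marked dirty or queued for writeback, and erases it by storing NULL. A sketch of that check, using the pagecache mark names from the listing; the entry locking and wakeup of the real function are omitted.

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Remove the entry at @index unless it is dirty or about to be written. */
static int demo_try_remove(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	int ret = 0;

	xas_lock_irq(&xas);
	if (xas_load(&xas) &&
	    !xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) &&
	    !xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)) {
		xas_store(&xas, NULL);
		ret = 1;
	}
	xas_unlock_irq(&xas);

	return ret;
}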
856 static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf, in dax_insert_entry() argument
869 unsigned long index = xas->xa_index; in dax_insert_entry()
878 xas_reset(xas); in dax_insert_entry()
879 xas_lock_irq(xas); in dax_insert_entry()
894 old = dax_lock_entry(xas, new_entry); in dax_insert_entry()
899 xas_load(xas); /* Walk the xa_state */ in dax_insert_entry()
903 xas_set_mark(xas, PAGECACHE_TAG_DIRTY); in dax_insert_entry()
906 xas_set_mark(xas, PAGECACHE_TAG_TOWRITE); in dax_insert_entry()
908 xas_unlock_irq(xas); in dax_insert_entry()
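dax_insert_entry() stores the new entry and sets the dirty/towrite marks inside one xa_lock section, so writeback can never see a marked slot without an entry behind it. A minimal sketch of that step, with the entry construction and the PMD handling left out.

#include <linux/pagemap.h>
#include <linux/types.h>
#include <linux/xarray.h>

/* Store @entry at the xas position and mark it dirty in the same critical section. */
static void demo_insert_dirty(struct xa_state *xas, void *entry, bool dirty)
{
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
	xas_unlock_irq(xas);
}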
912 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, in dax_writeback_one() argument
929 entry = get_unlocked_entry(xas, 0); in dax_writeback_one()
948 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) in dax_writeback_one()
953 dax_lock_entry(xas, entry); in dax_writeback_one()
962 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); in dax_writeback_one()
963 xas_unlock_irq(xas); in dax_writeback_one()
974 index = xas->xa_index & ~(count - 1); in dax_writeback_one()
992 xas_reset(xas); in dax_writeback_one()
993 xas_lock_irq(xas); in dax_writeback_one()
994 xas_store(xas, entry); in dax_writeback_one()
995 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); in dax_writeback_one()
996 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
1002 put_unlocked_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
1014 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
1027 trace_dax_writeback_range(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
1029 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
1031 xas_lock_irq(&xas); in dax_writeback_mapping_range()
1032 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
1033 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
1041 xas_pause(&xas); in dax_writeback_mapping_range()
1042 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
1044 xas_lock_irq(&xas); in dax_writeback_mapping_range()
1046 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
1047 trace_dax_writeback_range_done(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
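dax_writeback_mapping_range() visits only the slots tagged PAGECACHE_TAG_TOWRITE and clears the tag once each entry has been handled. A sketch of that marked-iteration shape, with the actual flushing and the per-entry locking elided.

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Walk only the to-write entries in [start, end] and clear their tag. */
static void demo_writeback_walk(struct xarray *xa, unsigned long start,
				unsigned long end)
{
	XA_STATE(xas, xa, start);
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end, PAGECACHE_TAG_TOWRITE) {
		/* ...flush whatever @entry describes here... */
		xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
	}
	xas_unlock_irq(&xas);
}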
1149 static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_load_hole() argument
1157 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE); in dax_load_hole()
1165 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1184 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, in dax_pmd_load_hole()
1217 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1532 struct xa_state *xas, void **entry, bool pmd) in dax_fault_iter() argument
1537 loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; in dax_fault_iter()
1551 return dax_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1552 return dax_pmd_load_hole(xas, vmf, iter, entry); in dax_fault_iter()
1564 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); in dax_fault_iter()
1590 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1615 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1638 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); in dax_iomap_pte_fault()
1656 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
1663 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, in dax_fault_check_fallback() argument
1690 if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) in dax_fault_check_fallback()
1700 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1723 if (xas.xa_index >= max_pgoff) { in dax_iomap_pmd_fault()
1728 if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) in dax_iomap_pmd_fault()
1737 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1755 iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; in dax_iomap_pmd_fault()
1760 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); in dax_iomap_pmd_fault()
1766 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
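dax_iomap_pmd_fault() declares its state with XA_STATE_ORDER() so it addresses a naturally aligned multi-index slot rather than a single page index. A sketch assuming a hypothetical DEMO_PMD_ORDER of 9 (2MB huge pages over 4KB base pages).

#include <linux/xarray.h>

#define DEMO_PMD_ORDER	9	/* assumption: 2MB / 4KB pages */

/* Load whatever occupies the PMD-sized slot covering @pgoff. */
static void *demo_load_pmd_slot(struct xarray *xa, unsigned long pgoff)
{
	XA_STATE_ORDER(xas, xa, pgoff, DEMO_PMD_ORDER);
	void *entry;

	xas_lock_irq(&xas);
	entry = xas_load(&xas);	/* xa_index was rounded down to the block start */
	xas_unlock_irq(&xas);

	return entry;
}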
1824 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1828 xas_lock_irq(&xas); in dax_insert_pfn_mkwrite()
1829 entry = get_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
1833 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_insert_pfn_mkwrite()
1834 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1839 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); in dax_insert_pfn_mkwrite()
1840 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1841 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1850 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
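dax_insert_pfn_mkwrite() re-dirties an entry that already exists so a later writeback pass picks it up again. A sketch of that tag-setting step, without the entry locking and PFN checks of the real function; the helper name is hypothetical.

#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Set the dirty tag on an existing entry at @index, if there is one. */
static int demo_redirty(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	int ret = -ENOENT;

	xas_lock_irq(&xas);
	if (xas_load(&xas)) {
		xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
		ret = 0;
	}
	xas_unlock_irq(&xas);

	return ret;
}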