Lines Matching refs:xas

147 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,  in dax_entry_waitqueue()  argument
151 unsigned long index = xas->xa_index; in dax_entry_waitqueue()
160 key->xa = xas->xa; in dax_entry_waitqueue()
163 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
185 static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all) in dax_wake_entry() argument
190 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
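dax_entry_waitqueue() maps an (XArray, index) pair onto a fixed table of wait queues, and dax_wake_entry() wakes whoever hashed to the same slot. A minimal sketch of that selection, based on the lines above; the table and function names here are illustrative, and the real helper also aligns the index to the PMD boundary for PMD entries, which this sketch skips:

        /* Hash the XArray pointer and the index so unrelated mappings
         * spread across the wait table (names are illustrative). */
        static wait_queue_head_t wait_table[1 << DAX_WAIT_TABLE_BITS];

        static wait_queue_head_t *pick_dax_waitqueue(struct xa_state *xas)
        {
                unsigned long hash = hash_long((unsigned long)xas->xa ^ xas->xa_index,
                                               DAX_WAIT_TABLE_BITS);
                return &wait_table[hash];
        }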
212 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) in get_unlocked_entry() argument
222 entry = xas_find_conflict(xas); in get_unlocked_entry()
230 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_unlocked_entry()
233 xas_unlock_irq(xas); in get_unlocked_entry()
234 xas_reset(xas); in get_unlocked_entry()
237 xas_lock_irq(xas); in get_unlocked_entry()
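get_unlocked_entry() waits for a locked entry to be released before handing it to the caller: while the entry is locked it drops the xa_lock, sleeps on the hashed wait queue, then retakes the lock and re-walks the tree. A simplified sketch of that loop; the ewait setup, conflict handling, and the dax_is_locked() helper are assumed from the surrounding DAX code:

        for (;;) {
                entry = xas_find_conflict(xas);
                if (!entry || !dax_is_locked(entry))
                        return entry;           /* absent or unlocked: caller may take it */

                wq = dax_entry_waitqueue(xas, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
                xas_unlock_irq(xas);            /* never sleep with xa_lock held */
                xas_reset(xas);                 /* the cached walk state is now stale */
                schedule();
                finish_wait(wq, &ewait.wait);
                xas_lock_irq(xas);              /* retake the lock and re-walk */
        }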
246 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
254 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
262 xas_unlock_irq(xas); in wait_entry_unlocked()
267 static void put_unlocked_entry(struct xa_state *xas, void *entry) in put_unlocked_entry() argument
271 dax_wake_entry(xas, entry, false); in put_unlocked_entry()
279 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
284 xas_reset(xas); in dax_unlock_entry()
285 xas_lock_irq(xas); in dax_unlock_entry()
286 old = xas_store(xas, entry); in dax_unlock_entry()
287 xas_unlock_irq(xas); in dax_unlock_entry()
289 dax_wake_entry(xas, entry, false); in dax_unlock_entry()
295 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
298 return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); in dax_lock_entry()
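dax_lock_entry() and dax_unlock_entry() implement a per-entry sleeping lock by keeping a DAX_LOCKED bit inside the value entry itself: locking re-stores the entry with the bit set (the caller holds xa_lock), unlocking stores the clean entry back and wakes one waiter. Condensed from the lines above:

        /* lock: caller holds xa_lock; xas_store() returns the old entry */
        unsigned long v = xa_to_value(entry);
        old = xas_store(xas, xa_mk_value(v | DAX_LOCKED));

        /* unlock: store the unlocked entry back and wake one waiter */
        xas_reset(xas);
        xas_lock_irq(xas);
        xas_store(xas, entry);
        xas_unlock_irq(xas);
        dax_wake_entry(xas, entry, false);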
391 XA_STATE(xas, NULL, 0); in dax_lock_page()
414 xas.xa = &mapping->i_pages; in dax_lock_page()
415 xas_lock_irq(&xas); in dax_lock_page()
417 xas_unlock_irq(&xas); in dax_lock_page()
420 xas_set(&xas, page->index); in dax_lock_page()
421 entry = xas_load(&xas); in dax_lock_page()
424 wait_entry_unlocked(&xas, entry); in dax_lock_page()
428 dax_lock_entry(&xas, entry); in dax_lock_page()
429 xas_unlock_irq(&xas); in dax_lock_page()
439 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
444 dax_unlock_entry(&xas, (void *)cookie); in dax_unlock_page()
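dax_lock_page() cannot initialise its xa_state with the mapping up front, because the mapping is only read from the page under RCU, so XA_STATE(xas, NULL, 0) is patched by hand once the mapping is known; dax_unlock_page() then releases the entry with a normally initialised state. A simplified sketch of the locking half, with the RCU/truncation checks and the retry label left out:

        XA_STATE(xas, NULL, 0);                 /* mapping not known yet */

        /* ... find and validate page->mapping under rcu_read_lock() ... */
        xas.xa = &mapping->i_pages;             /* point the state at the mapping */
        xas_lock_irq(&xas);
        xas_set(&xas, page->index);
        entry = xas_load(&xas);
        if (dax_is_locked(entry)) {
                wait_entry_unlocked(&xas, entry);   /* drops xa_lock and sleeps */
                /* ... restart the lookup ... */
        }
        dax_lock_entry(&xas, entry);
        xas_unlock_irq(&xas);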
476 static void *grab_mapping_entry(struct xa_state *xas, in grab_mapping_entry() argument
479 unsigned long index = xas->xa_index; in grab_mapping_entry()
484 xas_lock_irq(xas); in grab_mapping_entry()
485 entry = get_unlocked_entry(xas, order); in grab_mapping_entry()
491 xas_set_err(xas, -EIO); in grab_mapping_entry()
509 dax_lock_entry(xas, entry); in grab_mapping_entry()
517 xas_unlock_irq(xas); in grab_mapping_entry()
519 xas->xa_index & ~PG_PMD_COLOUR, in grab_mapping_entry()
521 xas_reset(xas); in grab_mapping_entry()
522 xas_lock_irq(xas); in grab_mapping_entry()
526 xas_store(xas, NULL); /* undo the PMD join */ in grab_mapping_entry()
527 dax_wake_entry(xas, entry, true); in grab_mapping_entry()
530 xas_set(xas, index); in grab_mapping_entry()
534 dax_lock_entry(xas, entry); in grab_mapping_entry()
541 dax_lock_entry(xas, entry); in grab_mapping_entry()
542 if (xas_error(xas)) in grab_mapping_entry()
548 xas_unlock_irq(xas); in grab_mapping_entry()
549 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
551 if (xas->xa_node == XA_ERROR(-ENOMEM)) in grab_mapping_entry()
553 if (xas_error(xas)) in grab_mapping_entry()
557 xas_unlock_irq(xas); in grab_mapping_entry()
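grab_mapping_entry() copes with allocation failure using the standard xas_nomem() retry idiom: attempt the store under the lock, and if the XArray needed a node it could not allocate atomically, drop the lock, let xas_nomem() allocate one, and retry. The generic shape of that idiom, with the same GFP mask as the listing and a placeholder entry value; the error path is illustrative only:

        do {
                xas_lock_irq(&xas);
                xas_store(&xas, entry);         /* may record -ENOMEM in the state */
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM));

        if (xas_error(&xas))
                return ERR_PTR(xas_error(&xas));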
587 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
617 xas_lock_irq(&xas); in dax_layout_busy_page_range()
618 xas_for_each(&xas, entry, end_idx) { in dax_layout_busy_page_range()
622 entry = get_unlocked_entry(&xas, 0); in dax_layout_busy_page_range()
625 put_unlocked_entry(&xas, entry); in dax_layout_busy_page_range()
631 xas_pause(&xas); in dax_layout_busy_page_range()
632 xas_unlock_irq(&xas); in dax_layout_busy_page_range()
634 xas_lock_irq(&xas); in dax_layout_busy_page_range()
636 xas_unlock_irq(&xas); in dax_layout_busy_page_range()
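dax_layout_busy_page_range() scans a whole range under xa_lock and must not hold the lock for too long, so it periodically drops it; xas_pause() is what makes resuming the walk after an unlock safe. The generic shape of that loop (the batch size of 64 is an arbitrary illustration, not the value the real code uses):

        unsigned int scanned = 0;
        void *entry;

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, end_idx) {
                /* ... inspect the entry, waiting for it to unlock if needed ... */
                if (++scanned % 64)
                        continue;
                xas_pause(&xas);                /* remember where we stopped */
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);             /* resume from the paused index */
        }
        xas_unlock_irq(&xas);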
650 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
654 xas_lock_irq(&xas); in __dax_invalidate_entry()
655 entry = get_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
659 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || in __dax_invalidate_entry()
660 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) in __dax_invalidate_entry()
663 xas_store(&xas, NULL); in __dax_invalidate_entry()
667 put_unlocked_entry(&xas, entry); in __dax_invalidate_entry()
668 xas_unlock_irq(&xas); in __dax_invalidate_entry()
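__dax_invalidate_entry() removes an entry only when it is safe to do so: unless it is truncating, it leaves alone entries that are still tagged dirty or queued for writeback. Sketch of that check-then-remove sequence, with the accounting and tracepoints omitted:

        xas_lock_irq(&xas);
        entry = get_unlocked_entry(&xas, 0);
        if (!entry)
                goto out;
        if (!trunc &&
            (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
             xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
                goto out;                       /* still needs writeback */
        xas_store(&xas, NULL);                  /* drop the entry */
        ret = 1;
out:
        put_unlocked_entry(&xas, entry);
        xas_unlock_irq(&xas);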
732 static void *dax_insert_entry(struct xa_state *xas, in dax_insert_entry() argument
742 unsigned long index = xas->xa_index; in dax_insert_entry()
751 xas_reset(xas); in dax_insert_entry()
752 xas_lock_irq(xas); in dax_insert_entry()
766 old = dax_lock_entry(xas, new_entry); in dax_insert_entry()
771 xas_load(xas); /* Walk the xa_state */ in dax_insert_entry()
775 xas_set_mark(xas, PAGECACHE_TAG_DIRTY); in dax_insert_entry()
777 xas_unlock_irq(xas); in dax_insert_entry()
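dax_insert_entry() installs the new entry while keeping it locked, and tags it dirty when the fault was a write. Condensed sketch of the xas portion of those lines; dax_is_zero_entry()/dax_is_empty_entry() are DAX helpers assumed from the same file, and the page association bookkeeping is elided:

        xas_reset(xas);
        xas_lock_irq(xas);
        if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                /* replace the placeholder: store the new entry, still locked */
                old = dax_lock_entry(xas, new_entry);
                entry = new_entry;
        } else {
                xas_load(xas);                  /* only walk the state to the slot */
        }
        if (dirty)
                xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
        xas_unlock_irq(xas);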
865 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, in dax_writeback_one() argument
881 entry = get_unlocked_entry(xas, 0); in dax_writeback_one()
900 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) in dax_writeback_one()
905 dax_lock_entry(xas, entry); in dax_writeback_one()
914 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); in dax_writeback_one()
915 xas_unlock_irq(xas); in dax_writeback_one()
926 index = xas->xa_index & ~(count - 1); in dax_writeback_one()
936 xas_reset(xas); in dax_writeback_one()
937 xas_lock_irq(xas); in dax_writeback_one()
938 xas_store(xas, entry); in dax_writeback_one()
939 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); in dax_writeback_one()
940 dax_wake_entry(xas, entry, false); in dax_writeback_one()
946 put_unlocked_entry(xas, entry); in dax_writeback_one()
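dax_writeback_one() is called with xa_lock held by the iteration loop and uses the entry lock as its "flush in progress" flag: under xa_lock it locks the entry and clears TOWRITE, drops the lock for the actual PTE cleaning and cache flush, then retakes it to unlock the entry, clear DIRTY and wake waiters. Skeleton of that sequence, with the flush details elided:

        entry = get_unlocked_entry(xas, 0);
        if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) {
                put_unlocked_entry(xas, entry); /* raced: nothing left to write back */
                return 0;
        }

        dax_lock_entry(xas, entry);             /* hold off page faults on this entry */
        xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
        xas_unlock_irq(xas);

        /* ... write-protect the mappings and flush the CPU cache ... */

        xas_reset(xas);
        xas_lock_irq(xas);
        xas_store(xas, entry);                  /* unlock the entry */
        xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
        dax_wake_entry(xas, entry, false);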
958 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
971 trace_dax_writeback_range(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
973 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
975 xas_lock_irq(&xas); in dax_writeback_mapping_range()
976 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
977 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
985 xas_pause(&xas); in dax_writeback_mapping_range()
986 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
988 xas_lock_irq(&xas); in dax_writeback_mapping_range()
990 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
991 trace_dax_writeback_range_done(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
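dax_writeback_mapping_range() is the tagged twin of the earlier scan: tag_pages_for_writeback() converts DIRTY marks to TOWRITE for the range, and xas_for_each_marked() then visits only the TOWRITE entries, pausing periodically just like the loop above. Shape of the driver loop:

        tag_pages_for_writeback(mapping, xas.xa_index, end_index);

        xas_lock_irq(&xas);
        xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
                ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
                if (ret < 0) {
                        mapping_set_error(mapping, ret);
                        break;
                }
                /* every N entries: xas_pause(), unlock, cond_resched(), relock */
        }
        xas_unlock_irq(&xas);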
1040 static vm_fault_t dax_load_hole(struct xa_state *xas, in dax_load_hole() argument
1049 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1252 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1280 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1355 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1383 ret = dax_load_hole(&xas, mapping, &entry, vmf); in dax_iomap_pte_fault()
1410 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
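The PTE fault path ties the helpers above together: it grabs and locks the entry for the faulting index, replaces it with the mapped (or hole) entry while still holding it locked, and unlocks it once the PTE has been installed. Outline of that lifecycle from the lines above; the dirty argument is simplified (the real code also accounts for synchronous faults):

        XA_STATE(xas, &mapping->i_pages, vmf->pgoff);

        entry = grab_mapping_entry(&xas, mapping, 0);   /* entry is locked on return */
        /* ... look up the block mapping via iomap ... */
        entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, 0, write);
        /* ... or, for an unwritten hole: dax_load_hole(&xas, mapping, &entry, vmf) ... */
        /* ... install the PTE ... */
        dax_unlock_entry(&xas, entry);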
1417 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1436 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1474 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1518 if (xas.xa_index >= max_pgoff) { in dax_iomap_pmd_fault()
1524 if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff) in dax_iomap_pmd_fault()
1533 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1556 pos = (loff_t)xas.xa_index << PAGE_SHIFT; in dax_iomap_pmd_fault()
1573 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
1597 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry); in dax_iomap_pmd_fault()
1620 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
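The PMD fault path declares its state with XA_STATE_ORDER() so that grab_mapping_entry() can create a single PMD-order entry; XA_STATE_ORDER() also rounds xa_index down to the PMD boundary, which is why the bounds checks below work. PG_PMD_COLOUR is the page-index mask within one PMD block. Sketch of the setup and checks, with error handling simplified:

        XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);

        if (xas.xa_index >= max_pgoff)
                return VM_FAULT_SIGBUS;                 /* fault entirely past EOF */
        if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
                goto fallback;                          /* partial PMD at EOF: use PTEs */

        entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
        pos = (loff_t)xas.xa_index << PAGE_SHIFT;       /* file offset of the PMD block */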
1678 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1682 xas_lock_irq(&xas); in dax_insert_pfn_mkwrite()
1683 entry = get_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
1687 put_unlocked_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1688 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1693 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); in dax_insert_pfn_mkwrite()
1694 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1695 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1704 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
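dax_insert_pfn_mkwrite() finishes a synchronous write fault once the filesystem has made the metadata durable: it re-finds the entry, tags it dirty and locks it, re-inserts the PTE or PMD as writable, and finally unlocks the entry. xas skeleton of those lines, with the race checks simplified:

        xas_lock_irq(&xas);
        entry = get_unlocked_entry(&xas, order);
        if (!entry || dax_is_conflict(entry)) {         /* entry changed: retry the fault */
                put_unlocked_entry(&xas, entry);
                xas_unlock_irq(&xas);
                return VM_FAULT_NOPAGE;
        }
        xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
        dax_lock_entry(&xas, entry);
        xas_unlock_irq(&xas);

        /* ... vmf_insert_mixed_mkwrite() or vmf_insert_pfn_pmd() ... */

        dax_unlock_entry(&xas, entry);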