Lines matching refs:xas
All hits below are in fs/dax.c (a v5-era Linux kernel). xas is the XArray cursor, struct xa_state, that fs/dax.c threads through its locked-entry protocol. The leading number on each hit is the fs/dax.c source line; the trailing "argument" note marks hits where xas appears as a function parameter.
147 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas, in dax_entry_waitqueue() argument
151 unsigned long index = xas->xa_index; in dax_entry_waitqueue()
160 key->xa = xas->xa; in dax_entry_waitqueue()
163 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
185 static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all) in dax_wake_entry() argument
190 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
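Lines 147-190 cover the two waitqueue helpers. Waiters on a locked DAX entry do not get a per-entry waitqueue; instead the xarray pointer is XORed with the (PMD-aligned) index and hashed into a fixed table, and dax_wake_entry() wakes through the same hash with a key naming the exact entry. A sketch consistent with the lines shown, assuming the file-local dax_is_pmd_entry() helper and PG_PMD_COLOUR mask from fs/dax.c:

#include <linux/hash.h>
#include <linux/wait.h>
#include <linux/xarray.h>

#define DAX_WAIT_TABLE_BITS     12
#define DAX_WAIT_TABLE          (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE];

/* Identifies one DAX entry: the array plus the entry's starting index. */
struct exceptional_entry_key {
        struct xarray *xa;
        pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
        wait_queue_entry_t wait;
        struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
                void *entry, struct exceptional_entry_key *key)
{
        unsigned long hash;
        unsigned long index = xas->xa_index;

        /*
         * A PMD entry spans many indices; align to the start of the PMD
         * so that every index in its range maps to the same waitqueue.
         */
        if (dax_is_pmd_entry(entry))    /* fs/dax.c helper, not shown */
                index &= ~PG_PMD_COLOUR;
        key->xa = xas->xa;
        key->entry_start = index;

        hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
        return wait_table + hash;
}

static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
        struct exceptional_entry_key key;
        wait_queue_head_t *wq = dax_entry_waitqueue(xas, entry, &key);

        /* Wake one exclusive waiter, or everyone when the entry goes away. */
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}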
212 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) in get_unlocked_entry() argument
222 entry = xas_find_conflict(xas); in get_unlocked_entry()
230 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_unlocked_entry()
233 xas_unlock_irq(xas); in get_unlocked_entry()
234 xas_reset(xas); in get_unlocked_entry()
237 xas_lock_irq(xas); in get_unlocked_entry()
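get_unlocked_entry() (lines 212-237) is the wait loop: look up the entry with xas_find_conflict(), and while its lock bit is set, queue exclusively on the hashed waitqueue, drop the xa_lock, sleep, then relock and retry. xas_reset() is required because the cursor may cache a stale node once the lock has been dropped. A sketch consistent with the lines shown, additionally assuming fs/dax.c's dax_is_locked(), dax_entry_order(), and wake_exceptional_entry_func() (the wake filter that compares keys) plus the structs from the previous sketch:

static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
        struct wait_exceptional_entry_queue ewait;
        wait_queue_head_t *wq;
        void *entry;

        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;

        for (;;) {
                entry = xas_find_conflict(xas);
                if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                        return entry;
                if (dax_entry_order(entry) < order)
                        return XA_RETRY_ENTRY;  /* caller treats as conflict */
                if (!dax_is_locked(entry))
                        return entry;

                wq = dax_entry_waitqueue(xas, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                xas_unlock_irq(xas);
                xas_reset(xas);         /* cursor is stale once unlocked */
                schedule();
                finish_wait(wq, &ewait.wait);
                xas_lock_irq(xas);
        }
}

wait_entry_unlocked() (lines 246-262) is the one-shot variant used by dax_lock_page(): it sleeps once and does not retake the lock, because its caller must revalidate page->mapping from scratch anyway.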
246 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
254 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
262 xas_unlock_irq(xas); in wait_entry_unlocked()
267 static void put_unlocked_entry(struct xa_state *xas, void *entry) in put_unlocked_entry() argument
271 dax_wake_entry(xas, entry, false); in put_unlocked_entry()
279 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
284 xas_reset(xas); in dax_unlock_entry()
285 xas_lock_irq(xas); in dax_unlock_entry()
286 old = xas_store(xas, entry); in dax_unlock_entry()
287 xas_unlock_irq(xas); in dax_unlock_entry()
289 dax_wake_entry(xas, entry, false); in dax_unlock_entry()
295 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
298 return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); in dax_lock_entry()
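Lines 267-298 are the core of the protocol. An entry is "locked" by re-storing the same xarray value with the low DAX_LOCKED bit set; unlocking stores the clean value back and wakes one waiter; put_unlocked_entry() serves callers that only looked at an entry but may have consumed an exclusive wakeup. A sketch, assuming fs/dax.c's dax_is_locked() and dax_is_conflict() helpers:

#define DAX_LOCKED      (1UL << 0)      /* low bit of the xarray value */

static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
        unsigned long v = xa_to_value(entry);

        /* Store the same entry with the lock bit set; returns old value. */
        return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
        void *old;

        BUG_ON(dax_is_locked(entry));   /* caller passes the unlocked value */
        xas_reset(xas);
        xas_lock_irq(xas);
        old = xas_store(xas, entry);
        xas_unlock_irq(xas);
        BUG_ON(!dax_is_locked(old));
        dax_wake_entry(xas, entry, false);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
        /* If we were the only waiter woken, pass the wakeup on. */
        if (entry && !dax_is_conflict(entry))
                dax_wake_entry(xas, entry, false);
}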
391 XA_STATE(xas, NULL, 0); in dax_lock_page()
414 xas.xa = &mapping->i_pages; in dax_lock_page()
415 xas_lock_irq(&xas); in dax_lock_page()
417 xas_unlock_irq(&xas); in dax_lock_page()
420 xas_set(&xas, page->index); in dax_lock_page()
421 entry = xas_load(&xas); in dax_lock_page()
424 wait_entry_unlocked(&xas, entry); in dax_lock_page()
428 dax_lock_entry(&xas, entry); in dax_lock_page()
429 xas_unlock_irq(&xas); in dax_lock_page()
439 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
444 dax_unlock_entry(&xas, (void *)cookie); in dax_unlock_page()
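dax_lock_page() (lines 391-429) starts with an unbound XA_STATE(xas, NULL, 0) because the mapping is not yet known; once page->mapping has been validated under the xa_lock, it points xas.xa at that mapping's i_pages, seeks to page->index, waits out any lock holder, and locks the entry, returning it as an opaque dax_entry_t cookie that dax_unlock_page() (lines 439-444) hands back to dax_unlock_entry(). A hypothetical caller following the pattern of the in-tree memory-failure user:

#include <linux/dax.h>
#include <linux/mm.h>

/* Hypothetical helper: pin the DAX entry so the mapping stays stable. */
static void inspect_dax_page(struct page *page)
{
        dax_entry_t cookie;

        cookie = dax_lock_page(page);   /* sleeps while the entry is locked */
        if (!cookie)
                return;                 /* page no longer belongs to DAX */

        /* ... page->mapping and page->index cannot change here ... */

        dax_unlock_page(page, cookie);  /* re-store the entry, wake waiters */
}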
476 static void *grab_mapping_entry(struct xa_state *xas, in grab_mapping_entry() argument
479 unsigned long index = xas->xa_index; in grab_mapping_entry()
484 xas_lock_irq(xas); in grab_mapping_entry()
485 entry = get_unlocked_entry(xas, order); in grab_mapping_entry()
491 xas_set_err(xas, -EIO); in grab_mapping_entry()
509 dax_lock_entry(xas, entry); in grab_mapping_entry()
517 xas_unlock_irq(xas); in grab_mapping_entry()
519 xas->xa_index & ~PG_PMD_COLOUR, in grab_mapping_entry()
521 xas_reset(xas); in grab_mapping_entry()
522 xas_lock_irq(xas); in grab_mapping_entry()
526 xas_store(xas, NULL); /* undo the PMD join */ in grab_mapping_entry()
527 dax_wake_entry(xas, entry, true); in grab_mapping_entry()
530 xas_set(xas, index); in grab_mapping_entry()
534 dax_lock_entry(xas, entry); in grab_mapping_entry()
541 dax_lock_entry(xas, entry); in grab_mapping_entry()
542 if (xas_error(xas)) in grab_mapping_entry()
548 xas_unlock_irq(xas); in grab_mapping_entry()
549 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
551 if (xas->xa_node == XA_ERROR(-ENOMEM)) in grab_mapping_entry()
553 if (xas_error(xas)) in grab_mapping_entry()
557 xas_unlock_irq(xas); in grab_mapping_entry()
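grab_mapping_entry() (lines 476-557) returns a locked entry of at least the requested order, allocating an empty one if needed. The references show its three tricky paths: a non-value entry records -EIO via xas_set_err(); a zero/empty PMD entry blocking a PTE insert is downgraded (unmap the whole PMD range, wipe the entry, wake all waiters, reseek); and on store failure xas_nomem() allocates and retries. A condensed reconstruction, not the verbatim source, with the page association and nrexceptional accounting elided; dax_is_*(), dax_make_entry(), and the DAX_* flags are fs/dax.c locals:

static void *grab_mapping_entry(struct xa_state *xas,
                struct address_space *mapping, unsigned int order)
{
        unsigned long index = xas->xa_index;
        bool pmd_downgrade;
        void *entry;

retry:
        pmd_downgrade = false;
        xas_lock_irq(xas);
        entry = get_unlocked_entry(xas, order);

        if (entry) {
                if (dax_is_conflict(entry))
                        goto fallback;
                if (!xa_is_value(entry)) {
                        xas_set_err(xas, -EIO);         /* corrupt mapping */
                        goto out_unlock;
                }
                /* Zero/empty PMD entry in the way of a PTE: downgrade it. */
                if (order == 0 && dax_is_pmd_entry(entry) &&
                    (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)))
                        pmd_downgrade = true;
        }

        if (pmd_downgrade) {
                dax_lock_entry(xas, entry);
                if (dax_is_zero_entry(entry)) {
                        /* Unmap the whole huge range before shrinking it. */
                        xas_unlock_irq(xas);
                        unmap_mapping_pages(mapping,
                                        xas->xa_index & ~PG_PMD_COLOUR,
                                        PG_PMD_COLOUR + 1, false);
                        xas_reset(xas);
                        xas_lock_irq(xas);
                }
                xas_store(xas, NULL);           /* undo the PMD join */
                dax_wake_entry(xas, entry, true);       /* wake everybody */
                entry = NULL;
                xas_set(xas, index);            /* reseek after the wipe */
        }

        if (entry) {
                dax_lock_entry(xas, entry);
        } else {
                unsigned long flags = DAX_EMPTY;

                if (order > 0)
                        flags |= DAX_PMD;
                entry = dax_make_entry(pfn_to_pfn_t(0), flags);
                dax_lock_entry(xas, entry);     /* store may set ENOMEM */
                if (xas_error(xas))
                        goto out_unlock;
        }

out_unlock:
        xas_unlock_irq(xas);
        if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
                goto retry;                     /* memory allocated: rerun */
        if (xas->xa_node == XA_ERROR(-ENOMEM))
                return xa_mk_internal(VM_FAULT_OOM);
        if (xas_error(xas))
                return xa_mk_internal(VM_FAULT_SIGBUS);
        return entry;
fallback:
        xas_unlock_irq(xas);
        return xa_mk_internal(VM_FAULT_FALLBACK);
}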
578 XA_STATE(xas, &mapping->i_pages, 0); in dax_layout_busy_page()
606 xas_lock_irq(&xas); in dax_layout_busy_page()
607 xas_for_each(&xas, entry, ULONG_MAX) { in dax_layout_busy_page()
611 entry = get_unlocked_entry(&xas, 0); in dax_layout_busy_page()
614 put_unlocked_entry(&xas, entry); in dax_layout_busy_page()
620 xas_pause(&xas); in dax_layout_busy_page()
621 xas_unlock_irq(&xas); in dax_layout_busy_page()
623 xas_lock_irq(&xas); in dax_layout_busy_page()
625 xas_unlock_irq(&xas); in dax_layout_busy_page()
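dax_layout_busy_page() (lines 578-625) shows the canonical pattern for a long walk under xa_lock_irq: every so often call xas_pause(), drop the lock, cond_resched(), and retake it; xas_pause() arranges for the next iteration to restart safely at the following index. A stripped-down sketch of the pattern; the function name and batch size are this sketch's own choices, and the per-entry work is reduced to a comment:

static struct page *scan_busy_pages(struct address_space *mapping)
{
        XA_STATE(xas, &mapping->i_pages, 0);
        unsigned int scanned = 0;
        struct page *page = NULL;
        void *entry;

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, ULONG_MAX) {
                if (WARN_ON_ONCE(!xa_is_value(entry)))
                        continue;
                if (unlikely(dax_is_locked(entry)))
                        entry = get_unlocked_entry(&xas, 0);
                /* ... if (entry) page = dax_busy_page(entry); ... */
                put_unlocked_entry(&xas, entry);
                if (page)
                        break;

                if (++scanned % 4096)   /* batch size chosen for the sketch */
                        continue;
                xas_pause(&xas);
                xas_unlock_irq(&xas);
                cond_resched();
                xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
        return page;
}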
633 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
637 xas_lock_irq(&xas); in __dax_invalidate_entry()
638 entry = get_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
642 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || in __dax_invalidate_entry()
643 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) in __dax_invalidate_entry()
646 xas_store(&xas, NULL); in __dax_invalidate_entry()
650 put_unlocked_entry(&xas, entry); in __dax_invalidate_entry()
651 xas_unlock_irq(&xas); in __dax_invalidate_entry()
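__dax_invalidate_entry() (lines 633-651) removes one entry: wait for it to be unlocked, refuse (unless truncating) while the DIRTY or TOWRITE mark says writeback still owns the data, then store NULL. A sketch close to the lines shown, with the page association teardown reduced to a comment:

static int __dax_invalidate_entry(struct address_space *mapping,
                                  pgoff_t index, bool trunc)
{
        XA_STATE(xas, &mapping->i_pages, index);
        int ret = 0;
        void *entry;

        xas_lock_irq(&xas);
        entry = get_unlocked_entry(&xas, 0);
        if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
                goto out;
        /* Keep dirty entries unless we are truncating the file. */
        if (!trunc &&
            (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
             xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
                goto out;
        /* ... drop the entry's page association here ... */
        xas_store(&xas, NULL);
        ret = 1;
out:
        put_unlocked_entry(&xas, entry);
        xas_unlock_irq(&xas);
        return ret;
}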
716 static void *dax_insert_entry(struct xa_state *xas, in dax_insert_entry() argument
726 unsigned long index = xas->xa_index; in dax_insert_entry()
735 xas_reset(xas); in dax_insert_entry()
736 xas_lock_irq(xas); in dax_insert_entry()
750 old = dax_lock_entry(xas, new_entry); in dax_insert_entry()
755 xas_load(xas); /* Walk the xa_state */ in dax_insert_entry()
759 xas_set_mark(xas, PAGECACHE_TAG_DIRTY); in dax_insert_entry()
761 xas_unlock_irq(xas); in dax_insert_entry()
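dax_insert_entry() (lines 716-761) publishes a freshly mapped pfn. The xas_reset()/xas_lock_irq() pair on lines 735-736 exists because the caller dropped the xa_lock while talking to the filesystem; if the old entry was a hole it is replaced wholesale (dax_lock_entry() with the new value keeps it locked), otherwise xas_load() merely rewalks so the mark operation below has a valid cursor. A condensed sketch, with the zero-page unmapping and page association elided:

static void *dax_insert_entry(struct xa_state *xas,
                struct address_space *mapping, struct vm_fault *vmf,
                void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
        void *new_entry = dax_make_entry(pfn, flags);

        if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        /* ... unmap any zero page this entry is replacing ... */

        xas_reset(xas);
        xas_lock_irq(xas);
        if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
                /* Swap the hole for the real entry; it stays locked. */
                dax_lock_entry(xas, new_entry);
                entry = new_entry;
        } else {
                xas_load(xas);          /* just rewalk the xa_state */
        }

        if (dirty)
                xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

        xas_unlock_irq(xas);
        return entry;
}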
849 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, in dax_writeback_one() argument
865 entry = get_unlocked_entry(xas, 0); in dax_writeback_one()
884 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) in dax_writeback_one()
889 dax_lock_entry(xas, entry); in dax_writeback_one()
898 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); in dax_writeback_one()
899 xas_unlock_irq(xas); in dax_writeback_one()
910 index = xas->xa_index & ~(count - 1); in dax_writeback_one()
920 xas_reset(xas); in dax_writeback_one()
921 xas_lock_irq(xas); in dax_writeback_one()
922 xas_store(xas, entry); in dax_writeback_one()
923 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); in dax_writeback_one()
924 dax_wake_entry(xas, entry, false); in dax_writeback_one()
930 put_unlocked_entry(xas, entry); in dax_writeback_one()
942 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
960 trace_dax_writeback_range(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
962 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
964 xas_lock_irq(&xas); in dax_writeback_mapping_range()
965 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
966 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
974 xas_pause(&xas); in dax_writeback_mapping_range()
975 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
977 xas_lock_irq(&xas); in dax_writeback_mapping_range()
979 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
981 trace_dax_writeback_range_done(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
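Lines 849-981 are the flush path. dax_writeback_one() revalidates the TOWRITE mark under the xa_lock (tag_pages_for_writeback() ran earlier without holding any entry lock), locks the entry, clears TOWRITE, then drops the xa_lock for the actual cache flush; afterwards it relocks, stores the unlocked entry back, clears DIRTY, and wakes one waiter. The ordering matters: DIRTY is cleared only after the flush, so a racing mkwrite redirties the entry rather than losing data. dax_writeback_mapping_range() drives it with xas_for_each_marked() plus the same xas_pause() batching shown above. A condensed sketch of the per-entry steps under a hypothetical name, with PTE write-protection reduced to a comment; dax_flush() is the real linux/dax.h helper, and dax_to_pfn()/dax_entry_order() are fs/dax.c decoders:

static int writeback_one_sketch(struct xa_state *xas,
                struct dax_device *dax_dev, struct address_space *mapping,
                void *entry)
{
        unsigned long pfn, count, index;

        /* Called under xas_lock_irq(); wait until nobody holds the entry. */
        entry = get_unlocked_entry(xas, 0);
        if (!entry || !xa_is_value(entry))
                return 0;
        /* Someone else may have flushed it since the tagging pass. */
        if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) {
                put_unlocked_entry(xas, entry);
                return 0;
        }

        dax_lock_entry(xas, entry);
        xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
        xas_unlock_irq(xas);

        pfn = dax_to_pfn(entry);
        count = 1UL << dax_entry_order(entry);
        index = xas->xa_index & ~(count - 1);   /* align to entry start */

        /* ... write-protect the user mappings over [index, index+count) ... */
        dax_flush(dax_dev, page_address(pfn_to_page(pfn)),
                  count * PAGE_SIZE);

        /* Flush done: unlock the entry, clear DIRTY, wake one waiter. */
        xas_reset(xas);
        xas_lock_irq(xas);
        xas_store(xas, entry);
        xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
        dax_wake_entry(xas, entry, false);
        return 0;
}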
1030 static vm_fault_t dax_load_hole(struct xa_state *xas, in dax_load_hole() argument
1039 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1247 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1274 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1349 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1377 ret = dax_load_hole(&xas, mapping, &entry, vmf); in dax_iomap_pte_fault()
1404 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
1411 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1430 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1468 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1511 if (xas.xa_index >= max_pgoff) { in dax_iomap_pmd_fault()
1517 if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff) in dax_iomap_pmd_fault()
1526 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1549 pos = (loff_t)xas.xa_index << PAGE_SHIFT; in dax_iomap_pmd_fault()
1565 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
1589 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry); in dax_iomap_pmd_fault()
1612 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
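Lines 1030-1612 show how the fault handlers consume the machinery: grab_mapping_entry() returns a locked entry (or an xa_mk_internal()-encoded VM_FAULT code), dax_insert_entry() and the load_hole helpers publish the mapping, and dax_unlock_entry() releases the entry on every exit path. The PMD variant uses XA_STATE_ORDER() so xas.xa_index is already PMD-aligned, and the test on line 1517, (xas.xa_index | PG_PMD_COLOUR) >= max_pgoff, falls back when the huge page would reach past end-of-file. A skeleton of the PTE path under a hypothetical name, taking the pfn as a parameter where the real code derives it from the iomap:

static vm_fault_t pte_fault_skeleton(struct vm_fault *vmf,
                struct address_space *mapping, pfn_t pfn, bool dirty)
{
        XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
        vm_fault_t ret;
        void *entry;

        entry = grab_mapping_entry(&xas, mapping, 0);   /* locked on return */
        if (xa_is_internal(entry))
                return xa_to_internal(entry);           /* VM_FAULT_* code */

        /* ... ask the filesystem for the block backing vmf->pgoff ... */

        entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, 0, dirty);

        /* ... install the PTE, e.g. with vmf_insert_mixed() ... */
        ret = VM_FAULT_NOPAGE;

        dax_unlock_entry(&xas, entry);  /* store unlocked value and wake */
        return ret;
}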
1670 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1674 xas_lock_irq(&xas); in dax_insert_pfn_mkwrite()
1675 entry = get_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
1679 put_unlocked_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1680 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1685 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); in dax_insert_pfn_mkwrite()
1686 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1687 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1696 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
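dax_insert_pfn_mkwrite() (lines 1670-1696) runs after the filesystem has committed a sync fault: it re-finds the entry, bails with VM_FAULT_NOPAGE if it raced with a split or truncate, and, crucially, sets the DIRTY mark and takes the entry lock before dropping the xa_lock, so writeback cannot clean the page between the mark and the pte update. A sketch under the same assumptions as above, with the PMD branch elided:

static vm_fault_t insert_pfn_mkwrite_sketch(struct vm_fault *vmf,
                pfn_t pfn, unsigned int order)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
        vm_fault_t ret;
        void *entry;

        xas_lock_irq(&xas);
        entry = get_unlocked_entry(&xas, order);
        /* Entry gone, or an order mismatch: someone raced with us. */
        if (!entry || dax_is_conflict(entry) ||
            (order == 0 && dax_is_pmd_entry(entry))) {
                put_unlocked_entry(&xas, entry);
                xas_unlock_irq(&xas);
                return VM_FAULT_NOPAGE;
        }
        xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
        dax_lock_entry(&xas, entry);
        xas_unlock_irq(&xas);

        if (order == 0)
                ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
        else
                ret = VM_FAULT_FALLBACK;        /* PMD case elided here */

        dax_unlock_entry(&xas, entry);
        return ret;
}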