Lines matching full:entry in fs/dax.c (Linux kernel DAX page-cache code). Each hit shows the source line number, the matching line, the enclosing function, and whether 'entry' is an argument or a local there.
52 /* The order of a PMD entry */
69 * for pages. We use one bit for locking, one bit for the entry size (PMD)
70 * and two more to tell us if the entry is a zero page or an empty entry that
73 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
74 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
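
The comment excerpted above (source lines 69-74) describes how each DAX page-cache entry packs a pfn and a few flag bits into one xarray value. The user-space sketch below mirrors that layout; the shift and bit positions (DAX_SHIFT and the flag values) are illustrative assumptions consistent with the comment, and plain shifts stand in for xa_mk_value()/xa_to_value(), so nothing here is quoted from the kernel source.

#include <stdio.h>
#include <stdbool.h>

/* Assumed bit layout, modelled on the comment: one lock bit, one PMD
 * (size) bit, two bits for zero-page/empty, pfn stored above DAX_SHIFT.
 * Values are illustrative, not copied from the kernel headers. */
#define DAX_SHIFT      4
#define DAX_LOCKED     (1UL << 0)
#define DAX_PMD        (1UL << 1)
#define DAX_ZERO_PAGE  (1UL << 2)
#define DAX_EMPTY      (1UL << 3)

/* Stand-ins for dax_make_entry()/dax_to_pfn(), built on plain shifts
 * instead of xa_mk_value()/xa_to_value(). */
static unsigned long mock_make_entry(unsigned long pfn, unsigned long flags)
{
	return (pfn << DAX_SHIFT) | flags;
}

static unsigned long mock_to_pfn(unsigned long entry)
{
	return entry >> DAX_SHIFT;
}

static bool mock_is_pmd_entry(unsigned long entry)
{
	return entry & DAX_PMD;
}

static bool mock_is_locked(unsigned long entry)
{
	return entry & DAX_LOCKED;
}

int main(void)
{
	/* A locked, PMD-sized entry backed by pfn 0x12345. */
	unsigned long e = mock_make_entry(0x12345, DAX_PMD | DAX_LOCKED);

	printf("pfn=%#lx pmd=%d locked=%d zero=%d\n",
	       mock_to_pfn(e), mock_is_pmd_entry(e), mock_is_locked(e),
	       !!(e & DAX_ZERO_PAGE));
	return 0;
}
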
83 static unsigned long dax_to_pfn(void *entry) in dax_to_pfn() argument
85 return xa_to_value(entry) >> DAX_SHIFT; in dax_to_pfn()
93 static bool dax_is_locked(void *entry) in dax_is_locked() argument
95 return xa_to_value(entry) & DAX_LOCKED; in dax_is_locked()
98 static unsigned int dax_entry_order(void *entry) in dax_entry_order() argument
100 if (xa_to_value(entry) & DAX_PMD) in dax_entry_order()
105 static unsigned long dax_is_pmd_entry(void *entry) in dax_is_pmd_entry() argument
107 return xa_to_value(entry) & DAX_PMD; in dax_is_pmd_entry()
110 static bool dax_is_pte_entry(void *entry) in dax_is_pte_entry() argument
112 return !(xa_to_value(entry) & DAX_PMD); in dax_is_pte_entry()
115 static int dax_is_zero_entry(void *entry) in dax_is_zero_entry() argument
117 return xa_to_value(entry) & DAX_ZERO_PAGE; in dax_is_zero_entry()
120 static int dax_is_empty_entry(void *entry) in dax_is_empty_entry() argument
122 return xa_to_value(entry) & DAX_EMPTY; in dax_is_empty_entry()
126 * true if the entry that was found is of a smaller order than the entry
129 static bool dax_is_conflict(void *entry) in dax_is_conflict() argument
131 return entry == XA_RETRY_ENTRY; in dax_is_conflict()
135 * DAX page cache entry locking
148 void *entry, struct exceptional_entry_key *key) in dax_entry_waitqueue() argument
154 * If 'entry' is a PMD, align the 'index' that we use for the wait in dax_entry_waitqueue()
158 if (dax_is_pmd_entry(entry)) in dax_entry_waitqueue()
181 * @entry may no longer be the entry at the index in the mapping.
182 * The important information it's conveying is whether the entry at
183 * this index used to be a PMD entry.
185 static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all) in dax_wake_entry() argument
190 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
193 * Checking for locked entry and prepare_to_wait_exclusive() happens in dax_wake_entry()
194 * under the i_pages lock, ditto for entry handling in our callers. in dax_wake_entry()
195 * So at this point all tasks that could have seen our entry locked in dax_wake_entry()
203 * Look up entry in page cache, wait for it to become unlocked if it
204 * is a DAX entry and return it. The caller must subsequently call
205 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
206 * if it did. The entry returned may have a larger order than @order.
207 * If @order is larger than the order of the entry found in i_pages, this
208 * function returns a dax_is_conflict entry.
214 void *entry; in get_unlocked_entry() local
222 entry = xas_find_conflict(xas); in get_unlocked_entry()
223 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in get_unlocked_entry()
224 return entry; in get_unlocked_entry()
225 if (dax_entry_order(entry) < order) in get_unlocked_entry()
227 if (!dax_is_locked(entry)) in get_unlocked_entry()
228 return entry; in get_unlocked_entry()
230 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_unlocked_entry()
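
The kernel-doc fragment above (source lines 203-208) spells out the lookup contract: the caller holds the i_pages lock, may get a conflict entry back when it asked for a larger @order than what is stored, and must pair the lookup with put_unlocked_entry() or dax_unlock_entry(). A minimal sketch of that calling pattern, modelled on callers that appear later in this listing (such as __dax_invalidate_entry()) and assumed to live inside fs/dax.c where these static helpers are visible:

	XA_STATE(xas, &mapping->i_pages, index);
	void *entry;

	xas_lock_irq(&xas);			/* take the i_pages lock */
	entry = get_unlocked_entry(&xas, 0);	/* sleeps while the entry is locked */
	if (entry && !dax_is_conflict(entry) && xa_is_value(entry)) {
		/* ... inspect or modify the (still unlocked) DAX entry ... */
	}
	put_unlocked_entry(&xas, entry);	/* wake any exclusive waiter */
	xas_unlock_irq(&xas);
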
246 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
254 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
257 * path ever successfully retrieves an unlocked entry before an in wait_entry_unlocked()
267 static void put_unlocked_entry(struct xa_state *xas, void *entry) in put_unlocked_entry() argument
270 if (entry && !dax_is_conflict(entry)) in put_unlocked_entry()
271 dax_wake_entry(xas, entry, false); in put_unlocked_entry()
275 * We used the xa_state to get the entry, but then we locked the entry and
279 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
283 BUG_ON(dax_is_locked(entry)); in dax_unlock_entry()
286 old = xas_store(xas, entry); in dax_unlock_entry()
289 dax_wake_entry(xas, entry, false); in dax_unlock_entry()
293 * Return: The entry stored at this location before it was locked.
295 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
297 unsigned long v = xa_to_value(entry); in dax_lock_entry()
301 static unsigned long dax_entry_size(void *entry) in dax_entry_size() argument
303 if (dax_is_zero_entry(entry)) in dax_entry_size()
305 else if (dax_is_empty_entry(entry)) in dax_entry_size()
307 else if (dax_is_pmd_entry(entry)) in dax_entry_size()
313 static unsigned long dax_end_pfn(void *entry) in dax_end_pfn() argument
315 return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE; in dax_end_pfn()
319 * Iterate through all mapped pfns represented by an entry, i.e. skip
322 #define for_each_mapped_pfn(entry, pfn) \ argument
323 for (pfn = dax_to_pfn(entry); \
324 pfn < dax_end_pfn(entry); pfn++)
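
for_each_mapped_pfn() (source lines 322-324) simply walks from dax_to_pfn(entry) up to dax_end_pfn(entry): one iteration for a PTE-sized entry, and PMD_SIZE / PAGE_SIZE iterations (512 with 4 KiB pages on x86-64) for a PMD entry. A usage sketch, modelled on dax_associate_entry() below:

	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		/* ... tie the page to, or detach it from, the mapping ... */
	}
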
331 static void dax_associate_entry(void *entry, struct address_space *mapping, in dax_associate_entry() argument
334 unsigned long size = dax_entry_size(entry), pfn, index; in dax_associate_entry()
341 for_each_mapped_pfn(entry, pfn) { in dax_associate_entry()
350 static void dax_disassociate_entry(void *entry, struct address_space *mapping, in dax_disassociate_entry() argument
358 for_each_mapped_pfn(entry, pfn) { in dax_disassociate_entry()
368 static struct page *dax_busy_page(void *entry) in dax_busy_page() argument
372 for_each_mapped_pfn(entry, pfn) { in dax_busy_page()
382 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a page
383 * @page: The page whose entry we want to lock
386 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
392 void *entry; in dax_lock_page() local
399 entry = NULL; in dax_lock_page()
410 entry = (void *)~0UL; in dax_lock_page()
421 entry = xas_load(&xas); in dax_lock_page()
422 if (dax_is_locked(entry)) { in dax_lock_page()
424 wait_entry_unlocked(&xas, entry); in dax_lock_page()
428 dax_lock_entry(&xas, entry); in dax_lock_page()
433 return (dax_entry_t)entry; in dax_lock_page()
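
dax_lock_page() returns the locked entry itself as an opaque cookie (0 meaning the entry could not be locked), which the caller later hands back to dax_unlock_page(). A hedged sketch of the intended usage, as seen from a caller such as the memory-failure path; the error handling shown is illustrative:

	dax_entry_t cookie = dax_lock_page(page);

	if (!cookie)
		return -EBUSY;	/* entry is gone or could not be locked */
	/* ... the DAX entry for this page is now held against the fault path ... */
	dax_unlock_page(page, cookie);
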
448 * Find page cache entry at given index. If it is a DAX entry, return it
449 * with the entry locked. If the page cache doesn't contain an entry at
450 * that index, add a locked empty entry.
452 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
453 * either return that locked entry or will return VM_FAULT_FALLBACK.
458 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
460 * PTE insertion will cause an existing PMD entry to be unmapped and
466 * the tree, and PTE writes will simply dirty the entire PMD entry.
473 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
480 bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */ in grab_mapping_entry()
481 void *entry; in grab_mapping_entry() local
485 entry = get_unlocked_entry(xas, order); in grab_mapping_entry()
487 if (entry) { in grab_mapping_entry()
488 if (dax_is_conflict(entry)) in grab_mapping_entry()
490 if (!xa_is_value(entry)) { in grab_mapping_entry()
496 if (dax_is_pmd_entry(entry) && in grab_mapping_entry()
497 (dax_is_zero_entry(entry) || in grab_mapping_entry()
498 dax_is_empty_entry(entry))) { in grab_mapping_entry()
506 * Make sure 'entry' remains valid while we drop in grab_mapping_entry()
509 dax_lock_entry(xas, entry); in grab_mapping_entry()
516 if (dax_is_zero_entry(entry)) { in grab_mapping_entry()
525 dax_disassociate_entry(entry, mapping, false); in grab_mapping_entry()
527 dax_wake_entry(xas, entry, true); in grab_mapping_entry()
529 entry = NULL; in grab_mapping_entry()
533 if (entry) { in grab_mapping_entry()
534 dax_lock_entry(xas, entry); in grab_mapping_entry()
540 entry = dax_make_entry(pfn_to_pfn_t(0), flags); in grab_mapping_entry()
541 dax_lock_entry(xas, entry); in grab_mapping_entry()
555 return entry; in grab_mapping_entry()
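
As the comment at source lines 448-473 explains, grab_mapping_entry() hands back either a locked DAX entry or a VM_FAULT code encoded as an xarray internal entry. The fault paths later in this listing (source lines 1280-1282 and 1533-1535) decode it exactly this way; a condensed sketch:

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		/* Encoded fault code, e.g. VM_FAULT_FALLBACK. */
		return xa_to_internal(entry);
	}
	/* ... service the fault against the locked entry ... */
	dax_unlock_entry(&xas, entry);
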
582 void *entry; in dax_layout_busy_page_range() local
618 xas_for_each(&xas, entry, end_idx) { in dax_layout_busy_page_range()
619 if (WARN_ON_ONCE(!xa_is_value(entry))) in dax_layout_busy_page_range()
621 if (unlikely(dax_is_locked(entry))) in dax_layout_busy_page_range()
622 entry = get_unlocked_entry(&xas, 0); in dax_layout_busy_page_range()
623 if (entry) in dax_layout_busy_page_range()
624 page = dax_busy_page(entry); in dax_layout_busy_page_range()
625 put_unlocked_entry(&xas, entry); in dax_layout_busy_page_range()
652 void *entry; in __dax_invalidate_entry() local
655 entry = get_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
656 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in __dax_invalidate_entry()
662 dax_disassociate_entry(entry, mapping, trunc); in __dax_invalidate_entry()
667 put_unlocked_entry(&xas, entry); in __dax_invalidate_entry()
673 * Delete DAX entry at @index from @mapping. Wait for it
684 * caller has seen a DAX entry for this index, we better find it in dax_delete_mapping_entry()
692 * Invalidate DAX entry if it is clean.
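
Both kernel-doc fragments above sit in front of thin wrappers around __dax_invalidate_entry(); the trunc argument (visible at source line 662) is what separates truncate/hole-punch, which must always find and remove the entry, from sync invalidation, which only drops a clean entry. A reconstructed sketch of those wrappers, offered as an approximation rather than a quotation:

int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/* Truncate path: the caller already saw a DAX entry at this index. */
	WARN_ON_ONCE(!ret);
	return ret;
}

int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}
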
726 * By this point grab_mapping_entry() has ensured that we have a locked entry
734 void *entry, pfn_t pfn, unsigned long flags, bool dirty) in dax_insert_entry() argument
741 if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) { in dax_insert_entry()
744 if (dax_is_pmd_entry(entry)) in dax_insert_entry()
747 else /* pte entry */ in dax_insert_entry()
753 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) { in dax_insert_entry()
756 dax_disassociate_entry(entry, mapping, false); in dax_insert_entry()
759 * Only swap our new entry into the page cache if the current in dax_insert_entry()
760 * entry is a zero page or an empty entry. If a normal PTE or in dax_insert_entry()
761 * PMD entry is already in the cache, we leave it alone. This in dax_insert_entry()
763 * existing entry is a PMD, we will just leave the PMD in the in dax_insert_entry()
767 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | in dax_insert_entry()
769 entry = new_entry; in dax_insert_entry()
778 return entry; in dax_insert_entry()
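
The comment at source lines 759-766 is the key invariant of dax_insert_entry(): a placeholder (zero-page or empty) entry gets replaced, while a real PTE or PMD entry already in the cache is kept, so a PTE fault over an existing PMD just dirties the PMD. A condensed sketch of that branch, with new_entry assumed to have been built by dax_make_entry():

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old = dax_lock_entry(xas, new_entry);

		/* The placeholder was locked, so the previous value must be
		 * the old entry with DAX_LOCKED set (cf. source line 767). */
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) | DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* keep the existing PTE/PMD entry */
	}
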
866 struct address_space *mapping, void *entry) in dax_writeback_one() argument
875 if (WARN_ON(!xa_is_value(entry))) in dax_writeback_one()
878 if (unlikely(dax_is_locked(entry))) { in dax_writeback_one()
879 void *old_entry = entry; in dax_writeback_one()
881 entry = get_unlocked_entry(xas, 0); in dax_writeback_one()
883 /* Entry got punched out / reallocated? */ in dax_writeback_one()
884 if (!entry || WARN_ON_ONCE(!xa_is_value(entry))) in dax_writeback_one()
887 * Entry got reallocated elsewhere? No need to writeback. in dax_writeback_one()
889 * difference in lockbit or entry type. in dax_writeback_one()
891 if (dax_to_pfn(old_entry) != dax_to_pfn(entry)) in dax_writeback_one()
893 if (WARN_ON_ONCE(dax_is_empty_entry(entry) || in dax_writeback_one()
894 dax_is_zero_entry(entry))) { in dax_writeback_one()
899 /* Another fsync thread may have already done this entry */ in dax_writeback_one()
904 /* Lock the entry to serialize with page faults */ in dax_writeback_one()
905 dax_lock_entry(xas, entry); in dax_writeback_one()
911 * at the entry only under the i_pages lock and once they do that in dax_writeback_one()
912 * they will see the entry locked and wait for it to unlock. in dax_writeback_one()
924 pfn = dax_to_pfn(entry); in dax_writeback_one()
925 count = 1UL << dax_entry_order(entry); in dax_writeback_one()
934 * entry lock. in dax_writeback_one()
938 xas_store(xas, entry); in dax_writeback_one()
940 dax_wake_entry(xas, entry, false); in dax_writeback_one()
946 put_unlocked_entry(xas, entry); in dax_writeback_one()
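
The size of the region flushed by dax_writeback_one() falls straight out of the entry order read at source lines 924-925: count = 1UL << dax_entry_order(entry) pages starting at dax_to_pfn(entry). For a PTE entry that is a single 4 KiB page; for a PMD entry (order 9 with 4 KiB pages) it is 512 pages, i.e. 2 MiB. A small sketch of the arithmetic:

	unsigned long pfn = dax_to_pfn(entry);
	unsigned long count = 1UL << dax_entry_order(entry);
	size_t len = count * PAGE_SIZE;	/* bytes to write back, starting at pfn */
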
961 void *entry; in dax_writeback_mapping_range() local
976 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
977 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
1041 struct address_space *mapping, void **entry, in dax_load_hole() argument
1049 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1263 void *entry; in dax_iomap_pte_fault() local
1280 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1281 if (xa_is_internal(entry)) { in dax_iomap_pte_fault()
1282 ret = xa_to_internal(entry); in dax_iomap_pte_fault()
1355 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pte_fault()
1373 trace_dax_insert_mapping(inode, vmf, entry); in dax_iomap_pte_fault()
1383 ret = dax_load_hole(&xas, mapping, &entry, vmf); in dax_iomap_pte_fault()
1410 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
1418 struct iomap *iomap, void **entry) in dax_pmd_load_hole() argument
1436 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1459 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1465 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry); in dax_pmd_load_hole()
1484 void *entry; in dax_iomap_pmd_fault() local
1528 * grab_mapping_entry() will make sure we get an empty PMD entry, in dax_iomap_pmd_fault()
1529 * a zero PMD entry or a DAX PMD. If it can't (because a PTE in dax_iomap_pmd_fault()
1530 * entry is already in the array, for instance), it will return in dax_iomap_pmd_fault()
1533 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1534 if (xa_is_internal(entry)) { in dax_iomap_pmd_fault()
1535 result = xa_to_internal(entry); in dax_iomap_pmd_fault()
1573 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, in dax_iomap_pmd_fault()
1590 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry); in dax_iomap_pmd_fault()
1597 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry); in dax_iomap_pmd_fault()
1620 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
1666 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1669 * @order: Order of entry to insert.
1671 * This function inserts a writeable PTE or PMD entry into the page tables
1672 * for an mmaped DAX file. It also marks the page cache entry as dirty.
1679 void *entry; in dax_insert_pfn_mkwrite() local
1683 entry = get_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
1684 /* Did we race with someone splitting entry or so? */ in dax_insert_pfn_mkwrite()
1685 if (!entry || dax_is_conflict(entry) || in dax_insert_pfn_mkwrite()
1686 (order == 0 && !dax_is_pte_entry(entry))) { in dax_insert_pfn_mkwrite()
1687 put_unlocked_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1694 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1704 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
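
dax_insert_pfn_mkwrite() is the second half of a synchronous page fault: once the filesystem has persisted the metadata for the faulting range, the helper whose kernel-doc begins at source line 1712 below re-inserts a writeable PTE or PMD entry and dirties the page-cache entry. A hedged sketch of that caller, modelled on dax_finish_sync_fault(); details may differ slightly from the exact source:

	loff_t start = (loff_t)vmf->pgoff << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);	/* 0 for PTE, PMD_ORDER for PMD */
	size_t len = PAGE_SIZE << order;

	/* Make the allocated blocks persistent before granting write access,
	 * then insert the writeable entry and mark the cache entry dirty. */
	if (vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1))
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
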
1712 * @pe_size: Size of entry to be inserted
1717 * table entry.