Lines matching refs:xas (mm/filemap.c)

129 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
132 mapping_set_update(&xas, mapping); in page_cache_delete()
136 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
142 xas_store(&xas, shadow); in page_cache_delete()
143 xas_init_marks(&xas); in page_cache_delete()
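
The three lines above are the whole removal: xas_set_order() widens the cursor to cover every slot of a (possibly multi-order) folio, a single xas_store() overwrites them all with the shadow entry (or NULL), and xas_init_marks() clears the dirty/writeback marks. A minimal sketch of the same pattern, using a hypothetical delete_entry() helper; the caller is assumed to hold xa_lock_irq on mapping->i_pages, as page_cache_delete()'s caller does:

    #include <linux/xarray.h>
    #include <linux/pagemap.h>

    /* Hypothetical helper; caller holds xa_lock_irq(&mapping->i_pages). */
    static void delete_entry(struct address_space *mapping,
                             struct folio *folio, void *shadow)
    {
        XA_STATE(xas, &mapping->i_pages, folio->index);

        /* Cover every slot the folio occupies, not just the first. */
        xas_set_order(&xas, folio->index, folio_order(folio));
        xas_store(&xas, shadow);    /* shadow may be NULL */
        xas_init_marks(&xas);       /* drop dirty/writeback marks */
    }
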
282 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
287 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
288 xas_for_each(&xas, folio, ULONG_MAX) { in page_cache_delete_batch()
314 xas_store(&xas, NULL); in page_cache_delete_batch()
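
page_cache_delete_batch() amortises the tree walk: one xas_for_each() pass visits every slot from the first folio's index onward and NULL-stores the slots belonging to folios in the batch. A reduced sketch, trimmed to the single-page case (hypothetical helper; caller holds xa_lock_irq, and the real function's locking checks and accounting are omitted):

    #include <linux/pagevec.h>

    static void delete_batch(struct address_space *mapping,
                             struct folio_batch *fbatch)
    {
        XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
        struct folio *folio;
        unsigned int i = 0;

        xas_for_each(&xas, folio, ULONG_MAX) {
            if (i >= folio_batch_count(fbatch))
                break;
            if (xa_is_value(folio))
                continue;           /* shadow entry: not ours */
            if (folio != fbatch->folios[i])
                continue;           /* inserted by someone else */
            folio->mapping = NULL;
            i++;
            xas_store(&xas, NULL);  /* NULL store removes the slot */
        }
    }
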
476 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
484 folio = xas_find(&xas, max); in filemap_range_has_page()
485 if (xas_retry(&xas, folio)) in filemap_range_has_page()
634 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_writeback()
642 xas_for_each(&xas, folio, max) { in filemap_range_has_writeback()
643 if (xas_retry(&xas, folio)) in filemap_range_has_writeback()
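
Both range probes use the same RCU read-side idiom: xas_find() locates the first entry at or after the start, xas_retry() restarts the walk if it raced with a node being split or shrunk, and shadow (value) entries are skipped; a repeated xas_find() advances past the entry it last returned. A sketch under a hypothetical name, keeping only the xas choreography:

    static bool range_has_entry(struct address_space *mapping,
                                pgoff_t start, pgoff_t max)
    {
        XA_STATE(xas, &mapping->i_pages, start);
        struct folio *folio;

        rcu_read_lock();
        for (;;) {
            folio = xas_find(&xas, max);
            if (xas_retry(&xas, folio))
                continue;   /* raced with a modification: rewalk */
            if (xa_is_value(folio))
                continue;   /* shadow entries don't count */
            break;
        }
        rcu_read_unlock();
        return folio != NULL;
    }
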
812 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_folio()
824 xas_lock_irq(&xas); in replace_page_cache_folio()
825 xas_store(&xas, new); in replace_page_cache_folio()
837 xas_unlock_irq(&xas); in replace_page_cache_folio()
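
replace_page_cache_folio() swaps one folio for another in place: because a single xas_store() publishes the new folio atomically, concurrent lookups see either the old entry or the new one, never an empty slot. Minimal sketch (hypothetical helper name):

    static void replace_entry(struct address_space *mapping,
                              pgoff_t offset, struct folio *new)
    {
        XA_STATE(xas, &mapping->i_pages, offset);

        xas_lock_irq(&xas);     /* xa_lock_irq on mapping->i_pages */
        xas_store(&xas, new);   /* atomically replaces the old entry */
        xas_unlock_irq(&xas);
    }
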
847 XA_STATE(xas, &mapping->i_pages, index); in __filemap_add_folio()
854 mapping_set_update(&xas, mapping); in __filemap_add_folio()
862 xas_set_order(&xas, index, folio_order(folio)); in __filemap_add_folio()
869 folio->index = xas.xa_index; in __filemap_add_folio()
872 unsigned int order = xa_get_order(xas.xa, xas.xa_index); in __filemap_add_folio()
876 xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), in __filemap_add_folio()
878 xas_lock_irq(&xas); in __filemap_add_folio()
879 xas_for_each_conflict(&xas, entry) { in __filemap_add_folio()
882 xas_set_err(&xas, -EEXIST); in __filemap_add_folio()
891 order = xa_get_order(xas.xa, xas.xa_index); in __filemap_add_folio()
895 xas_split(&xas, old, order); in __filemap_add_folio()
896 xas_reset(&xas); in __filemap_add_folio()
900 xas_store(&xas, folio); in __filemap_add_folio()
901 if (xas_error(&xas)) in __filemap_add_folio()
914 xas_unlock_irq(&xas); in __filemap_add_folio()
915 } while (xas_nomem(&xas, gfp)); in __filemap_add_folio()
917 if (xas_error(&xas)) in __filemap_add_folio()
928 return xas_error(&xas); in __filemap_add_folio()
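
__filemap_add_folio() is the canonical xas allocation loop: everything between xas_lock_irq() and xas_unlock_irq() may fail with -ENOMEM, in which case xas_nomem() allocates the missing node outside the lock and the do-while retries from the top. xas_for_each_conflict() visits every entry that would collide with the multi-order range; the real function additionally splits a pre-existing larger shadow entry with xas_split_alloc()/xas_split() (lines 876, 895), which this skeleton omits:

    static int add_entry(struct address_space *mapping, struct folio *folio,
                         pgoff_t index, gfp_t gfp)
    {
        XA_STATE(xas, &mapping->i_pages, index);
        void *entry;

        xas_set_order(&xas, index, folio_order(folio));
        do {
            xas_lock_irq(&xas);
            xas_for_each_conflict(&xas, entry) {
                if (!xa_is_value(entry)) {
                    /* A real folio already occupies the range. */
                    xas_set_err(&xas, -EEXIST);
                    goto unlock;
                }
            }
            xas_store(&xas, folio);     /* may set -ENOMEM on xas */
    unlock:
            xas_unlock_irq(&xas);
            /* True only for -ENOMEM: memory was allocated, retry. */
        } while (xas_nomem(&xas, gfp));

        return xas_error(&xas);
    }
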
1734 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1737 void *entry = xas_next(&xas); in page_cache_next_miss()
1740 if (xas.xa_index == 0) in page_cache_next_miss()
1744 return xas.xa_index; in page_cache_next_miss()
1770 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1773 void *entry = xas_prev(&xas); in page_cache_prev_miss()
1776 if (xas.xa_index == ULONG_MAX) in page_cache_prev_miss()
1780 return xas.xa_index; in page_cache_prev_miss()
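
The two gap searches are mirror images: xas_next()/xas_prev() step the cursor one index at a time, and the walk stops at the first empty (or shadow) slot. The xa_index == 0 and xa_index == ULONG_MAX tests detect the index wrapping around, i.e. the search running off the end of the file's address space. Sketch of the forward case (hypothetical helper; the walk must run under RCU):

    static pgoff_t next_miss(struct address_space *mapping, pgoff_t index,
                             unsigned long max_scan)
    {
        XA_STATE(xas, &mapping->i_pages, index);

        rcu_read_lock();
        while (max_scan--) {
            void *entry = xas_next(&xas);
            if (!entry || xa_is_value(entry))
                break;              /* found a hole */
            if (xas.xa_index == 0)
                break;              /* wrapped past ULONG_MAX */
        }
        rcu_read_unlock();
        return xas.xa_index;        /* index of the gap */
    }
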
1818 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_entry()
1823 xas_reset(&xas); in filemap_get_entry()
1824 folio = xas_load(&xas); in filemap_get_entry()
1825 if (xas_retry(&xas, folio)) in filemap_get_entry()
1837 if (unlikely(folio != xas_reload(&xas))) { in filemap_get_entry()
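
filemap_get_entry() shows the speculative-reference dance that every lockless page-cache lookup repeats: load the entry under RCU, try to take a reference, then xas_reload() to confirm the slot still holds the same folio; if anything changed, drop the reference and start over. Sketch (hypothetical name; folio_try_get() is the real kernel helper, though older trees spell it folio_try_get_rcu()):

    static struct folio *get_entry(struct address_space *mapping,
                                   pgoff_t index)
    {
        XA_STATE(xas, &mapping->i_pages, index);
        struct folio *folio;

        rcu_read_lock();
    repeat:
        xas_reset(&xas);            /* fresh walk from the root */
        folio = xas_load(&xas);
        if (xas_retry(&xas, folio))
            goto repeat;
        if (!folio || xa_is_value(folio))
            goto out;               /* empty, or shadow/swap entry */
        if (!folio_try_get(folio))
            goto repeat;            /* folio was being freed */
        if (unlikely(folio != xas_reload(&xas))) {
            folio_put(folio);       /* raced with removal/replacement */
            goto repeat;
        }
    out:
        rcu_read_unlock();
        return folio;
    }
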
1969 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max, in find_get_entry() argument
1976 folio = xas_find(xas, max); in find_get_entry()
1978 folio = xas_find_marked(xas, max, mark); in find_get_entry()
1980 if (xas_retry(xas, folio)) in find_get_entry()
1993 if (unlikely(folio != xas_reload(xas))) { in find_get_entry()
2000 xas_reset(xas); in find_get_entry()
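
find_get_entry() generalises that dance into an iterator step: the mark argument selects the walk, with XA_PRESENT meaning "any entry" via xas_find() and a real mark (such as PAGECACHE_TAG_DIRTY) restricting it to marked slots via xas_find_marked(). A skeleton reconstructed from the matched lines; note that a failed revalidation calls xas_reset() so the next find re-descends from the root rather than trusting a possibly stale node:

    static struct folio *next_entry(struct xa_state *xas, pgoff_t max,
                                    xa_mark_t mark)
    {
        struct folio *folio;

    retry:
        if (mark == XA_PRESENT)
            folio = xas_find(xas, max);
        else
            folio = xas_find_marked(xas, max, mark);

        if (xas_retry(xas, folio))
            goto retry;
        if (!folio || xa_is_value(folio))
            return folio;           /* shadow entries go to the caller */
        if (!folio_try_get(folio))
            goto reset;
        if (unlikely(folio != xas_reload(xas))) {
            folio_put(folio);
            goto reset;
        }
        return folio;
    reset:
        xas_reset(xas);
        goto retry;
    }
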
2027 XA_STATE(xas, &mapping->i_pages, *start); in find_get_entries()
2031 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in find_get_entries()
2032 indices[fbatch->nr] = xas.xa_index; in find_get_entries()
2073 XA_STATE(xas, &mapping->i_pages, *start); in find_lock_entries()
2077 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { in find_lock_entries()
2088 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), in find_lock_entries()
2091 indices[fbatch->nr] = xas.xa_index; in find_lock_entries()
2138 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios()
2142 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) { in filemap_get_folios()
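
find_get_entries(), find_lock_entries() and filemap_get_folios() are thin gather loops over find_get_entry(): each hit is appended to a folio_batch and its index recorded from xas.xa_index before the cursor moves on. A sketch of the shared shape (hypothetical helper; the real functions also advance *start past the last folio and handle large folios):

    static unsigned int gather(struct address_space *mapping, pgoff_t start,
                               pgoff_t end, struct folio_batch *fbatch,
                               pgoff_t *indices)
    {
        XA_STATE(xas, &mapping->i_pages, start);
        struct folio *folio;

        rcu_read_lock();
        while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
            indices[folio_batch_count(fbatch)] = xas.xa_index;
            if (!folio_batch_add(fbatch, folio))
                break;              /* batch is full */
        }
        rcu_read_unlock();
        return folio_batch_count(fbatch);
    }
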
2191 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_contig()
2197 for (folio = xas_load(&xas); folio && xas.xa_index <= end; in filemap_get_folios_contig()
2198 folio = xas_next(&xas)) { in filemap_get_folios_contig()
2199 if (xas_retry(&xas, folio)) in filemap_get_folios_contig()
2211 if (unlikely(folio != xas_reload(&xas))) in filemap_get_folios_contig()
2227 xas_reset(&xas); in filemap_get_folios_contig()
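
filemap_get_folios_contig() must stop at the first gap, so it cannot use find_get_entry() (which skips holes); instead xas_load() reads the starting slot and xas_next() steps strictly one slot at a time, breaking the run on any hole, shadow entry or failed revalidation. The xas_reset() in the retry path works because xas_next() on a reset cursor reloads the current index instead of advancing. Reduced sketch (hypothetical helper):

    static unsigned int gather_contig(struct address_space *mapping,
                                      pgoff_t start, pgoff_t end,
                                      struct folio_batch *fbatch)
    {
        XA_STATE(xas, &mapping->i_pages, start);
        struct folio *folio;

        rcu_read_lock();
        for (folio = xas_load(&xas); folio && xas.xa_index <= end;
             folio = xas_next(&xas)) {
            if (xas_retry(&xas, folio))
                continue;
            if (xa_is_value(folio))
                break;              /* shadow entry ends the run */
            if (!folio_try_get(folio))
                goto retry;
            if (unlikely(folio != xas_reload(&xas)))
                goto put;
            if (!folio_batch_add(fbatch, folio))
                break;
            continue;
    put:
            folio_put(folio);
    retry:
            xas_reset(&xas);        /* re-examine the same index */
        }
        rcu_read_unlock();
        return folio_batch_count(fbatch);
    }
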
2262 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_tag()
2266 while ((folio = find_get_entry(&xas, end, tag)) != NULL) { in filemap_get_folios_tag()
2332 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_read_batch()
2336 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) { in filemap_get_read_batch()
2337 if (xas_retry(&xas, folio)) in filemap_get_read_batch()
2339 if (xas.xa_index > max || xa_is_value(folio)) in filemap_get_read_batch()
2346 if (unlikely(folio != xas_reload(&xas))) in filemap_get_read_batch()
2355 xas_advance(&xas, folio_next_index(folio) - 1); in filemap_get_read_batch()
2360 xas_reset(&xas); in filemap_get_read_batch()
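
filemap_get_read_batch() adds one refinement for large folios: after accepting a folio, xas_advance() jumps the cursor to the folio's last slot, so the next xas_next() lands on the first index past it instead of revisiting its tail slots. Reduced sketch (hypothetical helper; the real function also filters on uptodate/readahead state):

    static void read_batch(struct address_space *mapping, pgoff_t index,
                           pgoff_t max, struct folio_batch *fbatch)
    {
        XA_STATE(xas, &mapping->i_pages, index);
        struct folio *folio;

        rcu_read_lock();
        for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
            if (xas_retry(&xas, folio))
                continue;
            if (xas.xa_index > max || xa_is_value(folio))
                break;
            if (!folio_try_get(folio))
                goto retry;
            if (unlikely(folio != xas_reload(&xas)))
                goto put;
            if (!folio_batch_add(fbatch, folio))
                break;
            /* Skip the large folio's remaining slots. */
            xas_advance(&xas, folio_next_index(folio) - 1);
            continue;
    put:
            folio_put(folio);
    retry:
            xas_reset(&xas);
        }
        rcu_read_unlock();
    }
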
2981 static inline loff_t folio_seek_hole_data(struct xa_state *xas, in folio_seek_hole_data() argument
2993 xas_pause(xas); in folio_seek_hole_data()
3014 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio) in seek_folio_size() argument
3017 return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index); in seek_folio_size()
3042 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); in mapping_seek_hole_data()
3051 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) { in mapping_seek_hole_data()
3052 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT; in mapping_seek_hole_data()
3061 seek_size = seek_folio_size(&xas, folio); in mapping_seek_hole_data()
3063 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, in mapping_seek_hole_data()
3070 xas_set(&xas, pos >> PAGE_SHIFT); in mapping_seek_hole_data()
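
mapping_seek_hole_data() mixes three cursor operations: xa_get_order() sizes an entry straight from the tree (valid even for shadow entries, where no folio can be consulted, as seek_folio_size() above shows), xas_pause() parks the cursor before any sleeping work so the RCU walk can resume safely, and xas_set() jumps the cursor to an arbitrary new index. Sketch of the pause idiom alone (hypothetical helper; a real caller would hold a folio reference, as find_get_entry() provides, before sleeping on it):

    static void walk_and_sleep(struct address_space *mapping,
                               pgoff_t start, pgoff_t max)
    {
        XA_STATE(xas, &mapping->i_pages, start);
        struct folio *folio;

        rcu_read_lock();
        while ((folio = xas_find(&xas, max)) != NULL) {
            if (xas_retry(&xas, folio))
                continue;

            xas_pause(&xas);        /* park cursor after this entry */
            rcu_read_unlock();

            /* ... sleeping work, e.g. folio_lock() ... */

            rcu_read_lock();        /* next xas_find() resumes here */
        }
        rcu_read_unlock();
    }
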
3431 static struct folio *next_uptodate_folio(struct xa_state *xas, in next_uptodate_folio() argument
3434 struct folio *folio = xas_next_entry(xas, end_pgoff); in next_uptodate_folio()
3440 if (xas_retry(xas, folio)) in next_uptodate_folio()
3449 if (unlikely(folio != xas_reload(xas))) in next_uptodate_folio()
3460 if (xas->xa_index >= max_idx) in next_uptodate_folio()
3467 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_folio()
3566 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3572 folio = next_uptodate_folio(&xas, mapping, end_pgoff); in filemap_map_pages()
3591 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; in filemap_map_pages()
3592 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3593 last_pgoff = xas.xa_index; in filemap_map_pages()
3595 nr_pages = min(end, end_pgoff) - xas.xa_index + 1; in filemap_map_pages()
3602 xas.xa_index - folio->index, addr, in filemap_map_pages()
3607 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); in filemap_map_pages()
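
filemap_map_pages() drives the fault-around walk with xas_next_entry(), which advances to the next present entry without re-descending from the root when it can stay inside the current node; the fault address and PTE pointer are then derived from how far xa_index moved since the last hit (lines 3591-3593). A minimal self-contained use of the same iterator, counting present folios in a range:

    static unsigned long count_present(struct address_space *mapping,
                                       pgoff_t first, pgoff_t last)
    {
        XA_STATE(xas, &mapping->i_pages, first);
        struct folio *folio;
        unsigned long nr = 0;

        rcu_read_lock();
        for (folio = xas_next_entry(&xas, last); folio;
             folio = xas_next_entry(&xas, last)) {
            if (xas_retry(&xas, folio))
                continue;
            if (!xa_is_value(folio))
                nr++;               /* real folio, not a shadow */
        }
        rcu_read_unlock();
        return nr;
    }
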
4148 XA_STATE(xas, &mapping->i_pages, first_index); in filemap_cachestat()
4152 xas_for_each(&xas, folio, last_index) { in filemap_cachestat()
4156 if (xas_retry(&xas, folio)) in filemap_cachestat()
4163 int order = xa_get_order(xas.xa, xas.xa_index); in filemap_cachestat()
4166 folio_first_index = round_down(xas.xa_index, 1 << order); in filemap_cachestat()
4214 xas_pause(&xas); in filemap_cachestat()
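
filemap_cachestat() accounts multi-order entries without touching the folio: xa_get_order() gives the entry's order, round_down(xas.xa_index, 1 << order) recovers its first index (a large entry may be visited at any of its slots when the walk starts inside it), and xas_pause() parks the cursor before cond_resched_rcu() drops the RCU lock. Reduced sketch of the accounting step (hypothetical helper; the real function also classifies shadow entries for swap and eviction statistics):

    static unsigned long count_range(struct address_space *mapping,
                                     pgoff_t first_index, pgoff_t last_index)
    {
        XA_STATE(xas, &mapping->i_pages, first_index);
        struct folio *folio;
        unsigned long nr = 0;

        rcu_read_lock();
        xas_for_each(&xas, folio, last_index) {
            int order;
            pgoff_t folio_first, folio_last;

            if (xas_retry(&xas, folio))
                continue;

            order = xa_get_order(xas.xa, xas.xa_index);
            folio_first = round_down(xas.xa_index, 1 << order);
            folio_last = folio_first + (1 << order) - 1;

            /* Count only the slots inside the requested range. */
            nr += min(folio_last, last_index) -
                  max(folio_first, first_index) + 1;

            if (need_resched()) {
                xas_pause(&xas);    /* park before dropping RCU */
                cond_resched_rcu();
            }
        }
        rcu_read_unlock();
        return nr;
    }
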