
Searched refs:xas (Results 1 – 25 of 32) sorted by relevance


/Linux-v5.15/lib/
xarray.c
36 static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type) in xas_lock_type()
39 xas_lock_irq(xas); in xas_lock_type()
41 xas_lock_bh(xas); in xas_lock_type()
43 xas_lock(xas); in xas_lock_type()
46 static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type) in xas_unlock_type()
49 xas_unlock_irq(xas); in xas_unlock_type()
51 xas_unlock_bh(xas); in xas_unlock_type()
53 xas_unlock(xas); in xas_unlock_type()
124 static void xas_squash_marks(const struct xa_state *xas) in xas_squash_marks()
127 unsigned int limit = xas->xa_offset + xas->xa_sibs + 1; in xas_squash_marks()
[all …]
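
The two fragments above read together as a simple dispatch: pick the xas_lock_*()/xas_unlock_*() variant matching how the array's lock is meant to be taken. A reconstruction of the locking half as a sketch (the if/else structure is inferred from the fragment; XA_LOCK_IRQ/XA_LOCK_BH are the kernel's xa_lock_type values):

#include <linux/xarray.h>

/* Dispatch to the lock variant the caller asked for: irq-disabling,
 * bottom-half-disabling, or the plain spinlock. */
static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}
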
test_xarray.c
74 XA_STATE_ORDER(xas, xa, index, order); in xa_store_order()
78 xas_lock(&xas); in xa_store_order()
79 curr = xas_store(&xas, entry); in xa_store_order()
80 xas_unlock(&xas); in xa_store_order()
81 } while (xas_nomem(&xas, gfp)); in xa_store_order()
104 XA_STATE(xas, xa, 0); in check_xas_retry()
111 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); in check_xas_retry()
113 XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); in check_xas_retry()
114 XA_BUG_ON(xa, xas_retry(&xas, NULL)); in check_xas_retry()
115 XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); in check_xas_retry()
[all …]
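
xa_store_order() above is the canonical advanced-API store loop: try the store under the lock, and if it failed for want of memory, let xas_nomem() allocate outside the lock and retry. A minimal self-contained sketch of the same pattern (function name and GFP flags are illustrative, not from the excerpt):

#include <linux/xarray.h>

/* Store 'entry' at 'index', retrying if the XArray needs new nodes.
 * Returns 0 or the negative errno left in the xa_state. */
static int store_entry(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL)); /* allocates with the lock dropped */

	return xas_error(&xas);
}
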
idr.c
383 XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS); in ida_alloc_range()
395 xas_lock_irqsave(&xas, flags); in ida_alloc_range()
397 bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK); in ida_alloc_range()
398 if (xas.xa_index > min / IDA_BITMAP_BITS) in ida_alloc_range()
400 if (xas.xa_index * IDA_BITMAP_BITS + bit > max) in ida_alloc_range()
408 if (xas.xa_index * IDA_BITMAP_BITS + bit > max) in ida_alloc_range()
412 xas_store(&xas, xa_mk_value(tmp)); in ida_alloc_range()
422 xas_store(&xas, bitmap); in ida_alloc_range()
423 if (xas_error(&xas)) { in ida_alloc_range()
431 if (xas.xa_index * IDA_BITMAP_BITS + bit > max) in ida_alloc_range()
[all …]
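
ida_alloc_range() above leans on two XArray features: XA_FREE_MARK (kept on slots with free space when the array is allocating) and value entries, which pack a small integer into the slot itself via xa_mk_value(). A hedged sketch of the value-entry round trip on its own (illustrative helper, not from idr.c):

#include <linux/xarray.h>

/* Store a small integer as a value entry and verify the round trip;
 * no memory is allocated for the entry itself, only for tree nodes. */
static int store_counter(struct xarray *xa, unsigned long index,
			 unsigned long value)
{
	void *old, *entry;

	old = xa_store(xa, index, xa_mk_value(value), GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);

	entry = xa_load(xa, index);
	if (!xa_is_value(entry) || xa_to_value(entry) != value)
		return -EIO; /* round-trip check; should not trigger */
	return 0;
}
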
iov_iter.c
78 XA_STATE(xas, i->xarray, index); \
81 xas_for_each(&xas, head, ULONG_MAX) { \
83 if (xas_retry(&xas, head)) \
1360 XA_STATE(xas, xa, index); in iter_xarray_populate_pages()
1365 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in iter_xarray_populate_pages()
1366 if (xas_retry(&xas, page)) in iter_xarray_populate_pages()
1370 if (unlikely(page != xas_reload(&xas))) { in iter_xarray_populate_pages()
1371 xas_reset(&xas); in iter_xarray_populate_pages()
1375 pages[ret] = find_subpage(page, xas.xa_index); in iter_xarray_populate_pages()
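
iter_xarray_populate_pages() shows the lockless-lookup dance: walk under RCU, skip retry entries, pin the object, then confirm with xas_reload() that the slot still holds it, and xas_reset() to rewalk if it does not. A self-contained sketch of that shape (counting entries instead of pinning pages):

#include <linux/xarray.h>

/* Count entries from 'start' onward under RCU, revalidating each slot
 * the way the excerpt above does before trusting what it loaded. */
static unsigned int count_entries(struct xarray *xa, unsigned long start)
{
	XA_STATE(xas, xa, start);
	void *entry;
	unsigned int count = 0;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))	/* raced with a node split */
			continue;
		/* ...take a reference on 'entry' here... */
		if (unlikely(entry != xas_reload(&xas))) {
			xas_reset(&xas);	/* slot changed: rewalk it */
			continue;
		}
		count++;
	}
	rcu_read_unlock();
	return count;
}
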
/Linux-v5.15/include/linux/
xarray.h
1370 #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
1371 #define xas_trylock(xas) xa_trylock((xas)->xa)
1372 #define xas_lock(xas) xa_lock((xas)->xa)
1373 #define xas_unlock(xas) xa_unlock((xas)->xa)
1374 #define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
1375 #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
1376 #define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
1377 #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
1378 #define xas_lock_irqsave(xas, flags) \
1379 xa_lock_irqsave((xas)->xa, flags)
[all …]
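
These macros make the point that an xa_state carries no lock of its own: every xas_lock_*() variant forwards to the spinlock embedded in the underlying struct xarray. A usage sketch for the irqsave pair (illustrative helper):

#include <linux/xarray.h>

/* Erase one slot from a context where interrupt handlers may also
 * take the lock, so the irq flags must be saved and restored. */
static void *erase_irqsafe(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	unsigned long flags;
	void *old;

	xas_lock_irqsave(&xas, flags);	/* really xa_lock_irqsave(xas.xa) */
	old = xas_store(&xas, NULL);	/* storing NULL erases the entry */
	xas_unlock_irqrestore(&xas, flags);
	return old;
}
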
pagemap.h
942 XA_STATE(xas, &rac->mapping->i_pages, 0); in __readahead_batch()
950 xas_set(&xas, rac->_index); in __readahead_batch()
952 xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { in __readahead_batch()
953 if (xas_retry(&xas, page)) in __readahead_batch()
967 xas_set(&xas, rac->_index + rac->_batch_count); in __readahead_batch()
/Linux-v5.15/fs/
dax.c
157 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas, in dax_entry_waitqueue()
161 unsigned long index = xas->xa_index; in dax_entry_waitqueue()
170 key->xa = xas->xa; in dax_entry_waitqueue()
173 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
195 static void dax_wake_entry(struct xa_state *xas, void *entry, in dax_wake_entry()
201 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
223 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) in get_unlocked_entry()
233 entry = xas_find_conflict(xas); in get_unlocked_entry()
241 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_unlocked_entry()
244 xas_unlock_irq(xas); in get_unlocked_entry()
[all …]
/Linux-v5.15/tools/testing/radix-tree/
multiorder.c
18 XA_STATE_ORDER(xas, xa, index, order); in item_insert_order()
22 xas_lock(&xas); in item_insert_order()
23 xas_store(&xas, item); in item_insert_order()
24 xas_unlock(&xas); in item_insert_order()
25 } while (xas_nomem(&xas, GFP_KERNEL)); in item_insert_order()
27 if (!xas_error(&xas)) in item_insert_order()
31 return xas_error(&xas); in item_insert_order()
36 XA_STATE(xas, xa, 0); in multiorder_iteration()
56 xas_set(&xas, j); in multiorder_iteration()
57 xas_for_each(&xas, item, ULONG_MAX) { in multiorder_iteration()
[all …]
iteration_check.c
23 XA_STATE(xas, xa, index); in my_item_insert()
28 xas_lock(&xas); in my_item_insert()
30 xas_set_order(&xas, index, order); in my_item_insert()
32 if (xas_find_conflict(&xas)) in my_item_insert()
34 xas_store(&xas, item); in my_item_insert()
35 xas_set_mark(&xas, TAG); in my_item_insert()
38 xas_unlock(&xas); in my_item_insert()
39 if (xas_nomem(&xas, GFP_KERNEL)) in my_item_insert()
69 XA_STATE(xas, &array, 0); in tagged_iteration_fn()
75 xas_set(&xas, 0); in tagged_iteration_fn()
[all …]
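
my_item_insert() above refuses to overwrite: after xas_set_order() aims the cursor at a multi-order range, xas_find_conflict() reports any existing entry overlapping it before xas_store() commits and xas_set_mark() tags the result. A hedged sketch of an insert-if-empty built the same way:

#include <linux/xarray.h>

/* Insert 'entry' covering (index, order) only if the range is empty,
 * marking it on success; retries when node allocation is needed. */
static int insert_order_once(struct xarray *xa, unsigned long index,
			     unsigned int order, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_set_order(&xas, index, order); /* re-aim after a retry */
		if (xas_find_conflict(&xas)) {
			xas_unlock(&xas);
			return -EEXIST;	/* something already in the range */
		}
		xas_store(&xas, entry);
		xas_set_mark(&xas, XA_MARK_0);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}
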
test.c
176 XA_STATE(xas, xa, start); in tag_tagged_items()
183 xas_lock_irq(&xas); in tag_tagged_items()
184 xas_for_each_marked(&xas, item, end, iftag) { in tag_tagged_items()
185 xas_set_mark(&xas, thentag); in tag_tagged_items()
189 xas_pause(&xas); in tag_tagged_items()
190 xas_unlock_irq(&xas); in tag_tagged_items()
192 xas_lock_irq(&xas); in tag_tagged_items()
194 xas_unlock_irq(&xas); in tag_tagged_items()
257 XA_STATE(xas, xa, 0); in item_kill_tree()
260 xas_for_each(&xas, entry, ULONG_MAX) { in item_kill_tree()
[all …]
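
tag_tagged_items() demonstrates how long walks bound their lock hold time: every so many entries, xas_pause() records the position so the lock can be dropped, other work can run, and the iteration resumes where it left off. A sketch of that batching loop (batch size and mark names are illustrative):

#include <linux/sched.h>
#include <linux/xarray.h>

#define COPY_BATCH 128	/* illustrative; not taken from the excerpt */

/* Copy XA_MARK_0 onto XA_MARK_1 for entries in [start, end], dropping
 * the irq-disabling lock every COPY_BATCH entries. */
static unsigned long copy_marks(struct xarray *xa, unsigned long start,
				unsigned long end)
{
	XA_STATE(xas, xa, start);
	unsigned long tagged = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end, XA_MARK_0) {
		xas_set_mark(&xas, XA_MARK_1);
		if (++tagged % COPY_BATCH)
			continue;
		xas_pause(&xas);	/* make resuming after unlock safe */
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return tagged;
}
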
iteration_check_2.c
15 XA_STATE(xas, arg, 0); in iterator()
21 xas_set(&xas, 0); in iterator()
23 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) in iterator()
26 assert(xas.xa_index >= 100); in iterator()
regression1.c
82 XA_STATE(xas, &mt_tree, start); in find_get_pages()
87 xas_for_each(&xas, page, ULONG_MAX) { in find_get_pages()
88 if (xas_retry(&xas, page)) in find_get_pages()
99 if (unlikely(page != xas_reload(&xas))) in find_get_pages()
108 xas_reset(&xas); in find_get_pages()
/Linux-v5.15/mm/
memfd.c
31 static void memfd_tag_pins(struct xa_state *xas) in memfd_tag_pins()
38 xas_lock_irq(xas); in memfd_tag_pins()
39 xas_for_each(xas, page, ULONG_MAX) { in memfd_tag_pins()
42 page = find_subpage(page, xas->xa_index); in memfd_tag_pins()
44 xas_set_mark(xas, MEMFD_TAG_PINNED); in memfd_tag_pins()
49 xas_pause(xas); in memfd_tag_pins()
50 xas_unlock_irq(xas); in memfd_tag_pins()
52 xas_lock_irq(xas); in memfd_tag_pins()
54 xas_unlock_irq(xas); in memfd_tag_pins()
68 XA_STATE(xas, &mapping->i_pages, 0); in memfd_wait_for_pins()
[all …]
filemap.c
127 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
130 mapping_set_update(&xas, mapping); in page_cache_delete()
134 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
142 xas_store(&xas, shadow); in page_cache_delete()
143 xas_init_marks(&xas); in page_cache_delete()
290 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); in page_cache_delete_batch()
295 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
296 xas_for_each(&xas, page, ULONG_MAX) { in page_cache_delete_batch()
318 if (page->index == xas.xa_index) in page_cache_delete_batch()
327 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
[all …]
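
page_cache_delete() replaces the page with a shadow entry (or NULL) and then calls xas_init_marks() so stale DIRTY/WRITEBACK marks do not outlive it. A minimal sketch of that replace-and-clear step, assuming the caller already holds the i_pages lock:

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Swap a page for its shadow entry and reset the slot's marks.
 * Caller holds the mapping->i_pages lock, as in the excerpt above. */
static void replace_with_shadow(struct address_space *mapping,
				struct page *page, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, page->index);

	xas_store(&xas, shadow);	/* shadow may be NULL to just erase */
	xas_init_marks(&xas);		/* back to the default mark state */
}
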
swap_state.c
105 XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); in add_to_swap_cache()
117 xas_lock_irq(&xas); in add_to_swap_cache()
118 xas_create_range(&xas); in add_to_swap_cache()
119 if (xas_error(&xas)) in add_to_swap_cache()
122 VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); in add_to_swap_cache()
123 old = xas_load(&xas); in add_to_swap_cache()
129 xas_store(&xas, page); in add_to_swap_cache()
130 xas_next(&xas); in add_to_swap_cache()
137 xas_unlock_irq(&xas); in add_to_swap_cache()
138 } while (xas_nomem(&xas, gfp)); in add_to_swap_cache()
[all …]
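
add_to_swap_cache() fills 2^order consecutive slots with one page: XA_STATE_ORDER positions the cursor, xas_create_range() preallocates every node the range needs, and xas_store()/xas_next() walk the slots. A hedged sketch of that range-fill shape:

#include <linux/xarray.h>

/* Store 'entry' into all 1 << order slots starting at 'index',
 * retrying the whole attempt when node allocation fails. */
static int store_range(struct xarray *xa, unsigned long index,
		       unsigned int order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long i, nr = 1UL << order;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);	/* build nodes for the whole range */
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			xas_store(&xas, entry);
			xas_next(&xas);
		}
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
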
khugepaged.c
1647 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); in collapse_file()
1672 xas_lock_irq(&xas); in collapse_file()
1673 xas_create_range(&xas); in collapse_file()
1674 if (!xas_error(&xas)) in collapse_file()
1676 xas_unlock_irq(&xas); in collapse_file()
1677 if (!xas_nomem(&xas, GFP_KERNEL)) { in collapse_file()
1695 xas_set(&xas, start); in collapse_file()
1697 struct page *page = xas_next(&xas); in collapse_file()
1699 VM_BUG_ON(index != xas.xa_index); in collapse_file()
1708 if (!xas_next_entry(&xas, end - 1)) { in collapse_file()
[all …]
page-writeback.c
2127 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
2131 xas_lock_irq(&xas); in tag_pages_for_writeback()
2132 xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) { in tag_pages_for_writeback()
2133 xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE); in tag_pages_for_writeback()
2137 xas_pause(&xas); in tag_pages_for_writeback()
2138 xas_unlock_irq(&xas); in tag_pages_for_writeback()
2140 xas_lock_irq(&xas); in tag_pages_for_writeback()
2142 xas_unlock_irq(&xas); in tag_pages_for_writeback()
2820 XA_STATE(xas, &mapping->i_pages, page_index(page)); in __test_set_page_writeback()
2825 xas_lock_irqsave(&xas, flags); in __test_set_page_writeback()
[all …]
migrate.c
383 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_page_move_mapping()
406 xas_lock_irq(&xas); in migrate_page_move_mapping()
407 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_page_move_mapping()
408 xas_unlock_irq(&xas); in migrate_page_move_mapping()
413 xas_unlock_irq(&xas); in migrate_page_move_mapping()
441 xas_store(&xas, newpage); in migrate_page_move_mapping()
446 xas_next(&xas); in migrate_page_move_mapping()
447 xas_store(&xas, newpage); in migrate_page_move_mapping()
458 xas_unlock(&xas); in migrate_page_move_mapping()
511 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_huge_page_move_mapping()
[all …]
shmem.c
414 XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
419 item = xas_load(&xas); in shmem_replace_entry()
422 xas_store(&xas, replacement); in shmem_replace_entry()
697 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page)); in shmem_add_to_page_cache()
726 xas_lock_irq(&xas); in shmem_add_to_page_cache()
727 entry = xas_find_conflict(&xas); in shmem_add_to_page_cache()
729 xas_set_err(&xas, -EEXIST); in shmem_add_to_page_cache()
730 xas_create_range(&xas); in shmem_add_to_page_cache()
731 if (xas_error(&xas)) in shmem_add_to_page_cache()
734 xas_store(&xas, page); in shmem_add_to_page_cache()
[all …]
truncate.c
37 XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
39 xas_set_update(&xas, workingset_update_node); in __clear_shadow_entry()
40 if (xas_load(&xas) != entry) in __clear_shadow_entry()
42 xas_store(&xas, NULL); in __clear_shadow_entry()
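
__clear_shadow_entry() registers workingset_update_node before storing, so the workingset code hears about every XArray node this operation touches; xas_set_update() simply records the callback in the xa_state. A hedged sketch with a stub callback:

#include <linux/xarray.h>

/* Illustrative callback: invoked for each xa_node the operation
 * modifies while this xa_state is in use. */
static void note_node_change(struct xa_node *node)
{
	/* e.g. account how many slots the node still uses */
}

/* Clear 'index' only if it still holds 'expected', with node-change
 * notification; caller holds the xa_lock, as in the excerpt. */
static void clear_if_matches(struct xarray *xa, unsigned long index,
			     void *expected)
{
	XA_STATE(xas, xa, index);

	xas_set_update(&xas, note_node_change);
	if (xas_load(&xas) != expected)
		return;
	xas_store(&xas, NULL);
}
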
/Linux-v5.15/drivers/infiniband/core/
ib_core_uverbs.c
268 XA_STATE(xas, &ucontext->mmap_xa, min_pgoff); in rdma_user_mmap_entry_insert_range()
294 xas_find_marked(&xas, max_pgoff, XA_FREE_MARK); in rdma_user_mmap_entry_insert_range()
295 if (xas.xa_node == XAS_RESTART) in rdma_user_mmap_entry_insert_range()
298 xa_first = xas.xa_index; in rdma_user_mmap_entry_insert_range()
308 xas_next_entry(&xas, xa_last - 1); in rdma_user_mmap_entry_insert_range()
309 if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last) in rdma_user_mmap_entry_insert_range()
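
rdma_user_mmap_entry_insert_range() searches for a run of free indices: xas_find_marked() with XA_FREE_MARK jumps to the first free slot (xas.xa_node left at XAS_RESTART means there is none), and xas_next_entry() then checks whether any occupied entry intrudes on the candidate window. A hedged sketch of the first step alone:

#include <linux/xarray.h>

/* Find the lowest free index in [min, max]. Assumes the array was
 * created with XA_FLAGS_ALLOC so free slots carry XA_FREE_MARK;
 * caller holds the xa_lock, as in the excerpt above. */
static int lowest_free_index(struct xarray *xa, unsigned long min,
			     unsigned long max, unsigned long *index)
{
	XA_STATE(xas, xa, min);

	xas_find_marked(&xas, max, XA_FREE_MARK);
	if (xas.xa_node == XAS_RESTART)
		return -ENOSPC;	/* nothing free at or below max */
	*index = xas.xa_index;
	return 0;
}
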
restrack.c
116 XA_STATE(xas, &rt->xa, 0); in rdma_restrack_count()
120 xas_for_each(&xas, e, U32_MAX) in rdma_restrack_count()
/Linux-v5.15/arch/x86/kernel/cpu/sgx/
encl.c
226 XA_STATE(xas, &encl->page_array, PFN_DOWN(start)); in sgx_encl_may_map()
236 xas_lock(&xas); in sgx_encl_may_map()
237 xas_for_each(&xas, page, PFN_DOWN(end - 1)) { in sgx_encl_may_map()
245 xas_pause(&xas); in sgx_encl_may_map()
246 xas_unlock(&xas); in sgx_encl_may_map()
252 xas_lock(&xas); in sgx_encl_may_map()
255 xas_unlock(&xas); in sgx_encl_may_map()
/Linux-v5.15/fs/afs/
write.c
267 XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); in afs_pages_written_back()
275 xas_for_each(&xas, page, end) { in afs_pages_written_back()
449 XA_STATE(xas, &mapping->i_pages, index); in afs_extend_writeback()
459 xas_for_each(&xas, page, ULONG_MAX) { in afs_extend_writeback()
461 if (xas_retry(&xas, page)) in afs_extend_writeback()
469 xas_reset(&xas); in afs_extend_writeback()
474 if (unlikely(page != xas_reload(&xas))) { in afs_extend_writeback()
514 xas_pause(&xas); in afs_extend_writeback()
/Linux-v5.15/drivers/target/
target_core_user.c
508 XA_STATE(xas, &udev->data_pages, 0); in tcmu_get_empty_block()
519 xas_set(&xas, dpi); in tcmu_get_empty_block()
521 for (cnt = 0; xas_next(&xas) && cnt < page_cnt;) in tcmu_get_empty_block()
1663 XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk); in tcmu_blocks_release()
1667 xas_lock(&xas); in tcmu_blocks_release()
1668 xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) { in tcmu_blocks_release()
1669 xas_store(&xas, NULL); in tcmu_blocks_release()
1673 xas_unlock(&xas); in tcmu_blocks_release()
3027 XA_STATE(xas, &udev->commands, cmd_id); in tcmu_free_kept_buf_store()
3029 xas_lock(&xas); in tcmu_free_kept_buf_store()
[all …]
