/Linux-v5.4/lib/

xarray.c
  xas_lock_type():
     35  static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
     38  xas_lock_irq(xas);
     40  xas_lock_bh(xas);
     42  xas_lock(xas);
  xas_unlock_type():
     45  static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
     48  xas_unlock_irq(xas);
     50  xas_unlock_bh(xas);
     52  xas_unlock(xas);
  xas_squash_marks():
    123  static void xas_squash_marks(const struct xa_state *xas)
    126  unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
  [all …]
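
The xas_lock_type() hits above show lib/xarray.c choosing a spinlock variant at run time. A minimal sketch of that dispatch is below; the DEMO_LOCK_* constants are illustrative stand-ins, since the hit list truncates the code that defines the kernel's own lock-type values.

```c
#include <linux/xarray.h>

/* Illustrative lock-type values; lib/xarray.c derives the real ones from xa_flags. */
#define DEMO_LOCK_IRQ	1
#define DEMO_LOCK_BH	2

/* Take the irq-safe, bh-safe or plain variant of the array's lock. */
static inline void demo_xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == DEMO_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == DEMO_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}
```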
|
test_xarray.c
  xa_store_order():
     70  XA_STATE_ORDER(xas, xa, index, order);
     74  xas_lock(&xas);
     75  curr = xas_store(&xas, entry);
     76  xas_unlock(&xas);
     77  } while (xas_nomem(&xas, gfp));
  check_xas_retry():
    100  XA_STATE(xas, xa, 0);
    107  XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
    109  XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
    110  XA_BUG_ON(xa, xas_retry(&xas, NULL));
    111  XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
  [all …]
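
xa_store_order() above is the canonical advanced-API store loop: perform the store under the xa_lock, and if the XArray needed memory, allocate it outside the lock with xas_nomem() and retry. A self-contained sketch of the same pattern; the wrapper name is ours.

```c
#include <linux/gfp.h>
#include <linux/xarray.h>

/* Store @entry over the multi-index range described by @index/@order. */
static void *demo_store_order(struct xarray *xa, unsigned long index,
			      unsigned int order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);	/* returns the previous entry */
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));		/* allocate outside the lock, then retry */

	return curr;
}
```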
|
idr.c
  ida_alloc_range():
    382  XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
    394  xas_lock_irqsave(&xas, flags);
    396  bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
    397  if (xas.xa_index > min / IDA_BITMAP_BITS)
    399  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
    407  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
    411  xas_store(&xas, xa_mk_value(tmp));
    421  xas_store(&xas, bitmap);
    422  if (xas_error(&xas)) {
    430  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
  [all …]
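
ida_alloc_range() above locates a free slot by searching for XA_FREE_MARK with xas_find_marked(). A stripped-down sketch of just that lookup step, with an illustrative wrapper name and none of the IDA bitmap handling; ida_alloc_range() reads the bucket index back from xas.xa_index after the search.

```c
#include <linux/xarray.h>

/*
 * Return the first entry at or after @first (up to @last) that carries
 * XA_FREE_MARK, or NULL if nothing in that range is marked.
 */
static void *demo_find_first_free(struct xarray *xa, unsigned long first,
				  unsigned long last)
{
	XA_STATE(xas, xa, first);
	unsigned long flags;
	void *entry;

	xas_lock_irqsave(&xas, flags);
	entry = xas_find_marked(&xas, last, XA_FREE_MARK);
	xas_unlock_irqrestore(&xas, flags);

	return entry;
}
```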
|
/Linux-v5.4/include/linux/

xarray.h
   1331  #define xas_marked(xas, mark) xa_marked((xas)->xa, (mark))
   1332  #define xas_trylock(xas) xa_trylock((xas)->xa)
   1333  #define xas_lock(xas) xa_lock((xas)->xa)
   1334  #define xas_unlock(xas) xa_unlock((xas)->xa)
   1335  #define xas_lock_bh(xas) xa_lock_bh((xas)->xa)
   1336  #define xas_unlock_bh(xas) xa_unlock_bh((xas)->xa)
   1337  #define xas_lock_irq(xas) xa_lock_irq((xas)->xa)
   1338  #define xas_unlock_irq(xas) xa_unlock_irq((xas)->xa)
   1339  #define xas_lock_irqsave(xas, flags) \
   1340          xa_lock_irqsave((xas)->xa, flags)
   [all …]
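
Each macro above simply forwards to the corresponding xa_lock*() call on xas->xa, so the locking variant is chosen exactly as it would be for the plain xarray lock. A small usage sketch (illustrative function name) that takes the irq-disabling variant on the assumption that the array is also used from interrupt context:

```c
#include <linux/xarray.h>

/* Erase the entry at @index while holding the irq-safe form of the xa_lock. */
static void demo_erase_irq(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);

	xas_lock_irq(&xas);		/* same as xa_lock_irq() on the owning array */
	xas_store(&xas, NULL);		/* storing NULL removes the entry */
	xas_unlock_irq(&xas);
}
```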
|
swap.h
    316  #define mapping_set_update(xas, mapping) do { \
    318  xas_set_update(xas, workingset_update_node); \
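
mapping_set_update() above wires the page cache's workingset_update_node callback into an operation through xas_set_update(). A hedged sketch of the same mechanism, with a do-nothing stub in place of the real callback:

```c
#include <linux/xarray.h>

/* Stub node callback; the page cache uses workingset_update_node here. */
static void demo_node_update(struct xa_node *node)
{
	/* e.g. track nodes that now hold only shadow entries */
}

/* Clear @index, letting the XArray report node changes to our callback. */
static void demo_clear_with_update(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);

	xas_set_update(&xas, demo_node_update);
	xas_lock_irq(&xas);
	xas_store(&xas, NULL);
	xas_unlock_irq(&xas);
}
```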
|
/Linux-v5.4/fs/

dax.c
  dax_entry_waitqueue():
    147  static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
    151  unsigned long index = xas->xa_index;
    160  key->xa = xas->xa;
    163  hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
  dax_wake_entry():
    185  static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
    190  wq = dax_entry_waitqueue(xas, entry, &key);
  get_unlocked_entry():
    212  static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
    222  entry = xas_find_conflict(xas);
    230  wq = dax_entry_waitqueue(xas, entry, &ewait.key);
    233  xas_unlock_irq(xas);
  [all …]
|
fs-writeback.c
  inode_switch_wbs_work_fn():
    360  XA_STATE(xas, &mapping->i_pages, 0);
    404  xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
    411  xas_set(&xas, 0);
    412  xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
|
/Linux-v5.4/tools/testing/radix-tree/

iteration_check.c
  my_item_insert():
     23  XA_STATE(xas, xa, index);
     28  xas_lock(&xas);
     30  xas_set_order(&xas, index, order);
     32  if (xas_find_conflict(&xas))
     34  xas_store(&xas, item);
     35  xas_set_mark(&xas, TAG);
     38  xas_unlock(&xas);
     39  if (xas_nomem(&xas, GFP_KERNEL))
  tagged_iteration_fn():
     69  XA_STATE(xas, &array, 0);
     75  xas_set(&xas, 0);
  [all …]
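
my_item_insert() above combines a conflict check, the store, a mark, and the xas_nomem() retry into a single insert-if-absent operation. A sketch of that pattern; DEMO_TAG and the function name are illustrative.

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/xarray.h>

#define DEMO_TAG	XA_MARK_1	/* illustrative mark */

/* Insert @item over @index/@order unless something already occupies the range. */
static int demo_insert_marked(struct xarray *xa, unsigned long index,
			      unsigned int order, void *item)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_set_order(&xas, index, order);
		if (xas_find_conflict(&xas)) {
			xas_unlock(&xas);
			return -EEXIST;
		}
		xas_store(&xas, item);
		xas_set_mark(&xas, DEMO_TAG);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}
```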
|
multiorder.c
  item_insert_order():
     18  XA_STATE_ORDER(xas, xa, index, order);
     22  xas_lock(&xas);
     23  xas_store(&xas, item);
     24  xas_unlock(&xas);
     25  } while (xas_nomem(&xas, GFP_KERNEL));
     27  if (!xas_error(&xas))
     31  return xas_error(&xas);
  multiorder_iteration():
     36  XA_STATE(xas, xa, 0);
     56  xas_set(&xas, j);
     57  xas_for_each(&xas, item, ULONG_MAX) {
  [all …]
|
test.c
  tag_tagged_items():
    176  XA_STATE(xas, xa, start);
    183  xas_lock_irq(&xas);
    184  xas_for_each_marked(&xas, item, end, iftag) {
    185  xas_set_mark(&xas, thentag);
    189  xas_pause(&xas);
    190  xas_unlock_irq(&xas);
    192  xas_lock_irq(&xas);
    194  xas_unlock_irq(&xas);
  item_kill_tree():
    257  XA_STATE(xas, xa, 0);
    260  xas_for_each(&xas, entry, ULONG_MAX) {
  [all …]
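
tag_tagged_items() above shows the batched re-tagging idiom: walk the entries carrying one mark, set a second mark on each, and every so often xas_pause() and drop the lock so the walk never holds it for too long. A sketch with an illustrative batch size of 64:

```c
#include <linux/xarray.h>

/* Add @thentag to every entry in [start, end] that already carries @iftag. */
static unsigned long demo_retag(struct xarray *xa, unsigned long start,
				unsigned long end, xa_mark_t iftag,
				xa_mark_t thentag)
{
	XA_STATE(xas, xa, start);
	unsigned long tagged = 0;
	void *item;

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, item, end, iftag) {
		xas_set_mark(&xas, thentag);
		if (++tagged % 64)
			continue;
		/* Batch boundary: remember our place, then let others in. */
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);

	return tagged;
}
```

In kernel code the window between the unlock and relock is where a cond_resched() typically sits, as tag_pages_for_writeback() in mm/page-writeback.c does further down this list.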
|
regression1.c
  find_get_pages():
     82  XA_STATE(xas, &mt_tree, start);
     87  xas_for_each(&xas, page, ULONG_MAX) {
     88  if (xas_retry(&xas, page))
     99  if (unlikely(page != xas_reload(&xas)))
    108  xas_reset(&xas);
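
find_get_pages() above is the lockless RCU lookup idiom: skip retry entries, take a reference, then confirm with xas_reload() that the slot still holds the same entry, restarting via xas_reset() if not. A generic sketch, with demo_tryget()/demo_put() standing in for whatever reference counting the real entries use:

```c
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Stand-ins for the entries' real reference counting. */
static bool demo_tryget(void *entry) { return entry != NULL; }
static void demo_put(void *entry) { }

/* Collect up to @max entries starting at @start without taking the xa_lock. */
static unsigned int demo_gang_lookup(struct xarray *xa, unsigned long start,
				     void **results, unsigned int max)
{
	XA_STATE(xas, xa, start);
	unsigned int ret = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))	/* skip internal retry entries */
			continue;
		if (!demo_tryget(entry))
			goto restart;
		if (unlikely(entry != xas_reload(&xas))) {
			demo_put(entry);	/* slot changed under us */
			goto restart;
		}
		results[ret++] = entry;
		if (ret == max)
			break;
		continue;
restart:
		xas_reset(&xas);
	}
	rcu_read_unlock();

	return ret;
}
```

The xas_reload() re-check is what makes the lockless walk safe: between finding the entry and pinning it, the entry may have been removed, so the reference is dropped and the walk restarts at the same index.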
|
/Linux-v5.4/mm/

memfd.c
  memfd_tag_pins():
     31  static void memfd_tag_pins(struct xa_state *xas)
     38  xas_lock_irq(xas);
     39  xas_for_each(xas, page, ULONG_MAX) {
     42  page = find_subpage(page, xas->xa_index);
     44  xas_set_mark(xas, MEMFD_TAG_PINNED);
     49  xas_pause(xas);
     50  xas_unlock_irq(xas);
     52  xas_lock_irq(xas);
     54  xas_unlock_irq(xas);
  memfd_wait_for_pins():
     68  XA_STATE(xas, &mapping->i_pages, 0);
  [all …]
|
filemap.c
  page_cache_delete():
    122  XA_STATE(xas, &mapping->i_pages, page->index);
    125  mapping_set_update(&xas, mapping);
    129  xas_set_order(&xas, page->index, compound_order(page));
    137  xas_store(&xas, shadow);
    138  xas_init_marks(&xas);
  page_cache_delete_batch():
    297  XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
    302  mapping_set_update(&xas, mapping);
    303  xas_for_each(&xas, page, ULONG_MAX) {
    325  if (page->index == xas.xa_index)
    334  if (page->index + compound_nr(page) - 1 == xas.xa_index)
  [all …]
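
page_cache_delete() above removes a page by overwriting its slot (possibly with a shadow entry) and clearing the slot's marks with xas_init_marks(). A minimal sketch of that step, assuming the caller already holds the appropriate xa_lock as the page cache does:

```c
#include <linux/xarray.h>

/* Replace the entry at @index with @shadow (may be NULL) and clear its marks. */
static void demo_delete_entry(struct xarray *xa, unsigned long index,
			      void *shadow)
{
	XA_STATE(xas, xa, index);

	/* Caller is assumed to hold the xa_lock (irq-disabled for i_pages). */
	xas_store(&xas, shadow);
	xas_init_marks(&xas);
}
```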
|
khugepaged.c
  collapse_file():
   1502  XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
   1525  xas_lock_irq(&xas);
   1526  xas_create_range(&xas);
   1527  if (!xas_error(&xas))
   1529  xas_unlock_irq(&xas);
   1530  if (!xas_nomem(&xas, GFP_KERNEL)) {
   1549  xas_set(&xas, start);
   1551  struct page *page = xas_next(&xas);
   1553  VM_BUG_ON(index != xas.xa_index);
   1562  if (!xas_next_entry(&xas, end - 1)) {
  [all …]
|
swap_state.c
  add_to_swap_cache():
    118  XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
    129  xas_lock_irq(&xas);
    130  xas_create_range(&xas);
    131  if (xas_error(&xas))
    134  VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
    136  xas_store(&xas, page);
    137  xas_next(&xas);
    143  xas_unlock_irq(&xas);
    144  } while (xas_nomem(&xas, gfp));
    146  if (!xas_error(&xas))
  [all …]
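
add_to_swap_cache() above pre-creates the slots for a compound page with xas_create_range(), stores into consecutive slots with xas_store()/xas_next(), and retries the whole transaction through xas_nomem(). The sketch below mirrors that loop structure; it is a simplified illustration, not a drop-in helper, and just stores the same entry into all 1 << order slots.

```c
#include <linux/gfp.h>
#include <linux/xarray.h>

/* Fill the 1 << order slots starting at @first with @entry. */
static int demo_store_range(struct xarray *xa, unsigned long first,
			    unsigned int order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, first, order);
	unsigned long i, nr = 1UL << order;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);		/* allocate nodes for the whole range */
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			xas_store(&xas, entry);
			xas_next(&xas);		/* advance to the next slot */
		}
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));		/* retry if node allocation was needed */

	return xas_error(&xas);
}
```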
|
workingset.c
  shadow_lru_isolate():
    455  XA_STATE(xas, node->array, 0);
    495  xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
    496  xas.xa_offset = node->offset;
    497  xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
    498  xas_set_update(&xas, workingset_update_node);
    503  xas_store(&xas, NULL);
|
page-writeback.c
  tag_pages_for_writeback():
   2108  XA_STATE(xas, &mapping->i_pages, start);
   2112  xas_lock_irq(&xas);
   2113  xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
   2114  xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
   2118  xas_pause(&xas);
   2119  xas_unlock_irq(&xas);
   2121  xas_lock_irq(&xas);
   2123  xas_unlock_irq(&xas);
  __test_set_page_writeback():
   2771  XA_STATE(xas, &mapping->i_pages, page_index(page));
   2776  xas_lock_irqsave(&xas, flags);
  [all …]
|
migrate.c
  migrate_page_move_mapping():
    400  XA_STATE(xas, &mapping->i_pages, page_index(page));
    422  xas_lock_irq(&xas);
    423  if (page_count(page) != expected_count || xas_load(&xas) != page) {
    424  xas_unlock_irq(&xas);
    429  xas_unlock_irq(&xas);
    457  xas_store(&xas, newpage);
    462  xas_next(&xas);
    463  xas_store(&xas, newpage);
    474  xas_unlock(&xas);
  migrate_huge_page_move_mapping():
    514  XA_STATE(xas, &mapping->i_pages, page_index(page));
  [all …]
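
migrate_page_move_mapping() above re-checks the slot with xas_load() before replacing the old page with the new one, since the slot may have changed in the meantime. A sketch of that check-then-replace step under assumed names:

```c
#include <linux/errno.h>
#include <linux/xarray.h>

/* Replace @old with @new at @index, or fail if the slot no longer holds @old. */
static int demo_replace_entry(struct xarray *xa, unsigned long index,
			      void *old, void *new)
{
	XA_STATE(xas, xa, index);

	xas_lock_irq(&xas);
	if (xas_load(&xas) != old) {
		xas_unlock_irq(&xas);
		return -EAGAIN;		/* someone else changed the slot */
	}
	xas_store(&xas, new);
	xas_unlock_irq(&xas);

	return 0;
}
```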
|
shmem.c
  shmem_replace_entry():
    356  XA_STATE(xas, &mapping->i_pages, index);
    361  item = xas_load(&xas);
    364  xas_store(&xas, replacement);
  shmem_add_to_page_cache():
    610  XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
    626  xas_lock_irq(&xas);
    627  entry = xas_find_conflict(&xas);
    629  xas_set_err(&xas, -EEXIST);
    630  xas_create_range(&xas);
    631  if (xas_error(&xas))
    634  xas_store(&xas, page);
  [all …]
|
truncate.c
  __clear_shadow_entry():
     37  XA_STATE(xas, &mapping->i_pages, index);
     39  xas_set_update(&xas, workingset_update_node);
     40  if (xas_load(&xas) != entry)
     42  xas_store(&xas, NULL);
|
huge_memory.c
  split_huge_page_to_list():
   2772  XA_STATE(xas, &mapping->i_pages, page_index(head));
   2779  if (xas_load(&xas) != head)
|
/Linux-v5.4/drivers/infiniband/core/

restrack.c
  rdma_restrack_count():
    115  XA_STATE(xas, &rt->xa, 0);
    119  xas_for_each(&xas, e, U32_MAX) {
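
rdma_restrack_count() above is a plain counting walk with xas_for_each(). A sketch of the same idea, done here under the xa_lock; the restrack code applies its own locking and an upper bound of U32_MAX:

```c
#include <linux/xarray.h>

/* Count the entries present at indices [0, max]. */
static unsigned long demo_count_entries(struct xarray *xa, unsigned long max)
{
	XA_STATE(xas, xa, 0);
	unsigned long count = 0;
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, max)
		count++;
	xas_unlock(&xas);

	return count;
}
```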
|
device.c
  xan_find_marked():
    160  XA_STATE(xas, xa, *indexp);
    165  entry = xas_find_marked(&xas, ULONG_MAX, filter);
    168  } while (xas_retry(&xas, entry));
    172  *indexp = xas.xa_index;
|