| /Linux-v5.10/fs/nilfs2/ |
| D | btnode.c |
|  181  xa_lock_irq(&btnc->i_pages);  in nilfs_btnode_prepare_change_key()
|  182  err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS);  in nilfs_btnode_prepare_change_key()
|  183  xa_unlock_irq(&btnc->i_pages);  in nilfs_btnode_prepare_change_key()
|  238  xa_lock_irq(&btnc->i_pages);  in nilfs_btnode_commit_change_key()
|  239  __xa_erase(&btnc->i_pages, oldkey);  in nilfs_btnode_commit_change_key()
|  240  __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);  in nilfs_btnode_commit_change_key()
|  241  xa_unlock_irq(&btnc->i_pages);  in nilfs_btnode_commit_change_key()
|  269  xa_erase_irq(&btnc->i_pages, newkey);  in nilfs_btnode_abort_change_key()
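
The nilfs2 hits above show the basic locked-update idiom: take the array's IRQ-disabling spinlock with xa_lock_irq(), then use the __xa_* variants that assume the lock is already held. A minimal sketch of the same re-key-an-entry pattern; move_entry() and its contract are hypothetical, only the XArray calls mirror the listing (PAGECACHE_TAG_DIRTY is XA_MARK_0 under the covers):

```c
#include <linux/xarray.h>

/* Hypothetical helper: move @entry from @oldkey to @newkey while
 * keeping a mark set on it. */
static int move_entry(struct xarray *xa, unsigned long oldkey,
		      unsigned long newkey, void *entry)
{
	int err;

	xa_lock_irq(xa);
	err = __xa_insert(xa, newkey, entry, GFP_NOFS); /* -EBUSY if occupied */
	if (!err) {
		__xa_erase(xa, oldkey);
		__xa_set_mark(xa, newkey, XA_MARK_0);
	}
	xa_unlock_irq(xa);
	return err;
}
```

The single-call xa_erase_irq() seen at line 269 wraps the same lock/erase/unlock sequence for the case where nothing else must happen atomically.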
|
| D | page.c |
|  324  xa_lock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
|  325  p = __xa_erase(&smap->i_pages, offset);  in nilfs_copy_back_pages()
|  328  xa_unlock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
|  330  xa_lock_irq(&dmap->i_pages);  in nilfs_copy_back_pages()
|  331  p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);  in nilfs_copy_back_pages()
|  340  __xa_set_mark(&dmap->i_pages, offset,  in nilfs_copy_back_pages()
|  343  xa_unlock_irq(&dmap->i_pages);  in nilfs_copy_back_pages()
|  464  xa_lock_irq(&mapping->i_pages);  in __nilfs_clear_page_dirty()
|  466  __xa_clear_mark(&mapping->i_pages, page_index(page),  in __nilfs_clear_page_dirty()
|  468  xa_unlock_irq(&mapping->i_pages);  in __nilfs_clear_page_dirty()
|  [all …]
|
| /Linux-v5.10/mm/ |
| D | truncate.c |
|   37  XA_STATE(xas, &mapping->i_pages, index);  in __clear_shadow_entry()
|   49  xa_lock_irq(&mapping->i_pages);  in clear_shadow_entry()
|   51  xa_unlock_irq(&mapping->i_pages);  in clear_shadow_entry()
|   80  xa_lock_irq(&mapping->i_pages);  in truncate_exceptional_pvec_entries()
|  103  xa_unlock_irq(&mapping->i_pages);  in truncate_exceptional_pvec_entries()
|  519  xa_lock_irq(&mapping->i_pages);  in truncate_inode_pages_final()
|  520  xa_unlock_irq(&mapping->i_pages);  in truncate_inode_pages_final()
|  668  xa_lock_irqsave(&mapping->i_pages, flags);  in invalidate_complete_page2()
|  674  xa_unlock_irqrestore(&mapping->i_pages, flags);  in invalidate_complete_page2()
|  682  xa_unlock_irqrestore(&mapping->i_pages, flags);  in invalidate_complete_page2()
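
truncate.c pairs a stack-allocated iteration cursor (XA_STATE) with the same lock. A sketch of the __clear_shadow_entry() shape, assuming the caller wants compare-then-erase semantics; the function name is hypothetical:

```c
#include <linux/xarray.h>

/* Erase the slot at @index only if it still holds @entry. */
static void clear_entry_if_unchanged(struct xarray *xa, unsigned long index,
				     void *entry)
{
	XA_STATE(xas, xa, index);

	xas_lock_irq(&xas);		/* same lock as xa_lock_irq(xa) */
	if (xas_load(&xas) == entry)
		xas_store(&xas, NULL);	/* storing NULL removes the slot */
	xas_unlock_irq(&xas);
}
```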
|
| D | swap_state.c |
|  133  XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));  in add_to_swap_cache()
|  189  XA_STATE(xas, &address_space->i_pages, idx);  in __delete_from_swap_cache()
|  277  xa_lock_irq(&address_space->i_pages);  in delete_from_swap_cache()
|  279  xa_unlock_irq(&address_space->i_pages);  in delete_from_swap_cache()
|  295  XA_STATE(xas, &address_space->i_pages, curr);  in clear_shadow_from_swap_cache()
|  297  xa_lock_irq(&address_space->i_pages);  in clear_shadow_from_swap_cache()
|  305  xa_unlock_irq(&address_space->i_pages);  in clear_shadow_from_swap_cache()
|  718  xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);  in init_swap_address_space()
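
add_to_swap_cache() uses XA_STATE_ORDER so a single stored entry spans all 2^order indices of a compound page. A hedged sketch of that multi-order store, assuming CONFIG_XARRAY_MULTI; the retry loop is the standard xas_nomem() pattern for allocating nodes outside the lock:

```c
#include <linux/xarray.h>

static int store_multiorder(struct xarray *xa, unsigned long index,
			    unsigned int order, void *entry)
{
	/* The cursor rounds @index down to an order-aligned boundary. */
	XA_STATE_ORDER(xas, xa, index, order);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, entry);
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* retry after allocating */

	return xas_error(&xas);	/* 0 or a negative errno */
}
```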
|
| D | workingset.c |
|  537  mapping = container_of(node->array, struct address_space, i_pages);  in shadow_lru_isolate()
|  540  if (!xa_trylock(&mapping->i_pages)) {  in shadow_lru_isolate()
|  565  xa_unlock_irq(&mapping->i_pages);  in shadow_lru_isolate()
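
workingset.c recovers the owning mapping from a bare XArray node: every struct xa_node records its struct xarray, and i_pages is embedded in struct address_space, so container_of() walks back out. The trylock matters because shadow_lru_isolate() is called with the LRU lock held and must not spin or sleep. A sketch of both pieces:

```c
#include <linux/fs.h>
#include <linux/xarray.h>

static struct address_space *node_to_mapping(struct xa_node *node)
{
	/* node->array points at the xarray embedding this node */
	return container_of(node->array, struct address_space, i_pages);
}

static bool try_lock_mapping(struct address_space *mapping)
{
	/* Never spin here: the caller already holds another lock. */
	return xa_trylock(&mapping->i_pages);
}
```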
|
| D | filemap.c |
|  123  XA_STATE(xas, &mapping->i_pages, page->index);  in page_cache_delete()
|  273  xa_lock_irqsave(&mapping->i_pages, flags);  in delete_from_page_cache()
|  275  xa_unlock_irqrestore(&mapping->i_pages, flags);  in delete_from_page_cache()
|  298  XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);  in page_cache_delete_batch()
|  352  xa_lock_irqsave(&mapping->i_pages, flags);  in delete_from_page_cache_batch()
|  359  xa_unlock_irqrestore(&mapping->i_pages, flags);  in delete_from_page_cache_batch()
|  477  XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);  in filemap_range_has_page()
|  795  XA_STATE(xas, &mapping->i_pages, offset);  in replace_page_cache_page()
|  835  XA_STATE(xas, &mapping->i_pages, offset);  in __add_to_page_cache_locked()
| 1623  XA_STATE(xas, &mapping->i_pages, index);  in page_cache_next_miss()
|  [all …]
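
filemap.c prefers the irqsave/irqrestore lock variants, which are safe even when the caller may already be running with interrupts disabled. A minimal sketch of the delete-from-cache locking shape; erase_irqsave() is a hypothetical name:

```c
#include <linux/xarray.h>

/* Remove and return whatever was stored at @index. */
static void *erase_irqsave(struct xarray *xa, unsigned long index)
{
	unsigned long flags;
	void *old;

	xa_lock_irqsave(xa, flags);
	old = __xa_erase(xa, index);
	xa_unlock_irqrestore(xa, flags);
	return old;
}
```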
|
| D | page-writeback.c |
| 2118  XA_STATE(xas, &mapping->i_pages, start);  in tag_pages_for_writeback()
| 2485  xa_lock_irqsave(&mapping->i_pages, flags);  in __set_page_dirty_nobuffers()
| 2489  __xa_set_mark(&mapping->i_pages, page_index(page),  in __set_page_dirty_nobuffers()
| 2491  xa_unlock_irqrestore(&mapping->i_pages, flags);  in __set_page_dirty_nobuffers()
| 2736  xa_lock_irqsave(&mapping->i_pages, flags);  in test_clear_page_writeback()
| 2739  __xa_clear_mark(&mapping->i_pages, page_index(page),  in test_clear_page_writeback()
| 2753  xa_unlock_irqrestore(&mapping->i_pages, flags);  in test_clear_page_writeback()
| 2773  XA_STATE(xas, &mapping->i_pages, page_index(page));  in __test_set_page_writeback()
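
tag_pages_for_writeback() walks every entry carrying one mark and stamps it with a second, periodically parking the cursor so interrupts and other lockers get a turn. A sketch of that pattern; the generic XA_MARK_* names stand in for PAGECACHE_TAG_DIRTY/TOWRITE, and the batch size of 128 is arbitrary:

```c
#include <linux/sched.h>
#include <linux/xarray.h>

/* Add XA_MARK_1 to every entry in [start, end] that has XA_MARK_0. */
static void retag_range(struct xarray *xa, unsigned long start,
			unsigned long end)
{
	XA_STATE(xas, xa, start);
	unsigned int tagged = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end, XA_MARK_0) {
		xas_set_mark(&xas, XA_MARK_1);
		if (++tagged % 128)
			continue;
		xas_pause(&xas);	/* remember position, then breathe */
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
}
```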
|
| D | memfd.c |
|   68  XA_STATE(xas, &mapping->i_pages, 0);  in memfd_wait_for_pins()
|
| D | shmem.c |
|  417  XA_STATE(xas, &mapping->i_pages, index);  in shmem_replace_entry()
|  439  return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);  in shmem_confirm_swap()
|  672  XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));  in shmem_add_to_page_cache()
|  747  xa_lock_irq(&mapping->i_pages);  in shmem_delete_from_page_cache()
|  753  xa_unlock_irq(&mapping->i_pages);  in shmem_delete_from_page_cache()
|  766  old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);  in shmem_free_swap()
|  783  XA_STATE(xas, &mapping->i_pages, start);  in shmem_partial_swap_usage()
| 1192  XA_STATE(xas, &mapping->i_pages, start);  in shmem_find_swap_entries()
| 1543  if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,  in shmem_alloc_hugepage()
| 1657  xa_lock_irq(&swap_mapping->i_pages);  in shmem_replace_page()
|  [all …]
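
shmem_free_swap() shows the one-shot compare-and-erase: xa_cmpxchg_irq() replaces the entry only if it still matches, and a GFP of 0 is safe because storing NULL never allocates. A sketch, with a hypothetical wrapper name:

```c
#include <linux/xarray.h>

/* Remove the entry at @index only if it still equals @expected;
 * returns whatever was actually there, so callers can re-check. */
static void *erase_if_equal(struct xarray *xa, unsigned long index,
			    void *expected)
{
	return xa_cmpxchg_irq(xa, index, expected, NULL, 0);
}
```

shmem_confirm_swap() at line 439 is the read-only cousin: a bare xa_load() compared against the expected value.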
|
| D | huge_memory.c |
| 2434  xa_lock(&swap_cache->i_pages);  in __split_huge_page()
| 2447  __xa_store(&head->mapping->i_pages, head[i].index,  in __split_huge_page()
| 2450  __xa_store(&swap_cache->i_pages, offset + i,  in __split_huge_page()
| 2464  xa_unlock(&swap_cache->i_pages);  in __split_huge_page()
| 2471  xa_unlock(&head->mapping->i_pages);  in __split_huge_page()
| 2690  XA_STATE(xas, &mapping->i_pages, page_index(head));  in split_huge_page_to_list()
| 2696  xa_lock(&mapping->i_pages);  in split_huge_page_to_list()
| 2731  xa_unlock(&mapping->i_pages);  in split_huge_page_to_list()
|
| D | readahead.c |
|  199  struct page *page = xa_load(&mapping->i_pages, index + i);  in page_cache_ra_unbounded()
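
Readahead probes the cache with plain xa_load(), which takes the RCU read lock internally and needs no spinlock for a lookup. A sketch counting populated slots in a window; count_present() is hypothetical:

```c
#include <linux/xarray.h>

static unsigned long count_present(struct xarray *xa, unsigned long start,
				   unsigned long nr)
{
	unsigned long i, present = 0;

	for (i = 0; i < nr; i++)
		if (xa_load(xa, start + i))	/* NULL means an empty slot */
			present++;
	return present;
}
```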
|
| D | vmscan.c |
|  864  xa_lock_irqsave(&mapping->i_pages, flags);  in __remove_mapping()
|  905  xa_unlock_irqrestore(&mapping->i_pages, flags);  in __remove_mapping()
|  931  xa_unlock_irqrestore(&mapping->i_pages, flags);  in __remove_mapping()
|  940  xa_unlock_irqrestore(&mapping->i_pages, flags);  in __remove_mapping()
|
| /Linux-v5.10/arch/nios2/include/asm/ |
| D | cacheflush.h |
|   49  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
|   50  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
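
On these architectures the i_pages spinlock is reused to serialise cache-alias flushing against page-cache updates. A hedged sketch of how an arch flush_dcache_page() implementation typically uses the pair, walking every user mapping of a page; only the two macros and the interval-tree walk come from the kernel, the function itself is illustrative:

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

static void flush_user_aliases(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* flush each user-space alias of the page here */
	}
	flush_dcache_mmap_unlock(mapping);
}
```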
|
| /Linux-v5.10/arch/csky/abiv1/inc/abi/ |
| D | cacheflush.h |
|   21  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
|   22  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
|
| /Linux-v5.10/arch/nds32/include/asm/ |
| D | cacheflush.h |
|   43  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
|   44  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
|
| /Linux-v5.10/arch/parisc/include/asm/ |
| D | cacheflush.h |
|   58  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
|   59  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
|
| /Linux-v5.10/include/linux/ |
| D | backing-dev.h |
|  285  !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&  in inode_to_wb()
|  318  xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);  in unlocked_inode_to_wb_begin()
|  336  xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);  in unlocked_inode_to_wb_end()
|
| D | pagemap.h |
|  887  page = xa_load(&rac->mapping->i_pages, rac->_index);  in readahead_page()
|  898  XA_STATE(xas, &rac->mapping->i_pages, 0);  in __readahead_batch()
|
| /Linux-v5.10/arch/arm/include/asm/ |
| D | cacheflush.h |
|  318  #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
|  319  #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
|
| /Linux-v5.10/fs/ |
| D | dax.c |
|  414  xas.xa = &mapping->i_pages;  in dax_lock_page()
|  439  XA_STATE(xas, &mapping->i_pages, page->index);  in dax_unlock_page()
|  587  XA_STATE(xas, &mapping->i_pages, start_idx);  in dax_layout_busy_page_range()
|  650  XA_STATE(xas, &mapping->i_pages, index);  in __dax_invalidate_entry()
|  958  XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);  in dax_writeback_mapping_range()
| 1252  XA_STATE(xas, &mapping->i_pages, vmf->pgoff);  in dax_iomap_pte_fault()
| 1474  XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);  in dax_iomap_pmd_fault()
| 1678  XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);  in dax_insert_pfn_mkwrite()
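
dax.c scans long ranges entry by entry, dropping the lock and rescheduling between iterations via xas_pause(), which records the position so the walk resumes correctly once the lock is retaken. A sketch in the shape of dax_layout_busy_page_range(), here just counting value entries:

```c
#include <linux/sched.h>
#include <linux/xarray.h>

static unsigned long count_value_entries(struct xarray *xa,
					 unsigned long start,
					 unsigned long end)
{
	XA_STATE(xas, xa, start);
	unsigned long n = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end) {
		if (xas_retry(&xas, entry))	/* raced with a node change */
			continue;
		if (xa_is_value(entry))
			n++;
		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return n;
}
```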
|
| D | inode.c |
|  372  xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);  in __address_space_init_once()
|  531  xa_lock_irq(&inode->i_data.i_pages);  in clear_inode()
|  534  xa_unlock_irq(&inode->i_data.i_pages);  in clear_inode()
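
inode.c fixes i_pages' behaviour at init time: XA_FLAGS_LOCK_IRQ tells the XArray its lock is taken with interrupts disabled (so internal retry paths drop and retake it the same way), and XA_FLAGS_ACCOUNT charges the array's internal nodes to the allocating cgroup. A sketch with a hypothetical demo array:

```c
#include <linux/xarray.h>

/* An IRQ-safe, memcg-accounted array in the style of i_pages. */
static struct xarray demo_pages;

static void demo_init(void)
{
	xa_init_flags(&demo_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT);
}
```

For statically initialised arrays, DEFINE_XARRAY_FLAGS() achieves the same without a runtime init call.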
|
| D | fs-writeback.c |
|  361  XA_STATE(xas, &mapping->i_pages, 0);  in inode_switch_wbs_work_fn()
|  389  xa_lock_irq(&mapping->i_pages);  in inode_switch_wbs_work_fn()
|  453  xa_unlock_irq(&mapping->i_pages);  in inode_switch_wbs_work_fn()
|
| D | buffer.c |
|  605  xa_lock_irqsave(&mapping->i_pages, flags);  in __set_page_dirty()
|  609  __xa_set_mark(&mapping->i_pages, page_index(page),  in __set_page_dirty()
|  612  xa_unlock_irqrestore(&mapping->i_pages, flags);  in __set_page_dirty()
|
| /Linux-v5.10/fs/gfs2/ |
| D | glops.c |
|  548  xa_lock_irq(&inode->i_data.i_pages);  in inode_go_dump()
|  550  xa_unlock_irq(&inode->i_data.i_pages);  in inode_go_dump()
|
| /Linux-v5.10/Documentation/vm/ |
| D | page_migration.rst |
|  110  5. The i_pages lock is taken. This will cause all processes trying  
|  128  11. The i_pages lock is dropped. With that lookups in the mapping
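
The two documentation hits bracket the critical section of page migration. A simplified sketch of what happens between them (cf. migrate_page_move_mapping() in v5.10): with the lock held, the old page's slot is redirected, so every lookup that subsequently takes the lock already sees the replacement:

```c
#include <linux/xarray.h>

static void replace_slot(struct xarray *xa, unsigned long index,
			 void *oldpage, void *newpage)
{
	XA_STATE(xas, xa, index);

	xas_lock_irq(&xas);
	if (xas_load(&xas) == oldpage)	/* nobody truncated it meanwhile */
		xas_store(&xas, newpage);
	xas_unlock_irq(&xas);
}
```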
|