Searched for refs:i_pages in Linux v5.15 (results 1 – 25 of 41, sorted by relevance)

/Linux-v5.15/fs/nilfs2/
btnode.c
181 xa_lock_irq(&btnc->i_pages); in nilfs_btnode_prepare_change_key()
182 err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS); in nilfs_btnode_prepare_change_key()
183 xa_unlock_irq(&btnc->i_pages); in nilfs_btnode_prepare_change_key()
238 xa_lock_irq(&btnc->i_pages); in nilfs_btnode_commit_change_key()
239 __xa_erase(&btnc->i_pages, oldkey); in nilfs_btnode_commit_change_key()
240 __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY); in nilfs_btnode_commit_change_key()
241 xa_unlock_irq(&btnc->i_pages); in nilfs_btnode_commit_change_key()
269 xa_erase_irq(&btnc->i_pages, newkey); in nilfs_btnode_abort_change_key()
page.c
324 xa_lock_irq(&smap->i_pages); in nilfs_copy_back_pages()
325 p = __xa_erase(&smap->i_pages, offset); in nilfs_copy_back_pages()
328 xa_unlock_irq(&smap->i_pages); in nilfs_copy_back_pages()
330 xa_lock_irq(&dmap->i_pages); in nilfs_copy_back_pages()
331 p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS); in nilfs_copy_back_pages()
340 __xa_set_mark(&dmap->i_pages, offset, in nilfs_copy_back_pages()
343 xa_unlock_irq(&dmap->i_pages); in nilfs_copy_back_pages()
464 xa_lock_irq(&mapping->i_pages); in __nilfs_clear_page_dirty()
466 __xa_clear_mark(&mapping->i_pages, page_index(page), in __nilfs_clear_page_dirty()
468 xa_unlock_irq(&mapping->i_pages); in __nilfs_clear_page_dirty()
[all …]
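
A note on the pattern shown by these nilfs2 hits: they all take the mapping's i_pages lock with xa_lock_irq() and then use the pre-locked double-underscore XArray calls (__xa_insert(), __xa_erase(), __xa_set_mark()) to re-key an entry. A minimal sketch of that shape, using a hypothetical move_cache_entry() helper and none of nilfs2's real error handling:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Hypothetical helper: move the entry at @oldkey to @newkey and mark the
 * new slot dirty, mirroring the lock + __xa_* pattern in the btnode code. */
static int move_cache_entry(struct address_space *mapping,
			    pgoff_t oldkey, pgoff_t newkey, struct page *page)
{
	int err;

	xa_lock_irq(&mapping->i_pages);
	err = __xa_insert(&mapping->i_pages, newkey, page, GFP_NOFS);
	if (!err) {
		__xa_erase(&mapping->i_pages, oldkey);
		__xa_set_mark(&mapping->i_pages, newkey, PAGECACHE_TAG_DIRTY);
	}
	xa_unlock_irq(&mapping->i_pages);
	return err;
}

GFP_NOFS mirrors the flag visible at btnode.c line 182; __xa_insert() may briefly drop and retake the lock if it needs to allocate XArray nodes.
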
/Linux-v5.15/mm/
truncate.c
37 XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
48 xa_lock_irq(&mapping->i_pages); in clear_shadow_entry()
50 xa_unlock_irq(&mapping->i_pages); in clear_shadow_entry()
77 xa_lock_irq(&mapping->i_pages); in truncate_exceptional_pvec_entries()
97 xa_unlock_irq(&mapping->i_pages); in truncate_exceptional_pvec_entries()
456 xa_lock_irq(&mapping->i_pages); in truncate_inode_pages_final()
457 xa_unlock_irq(&mapping->i_pages); in truncate_inode_pages_final()
570 xa_lock_irq(&mapping->i_pages); in invalidate_complete_page2()
576 xa_unlock_irq(&mapping->i_pages); in invalidate_complete_page2()
584 xa_unlock_irq(&mapping->i_pages); in invalidate_complete_page2()
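
The truncate.c matches show the xa_state flavour of the same locking: position an XA_STATE on the index and only clear the slot while holding xa_lock_irq(). A rough sketch of that shadow-entry removal, assuming @shadow is the value entry the caller expects to find (the helper name is hypothetical):

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Remove the shadow (value) entry at @index, but only if it is still the
 * one the caller saw; a page that has been faulted back in is left alone. */
static void drop_shadow_entry(struct address_space *mapping,
			      pgoff_t index, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xa_lock_irq(&mapping->i_pages);
	if (xas_load(&xas) == shadow)
		xas_store(&xas, NULL);
	xa_unlock_irq(&mapping->i_pages);
}
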
swap_state.c
90 page = xa_load(&address_space->i_pages, idx); in get_shadow_from_swap_cache()
105 XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); in add_to_swap_cache()
158 XA_STATE(xas, &address_space->i_pages, idx); in __delete_from_swap_cache()
245 xa_lock_irq(&address_space->i_pages); in delete_from_swap_cache()
247 xa_unlock_irq(&address_space->i_pages); in delete_from_swap_cache()
262 XA_STATE(xas, &address_space->i_pages, curr); in clear_shadow_from_swap_cache()
264 xa_lock_irq(&address_space->i_pages); in clear_shadow_from_swap_cache()
270 xa_unlock_irq(&address_space->i_pages); in clear_shadow_from_swap_cache()
675 xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ); in init_swap_address_space()
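
swap_state.c shows both ends of the i_pages lifecycle: init_swap_address_space() creates the XArray with XA_FLAGS_LOCK_IRQ, and shadow lookups are lock-free xa_load() calls that test for a value entry. A sketch of such a lookup (the wrapper name is hypothetical):

#include <linux/fs.h>
#include <linux/xarray.h>

/* Return the shadow (value) entry stored at @index, or NULL if the slot
 * is empty or holds a real page. xa_load() runs under the RCU read lock
 * internally, so no i_pages lock is taken here. */
static void *lookup_shadow(struct address_space *mapping, pgoff_t index)
{
	void *entry = xa_load(&mapping->i_pages, index);

	return xa_is_value(entry) ? entry : NULL;
}
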
filemap.c
127 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
265 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache()
267 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache()
290 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); in page_cache_delete_batch()
343 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
350 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
486 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
659 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_needs_writeback()
841 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_page()
878 XA_STATE(xas, &mapping->i_pages, offset); in __add_to_page_cache_locked()
[all …]
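
filemap.c keys its XA_STATE on page->index and deletes pages with xas_store() plus xas_init_marks() under the lock. A stripped-down sketch of that shape, without the accounting, shadow entries or huge-page handling of the real delete paths:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Minimal sketch only, not the real __delete_from_page_cache(). */
static void erase_from_cache(struct address_space *mapping, struct page *page)
{
	XA_STATE(xas, &mapping->i_pages, page->index);

	xa_lock_irq(&mapping->i_pages);
	xas_store(&xas, NULL);	/* empty the slot */
	xas_init_marks(&xas);	/* clear dirty/writeback/towrite marks */
	xa_unlock_irq(&mapping->i_pages);
}
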
workingset.c
537 mapping = container_of(node->array, struct address_space, i_pages); in shadow_lru_isolate()
540 if (!xa_trylock(&mapping->i_pages)) { in shadow_lru_isolate()
564 xa_unlock_irq(&mapping->i_pages); in shadow_lru_isolate()
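
workingset.c recovers the owning mapping from an XArray node with container_of() and then uses xa_trylock(), so it never spins on i_pages while the LRU lock is held (IRQs are already disabled in that caller, which is why the lines above pair xa_trylock() with a plain xa_unlock_irq()). A sketch of those two steps, with hypothetical helper names:

#include <linux/fs.h>
#include <linux/xarray.h>

/* Every xa_node records which xarray it belongs to, so the enclosing
 * address_space can be recovered from a shadow node on the LRU. */
static struct address_space *mapping_of_node(struct xa_node *node)
{
	return container_of(node->array, struct address_space, i_pages);
}

/* Opportunistic locking: back off and retry the LRU item later rather
 * than spinning on i_pages with the LRU lock held. */
static bool try_lock_mapping(struct address_space *mapping)
{
	return xa_trylock(&mapping->i_pages);
}
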
readahead.c
200 struct page *page = xa_load(&mapping->i_pages, index + i); in page_cache_ra_unbounded()
675 struct page *page = xa_load(&mapping->i_pages, index); in readahead_expand()
698 struct page *page = xa_load(&mapping->i_pages, index); in readahead_expand()
page-writeback.c
2127 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
2497 xa_lock_irqsave(&mapping->i_pages, flags); in __set_page_dirty()
2501 __xa_set_mark(&mapping->i_pages, page_index(page), in __set_page_dirty()
2504 xa_unlock_irqrestore(&mapping->i_pages, flags); in __set_page_dirty()
2780 xa_lock_irqsave(&mapping->i_pages, flags); in test_clear_page_writeback()
2783 __xa_clear_mark(&mapping->i_pages, page_index(page), in test_clear_page_writeback()
2800 xa_unlock_irqrestore(&mapping->i_pages, flags); in test_clear_page_writeback()
2820 XA_STATE(xas, &mapping->i_pages, page_index(page)); in __test_set_page_writeback()
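
page-writeback.c treats dirty and writeback state as marks on the i_pages XArray, set and cleared under xa_lock_irqsave(). A minimal sketch of the tagging step alone (the real __set_page_dirty() path also updates dirty accounting):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Mark the page's slot dirty in the mapping, nothing more. */
static void tag_slot_dirty(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}
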
memfd.c
68 XA_STATE(xas, &mapping->i_pages, 0); in memfd_wait_for_pins()
shmem.c
414 XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
436 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); in shmem_confirm_swap()
697 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page)); in shmem_add_to_page_cache()
772 xa_lock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
778 xa_unlock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
791 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); in shmem_free_swap()
808 XA_STATE(xas, &mapping->i_pages, start); in shmem_partial_swap_usage()
1172 XA_STATE(xas, &mapping->i_pages, start); in shmem_find_swap_entries()
1559 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, in shmem_alloc_hugepage()
1673 xa_lock_irq(&swap_mapping->i_pages); in shmem_replace_page()
[all …]
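
shmem.c adds two more idioms: XA_STATE_ORDER for inserting compound pages, and xa_cmpxchg_irq() for freeing a swap slot only if it still holds the expected value entry. A sketch of the latter, loosely following the shmem_free_swap() hit above (the helper name is hypothetical):

#include <linux/fs.h>
#include <linux/xarray.h>

/* Atomically clear @index, but only if it still holds @radswap, the swap
 * value entry the caller recorded earlier. */
static int release_swap_slot(struct address_space *mapping,
			     pgoff_t index, void *radswap)
{
	void *old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);

	return old == radswap ? 0 : -ENOENT;
}
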
huge_memory.c
2423 xa_lock(&swap_cache->i_pages); in __split_huge_page()
2441 __xa_store(&head->mapping->i_pages, head[i].index, in __split_huge_page()
2444 __xa_store(&swap_cache->i_pages, offset + i, in __split_huge_page()
2460 xa_unlock(&swap_cache->i_pages); in __split_huge_page()
2467 xa_unlock(&head->mapping->i_pages); in __split_huge_page()
2683 XA_STATE(xas, &mapping->i_pages, page_index(head)); in split_huge_page_to_list()
2689 xa_lock(&mapping->i_pages); in split_huge_page_to_list()
2721 xa_unlock(&mapping->i_pages); in split_huge_page_to_list()
/Linux-v5.15/arch/nios2/include/asm/
cacheflush.h
49 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
50 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/Linux-v5.15/arch/nds32/include/asm/
cacheflush.h
42 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
43 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
/Linux-v5.15/arch/csky/abiv1/inc/abi/
cacheflush.h
17 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
18 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/Linux-v5.15/arch/parisc/include/asm/
cacheflush.h
54 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
55 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/Linux-v5.15/include/linux/
backing-dev.h
285 !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && in inode_to_wb()
329 xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); in unlocked_inode_to_wb_begin()
347 xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); in unlocked_inode_to_wb_end()
pagemap.h
23 return xa_empty(&mapping->i_pages); in mapping_empty()
931 page = xa_load(&rac->mapping->i_pages, rac->_index); in readahead_page()
942 XA_STATE(xas, &rac->mapping->i_pages, 0); in __readahead_batch()
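
The include/linux helpers are thin wrappers: mapping_empty() is xa_empty() on i_pages, readahead_page() is an xa_load(), and backing-dev.h relies on the embedded spinlock being named i_pages.xa_lock for its lockdep assertion. Wrapped hypothetically, the first two look like this:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* True if the mapping caches nothing at all (no pages, no value entries). */
static bool cache_is_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/* Peek at whatever is cached at @index; note this may be a value entry. */
static struct page *cache_peek(struct address_space *mapping, pgoff_t index)
{
	return xa_load(&mapping->i_pages, index);
}
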
/Linux-v5.15/arch/arm/include/asm/
cacheflush.h
316 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
317 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
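
On arm, as in the nios2/nds32/csky/parisc headers above, flush_dcache_mmap_lock() is simply an alias for xa_lock_irq() on mapping->i_pages, so cache-maintenance code that takes it inherits the usual i_pages rules: no sleeping, IRQs off, no nested i_pages access. A trivial sketch of the call pair, with the per-arch flushing itself elided:

#include <asm/cacheflush.h>
#include <linux/fs.h>

/* Sketch only: serialise against page-cache updates while the
 * architecture walks the mapping to flush aliases. */
static void flush_mapping_aliases(struct address_space *mapping)
{
	flush_dcache_mmap_lock(mapping);
	/* ... per-arch dcache maintenance for this mapping ... */
	flush_dcache_mmap_unlock(mapping);
}
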
/Linux-v5.15/fs/
dax.c
425 xas.xa = &mapping->i_pages; in dax_lock_page()
450 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
599 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
662 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
970 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
1428 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1538 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1662 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
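
dax.c leans on XA_STATE_ORDER so a single slot operation can cover a PMD-sized (or larger) naturally aligned range. A sketch of a multi-order store with the usual xas_nomem() retry loop; @entry stands in for whatever value entry the caller has built, and the GFP flag is illustrative only:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Store @entry over the aligned 2^order index range containing @index. */
static void store_order_entry(struct address_space *mapping, pgoff_t index,
			      unsigned int order, void *entry)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, order);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, entry);
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));
}
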
/Linux-v5.15/fs/netfs/
read_helper.c
153 iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages, in netfs_clear_unread()
179 iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, in netfs_read_from_cache()
248 XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE); in netfs_rreq_unmark_after_write()
333 iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages, in netfs_rreq_do_write_to_cache()
383 XA_STATE(xas, &rreq->mapping->i_pages, start_page); in netfs_rreq_unlock()
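
The netfs hits use iov_iter_xarray() to describe a byte range of the page cache directly in terms of the mapping's i_pages, which is how the read helpers hand those pages to the cache back end. A small sketch of building such an iterator (the helper name is hypothetical):

#include <linux/fs.h>
#include <linux/uio.h>

/* Describe [start, start + len) of the page cache as an iov_iter:
 * READ when data will be copied into those pages, WRITE when the
 * pages are the source of a write to the cache. */
static void init_cache_iter(struct iov_iter *iter,
			    struct address_space *mapping,
			    loff_t start, size_t len, bool writing_out)
{
	iov_iter_xarray(iter, writing_out ? WRITE : READ,
			&mapping->i_pages, start, len);
}
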
/Linux-v5.15/fs/orangefs/
inode.c
253 struct xarray *i_pages; in orangefs_readahead() local
271 i_pages = &rac->mapping->i_pages; in orangefs_readahead()
273 iov_iter_xarray(&iter, READ, i_pages, offset, readahead_length(rac)); in orangefs_readahead()
/Linux-v5.15/fs/afs/
write.c
267 XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); in afs_pages_written_back()
449 XA_STATE(xas, &mapping->i_pages, index); in afs_extend_writeback()
604 iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len); in afs_write_back_from_locked_page()
file.c
309 &fsreq->vnode->vfs_inode.i_mapping->i_pages, in afs_req_issue_op()
330 iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages, in afs_symlink_readpage()
dir.c
115 XA_STATE(xas, &mapping->i_pages, 0); in afs_dir_read_cleanup()
199 XA_STATE(xas, &mapping->i_pages, 0); in afs_dir_dump()
235 XA_STATE(xas, &mapping->i_pages, 0); in afs_dir_check()
318 iov_iter_xarray(&req->def_iter, READ, &dvnode->vfs_inode.i_mapping->i_pages, in afs_read_dir()
550 slot = radix_tree_lookup_slot(&dvnode->vfs_inode.i_mapping->i_pages, in afs_dir_iterate()
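
The afs directory code walks every cached page of a mapping with an RCU-protected xas_for_each() loop (one hit, in afs_dir_iterate(), still spells the same lookup through the legacy radix_tree_lookup_slot() interface). A sketch of that walk; a page found this way is only stable while the RCU read lock is held unless a reference is taken:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

/* Visit every present entry in [0, last], skipping retry and value entries. */
static void walk_cached_pages(struct address_space *mapping, pgoff_t last)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	struct page *page;

	rcu_read_lock();
	xas_for_each(&xas, page, last) {
		if (xas_retry(&xas, page) || xa_is_value(page))
			continue;
		/* ... inspect @page here ... */
	}
	rcu_read_unlock();
}
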
/Linux-v5.15/fs/gfs2/
glops.c
542 xa_lock_irq(&inode->i_data.i_pages); in inode_go_dump()
544 xa_unlock_irq(&inode->i_data.i_pages); in inode_go_dump()
