| /Linux-v5.15/include/linux/ |
| D | pagevec.h |
    26  void __pagevec_release(struct pagevec *pvec);
    27  void __pagevec_lru_add(struct pagevec *pvec);
    28  void pagevec_remove_exceptionals(struct pagevec *pvec);
    29  unsigned pagevec_lookup_range(struct pagevec *pvec,
    32  static inline unsigned pagevec_lookup(struct pagevec *pvec,    in pagevec_lookup() argument
    36  return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);    in pagevec_lookup()
    39  unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
    42  static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,    in pagevec_lookup_tag() argument
    45  return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);    in pagevec_lookup_tag()
    48  static inline void pagevec_init(struct pagevec *pvec)    in pagevec_init() argument
    [all …]
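
Read together, these declarations make up the pagevec batching API: a caller gathers up to PAGEVEC_SIZE pages from an address_space, processes them, and drops the whole batch in one call. A minimal sketch of that lifecycle, assuming the usual helpers declared in this header (pagevec_init, pagevec_lookup, pagevec_count, pagevec_release); walk_mapping() itself is a hypothetical caller, not kernel code:

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/* Hypothetical helper: visit every page of a mapping in batches. */
static void walk_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t index = 0;
        unsigned int i;

        pagevec_init(&pvec);
        while (pagevec_lookup(&pvec, mapping, &index)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        /* per-page work goes here; the lookup advanced index */
                }
                pagevec_release(&pvec); /* drop the references the lookup took */
                cond_resched();
        }
}

Most of the callers listed below (shmem.c, fscache, hugetlbfs, ext4) follow exactly this shape.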
|
| D | pagemap.h |
    469  pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
    800  struct pagevec *pvec);
|
| /Linux-v5.15/mm/ |
| D | swap.c |
    52   struct pagevec pvec;    member
    182  static void pagevec_lru_move_fn(struct pagevec *pvec,    in pagevec_lru_move_fn() argument
    189  for (i = 0; i < pagevec_count(pvec); i++) {    in pagevec_lru_move_fn()
    190  struct page *page = pvec->pages[i];    in pagevec_lru_move_fn()
    203  release_pages(pvec->pages, pvec->nr);    in pagevec_lru_move_fn()
    204  pagevec_reinit(pvec);    in pagevec_lru_move_fn()
    218  static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)    in pagevec_add_and_need_flush() argument
    222  if (!pagevec_add(pvec, page) || PageCompound(page) ||    in pagevec_add_and_need_flush()
    240  struct pagevec *pvec;    in rotate_reclaimable_page() local
    245  pvec = this_cpu_ptr(&lru_rotate.pvec);    in rotate_reclaimable_page()
    [all …]
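
The pagevec_lru_move_fn() and pagevec_add_and_need_flush() hits show the drain side of the batching: a filled (here per-CPU) pagevec is walked page by page, then the whole batch is released and the vector reset for reuse. A hedged sketch of that shape; the callback type is illustrative (in v5.15 the real move_fn also receives the lruvec), and drain_pagevec() is not a function in this file:

#include <linux/pagevec.h>
#include <linux/mm.h>

/* Illustrative drain helper: apply fn to every queued page, then drop the
 * batch's references and reset the vector, as the swap.c hits above do. */
static void drain_pagevec(struct pagevec *pvec, void (*fn)(struct page *))
{
        unsigned int i;

        for (i = 0; i < pagevec_count(pvec); i++)
                fn(pvec->pages[i]);

        release_pages(pvec->pages, pvec->nr);   /* put every page in one call */
        pagevec_reinit(pvec);                   /* nr = 0, ready to be refilled */
}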
|
| D | truncate.c |
    59   struct pagevec *pvec, pgoff_t *indices)    in truncate_exceptional_pvec_entries() argument
    68   for (j = 0; j < pagevec_count(pvec); j++)    in truncate_exceptional_pvec_entries()
    69   if (xa_is_value(pvec->pages[j]))    in truncate_exceptional_pvec_entries()
    72   if (j == pagevec_count(pvec))    in truncate_exceptional_pvec_entries()
    79   for (i = j; i < pagevec_count(pvec); i++) {    in truncate_exceptional_pvec_entries()
    80   struct page *page = pvec->pages[i];    in truncate_exceptional_pvec_entries()
    84   pvec->pages[j++] = page;    in truncate_exceptional_pvec_entries()
    98   pvec->nr = j;    in truncate_exceptional_pvec_entries()
    289  struct pagevec pvec;    in truncate_inode_pages_range() local
    318  pagevec_init(&pvec);    in truncate_inode_pages_range()
    [all …]
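
truncate_exceptional_pvec_entries() is a reminder that a pagevec returned by the *_entries lookups can hold more than pages: xa_is_value() slots are shadow or swap entries stored directly in the XArray. The two-index loop in the hits above compacts the real pages to the front and shrinks pvec->nr. A sketch of just that in-place compaction, under the assumption that value entries merely need to be skipped (the real function also clears them from the mapping); the pagevec_remove_exceptionals() helper declared in pagevec.h above does this job for generic callers:

#include <linux/pagevec.h>
#include <linux/xarray.h>

/* Sketch: drop exceptional (value) entries, keep real pages, fix up nr. */
static void drop_value_entries(struct pagevec *pvec)
{
        unsigned int i, j = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                if (xa_is_value(page))
                        continue;               /* shadow/swap entry, not a page */
                pvec->pages[j++] = page;        /* keep real pages packed in front */
        }
        pvec->nr = j;
}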
|
| D | mlock.c |
    216  static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,    in __putback_lru_fast_prepare() argument
    223  pagevec_add(pvec, page);    in __putback_lru_fast_prepare()
    239  static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)    in __putback_lru_fast() argument
    241  count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));    in __putback_lru_fast()
    246  __pagevec_lru_add(pvec);    in __putback_lru_fast()
    260  static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)    in __munlock_pagevec() argument
    263  int nr = pagevec_count(pvec);    in __munlock_pagevec()
    273  struct page *page = pvec->pages[i];    in __munlock_pagevec()
    296  pagevec_add(&pvec_putback, pvec->pages[i]);    in __munlock_pagevec()
    297  pvec->pages[i] = NULL;    in __munlock_pagevec()
    [all …]
|
| D | filemap.c |
    288  struct pagevec *pvec)    in page_cache_delete_batch() argument
    290  XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);    in page_cache_delete_batch()
    297  if (i >= pagevec_count(pvec))    in page_cache_delete_batch()
    310  if (page != pvec->pages[i]) {    in page_cache_delete_batch()
    311  VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,    in page_cache_delete_batch()
    336  struct pagevec *pvec)    in delete_from_page_cache_batch() argument
    340  if (!pagevec_count(pvec))    in delete_from_page_cache_batch()
    344  for (i = 0; i < pagevec_count(pvec); i++) {    in delete_from_page_cache_batch()
    345  trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);    in delete_from_page_cache_batch()
    347  unaccount_page_cache_page(mapping, pvec->pages[i]);    in delete_from_page_cache_batch()
    [all …]
|
| D | shmem.c |
    869  struct pagevec pvec;    in shmem_unlock_mapping() local
    872  pagevec_init(&pvec);    in shmem_unlock_mapping()
    877  if (!pagevec_lookup(&pvec, mapping, &index))    in shmem_unlock_mapping()
    879  check_move_unevictable_pages(&pvec);    in shmem_unlock_mapping()
    880  pagevec_release(&pvec);    in shmem_unlock_mapping()
    924  struct pagevec pvec;    in shmem_undo_range() local
    936  pagevec_init(&pvec);    in shmem_undo_range()
    939  &pvec, indices)) {    in shmem_undo_range()
    940  for (i = 0; i < pagevec_count(&pvec); i++) {    in shmem_undo_range()
    941  struct page *page = pvec.pages[i];    in shmem_undo_range()
    [all …]
|
| D | page-writeback.c |
    2184  struct pagevec pvec;    in write_cache_pages() local
    2192  pagevec_init(&pvec);    in write_cache_pages()
    2212  nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,    in write_cache_pages()
    2218  struct page *page = pvec.pages[i];    in write_cache_pages()
    2294  pagevec_release(&pvec);    in write_cache_pages()
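
write_cache_pages() is the generic writeback walk that several of the filesystems below (gfs2, ceph, nilfs2, f2fs) mirror: look up dirty-tagged pages in index order, write each one, release the batch, repeat. A condensed sketch of that loop, assuming a hypothetical write_one() callback and eliding the cyclic-range, locking, and retagging details of the real function:

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/* Sketch of a tag-driven writeback walk over [index, end]. */
static void write_dirty_range(struct address_space *mapping,
                              pgoff_t index, pgoff_t end,
                              void (*write_one)(struct page *))
{
        struct pagevec pvec;
        unsigned int nr_pages, i;

        pagevec_init(&pvec);
        while (index <= end) {
                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                                    PAGECACHE_TAG_DIRTY);
                if (!nr_pages)
                        break;                  /* no dirty pages left in range */
                for (i = 0; i < nr_pages; i++)
                        write_one(pvec.pages[i]);
                pagevec_release(&pvec);
                cond_resched();
        }
}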
|
| /Linux-v5.15/fs/nilfs2/ |
| D | page.c |
    243  struct pagevec pvec;    in nilfs_copy_dirty_pages() local
    248  pagevec_init(&pvec);    in nilfs_copy_dirty_pages()
    250  if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))    in nilfs_copy_dirty_pages()
    253  for (i = 0; i < pagevec_count(&pvec); i++) {    in nilfs_copy_dirty_pages()
    254  struct page *page = pvec.pages[i], *dpage;    in nilfs_copy_dirty_pages()
    278  pagevec_release(&pvec);    in nilfs_copy_dirty_pages()
    297  struct pagevec pvec;    in nilfs_copy_back_pages() local
    301  pagevec_init(&pvec);    in nilfs_copy_back_pages()
    303  n = pagevec_lookup(&pvec, smap, &index);    in nilfs_copy_back_pages()
    307  for (i = 0; i < pagevec_count(&pvec); i++) {    in nilfs_copy_back_pages()
    [all …]
|
| D | segment.c |
    683  struct pagevec pvec;    in nilfs_lookup_dirty_data_buffers() local
    697  pagevec_init(&pvec);    in nilfs_lookup_dirty_data_buffers()
    700  !pagevec_lookup_range_tag(&pvec, mapping, &index, last,    in nilfs_lookup_dirty_data_buffers()
    704  for (i = 0; i < pagevec_count(&pvec); i++) {    in nilfs_lookup_dirty_data_buffers()
    706  struct page *page = pvec.pages[i];    in nilfs_lookup_dirty_data_buffers()
    721  pagevec_release(&pvec);    in nilfs_lookup_dirty_data_buffers()
    727  pagevec_release(&pvec);    in nilfs_lookup_dirty_data_buffers()
    737  struct pagevec pvec;    in nilfs_lookup_dirty_node_buffers() local
    742  pagevec_init(&pvec);    in nilfs_lookup_dirty_node_buffers()
    744  while (pagevec_lookup_tag(&pvec, mapping, &index,    in nilfs_lookup_dirty_node_buffers()
    [all …]
|
| /Linux-v5.15/drivers/gpu/drm/i915/gem/ |
| D | i915_gem_userptr.c |
    110  struct page **pvec = NULL;    in i915_gem_object_userptr_drop_ref() local
    115  pvec = obj->userptr.pvec;    in i915_gem_object_userptr_drop_ref()
    116  obj->userptr.pvec = NULL;    in i915_gem_object_userptr_drop_ref()
    120  if (pvec) {    in i915_gem_object_userptr_drop_ref()
    123  unpin_user_pages(pvec, num_pages);    in i915_gem_object_userptr_drop_ref()
    124  kvfree(pvec);    in i915_gem_object_userptr_drop_ref()
    134  struct page **pvec;    in i915_gem_userptr_get_pages() local
    147  pvec = obj->userptr.pvec;    in i915_gem_userptr_get_pages()
    150  ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,    in i915_gem_userptr_get_pages()
    260  struct page **pvec;    in i915_gem_object_userptr_submit_init() local
    [all …]
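
In the i915 userptr code (and etnaviv below), pvec is not a struct pagevec at all but a plain struct page ** array sized for the whole object: the user's pages are pinned into it, handed to an sg_table, and later unpinned and freed. A hedged sketch of that pin/unpin lifecycle using the generic pin_user_pages_fast() GUP helper (not shown in the excerpt); the locking and object bookkeeping of the real driver are omitted and pin_user_range() is an illustrative name:

#include <linux/mm.h>
#include <linux/err.h>

/* Sketch: pin a userspace range into a heap-allocated page array ("pvec"). */
static struct page **pin_user_range(unsigned long addr, int num_pages)
{
        struct page **pvec;
        int pinned;

        pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return ERR_PTR(-ENOMEM);

        pinned = pin_user_pages_fast(addr, num_pages, FOLL_WRITE, pvec);
        if (pinned != num_pages) {
                if (pinned > 0)
                        unpin_user_pages(pvec, pinned); /* back out a partial pin */
                kvfree(pvec);
                return ERR_PTR(pinned < 0 ? pinned : -EFAULT);
        }
        return pvec;    /* caller eventually does unpin_user_pages() + kvfree() */
}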
|
| D | i915_gem_shmem.c |
    21   static void check_release_pagevec(struct pagevec *pvec)    in check_release_pagevec() argument
    23   check_move_unevictable_pages(pvec);    in check_release_pagevec()
    24   __pagevec_release(pvec);    in check_release_pagevec()
    212  struct pagevec pvec;    in shmem_get_pages() local
    214  pagevec_init(&pvec);    in shmem_get_pages()
    216  if (!pagevec_add(&pvec, page))    in shmem_get_pages()
    217  check_release_pagevec(&pvec);    in shmem_get_pages()
    219  if (pagevec_count(&pvec))    in shmem_get_pages()
    220  check_release_pagevec(&pvec);    in shmem_get_pages()
    320  struct pagevec pvec;    in i915_gem_object_put_pages_shmem() local
    [all …]
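
check_release_pagevec() shows why i915 (and drm_gem.c below, which has an identical helper) bothers with a pagevec when dropping shmem-backed object pages: releasing in batches lets check_move_unevictable_pages() fix up the LRU once per batch instead of once per page. A sketch of the fill-and-flush release loop built from the calls visible in the hits; put_object_pages() is an illustrative caller, not a function in this file:

#include <linux/pagevec.h>
#include <linux/swap.h>         /* check_move_unevictable_pages() */

static void release_batch(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);     /* re-place pages on the right LRU */
        __pagevec_release(pvec);                /* drop the refs and reset the vector */
}

/* Sketch: hand back an array of object pages in PAGEVEC_SIZE batches. */
static void put_object_pages(struct page **pages, unsigned long count)
{
        struct pagevec pvec;
        unsigned long i;

        pagevec_init(&pvec);
        for (i = 0; i < count; i++) {
                if (!pagevec_add(&pvec, pages[i]))      /* 0 => vector just filled */
                        release_batch(&pvec);
        }
        if (pagevec_count(&pvec))
                release_batch(&pvec);                   /* flush the partial tail */
}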
|
| D | i915_gem_object_types.h | 550 struct page **pvec; member
|
| /Linux-v5.15/drivers/gpu/drm/ |
| D | drm_gem.c |
    508  static void drm_gem_check_release_pagevec(struct pagevec *pvec)    in drm_gem_check_release_pagevec() argument
    510  check_move_unevictable_pages(pvec);    in drm_gem_check_release_pagevec()
    511  __pagevec_release(pvec);    in drm_gem_check_release_pagevec()
    544  struct pagevec pvec;    in drm_gem_get_pages() local
    587  pagevec_init(&pvec);    in drm_gem_get_pages()
    589  if (!pagevec_add(&pvec, pages[i]))    in drm_gem_get_pages()
    590  drm_gem_check_release_pagevec(&pvec);    in drm_gem_get_pages()
    592  if (pagevec_count(&pvec))    in drm_gem_get_pages()
    593  drm_gem_check_release_pagevec(&pvec);    in drm_gem_get_pages()
    612  struct pagevec pvec;    in drm_gem_put_pages() local
    [all …]
|
| /Linux-v5.15/fs/gfs2/ |
| D | aops.c |
    235  struct pagevec *pvec,    in gfs2_write_jdata_pagevec() argument
    250  struct page *page = pvec->pages[i];    in gfs2_write_jdata_pagevec()
    333  struct pagevec pvec;    in gfs2_write_cache_jdata() local
    343  pagevec_init(&pvec);    in gfs2_write_cache_jdata()
    369  nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,    in gfs2_write_cache_jdata()
    374  ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);    in gfs2_write_cache_jdata()
    379  pagevec_release(&pvec);    in gfs2_write_cache_jdata()
|
| /Linux-v5.15/drivers/gpu/drm/etnaviv/ |
| D | etnaviv_gem.c |
    662  struct page **pvec = NULL;    in etnaviv_gem_userptr_get_pages() local
    671  pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);    in etnaviv_gem_userptr_get_pages()
    672  if (!pvec)    in etnaviv_gem_userptr_get_pages()
    678  struct page **pages = pvec + pinned;    in etnaviv_gem_userptr_get_pages()
    684  unpin_user_pages(pvec, pinned);    in etnaviv_gem_userptr_get_pages()
    685  kvfree(pvec);    in etnaviv_gem_userptr_get_pages()
    693  etnaviv_obj->pages = pvec;    in etnaviv_gem_userptr_get_pages()
|
| /Linux-v5.15/fs/ceph/ |
| D | addr.c |
    715   struct pagevec pvec;    in ceph_writepages_start() local
    739   pagevec_init(&pvec);    in ceph_writepages_start()
    797   pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,    in ceph_writepages_start()
    803   page = pvec.pages[i];    in ceph_writepages_start()
    923   pvec.pages[i] = NULL;    in ceph_writepages_start()
    935   if (!pvec.pages[j])    in ceph_writepages_start()
    938   pvec.pages[n] = pvec.pages[j];    in ceph_writepages_start()
    941   pvec.nr = n;    in ceph_writepages_start()
    946   pagevec_release(&pvec);    in ceph_writepages_start()
    1071  dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,    in ceph_writepages_start()
    [all …]
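
Besides the usual tagged writeback walk, the ceph hits show a less common trick: pages that have been handed off to an OSD request are NULLed out in pvec.pages[], and the remaining slots are compacted before pagevec_release() so only the untouched pages are put back. A small sketch of just that compaction step, limited to the array manipulation visible in the snippet (compact_pagevec() is an illustrative name):

#include <linux/pagevec.h>

/* Sketch: squeeze NULLed-out slots from a pagevec before releasing it. */
static void compact_pagevec(struct pagevec *pvec)
{
        unsigned int j, n = 0;

        for (j = 0; j < pvec->nr; j++) {
                if (!pvec->pages[j])
                        continue;               /* slot already consumed elsewhere */
                pvec->pages[n++] = pvec->pages[j];
        }
        pvec->nr = n;                           /* pagevec_release() now skips the rest */
}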
|
| /Linux-v5.15/fs/afs/ |
| D | write.c |
    439  struct pagevec pvec;    in afs_extend_writeback() local
    450  pagevec_init(&pvec);    in afs_extend_writeback()
    507  if (!pagevec_add(&pvec, page))    in afs_extend_writeback()
    520  if (!pagevec_count(&pvec))    in afs_extend_writeback()
    523  for (i = 0; i < pagevec_count(&pvec); i++) {    in afs_extend_writeback()
    524  page = pvec.pages[i];    in afs_extend_writeback()
    536  pagevec_release(&pvec);    in afs_extend_writeback()
|
| /Linux-v5.15/fs/hugetlbfs/ |
| D | inode.c |
    111  static void huge_pagevec_release(struct pagevec *pvec)    in huge_pagevec_release() argument
    115  for (i = 0; i < pagevec_count(pvec); ++i)    in huge_pagevec_release()
    116  put_page(pvec->pages[i]);    in huge_pagevec_release()
    118  pagevec_reinit(pvec);    in huge_pagevec_release()
    471  struct pagevec pvec;    in remove_inode_hugepages() local
    476  pagevec_init(&pvec);    in remove_inode_hugepages()
    482  if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))    in remove_inode_hugepages()
    485  for (i = 0; i < pagevec_count(&pvec); ++i) {    in remove_inode_hugepages()
    486  struct page *page = pvec.pages[i];    in remove_inode_hugepages()
    545  huge_pagevec_release(&pvec);    in remove_inode_hugepages()
|
| /Linux-v5.15/fs/f2fs/ |
| D | node.c |
    1507  struct pagevec pvec;    in last_fsync_dnode() local
    1511  pagevec_init(&pvec);    in last_fsync_dnode()
    1514  while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,    in last_fsync_dnode()
    1519  struct page *page = pvec.pages[i];    in last_fsync_dnode()
    1523  pagevec_release(&pvec);    in last_fsync_dnode()
    1554  pagevec_release(&pvec);    in last_fsync_dnode()
    1720  struct pagevec pvec;    in f2fs_fsync_node_pages() local
    1734  pagevec_init(&pvec);    in f2fs_fsync_node_pages()
    1737  while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,    in f2fs_fsync_node_pages()
    1742  struct page *page = pvec.pages[i];    in f2fs_fsync_node_pages()
    [all …]
|
| D | checkpoint.c |
    375  struct pagevec pvec;    in f2fs_sync_meta_pages() local
    383  pagevec_init(&pvec);    in f2fs_sync_meta_pages()
    387  while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,    in f2fs_sync_meta_pages()
    392  struct page *page = pvec.pages[i];    in f2fs_sync_meta_pages()
    397  pagevec_release(&pvec);    in f2fs_sync_meta_pages()
    427  pagevec_release(&pvec);    in f2fs_sync_meta_pages()
|
| /Linux-v5.15/arch/x86/kvm/mmu/ |
| D | mmu.c |
    1811  static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,    in mmu_pages_add() argument
    1817  for (i=0; i < pvec->nr; i++)    in mmu_pages_add()
    1818  if (pvec->page[i].sp == sp)    in mmu_pages_add()
    1821  pvec->page[pvec->nr].sp = sp;    in mmu_pages_add()
    1822  pvec->page[pvec->nr].idx = idx;    in mmu_pages_add()
    1823  pvec->nr++;    in mmu_pages_add()
    1824  return (pvec->nr == KVM_PAGE_ARRAY_NR);    in mmu_pages_add()
    1835  struct kvm_mmu_pages *pvec)    in __mmu_unsync_walk() argument
    1851  if (mmu_pages_add(pvec, child, i))    in __mmu_unsync_walk()
    1854  ret = __mmu_unsync_walk(child, pvec);    in __mmu_unsync_walk()
    [all …]
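
Note that the pvec in kvm/mmu.c is unrelated to struct pagevec: struct kvm_mmu_pages is a fixed-size gather array (KVM_PAGE_ARRAY_NR slots) that __mmu_unsync_walk() fills with unsynced shadow pages plus the parent index each was reached through. A simplified, self-contained sketch of the mmu_pages_add() dedupe-and-append step; the field names mirror the hits above, but the struct body and the stand-in names here are assumptions for illustration only:

#include <stdbool.h>

#define PAGE_ARRAY_NR 16                /* stand-in for KVM_PAGE_ARRAY_NR */

struct shadow_page;                     /* stand-in for struct kvm_mmu_page */

struct mmu_pages {                      /* stand-in for struct kvm_mmu_pages */
        struct {
                struct shadow_page *sp;
                unsigned int idx;
        } page[PAGE_ARRAY_NR];
        unsigned int nr;
};

/* Append (sp, idx) unless sp is already queued; return true when full. */
static bool pages_add(struct mmu_pages *pvec, struct shadow_page *sp,
                      unsigned int idx)
{
        unsigned int i;

        for (i = 0; i < pvec->nr; i++)
                if (pvec->page[i].sp == sp)
                        return false;           /* already queued, nothing to do */

        pvec->page[pvec->nr].sp = sp;
        pvec->page[pvec->nr].idx = idx;
        pvec->nr++;
        return pvec->nr == PAGE_ARRAY_NR;       /* caller flushes a full vector */
}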
|
| /Linux-v5.15/fs/fscache/ |
| D | page.c |
    1213  struct pagevec pvec;    in __fscache_uncache_all_inode_pages() local
    1224  pagevec_init(&pvec);    in __fscache_uncache_all_inode_pages()
    1227  if (!pagevec_lookup(&pvec, mapping, &next))    in __fscache_uncache_all_inode_pages()
    1229  for (i = 0; i < pagevec_count(&pvec); i++) {    in __fscache_uncache_all_inode_pages()
    1230  struct page *page = pvec.pages[i];    in __fscache_uncache_all_inode_pages()
    1236  pagevec_release(&pvec);    in __fscache_uncache_all_inode_pages()
|
| /Linux-v5.15/fs/ext4/ |
| D | inode.c |
    1547  struct pagevec pvec;    in mpage_release_unused_pages() local
    1565  pagevec_init(&pvec);    in mpage_release_unused_pages()
    1567  nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);    in mpage_release_unused_pages()
    1571  struct page *page = pvec.pages[i];    in mpage_release_unused_pages()
    1583  pagevec_release(&pvec);    in mpage_release_unused_pages()
    2312  struct pagevec pvec;    in mpage_map_and_submit_buffers() local
    2327  pagevec_init(&pvec);    in mpage_map_and_submit_buffers()
    2329  nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,    in mpage_map_and_submit_buffers()
    2334  struct page *page = pvec.pages[i];    in mpage_map_and_submit_buffers()
    2350  pagevec_release(&pvec);    in mpage_map_and_submit_buffers()
    [all …]
|
| /Linux-v5.15/drivers/mtd/lpddr/ |
| D | lpddr_cmds.c |
    404  unsigned long adr, const struct kvec **pvec,    in do_write_buffer() argument
    436  vec = *pvec;    in do_write_buffer()
    466  *pvec = vec;    in do_write_buffer()
|