/Linux-v4.19/include/linux/

D | pagevec.h |
    24  void __pagevec_release(struct pagevec *pvec);
    25  void __pagevec_lru_add(struct pagevec *pvec);
    26  unsigned pagevec_lookup_entries(struct pagevec *pvec,
    30  void pagevec_remove_exceptionals(struct pagevec *pvec);
    31  unsigned pagevec_lookup_range(struct pagevec *pvec,
    34  static inline unsigned pagevec_lookup(struct pagevec *pvec,  in pagevec_lookup() argument
    38  return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);  in pagevec_lookup()
    41  unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
    44  unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
    47  static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,  in pagevec_lookup_tag() argument
    [all …]
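Most of the hits below use these declarations in the same lookup/process/release cycle (write_cache_pages, the nilfs2/f2fs/gfs2 writeback walkers, clean_bdev_aliases, and so on). The following is a minimal sketch of that cycle, not code from the tree; the per-page callback process_page() is a hypothetical placeholder.

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/sched.h>

    /* Walk every dirty page of a mapping, one pagevec batch at a time. */
    static void walk_dirty_pages(struct address_space *mapping,
                                 void (*process_page)(struct page *page))
    {
            struct pagevec pvec;
            pgoff_t index = 0;
            unsigned int nr_pages;
            unsigned int i;

            pagevec_init(&pvec);
            while ((nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
                                    (pgoff_t)-1, PAGECACHE_TAG_DIRTY))) {
                    for (i = 0; i < nr_pages; i++)
                            process_page(pvec.pages[i]);
                    /* Drop the references taken by the lookup and reset the pagevec. */
                    pagevec_release(&pvec);
                    cond_resched();
            }
    }

The lookup functions advance *index themselves, which is why the callers below only pass &index into each iteration; pagevec_release() is what pairs with the page references the lookup took.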
/Linux-v4.19/mm/

D | swap.c |
    189  static void pagevec_lru_move_fn(struct pagevec *pvec,  in pagevec_lru_move_fn() argument
    198  for (i = 0; i < pagevec_count(pvec); i++) {  in pagevec_lru_move_fn()
    199  struct page *page = pvec->pages[i];  in pagevec_lru_move_fn()
    214  release_pages(pvec->pages, pvec->nr);  in pagevec_lru_move_fn()
    215  pagevec_reinit(pvec);  in pagevec_lru_move_fn()
    235  static void pagevec_move_tail(struct pagevec *pvec)  in pagevec_move_tail() argument
    239  pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);  in pagevec_move_tail()
    252  struct pagevec *pvec;  in rotate_reclaimable_page() local
    257  pvec = this_cpu_ptr(&lru_rotate_pvecs);  in rotate_reclaimable_page()
    258  if (!pagevec_add(pvec, page) || PageCompound(page))  in rotate_reclaimable_page()
    [all …]

D | truncate.c |
    62  struct pagevec *pvec, pgoff_t *indices,  in truncate_exceptional_pvec_entries() argument
    72  for (j = 0; j < pagevec_count(pvec); j++)  in truncate_exceptional_pvec_entries()
    73  if (radix_tree_exceptional_entry(pvec->pages[j]))  in truncate_exceptional_pvec_entries()
    76  if (j == pagevec_count(pvec))  in truncate_exceptional_pvec_entries()
    84  for (i = j; i < pagevec_count(pvec); i++) {  in truncate_exceptional_pvec_entries()
    85  struct page *page = pvec->pages[i];  in truncate_exceptional_pvec_entries()
    89  pvec->pages[j++] = page;  in truncate_exceptional_pvec_entries()
    106  pvec->nr = j;  in truncate_exceptional_pvec_entries()
    300  struct pagevec pvec;  in truncate_inode_pages_range() local
    329  pagevec_init(&pvec);  in truncate_inode_pages_range()
    [all …]

D | mlock.c |
    246  static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,  in __putback_lru_fast_prepare() argument
    253  pagevec_add(pvec, page);  in __putback_lru_fast_prepare()
    269  static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)  in __putback_lru_fast() argument
    271  count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));  in __putback_lru_fast()
    276  __pagevec_lru_add(pvec);  in __putback_lru_fast()
    290  static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)  in __munlock_pagevec() argument
    293  int nr = pagevec_count(pvec);  in __munlock_pagevec()
    303  struct page *page = pvec->pages[i];  in __munlock_pagevec()
    324  pagevec_add(&pvec_putback, pvec->pages[i]);  in __munlock_pagevec()
    325  pvec->pages[i] = NULL;  in __munlock_pagevec()
    [all …]

D | shmem.c |
    765  struct pagevec pvec;  in shmem_unlock_mapping() local
    769  pagevec_init(&pvec);  in shmem_unlock_mapping()
    778  pvec.nr = find_get_entries(mapping, index,  in shmem_unlock_mapping()
    779  PAGEVEC_SIZE, pvec.pages, indices);  in shmem_unlock_mapping()
    780  if (!pvec.nr)  in shmem_unlock_mapping()
    782  index = indices[pvec.nr - 1] + 1;  in shmem_unlock_mapping()
    783  pagevec_remove_exceptionals(&pvec);  in shmem_unlock_mapping()
    784  check_move_unevictable_pages(pvec.pages, pvec.nr);  in shmem_unlock_mapping()
    785  pagevec_release(&pvec);  in shmem_unlock_mapping()
    803  struct pagevec pvec;  in shmem_undo_range() local
    [all …]

D | filemap.c |
    324  struct pagevec *pvec)  in page_cache_tree_delete_batch() argument
    333  start = pvec->pages[0]->index;  in page_cache_tree_delete_batch()
    335  if (i >= pagevec_count(pvec) && !tail_pages)  in page_cache_tree_delete_batch()
    347  if (page != pvec->pages[i])  in page_cache_tree_delete_batch()
    370  struct pagevec *pvec)  in delete_from_page_cache_batch() argument
    375  if (!pagevec_count(pvec))  in delete_from_page_cache_batch()
    379  for (i = 0; i < pagevec_count(pvec); i++) {  in delete_from_page_cache_batch()
    380  trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);  in delete_from_page_cache_batch()
    382  unaccount_page_cache_page(mapping, pvec->pages[i]);  in delete_from_page_cache_batch()
    384  page_cache_tree_delete_batch(mapping, pvec);  in delete_from_page_cache_batch()
    [all …]

D | page-writeback.c |
    2159  struct pagevec pvec;  in write_cache_pages() local
    2169  pagevec_init(&pvec);  in write_cache_pages()
    2196  nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,  in write_cache_pages()
    2202  struct page *page = pvec.pages[i];  in write_cache_pages()
    2272  pagevec_release(&pvec);  in write_cache_pages()
/Linux-v4.19/fs/nilfs2/

D | page.c |
    244  struct pagevec pvec;  in nilfs_copy_dirty_pages() local
    249  pagevec_init(&pvec);  in nilfs_copy_dirty_pages()
    251  if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))  in nilfs_copy_dirty_pages()
    254  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_copy_dirty_pages()
    255  struct page *page = pvec.pages[i], *dpage;  in nilfs_copy_dirty_pages()
    279  pagevec_release(&pvec);  in nilfs_copy_dirty_pages()
    298  struct pagevec pvec;  in nilfs_copy_back_pages() local
    303  pagevec_init(&pvec);  in nilfs_copy_back_pages()
    305  n = pagevec_lookup(&pvec, smap, &index);  in nilfs_copy_back_pages()
    309  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_copy_back_pages()
    [all …]

D | segment.c |
    688  struct pagevec pvec;  in nilfs_lookup_dirty_data_buffers() local
    702  pagevec_init(&pvec);  in nilfs_lookup_dirty_data_buffers()
    705  !pagevec_lookup_range_tag(&pvec, mapping, &index, last,  in nilfs_lookup_dirty_data_buffers()
    709  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_lookup_dirty_data_buffers()
    711  struct page *page = pvec.pages[i];  in nilfs_lookup_dirty_data_buffers()
    726  pagevec_release(&pvec);  in nilfs_lookup_dirty_data_buffers()
    732  pagevec_release(&pvec);  in nilfs_lookup_dirty_data_buffers()
    742  struct pagevec pvec;  in nilfs_lookup_dirty_node_buffers() local
    747  pagevec_init(&pvec);  in nilfs_lookup_dirty_node_buffers()
    749  while (pagevec_lookup_tag(&pvec, mapping, &index,  in nilfs_lookup_dirty_node_buffers()
    [all …]

D | btree.c |
    2140  struct pagevec pvec;  in nilfs_btree_lookup_dirty_buffers() local
    2150  pagevec_init(&pvec);  in nilfs_btree_lookup_dirty_buffers()
    2152  while (pagevec_lookup_tag(&pvec, btcache, &index,  in nilfs_btree_lookup_dirty_buffers()
    2154  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_btree_lookup_dirty_buffers()
    2155  bh = head = page_buffers(pvec.pages[i]);  in nilfs_btree_lookup_dirty_buffers()
    2162  pagevec_release(&pvec);  in nilfs_btree_lookup_dirty_buffers()
/Linux-v4.19/drivers/gpu/drm/i915/

D | i915_gem_userptr.c |
    423  struct page **pvec, int num_pages)  in __i915_gem_userptr_alloc_pages() argument
    435  ret = __sg_alloc_table_from_pages(st, pvec, num_pages,  in __i915_gem_userptr_alloc_pages()
    506  struct page **pvec;  in __i915_gem_userptr_get_pages_worker() local
    512  pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);  in __i915_gem_userptr_get_pages_worker()
    513  if (pvec != NULL) {  in __i915_gem_userptr_get_pages_worker()
    529  pvec + pinned, NULL, NULL);  in __i915_gem_userptr_get_pages_worker()
    545  pages = __i915_gem_userptr_alloc_pages(obj, pvec,  in __i915_gem_userptr_get_pages_worker()
    559  release_pages(pvec, pinned);  in __i915_gem_userptr_get_pages_worker()
    560  kvfree(pvec);  in __i915_gem_userptr_get_pages_worker()
    612  struct page **pvec;  in i915_gem_userptr_get_pages() local
    [all …]
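In the i915 userptr path above (and in the etnaviv hits further down), pvec is not a struct pagevec at all but a bare struct page * array holding pinned user pages. A minimal sketch of that allocate/pin/clean-up shape follows; it is not the drivers' code, the helper name pin_user_range() is hypothetical, and it uses get_user_pages_fast() where the drivers pick their own GUP variant.

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/slab.h>

    /* Hypothetical helper: pin npages of user memory starting at userptr. */
    static struct page **pin_user_range(unsigned long userptr, int npages)
    {
            struct page **pvec;
            int pinned;

            pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
            if (!pvec)
                    return ERR_PTR(-ENOMEM);

            /* v4.19-era signature: the third argument is a write flag, not gup_flags. */
            pinned = get_user_pages_fast(userptr, npages, 1, pvec);
            if (pinned < npages) {
                    if (pinned > 0)
                            release_pages(pvec, pinned); /* drop the pages we did pin */
                    kvfree(pvec);
                    return ERR_PTR(pinned < 0 ? pinned : -EFAULT);
            }

            return pvec;
    }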
D | i915_gem_gtt.c |
    383  pagevec_init(&stash->pvec);  in stash_init()
    392  if (likely(stash->pvec.nr))  in stash_pop_page()
    393  page = stash->pvec.pages[--stash->pvec.nr];  in stash_pop_page()
    399  static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)  in stash_push_pagevec() argument
    405  nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));  in stash_push_pagevec()
    406  memcpy(stash->pvec.pages + stash->pvec.nr,  in stash_push_pagevec()
    407  pvec->pages + pvec->nr - nr,  in stash_push_pagevec()
    408  sizeof(pvec->pages[0]) * nr);  in stash_push_pagevec()
    409  stash->pvec.nr += nr;  in stash_push_pagevec()
    413  pvec->nr -= nr;  in stash_push_pagevec()
    [all …]
/Linux-v4.19/fs/

D | dax.c |
    635  struct pagevec pvec;  in dax_layout_busy_page() local
    648  pagevec_init(&pvec);  in dax_layout_busy_page()
    666  while (index < end && pagevec_lookup_entries(&pvec, mapping, index,  in dax_layout_busy_page()
    671  for (i = 0; i < pagevec_count(&pvec); i++) {  in dax_layout_busy_page()
    672  struct page *pvec_ent = pvec.pages[i];  in dax_layout_busy_page()
    691  if (i + 1 >= pagevec_count(&pvec))  in dax_layout_busy_page()
    706  pagevec_remove_exceptionals(&pvec);  in dax_layout_busy_page()
    707  pagevec_release(&pvec);  in dax_layout_busy_page()
    1032  struct pagevec pvec;  in dax_writeback_mapping_range() local
    1053  pagevec_init(&pvec);  in dax_writeback_mapping_range()
    [all …]

D | iomap.c |
    1271  struct pagevec pvec;  in page_cache_seek_hole_data() local
    1276  pagevec_init(&pvec);  in page_cache_seek_hole_data()
    1281  nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,  in page_cache_seek_hole_data()
    1287  struct page *page = pvec.pages[i];  in page_cache_seek_hole_data()
    1293  pagevec_release(&pvec);  in page_cache_seek_hole_data()
    1306  pagevec_release(&pvec);  in page_cache_seek_hole_data()

D | buffer.c |
    1565  struct pagevec pvec;  in clean_bdev_aliases() local
    1573  pagevec_init(&pvec);  in clean_bdev_aliases()
    1574  while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) {  in clean_bdev_aliases()
    1575  count = pagevec_count(&pvec);  in clean_bdev_aliases()
    1577  struct page *page = pvec.pages[i];  in clean_bdev_aliases()
    1606  pagevec_release(&pvec);  in clean_bdev_aliases()
/Linux-v4.19/fs/ceph/

D | addr.c |
    688  struct pagevec pvec;  in ceph_release_pages() local
    691  pagevec_init(&pvec);  in ceph_release_pages()
    693  if (pagevec_add(&pvec, pages[i]) == 0)  in ceph_release_pages()
    694  pagevec_release(&pvec);  in ceph_release_pages()
    696  pagevec_release(&pvec);  in ceph_release_pages()
    798  struct pagevec pvec;  in ceph_writepages_start() local
    822  pagevec_init(&pvec);  in ceph_writepages_start()
    880  pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index,  in ceph_writepages_start()
    887  page = pvec.pages[i];  in ceph_writepages_start()
    1006  pvec.pages[i] = NULL;  in ceph_writepages_start()
    [all …]
/Linux-v4.19/drivers/gpu/drm/etnaviv/

D | etnaviv_gem.c |
    680  struct page **pvec = NULL;  in etnaviv_gem_userptr_get_pages() local
    689  pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);  in etnaviv_gem_userptr_get_pages()
    690  if (!pvec)  in etnaviv_gem_userptr_get_pages()
    696  struct page **pages = pvec + pinned;  in etnaviv_gem_userptr_get_pages()
    701  release_pages(pvec, pinned);  in etnaviv_gem_userptr_get_pages()
    702  kvfree(pvec);  in etnaviv_gem_userptr_get_pages()
    710  etnaviv_obj->pages = pvec;  in etnaviv_gem_userptr_get_pages()
/Linux-v4.19/fs/gfs2/

D | aops.c |
    263  struct pagevec *pvec,  in gfs2_write_jdata_pagevec() argument
    278  struct page *page = pvec->pages[i];  in gfs2_write_jdata_pagevec()
    361  struct pagevec pvec;  in gfs2_write_cache_jdata() local
    371  pagevec_init(&pvec);  in gfs2_write_cache_jdata()
    397  nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,  in gfs2_write_cache_jdata()
    402  ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);  in gfs2_write_cache_jdata()
    407  pagevec_release(&pvec);  in gfs2_write_cache_jdata()
/Linux-v4.19/fs/hugetlbfs/

D | inode.c |
    101  static void huge_pagevec_release(struct pagevec *pvec)  in huge_pagevec_release() argument
    105  for (i = 0; i < pagevec_count(pvec); ++i)  in huge_pagevec_release()
    106  put_page(pvec->pages[i]);  in huge_pagevec_release()
    108  pagevec_reinit(pvec);  in huge_pagevec_release()
    408  struct pagevec pvec;  in remove_inode_hugepages() local
    415  pagevec_init(&pvec);  in remove_inode_hugepages()
    421  if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))  in remove_inode_hugepages()
    424  for (i = 0; i < pagevec_count(&pvec); ++i) {  in remove_inode_hugepages()
    425  struct page *page = pvec.pages[i];  in remove_inode_hugepages()
    475  huge_pagevec_release(&pvec);  in remove_inode_hugepages()
/Linux-v4.19/arch/x86/kvm/

D | mmu.c |
    2073  static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,  in mmu_pages_add() argument
    2079  for (i=0; i < pvec->nr; i++)  in mmu_pages_add()
    2080  if (pvec->page[i].sp == sp)  in mmu_pages_add()
    2083  pvec->page[pvec->nr].sp = sp;  in mmu_pages_add()
    2084  pvec->page[pvec->nr].idx = idx;  in mmu_pages_add()
    2085  pvec->nr++;  in mmu_pages_add()
    2086  return (pvec->nr == KVM_PAGE_ARRAY_NR);  in mmu_pages_add()
    2097  struct kvm_mmu_pages *pvec)  in __mmu_unsync_walk() argument
    2113  if (mmu_pages_add(pvec, child, i))  in __mmu_unsync_walk()
    2116  ret = __mmu_unsync_walk(child, pvec);  in __mmu_unsync_walk()
    [all …]
/Linux-v4.19/fs/f2fs/

D | node.c |
    1435  struct pagevec pvec;  in last_fsync_dnode() local
    1439  pagevec_init(&pvec);  in last_fsync_dnode()
    1442  while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,  in last_fsync_dnode()
    1447  struct page *page = pvec.pages[i];  in last_fsync_dnode()
    1451  pagevec_release(&pvec);  in last_fsync_dnode()
    1482  pagevec_release(&pvec);  in last_fsync_dnode()
    1634  struct pagevec pvec;  in f2fs_fsync_node_pages() local
    1647  pagevec_init(&pvec);  in f2fs_fsync_node_pages()
    1650  while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,  in f2fs_fsync_node_pages()
    1655  struct page *page = pvec.pages[i];  in f2fs_fsync_node_pages()
    [all …]

D | checkpoint.c |
    342  struct pagevec pvec;  in f2fs_sync_meta_pages() local
    350  pagevec_init(&pvec);  in f2fs_sync_meta_pages()
    354  while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,  in f2fs_sync_meta_pages()
    359  struct page *page = pvec.pages[i];  in f2fs_sync_meta_pages()
    364  pagevec_release(&pvec);  in f2fs_sync_meta_pages()
    395  pagevec_release(&pvec);  in f2fs_sync_meta_pages()
/Linux-v4.19/fs/fscache/

D | page.c |
    1223  struct pagevec pvec;  in __fscache_uncache_all_inode_pages() local
    1234  pagevec_init(&pvec);  in __fscache_uncache_all_inode_pages()
    1237  if (!pagevec_lookup(&pvec, mapping, &next))  in __fscache_uncache_all_inode_pages()
    1239  for (i = 0; i < pagevec_count(&pvec); i++) {  in __fscache_uncache_all_inode_pages()
    1240  struct page *page = pvec.pages[i];  in __fscache_uncache_all_inode_pages()
    1246  pagevec_release(&pvec);  in __fscache_uncache_all_inode_pages()
/Linux-v4.19/fs/ext4/

D | inode.c |
    1717  struct pagevec pvec;  in mpage_release_unused_pages() local
    1734  pagevec_init(&pvec);  in mpage_release_unused_pages()
    1736  nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);  in mpage_release_unused_pages()
    1740  struct page *page = pvec.pages[i];  in mpage_release_unused_pages()
    1752  pagevec_release(&pvec);  in mpage_release_unused_pages()
    2341  struct pagevec pvec;  in mpage_map_and_submit_buffers() local
    2356  pagevec_init(&pvec);  in mpage_map_and_submit_buffers()
    2358  nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,  in mpage_map_and_submit_buffers()
    2363  struct page *page = pvec.pages[i];  in mpage_map_and_submit_buffers()
    2385  pagevec_release(&pvec);  in mpage_map_and_submit_buffers()
    [all …]
/Linux-v4.19/drivers/mtd/lpddr/

D | lpddr_cmds.c |
    390  unsigned long adr, const struct kvec **pvec,  in do_write_buffer() argument
    422  vec = *pvec;  in do_write_buffer()
    452  *pvec = vec;  in do_write_buffer()