
Searched refs:pvec (Results 1 – 25 of 33) sorted by relevance

/Linux-v5.4/include/linux/
pagevec.h
26 void __pagevec_release(struct pagevec *pvec);
27 void __pagevec_lru_add(struct pagevec *pvec);
28 unsigned pagevec_lookup_entries(struct pagevec *pvec,
32 void pagevec_remove_exceptionals(struct pagevec *pvec);
33 unsigned pagevec_lookup_range(struct pagevec *pvec,
36 static inline unsigned pagevec_lookup(struct pagevec *pvec, in pagevec_lookup() argument
40 return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1); in pagevec_lookup()
43 unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
46 unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
49 static inline unsigned pagevec_lookup_tag(struct pagevec *pvec, in pagevec_lookup_tag() argument
[all …]
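
A minimal usage sketch of the lookup/release API declared above, assuming the v5.4 signatures shown here; the mapping, start/end range, and the per-page work are placeholders supplied by a hypothetical caller:

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/* Walk every page cached in [start, end], one PAGEVEC_SIZE batch at a time.
 * pagevec_lookup_range() advances 'index' past the last page it returned. */
static void walk_cached_range(struct address_space *mapping,
			      pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned i;

	pagevec_init(&pvec);
	while (pagevec_lookup_range(&pvec, mapping, &index, end)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* per-page work on 'page' goes here */
			(void)page;
		}
		pagevec_release(&pvec);	/* drop the batch's page references */
		cond_resched();
	}
}
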
/Linux-v5.4/mm/
swap.c
190 static void pagevec_lru_move_fn(struct pagevec *pvec, in pagevec_lru_move_fn() argument
199 for (i = 0; i < pagevec_count(pvec); i++) { in pagevec_lru_move_fn()
200 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn()
215 release_pages(pvec->pages, pvec->nr); in pagevec_lru_move_fn()
216 pagevec_reinit(pvec); in pagevec_lru_move_fn()
236 static void pagevec_move_tail(struct pagevec *pvec) in pagevec_move_tail() argument
240 pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved); in pagevec_move_tail()
253 struct pagevec *pvec; in rotate_reclaimable_page() local
258 pvec = this_cpu_ptr(&lru_rotate_pvecs); in rotate_reclaimable_page()
259 if (!pagevec_add(pvec, page) || PageCompound(page)) in rotate_reclaimable_page()
[all …]
truncate.c
60 struct pagevec *pvec, pgoff_t *indices, in truncate_exceptional_pvec_entries() argument
70 for (j = 0; j < pagevec_count(pvec); j++) in truncate_exceptional_pvec_entries()
71 if (xa_is_value(pvec->pages[j])) in truncate_exceptional_pvec_entries()
74 if (j == pagevec_count(pvec)) in truncate_exceptional_pvec_entries()
82 for (i = j; i < pagevec_count(pvec); i++) { in truncate_exceptional_pvec_entries()
83 struct page *page = pvec->pages[i]; in truncate_exceptional_pvec_entries()
87 pvec->pages[j++] = page; in truncate_exceptional_pvec_entries()
104 pvec->nr = j; in truncate_exceptional_pvec_entries()
298 struct pagevec pvec; in truncate_inode_pages_range() local
327 pagevec_init(&pvec); in truncate_inode_pages_range()
[all …]
mlock.c
246 static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, in __putback_lru_fast_prepare() argument
253 pagevec_add(pvec, page); in __putback_lru_fast_prepare()
269 static void __putback_lru_fast(struct pagevec *pvec, int pgrescued) in __putback_lru_fast() argument
271 count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec)); in __putback_lru_fast()
276 __pagevec_lru_add(pvec); in __putback_lru_fast()
290 static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) in __munlock_pagevec() argument
293 int nr = pagevec_count(pvec); in __munlock_pagevec()
303 struct page *page = pvec->pages[i]; in __munlock_pagevec()
324 pagevec_add(&pvec_putback, pvec->pages[i]); in __munlock_pagevec()
325 pvec->pages[i] = NULL; in __munlock_pagevec()
[all …]
shmem.c
766 struct pagevec pvec; in shmem_unlock_mapping() local
770 pagevec_init(&pvec); in shmem_unlock_mapping()
779 pvec.nr = find_get_entries(mapping, index, in shmem_unlock_mapping()
780 PAGEVEC_SIZE, pvec.pages, indices); in shmem_unlock_mapping()
781 if (!pvec.nr) in shmem_unlock_mapping()
783 index = indices[pvec.nr - 1] + 1; in shmem_unlock_mapping()
784 pagevec_remove_exceptionals(&pvec); in shmem_unlock_mapping()
785 check_move_unevictable_pages(&pvec); in shmem_unlock_mapping()
786 pagevec_release(&pvec); in shmem_unlock_mapping()
804 struct pagevec pvec; in shmem_undo_range() local
[all …]
filemap.c
295 struct pagevec *pvec) in page_cache_delete_batch() argument
297 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); in page_cache_delete_batch()
304 if (i >= pagevec_count(pvec)) in page_cache_delete_batch()
317 if (page != pvec->pages[i]) { in page_cache_delete_batch()
318 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, in page_cache_delete_batch()
343 struct pagevec *pvec) in delete_from_page_cache_batch() argument
348 if (!pagevec_count(pvec)) in delete_from_page_cache_batch()
352 for (i = 0; i < pagevec_count(pvec); i++) { in delete_from_page_cache_batch()
353 trace_mm_filemap_delete_from_page_cache(pvec->pages[i]); in delete_from_page_cache_batch()
355 unaccount_page_cache_page(mapping, pvec->pages[i]); in delete_from_page_cache_batch()
[all …]
page-writeback.c
2165 struct pagevec pvec; in write_cache_pages() local
2174 pagevec_init(&pvec); in write_cache_pages()
2195 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in write_cache_pages()
2201 struct page *page = pvec.pages[i]; in write_cache_pages()
2277 pagevec_release(&pvec); in write_cache_pages()
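
The write_cache_pages() hits above follow the tagged variant of the same loop; a hedged sketch of that shape under the v5.4 pagevec_lookup_range_tag() signature, with the actual writeback of each page elided:

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/* Sketch of a tagged walk in the style of write_cache_pages(): visit every
 * page tagged dirty in [index, end], a pagevec at a time. */
static void walk_dirty_range(struct address_space *mapping,
			     pgoff_t index, pgoff_t end)
{
	struct pagevec pvec;
	unsigned nr_pages, i;

	pagevec_init(&pvec);
	while ((nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						    end, PAGECACHE_TAG_DIRTY))) {
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* lock, re-check page->mapping, then write the page out */
			(void)page;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
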
/Linux-v5.4/fs/nilfs2/
page.c
244 struct pagevec pvec; in nilfs_copy_dirty_pages() local
249 pagevec_init(&pvec); in nilfs_copy_dirty_pages()
251 if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY)) in nilfs_copy_dirty_pages()
254 for (i = 0; i < pagevec_count(&pvec); i++) { in nilfs_copy_dirty_pages()
255 struct page *page = pvec.pages[i], *dpage; in nilfs_copy_dirty_pages()
279 pagevec_release(&pvec); in nilfs_copy_dirty_pages()
298 struct pagevec pvec; in nilfs_copy_back_pages() local
302 pagevec_init(&pvec); in nilfs_copy_back_pages()
304 n = pagevec_lookup(&pvec, smap, &index); in nilfs_copy_back_pages()
308 for (i = 0; i < pagevec_count(&pvec); i++) { in nilfs_copy_back_pages()
[all …]
segment.c
688 struct pagevec pvec; in nilfs_lookup_dirty_data_buffers() local
702 pagevec_init(&pvec); in nilfs_lookup_dirty_data_buffers()
705 !pagevec_lookup_range_tag(&pvec, mapping, &index, last, in nilfs_lookup_dirty_data_buffers()
709 for (i = 0; i < pagevec_count(&pvec); i++) { in nilfs_lookup_dirty_data_buffers()
711 struct page *page = pvec.pages[i]; in nilfs_lookup_dirty_data_buffers()
726 pagevec_release(&pvec); in nilfs_lookup_dirty_data_buffers()
732 pagevec_release(&pvec); in nilfs_lookup_dirty_data_buffers()
742 struct pagevec pvec; in nilfs_lookup_dirty_node_buffers() local
747 pagevec_init(&pvec); in nilfs_lookup_dirty_node_buffers()
749 while (pagevec_lookup_tag(&pvec, mapping, &index, in nilfs_lookup_dirty_node_buffers()
[all …]
/Linux-v5.4/drivers/gpu/drm/i915/gem/
i915_gem_shmem.c
19 static void check_release_pagevec(struct pagevec *pvec) in check_release_pagevec() argument
21 check_move_unevictable_pages(pvec); in check_release_pagevec()
22 __pagevec_release(pvec); in check_release_pagevec()
39 struct pagevec pvec; in shmem_get_pages() local
191 pagevec_init(&pvec); in shmem_get_pages()
193 if (!pagevec_add(&pvec, page)) in shmem_get_pages()
194 check_release_pagevec(&pvec); in shmem_get_pages()
196 if (pagevec_count(&pvec)) in shmem_get_pages()
197 check_release_pagevec(&pvec); in shmem_get_pages()
297 struct pagevec pvec; in shmem_put_pages() local
[all …]
i915_gem_userptr.c
430 struct page **pvec, int num_pages) in __i915_gem_userptr_alloc_pages() argument
442 ret = __sg_alloc_table_from_pages(st, pvec, num_pages, in __i915_gem_userptr_alloc_pages()
477 struct page **pvec; in __i915_gem_userptr_get_pages_worker() local
483 pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); in __i915_gem_userptr_get_pages_worker()
484 if (pvec != NULL) { in __i915_gem_userptr_get_pages_worker()
500 pvec + pinned, NULL, NULL); in __i915_gem_userptr_get_pages_worker()
516 pages = __i915_gem_userptr_alloc_pages(obj, pvec, in __i915_gem_userptr_get_pages_worker()
530 release_pages(pvec, pinned); in __i915_gem_userptr_get_pages_worker()
531 kvfree(pvec); in __i915_gem_userptr_get_pages_worker()
583 struct page **pvec; in i915_gem_userptr_get_pages() local
[all …]
/Linux-v5.4/fs/iomap/
seek.c
80 struct pagevec pvec; in page_cache_seek_hole_data() local
85 pagevec_init(&pvec); in page_cache_seek_hole_data()
90 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index, in page_cache_seek_hole_data()
96 struct page *page = pvec.pages[i]; in page_cache_seek_hole_data()
102 pagevec_release(&pvec); in page_cache_seek_hole_data()
115 pagevec_release(&pvec); in page_cache_seek_hole_data()
/Linux-v5.4/drivers/gpu/drm/
drm_gem.c
525 static void drm_gem_check_release_pagevec(struct pagevec *pvec) in drm_gem_check_release_pagevec() argument
527 check_move_unevictable_pages(pvec); in drm_gem_check_release_pagevec()
528 __pagevec_release(pvec); in drm_gem_check_release_pagevec()
557 struct pagevec pvec; in drm_gem_get_pages() local
596 pagevec_init(&pvec); in drm_gem_get_pages()
598 if (!pagevec_add(&pvec, pages[i])) in drm_gem_get_pages()
599 drm_gem_check_release_pagevec(&pvec); in drm_gem_get_pages()
601 if (pagevec_count(&pvec)) in drm_gem_get_pages()
602 drm_gem_check_release_pagevec(&pvec); in drm_gem_get_pages()
621 struct pagevec pvec; in drm_gem_put_pages() local
[all …]
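
Both GEM helpers above (i915's check_release_pagevec() and drm_gem_check_release_pagevec()) use the same fill-then-flush idiom: pagevec_add() returns the space remaining, so the batch is flushed whenever it fills and once more at the end. A minimal sketch of that idiom against the v5.4 API, where release_batch() is a hypothetical stand-in for those helpers:

#include <linux/pagevec.h>
#include <linux/swap.h>		/* check_move_unevictable_pages() */

/* Hypothetical stand-in for the check-and-release helpers above. */
static void release_batch(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);	/* drops the refs and reinitialises pvec */
}

/* Hand back an array of page references in PAGEVEC_SIZE batches. */
static void put_page_array(struct page **pages, unsigned long npages)
{
	struct pagevec pvec;
	unsigned long i;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		/* pagevec_add() returns the slots left; 0 means the batch is full */
		if (!pagevec_add(&pvec, pages[i]))
			release_batch(&pvec);
	}
	if (pagevec_count(&pvec))
		release_batch(&pvec);
}
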
/Linux-v5.4/drivers/gpu/drm/i915/
i915_gem_gtt.c
332 pagevec_init(&stash->pvec); in stash_init()
341 if (likely(stash->pvec.nr)) in stash_pop_page()
342 page = stash->pvec.pages[--stash->pvec.nr]; in stash_pop_page()
348 static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) in stash_push_pagevec() argument
354 nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec)); in stash_push_pagevec()
355 memcpy(stash->pvec.pages + stash->pvec.nr, in stash_push_pagevec()
356 pvec->pages + pvec->nr - nr, in stash_push_pagevec()
357 sizeof(pvec->pages[0]) * nr); in stash_push_pagevec()
358 stash->pvec.nr += nr; in stash_push_pagevec()
362 pvec->nr -= nr; in stash_push_pagevec()
[all …]
/Linux-v5.4/fs/gfs2/
aops.c
240 struct pagevec *pvec, in gfs2_write_jdata_pagevec() argument
255 struct page *page = pvec->pages[i]; in gfs2_write_jdata_pagevec()
338 struct pagevec pvec; in gfs2_write_cache_jdata() local
348 pagevec_init(&pvec); in gfs2_write_cache_jdata()
374 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in gfs2_write_cache_jdata()
379 ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index); in gfs2_write_cache_jdata()
384 pagevec_release(&pvec); in gfs2_write_cache_jdata()
/Linux-v5.4/drivers/gpu/drm/etnaviv/
etnaviv_gem.c
660 struct page **pvec = NULL; in etnaviv_gem_userptr_get_pages() local
669 pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); in etnaviv_gem_userptr_get_pages()
670 if (!pvec) in etnaviv_gem_userptr_get_pages()
676 struct page **pages = pvec + pinned; in etnaviv_gem_userptr_get_pages()
681 release_pages(pvec, pinned); in etnaviv_gem_userptr_get_pages()
682 kvfree(pvec); in etnaviv_gem_userptr_get_pages()
690 etnaviv_obj->pages = pvec; in etnaviv_gem_userptr_get_pages()
/Linux-v5.4/fs/ceph/
addr.c
788 struct pagevec pvec; in ceph_writepages_start() local
812 pagevec_init(&pvec); in ceph_writepages_start()
870 pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index, in ceph_writepages_start()
877 page = pvec.pages[i]; in ceph_writepages_start()
997 pvec.pages[i] = NULL; in ceph_writepages_start()
1009 if (!pvec.pages[j]) in ceph_writepages_start()
1012 pvec.pages[n] = pvec.pages[j]; in ceph_writepages_start()
1015 pvec.nr = n; in ceph_writepages_start()
1020 pagevec_release(&pvec); in ceph_writepages_start()
1145 dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr, in ceph_writepages_start()
[all …]
/Linux-v5.4/fs/hugetlbfs/
inode.c
115 static void huge_pagevec_release(struct pagevec *pvec) in huge_pagevec_release() argument
119 for (i = 0; i < pagevec_count(pvec); ++i) in huge_pagevec_release()
120 put_page(pvec->pages[i]); in huge_pagevec_release()
122 pagevec_reinit(pvec); in huge_pagevec_release()
422 struct pagevec pvec; in remove_inode_hugepages() local
429 pagevec_init(&pvec); in remove_inode_hugepages()
435 if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1)) in remove_inode_hugepages()
438 for (i = 0; i < pagevec_count(&pvec); ++i) { in remove_inode_hugepages()
439 struct page *page = pvec.pages[i]; in remove_inode_hugepages()
487 huge_pagevec_release(&pvec); in remove_inode_hugepages()
/Linux-v5.4/arch/x86/kvm/
mmu.c
2246 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, in mmu_pages_add() argument
2252 for (i=0; i < pvec->nr; i++) in mmu_pages_add()
2253 if (pvec->page[i].sp == sp) in mmu_pages_add()
2256 pvec->page[pvec->nr].sp = sp; in mmu_pages_add()
2257 pvec->page[pvec->nr].idx = idx; in mmu_pages_add()
2258 pvec->nr++; in mmu_pages_add()
2259 return (pvec->nr == KVM_PAGE_ARRAY_NR); in mmu_pages_add()
2270 struct kvm_mmu_pages *pvec) in __mmu_unsync_walk() argument
2286 if (mmu_pages_add(pvec, child, i)) in __mmu_unsync_walk()
2289 ret = __mmu_unsync_walk(child, pvec); in __mmu_unsync_walk()
[all …]
/Linux-v5.4/fs/f2fs/
node.c
1445 struct pagevec pvec; in last_fsync_dnode() local
1449 pagevec_init(&pvec); in last_fsync_dnode()
1452 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, in last_fsync_dnode()
1457 struct page *page = pvec.pages[i]; in last_fsync_dnode()
1461 pagevec_release(&pvec); in last_fsync_dnode()
1492 pagevec_release(&pvec); in last_fsync_dnode()
1653 struct pagevec pvec; in f2fs_fsync_node_pages() local
1667 pagevec_init(&pvec); in f2fs_fsync_node_pages()
1670 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, in f2fs_fsync_node_pages()
1675 struct page *page = pvec.pages[i]; in f2fs_fsync_node_pages()
[all …]
checkpoint.c
367 struct pagevec pvec; in f2fs_sync_meta_pages() local
375 pagevec_init(&pvec); in f2fs_sync_meta_pages()
379 while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, in f2fs_sync_meta_pages()
384 struct page *page = pvec.pages[i]; in f2fs_sync_meta_pages()
389 pagevec_release(&pvec); in f2fs_sync_meta_pages()
419 pagevec_release(&pvec); in f2fs_sync_meta_pages()
/Linux-v5.4/fs/fscache/
page.c
1219 struct pagevec pvec; in __fscache_uncache_all_inode_pages() local
1230 pagevec_init(&pvec); in __fscache_uncache_all_inode_pages()
1233 if (!pagevec_lookup(&pvec, mapping, &next)) in __fscache_uncache_all_inode_pages()
1235 for (i = 0; i < pagevec_count(&pvec); i++) { in __fscache_uncache_all_inode_pages()
1236 struct page *page = pvec.pages[i]; in __fscache_uncache_all_inode_pages()
1242 pagevec_release(&pvec); in __fscache_uncache_all_inode_pages()
/Linux-v5.4/fs/ext4/
inode.c
1685 struct pagevec pvec; in mpage_release_unused_pages() local
1702 pagevec_init(&pvec); in mpage_release_unused_pages()
1704 nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end); in mpage_release_unused_pages()
1708 struct page *page = pvec.pages[i]; in mpage_release_unused_pages()
1720 pagevec_release(&pvec); in mpage_release_unused_pages()
2359 struct pagevec pvec; in mpage_map_and_submit_buffers() local
2374 pagevec_init(&pvec); in mpage_map_and_submit_buffers()
2376 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, in mpage_map_and_submit_buffers()
2381 struct page *page = pvec.pages[i]; in mpage_map_and_submit_buffers()
2403 pagevec_release(&pvec); in mpage_map_and_submit_buffers()
[all …]
/Linux-v5.4/drivers/mtd/lpddr/
lpddr_cmds.c
378 unsigned long adr, const struct kvec **pvec, in do_write_buffer() argument
410 vec = *pvec; in do_write_buffer()
440 *pvec = vec; in do_write_buffer()
/Linux-v5.4/fs/
buffer.c
1567 struct pagevec pvec; in clean_bdev_aliases() local
1575 pagevec_init(&pvec); in clean_bdev_aliases()
1576 while (pagevec_lookup_range(&pvec, bd_mapping, &index, end)) { in clean_bdev_aliases()
1577 count = pagevec_count(&pvec); in clean_bdev_aliases()
1579 struct page *page = pvec.pages[i]; in clean_bdev_aliases()
1608 pagevec_release(&pvec); in clean_bdev_aliases()
