Searched refs:pagevec_count (Results 1 – 15 of 15) sorted by relevance
1. mm/swap.c
     198  for (i = 0; i < pagevec_count(pvec); i++) {  in pagevec_lru_move_fn()
     297  if (pagevec_count(pvec))  in activate_page_drain()
     303  return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;  in need_activate_page_drain()
     355  for (i = pagevec_count(pvec) - 1; i >= 0; i--) {  in __lru_cache_activate_page()
     581  if (pagevec_count(pvec))  in lru_add_drain_cpu()
     585  if (pagevec_count(pvec)) {  in lru_add_drain_cpu()
     595  if (pagevec_count(pvec))  in lru_add_drain_cpu()
     599  if (pagevec_count(pvec))  in lru_add_drain_cpu()
     690  if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||  in lru_add_drain_all()
     691  pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||  in lru_add_drain_all()
     [all …]
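The mm/swap.c hits are the per-CPU pagevec machinery: every drain path asks pagevec_count() first, so it only does LRU work when the batch is non-empty. An abridged sketch of that guard pattern, following the ~v4.17 lru_add_drain_cpu() named above (only the first pagevec is shown; the real function repeats the check for lru_rotate_pvecs, lru_deactivate_file_pvecs and the activate pagevec):

    void lru_add_drain_cpu(int cpu)
    {
            struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

            /* skip the LRU-lock work entirely when nothing is batched */
            if (pagevec_count(pvec))
                    __pagevec_lru_add(pvec);

            /* ... the same pagevec_count() guard is repeated for the
             * other per-CPU pagevecs ... */
    }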
2. mm/truncate.c
     72  for (j = 0; j < pagevec_count(pvec); j++)  in truncate_exceptional_pvec_entries()
     76  if (j == pagevec_count(pvec))  in truncate_exceptional_pvec_entries()
     84  for (i = j; i < pagevec_count(pvec); i++) {  in truncate_exceptional_pvec_entries()
     342  for (i = 0; i < pagevec_count(&pvec); i++) {  in truncate_inode_pages_range()
     366  for (i = 0; i < pagevec_count(&locked_pvec); i++)  in truncate_inode_pages_range()
     369  for (i = 0; i < pagevec_count(&locked_pvec); i++)  in truncate_inode_pages_range()
     434  for (i = 0; i < pagevec_count(&pvec); i++) {  in truncate_inode_pages_range()
     556  for (i = 0; i < pagevec_count(&pvec); i++) {  in invalidate_mapping_pages()
     687  for (i = 0; i < pagevec_count(&pvec); i++) {  in invalidate_inode_pages2_range()
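All of the mm/truncate.c hits sit inside the same gather-and-iterate loop. A minimal sketch of that shape, assuming the ~v4.17 pagevec API (one-argument pagevec_init(), three-argument pagevec_lookup() that advances the index); walk_mapping() is a hypothetical name for illustration, and the truncate paths themselves use pagevec_lookup_entries() instead so they also see exceptional (shadow/DAX) entries:

    static void walk_mapping(struct address_space *mapping, pgoff_t start)
    {
            struct pagevec pvec;
            pgoff_t index = start;
            int i;

            pagevec_init(&pvec);
            while (pagevec_lookup(&pvec, mapping, &index)) {
                    for (i = 0; i < pagevec_count(&pvec); i++) {
                            struct page *page = pvec.pages[i];
                            /* ... lock_page(page), per-page work,
                             * unlock_page(page) ... */
                    }
                    /* drop the references pagevec_lookup() took */
                    pagevec_release(&pvec);
                    cond_resched();
            }
    }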
3. mm/mlock.c
     271  count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));  in __putback_lru_fast()
     293  int nr = pagevec_count(pvec);  in __munlock_pagevec()
     357  if (pagevec_count(&pvec_putback))  in __munlock_pagevec()
4. mm/filemap.c
     335  if (i >= pagevec_count(pvec) && !tail_pages)  in page_cache_tree_delete_batch()
     375  if (!pagevec_count(pvec))  in delete_from_page_cache_batch()
     379  for (i = 0; i < pagevec_count(pvec); i++) {  in delete_from_page_cache_batch()
     387  for (i = 0; i < pagevec_count(pvec); i++)  in delete_from_page_cache_batch()
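delete_from_page_cache_batch() uses pagevec_count() three ways: as an early-out when the batch is empty, then as the bound for its two passes (unaccounting under the tree lock, freeing after unlock). An abridged, hedged sketch of that structure (~v4.17 mm/filemap.c; the locking and the static helpers are reduced to comments):

    void delete_from_page_cache_batch(struct address_space *mapping,
                                      struct pagevec *pvec)
    {
            int i;

            if (!pagevec_count(pvec))   /* nothing batched, nothing to do */
                    return;

            /* pass 1, under the mapping's tree lock: unaccount each page,
             * then page_cache_tree_delete_batch() drops them all */
            for (i = 0; i < pagevec_count(pvec); i++) {
                    /* ... unaccount_page_cache_page(mapping, pvec->pages[i]); ... */
            }

            /* pass 2, after unlocking: free each page */
            for (i = 0; i < pagevec_count(pvec); i++) {
                    /* ... page_cache_free_page(mapping, pvec->pages[i]); ... */
            }
    }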
5. mm/shmem.c
     820  for (i = 0; i < pagevec_count(&pvec); i++) {  in shmem_undo_range()
     917  for (i = 0; i < pagevec_count(&pvec); i++) {  in shmem_undo_range()
6. include/linux/pagevec.h
     64  static inline unsigned pagevec_count(struct pagevec *pvec)  in pagevec_count() (function definition)
     85  if (pagevec_count(pvec))  in pagevec_release()
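Result 6 is the definition site rather than a call site. For reference, both helpers as they read in include/linux/pagevec.h of this era (~v4.17): pagevec_count() simply reports how many pages are batched in pvec->nr, and pagevec_release() only forwards to __pagevec_release() when there is something to release.

    static inline unsigned pagevec_count(struct pagevec *pvec)
    {
            return pvec->nr;
    }

    static inline void pagevec_release(struct pagevec *pvec)
    {
            if (pagevec_count(pvec))
                    __pagevec_release(pvec);
    }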
7. fs/nilfs2/page.c
     254  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_copy_dirty_pages()
     309  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_copy_back_pages()
     371  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_clear_dirty_pages()
     552  } while (++i < pagevec_count(&pvec));  in nilfs_find_uncommitted_extent()
8. fs/nilfs2/segment.c
     709  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_lookup_dirty_data_buffers()
     751  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_lookup_dirty_node_buffers()
9. fs/nilfs2/btree.c
     2154  for (i = 0; i < pagevec_count(&pvec); i++) {  in nilfs_btree_lookup_dirty_buffers()
10. fs/cachefiles/rdwr.c
     761  if (pagevec_count(&pagevec) > 0)  in cachefiles_read_or_alloc_pages()
     859  if (pagevec_count(&pagevec) > 0)  in cachefiles_allocate_pages()
11. fs/hugetlbfs/inode.c
     105  for (i = 0; i < pagevec_count(pvec); ++i)  in huge_pagevec_release()
     424  for (i = 0; i < pagevec_count(&pvec); ++i) {  in remove_inode_hugepages()
12. fs/dax.c
     671  for (i = 0; i < pagevec_count(&pvec); i++) {  in dax_layout_busy_page()
     691  if (i + 1 >= pagevec_count(&pvec))  in dax_layout_busy_page()
13. fs/buffer.c
     1575  count = pagevec_count(&pvec);  in clean_bdev_aliases()
14. fs/fscache/page.c
     1239  for (i = 0; i < pagevec_count(&pvec); i++) {  in __fscache_uncache_all_inode_pages()
15. drivers/gpu/drm/i915/i915_gem_gtt.c
     482  GEM_BUG_ON(!pagevec_count(pvec));  in vm_free_pages_release()
     559  if (pagevec_count(&vm->free_pages.pvec))  in i915_address_space_fini()
     561  GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));  in i915_address_space_fini()
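The i915 hits are the producer side of the same idiom: a pagevec used as a small stash that is flushed when it fills and drained at teardown (vm_free_pages_release() asserts it is never called on an empty pagevec). A hedged, generic sketch of that fill-then-flush pattern using the plain pagevec API rather than the i915 wrappers:

    /* pagevec_add() returns the space remaining after adding, so a
     * zero return means the pagevec just filled up and must be flushed */
    if (!pagevec_add(&pvec, page))
            pagevec_release(&pvec);

    /* at teardown, flush whatever is still batched; the explicit
     * pagevec_count() check mirrors i915_address_space_fini() above,
     * though pagevec_release() already no-ops on an empty pagevec */
    if (pagevec_count(&pvec))
            pagevec_release(&pvec);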