/Linux-v5.15/net/ceph/

pagevec.c
     13  void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)  in ceph_put_page_vector()
     26  void ceph_release_page_vector(struct page **pages, int num_pages)  in ceph_release_page_vector()
     41  struct page **pages;  in ceph_alloc_page_vector()  (local)
     61  int ceph_copy_user_to_page_vector(struct page **pages,  in ceph_copy_user_to_page_vector()
     87  void ceph_copy_to_page_vector(struct page **pages,  in ceph_copy_to_page_vector()
    110  void ceph_copy_from_page_vector(struct page **pages,  in ceph_copy_from_page_vector()
    137  void ceph_zero_page_vector_range(int off, int len, struct page **pages)  in ceph_zero_page_vector_range()

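The libceph helpers above pair a page-vector allocator with a matching release
routine. A minimal sketch of that pattern follows; it assumes the usual libceph
convention that ceph_alloc_page_vector() returns an ERR_PTR on failure, and the
function name and sizes are made up for illustration.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/ceph/libceph.h>

/* Sketch: allocate a page vector, zero part of it, release it. */
static int example_use_page_vector(void)
{
        int num_pages = 4;
        struct page **pages;

        pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* Zero the first 512 bytes, spread across the vector. */
        ceph_zero_page_vector_range(0, 512, pages);

        ceph_release_page_vector(pages, num_pages);
        return 0;
}
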
/Linux-v5.15/mm/

gup.c
    306  void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,  in unpin_user_pages_dirty_lock()
    391  void unpin_user_pages(struct page **pages, unsigned long npages)  in unpin_user_pages()
   1081  unsigned int gup_flags, struct page **pages,  in __get_user_pages()
   1310  struct page **pages,  in __get_user_pages_locked()
   1616  unsigned long nr_pages, struct page **pages,  in __get_user_pages_locked()
   1699  struct page **pages,  in check_and_migrate_movable_pages()
   1771  struct page **pages,  in check_and_migrate_movable_pages()
   1785  struct page **pages,  in __gup_longterm_locked()
   1830  unsigned int gup_flags, struct page **pages,  in __get_user_pages_remote()
   1919  unsigned int gup_flags, struct page **pages,  in get_user_pages_remote()
   [all …]

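The gup.c hits above are the pin/unpin side of get_user_pages. A common driver
pattern, sketched below, pins a user buffer and later drops the pins with
unpin_user_pages_dirty_lock(); the address and length are hypothetical inputs,
and pin_user_pages_fast() is used only because it avoids explicit mmap_lock
handling in the sketch.

#include <linux/mm.h>
#include <linux/slab.h>

/* Sketch: pin a user buffer, use it, then dirty and unpin the pages. */
static int example_pin_user_buffer(unsigned long uaddr, size_t len)
{
        unsigned long nr_pages = DIV_ROUND_UP(len, PAGE_SIZE);
        struct page **pages;
        int pinned;

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
        if (pinned < 0) {
                kfree(pages);
                return pinned;
        }

        /* ... access the pinned pages here ... */

        /* Mark the pages dirty and drop all pins in one call. */
        unpin_user_pages_dirty_lock(pages, pinned, true);
        kfree(pages);
        return 0;
}
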
percpu-vm.c
     34  static struct page **pages;  in pcpu_get_pages()  (local)
     55  struct page **pages, int page_start, int page_end)  in pcpu_free_pages()
     83  struct page **pages, int page_start, int page_end,  in pcpu_alloc_pages()
    154  struct page **pages, int page_start, int page_end)  in pcpu_unmap_pages()
    193  static int __pcpu_map_pages(unsigned long addr, struct page **pages,  in __pcpu_map_pages()
    215  struct page **pages, int page_start, int page_end)  in pcpu_map_pages()
    279  struct page **pages;  in pcpu_populate_chunk()  (local)
    315  struct page **pages;  in pcpu_depopulate_chunk()  (local)

gup_test.c
      9  static void put_back_pages(unsigned int cmd, struct page **pages,  in put_back_pages()
     38  static void verify_dma_pinned(unsigned int cmd, struct page **pages,  in verify_dma_pinned()
     67  static void dump_pages_test(struct gup_test *gup, struct page **pages,  in dump_pages_test()
    104  struct page **pages;  in __gup_test_ioctl()  (local)

mprotect.c
     44  unsigned long pages = 0;  in change_pte_range()  (local)
    228  unsigned long pages = 0;  in change_pmd_range()  (local)
    300  unsigned long pages = 0;  in change_pud_range()  (local)
    320  unsigned long pages = 0;  in change_p4d_range()  (local)
    342  unsigned long pages = 0;  in change_protection_range()  (local)
    368  unsigned long pages;  in change_protection()  (local)

mincore.c
    185  static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)  in do_mincore()
    196  unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);  in do_mincore()  (local)
    234  unsigned long pages;  in SYSCALL_DEFINE3()  (local)

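mincore.c above implements the mincore(2) system call, which fills one byte per
page of a mapping and sets bit 0 when that page is resident in memory. A small
userspace sketch of calling it; the mapping size is arbitrary.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 16 * 4096;
        long page_size = sysconf(_SC_PAGESIZE);
        size_t npages = (len + page_size - 1) / page_size;
        unsigned char *vec = malloc(npages);

        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (!vec || buf == MAP_FAILED)
                return 1;

        /* One byte per page; bit 0 set means "in core". */
        if (mincore(buf, len, vec) == 0) {
                for (size_t i = 0; i < npages; i++)
                        printf("page %zu: %s\n", i,
                               vec[i] & 1 ? "resident" : "not resident");
        }

        free(vec);
        munmap(buf, len);
        return 0;
}
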
/Linux-v5.15/drivers/media/common/videobuf2/

frame_vector.c
    107  struct page **pages;  in put_vaddr_frames()  (local)
    139  struct page **pages;  in frame_vector_to_pages()  (local)
    165  struct page **pages;  in frame_vector_to_pfns()  (local)

/Linux-v5.15/drivers/gpu/drm/i915/gem/selftests/

huge_gem_object.c
     12  struct sg_table *pages)  in huge_free_pages()
     34  struct sg_table *pages;  in huge_get_pages()  (local)
     82  struct sg_table *pages)  in huge_put_pages()

/Linux-v5.15/include/xen/

xen-ops.h
     88  struct page **pages)  in xen_xlate_remap_gfn_array()
     94  int nr, struct page **pages)  in xen_xlate_unmap_gfn_range()
    125  struct page **pages)  in xen_remap_domain_gfn_array()
    184  struct page **pages)  in xen_remap_domain_gfn_range()

mem-reservation.h
     38  struct page **pages,  in xenmem_reservation_va_mapping_update()
     48  struct page **pages)  in xenmem_reservation_va_mapping_reset()

grant_table.h
     83  struct page **pages;  (member)
    212  struct page *pages;  (member)
    214  struct list_head pages;  (member)
    234  struct page **pages;  (member)

/Linux-v5.15/drivers/xen/

xlate_mmu.c
     48  static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,  in xen_for_each_gfn()
     71  struct page **pages;  (member)
    148  struct page **pages)  in xen_xlate_remap_gfn_array()
    184  int nr, struct page **pages)  in xen_xlate_unmap_gfn_range()
    217  struct page **pages;  in xen_xlate_map_ballooned_pages()  (local)
    268  struct page **pages;  (member)

privcmd.c
     90  static void free_page_list(struct list_head *pages)  in free_page_list()
    339  struct page **pages = vma->vm_private_data;  in mmap_batch_fn()  (local)
    421  struct page **pages;  in alloc_empty_pages()  (local)
    582  struct page *pages[], unsigned int nr_pages, unsigned int *pinned)  in lock_pages()
    610  static void unlock_pages(struct page *pages[], unsigned int nr_pages)  in unlock_pages()
    621  struct page **pages = NULL;  in privcmd_ioctl_dm_op()  (local)
    771  struct page **pages;  in privcmd_ioctl_mmap_resource()  (local)
    901  struct page **pages = vma->vm_private_data;  in privcmd_close()  (local)

/Linux-v5.15/drivers/hwtracing/coresight/

coresight-tmc-etr.c
     46  void **pages;  (member)
    192  enum dma_data_direction dir, void **pages)  in tmc_pages_alloc()
    288  static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)  in tmc_alloc_data_pages()
    322  void **pages)  in tmc_alloc_sg_table()
    564  unsigned long size, void **pages)  in tmc_init_etr_sg_table()
    599  void **pages)  in tmc_etr_alloc_flat_buf()
    679  void **pages)  in tmc_etr_alloc_sg_buf()
    817  void **pages)  in tmc_etr_mode_alloc_buf()
    846  int node, void **pages)  in tmc_alloc_etr_buf()
   1223  int nr_pages, void **pages, bool snapshot)  in alloc_etr_buf()
   [all …]

/Linux-v5.15/include/linux/

balloon_compaction.h
     57  struct list_head pages;  /* Pages enqueued & handled to Host */  (member)
    188  static inline void balloon_page_push(struct list_head *pages, struct page *page)  in balloon_page_push()
    200  static inline struct page *balloon_page_pop(struct list_head *pages)  in balloon_page_pop()

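balloon_page_push() and balloon_page_pop() above are small inline helpers that
stage balloon pages on a plain list_head: push adds a page to the list, pop
removes and returns the first page, or NULL when the list is empty. A sketch of
draining such a local list; alloc_page() here merely stands in for whatever
actually produces the pages.

#include <linux/balloon_compaction.h>
#include <linux/gfp.h>

/* Sketch: stage a few pages on a local list, then drain it. */
static void example_balloon_staging(void)
{
        LIST_HEAD(staged);
        struct page *page;
        int i;

        for (i = 0; i < 4; i++) {
                page = alloc_page(GFP_KERNEL);
                if (!page)
                        break;
                balloon_page_push(&staged, page);
        }

        /* Each pop unlinks the page from the list before returning it. */
        while ((page = balloon_page_pop(&staged)))
                __free_page(page);
}
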
/Linux-v5.15/arch/x86/include/asm/

pgtable_32.h
     69  #define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)  (argument)
     71  #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)  (argument)

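The two PAGE_TABLE_SIZE() definitions above are the PAE and non-PAE estimates
of how many page-table pages are needed to map a given number of pages of
lowmem. As a worked example, using the usual x86-32 values (PTRS_PER_PMD = 512
and PTRS_PER_PGD = 4 with PAE, PTRS_PER_PGD = 1024 without, assumed here rather
than quoted from the file): mapping 262144 pages, i.e. 1 GiB of 4 KiB pages,
needs 262144 / 512 + 4 = 516 page-table pages under PAE, but only
262144 / 1024 = 256 under the two-level layout.
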
/Linux-v5.15/fs/squashfs/

page_actor.h
     12  int pages;  (member)
     18  int pages, int length)  in squashfs_page_actor_init()
     58  int pages;  (member)

page_actor.c
     42  int pages, int length)  in squashfs_page_actor_init()
     82  int pages, int length)  in squashfs_page_actor_init_special()

/Linux-v5.15/drivers/gpu/drm/amd/amdgpu/

amdgpu_gart.c
    225  int pages)  in amdgpu_gart_unbind()
    274  int pages, dma_addr_t *dma_addr, uint64_t flags,  in amdgpu_gart_map()
    311  int pages, dma_addr_t *dma_addr,  in amdgpu_gart_bind()

/Linux-v5.15/fs/ntfs/

file.c
    492  pgoff_t index, const unsigned nr_pages, struct page **pages,  in __ntfs_grab_cache_pages()
    567  static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,  in ntfs_prepare_pages_for_non_resident_write()
   1357  static inline void ntfs_flush_dcache_pages(struct page **pages,  in ntfs_flush_dcache_pages()
   1382  struct page **pages, const unsigned nr_pages,  in ntfs_commit_pages_after_non_resident_write()
   1536  static int ntfs_commit_pages_after_write(struct page **pages,  in ntfs_commit_pages_after_write()
   1682  static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,  in ntfs_copy_from_user_iter()
   1731  struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];  in ntfs_perform_write()  (local)

/Linux-v5.15/drivers/staging/media/ipu3/

ipu3-dmamap.c
     20  static void imgu_dmamap_free_buffer(struct page **pages,  in imgu_dmamap_free_buffer()
     36  struct page **pages;  in imgu_dmamap_alloc_buffer()  (local)
    100  struct page **pages;  in imgu_dmamap_alloc()  (local)

/Linux-v5.15/fs/cifs/

fscache.h
    107  struct list_head *pages,  in cifs_readpages_from_fscache()
    124  struct list_head *pages)  in cifs_fscache_readpages_cancel()
    167  struct list_head *pages,  in cifs_readpages_from_fscache()
    177  struct list_head *pages)  in cifs_fscache_readpages_cancel()

/Linux-v5.15/drivers/block/xen-blkback/

blkback.c
    246  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];  in free_persistent_gnts()  (local)
    288  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];  in xen_blkbk_unmap_purged_grants()  (local)
    647  struct grant_page **pages,  in xen_blkbk_unmap_prepare()
    707  struct grant_page **pages = req->segments;  in xen_blkbk_unmap_and_respond()  (local)
    732  struct grant_page *pages[],  in xen_blkbk_unmap()
    757  struct grant_page *pages[],  in xen_blkbk_map()
    918  struct grant_page **pages = pending_req->indirect_pages;  in xen_blkbk_parse_indirect()  (local)
   1199  struct grant_page **pages = pending_req->segments;  in dispatch_rw_block_io()  (local)

/Linux-v5.15/kernel/dma/

remap.c
     22  void *dma_common_pages_remap(struct page **pages, size_t size,  in dma_common_pages_remap()
     42  struct page **pages;  in dma_common_contiguous_remap()  (local)

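dma_common_pages_remap() above stitches an array of possibly discontiguous
pages into one contiguous kernel virtual mapping; it is essentially a vmap()
wrapper used by the DMA-mapping code. A sketch of the remap/unmap pairing; the
trailing pgprot and caller parameters are assumed from the helper's usual form,
since the listing only shows its first two arguments.

#include <linux/dma-map-ops.h>
#include <linux/mm.h>

/* Sketch: map a page array contiguously, use it, then tear it down. */
static int example_remap_pages(struct page **pages, size_t size)
{
        void *vaddr;

        vaddr = dma_common_pages_remap(pages, size, PAGE_KERNEL,
                                       __builtin_return_address(0));
        if (!vaddr)
                return -ENOMEM;

        /* ... touch the buffer through vaddr ... */

        dma_common_free_remap(vaddr, size);
        return 0;
}
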
/Linux-v5.15/fs/isofs/

compress.c
     42  struct page **pages, unsigned poffset,  in zisofs_uncompress_block()
    202  struct page **pages)  in zisofs_fill_pages()
    309  struct page **pages;  in zisofs_readpage()  (local)