/Linux-v5.10/mm/

readahead.c
    279  unsigned long max_pages, index;  in force_page_cache_ra() (local)
    290  max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);  in force_page_cache_ra()
    291  nr_to_read = min_t(unsigned long, nr_to_read, max_pages);  in force_page_cache_ra()
    440  unsigned long max_pages = ra->ra_pages;  in ondemand_readahead() (local)
    449  if (req_size > max_pages && bdi->io_pages > max_pages)  in ondemand_readahead()
    450  max_pages = min(req_size, bdi->io_pages);  in ondemand_readahead()
    465  ra->size = get_next_ra_size(ra, max_pages);  in ondemand_readahead()
    481  max_pages);  in ondemand_readahead()
    484  if (!start || start - index > max_pages)  in ondemand_readahead()
    490  ra->size = get_next_ra_size(ra, max_pages);  in ondemand_readahead()
    [all …]
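The readahead hits above follow one pattern: compute an upper bound from the larger of the device's advertised I/O size and the per-file readahead window, then cap the request at that bound. A minimal userspace sketch of just that clamping step, with illustrative variable names (bdi_io_pages and ra_pages are stand-ins, not the kernel structures):

```c
/* Illustrative userspace sketch of the readahead clamp, not kernel code. */
#include <stdio.h>

static unsigned long clamp_readahead(unsigned long nr_to_read,
                                     unsigned long bdi_io_pages,
                                     unsigned long ra_pages)
{
    /* limit is the larger of the device I/O size and the readahead window */
    unsigned long max_pages = bdi_io_pages > ra_pages ? bdi_io_pages
                                                      : ra_pages;
    /* the request itself is then capped to that limit */
    return nr_to_read < max_pages ? nr_to_read : max_pages;
}

int main(void)
{
    /* a 512-page request, 256-page device limit, 128-page window */
    printf("%lu\n", clamp_readahead(512, 256, 128)); /* prints 256 */
    return 0;
}
```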
|
swap_state.c
    573  int max_pages,  in __swapin_nr_pages() (argument)
    599  if (pages > max_pages)  in __swapin_nr_pages()
    600  pages = max_pages;  in __swapin_nr_pages()
    613  unsigned int hits, pages, max_pages;  in swapin_nr_pages() (local)
    616  max_pages = 1 << READ_ONCE(page_cluster);  in swapin_nr_pages()
    617  if (max_pages <= 1)  in swapin_nr_pages()
    622  max_pages,  in swapin_nr_pages()
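swapin_nr_pages() caps swap readahead at the cluster size, 2^page_cluster pages. A small illustrative sketch of just that cap (the real heuristic also scales the result with recent hit/miss history, which is omitted here):

```c
/* Userspace sketch of the swap-readahead cap; illustrative only. */
#include <stdio.h>

static unsigned int swapin_cap(unsigned int pages, unsigned int page_cluster)
{
    unsigned int max_pages = 1u << page_cluster; /* cluster size in pages */

    if (max_pages <= 1)
        return 1;           /* readahead effectively disabled */
    if (pages > max_pages)
        pages = max_pages;  /* never read ahead past the cluster size */
    return pages;
}

int main(void)
{
    printf("%u\n", swapin_cap(13, 3)); /* 8-page cluster -> prints 8 */
    return 0;
}
```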
|
swap_cgroup.c
    167  int swap_cgroup_swapon(int type, unsigned long max_pages)  in swap_cgroup_swapon() (argument)
    174  length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);  in swap_cgroup_swapon()
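swap_cgroup_swapon() sizes its record table as the ceiling of max_pages divided by the number of records that fit in one page. A sketch of that arithmetic, with DIV_ROUND_UP written out locally and example values (SC_PER_PAGE's real value depends on the record size):

```c
/* Ceiling-division sizing, as used for the swap_cgroup table; example values. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long max_pages   = 1000; /* swap device size in pages (example) */
    unsigned long sc_per_page = 2048; /* records per backing page (example) */

    /* number of backing pages needed for one record per swap page */
    printf("%lu\n", DIV_ROUND_UP(max_pages, sc_per_page)); /* prints 1 */
    return 0;
}
```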
|
/Linux-v5.10/include/drm/ttm/

ttm_page_alloc.h
    37  int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
    81  int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
    99  unsigned max_pages)  in ttm_dma_page_alloc_init() (argument)
|
/Linux-v5.10/drivers/gpu/drm/r128/

ati_pcigart.c
    68  int max_pages;  in drm_ati_pcigart_cleanup() (local)
    78  max_pages = (gart_info->table_size / sizeof(u32));  in drm_ati_pcigart_cleanup()
    79  pages = (entry->pages <= max_pages)  in drm_ati_pcigart_cleanup()
    80  ? entry->pages : max_pages;  in drm_ati_pcigart_cleanup()
|
/Linux-v5.10/include/linux/

swap_cgroup.h
    14  extern int swap_cgroup_swapon(int type, unsigned long max_pages);
    33  swap_cgroup_swapon(int type, unsigned long max_pages)  in swap_cgroup_swapon() (argument)
|
pagevec.h
    48  xa_mark_t tag, unsigned max_pages);
|
/Linux-v5.10/fs/nfs/

pnfs_dev.c
    103  int max_pages;  in nfs4_get_device_info() (local)
    114  max_pages = nfs_page_array_len(0, max_resp_sz);  in nfs4_get_device_info()
    116  __func__, server, max_resp_sz, max_pages);  in nfs4_get_device_info()
    122  pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);  in nfs4_get_device_info()
    126  for (i = 0; i < max_pages; i++) {  in nfs4_get_device_info()
    155  for (i = 0; i < max_pages; i++)  in nfs4_get_device_info()
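nfs4_get_device_info() sizes a page-pointer array for the largest possible reply, allocates each page, and later frees them over the same max_pages bound. A hypothetical userspace analogue of that allocate/tear-down pattern (PAGE_SZ and both helper names are invented for the sketch):

```c
/* Userspace analogue of the bounded page-array pattern; names are made up. */
#include <stdlib.h>

#define PAGE_SZ 4096

static void **alloc_page_array(size_t max_pages)
{
    void **pages = calloc(max_pages, sizeof(*pages));
    size_t i;

    if (!pages)
        return NULL;
    for (i = 0; i < max_pages; i++) {
        pages[i] = malloc(PAGE_SZ);
        if (!pages[i])
            goto err;
    }
    return pages;
err:
    while (i--)                 /* unwind the pages allocated so far */
        free(pages[i]);
    free(pages);
    return NULL;
}

static void free_page_array(void **pages, size_t max_pages)
{
    for (size_t i = 0; i < max_pages; i++)
        free(pages[i]);
    free(pages);
}

int main(void)
{
    void **pages = alloc_page_array(16);

    if (pages)
        free_page_array(pages, 16);
    return 0;
}
```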
|
/Linux-v5.10/arch/x86/xen/

setup.c
    556  unsigned long max_pages, limit;  in xen_get_max_pages() (local)
    561  max_pages = limit;  in xen_get_max_pages()
    575  max_pages = ret;  in xen_get_max_pages()
    578  return min(max_pages, limit);  in xen_get_max_pages()
    735  unsigned long max_pages;  in xen_memory_setup() (local)
    784  max_pages = xen_get_max_pages();  in xen_memory_setup()
    787  max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);  in xen_memory_setup()
    789  if (max_pages > max_pfn)  in xen_memory_setup()
    790  extra_pages += max_pages - max_pfn;  in xen_memory_setup()
    807  extra_pages, max_pages - max_pfn);  in xen_memory_setup()
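xen_memory_setup() clamps the hypervisor-reported maximum to a limit and treats any headroom above the current highest PFN as extra pages that may be populated later. A sketch of that arithmetic with made-up example values:

```c
/* Illustrative arithmetic only; all values are examples, not Xen defaults. */
#include <stdio.h>

int main(void)
{
    unsigned long limit       = 1UL << 20; /* hard upper bound (illustrative) */
    unsigned long max_pages   = 900000;    /* as reported by the hypervisor */
    unsigned long max_pfn     = 800000;    /* highest PFN currently in the map */
    unsigned long extra_pages = 0;

    if (max_pages > limit)
        max_pages = limit;                  /* never exceed the hard bound */
    if (max_pages > max_pfn)
        extra_pages += max_pages - max_pfn; /* headroom becomes extra pages */

    printf("extra_pages = %lu\n", extra_pages); /* prints 100000 */
    return 0;
}
```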
|
/Linux-v5.10/fs/cifs/

misc.c
    842  unsigned int max_pages = iov_iter_npages(iter, INT_MAX);  in setup_aio_ctx_iter() (local)
    853  if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)  in setup_aio_ctx_iter()
    854  bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);  in setup_aio_ctx_iter()
    857  bv = vmalloc(array_size(max_pages, sizeof(*bv)));  in setup_aio_ctx_iter()
    862  if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)  in setup_aio_ctx_iter()
    863  pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);  in setup_aio_ctx_iter()
    866  pages = vmalloc(array_size(max_pages, sizeof(*pages)));  in setup_aio_ctx_iter()
    875  while (count && npages < max_pages) {  in setup_aio_ctx_iter()
    876  rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);  in setup_aio_ctx_iter()
    893  if (npages + cur_npages > max_pages) {  in setup_aio_ctx_iter()
    [all …]
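setup_aio_ctx_iter() picks its allocator by size: bookkeeping arrays that fit under CIFS_AIO_KMALLOC_LIMIT come from kmalloc_array(), larger ones fall back to vmalloc(). A userspace sketch of that decision, with an overflow guard in the spirit of array_size(); the threshold below is a stand-in, not the real CIFS constant:

```c
/* Sketch of the size-based allocator choice; threshold is illustrative. */
#include <stdio.h>
#include <stdint.h>

#define KMALLOC_LIMIT (32 * 1024)   /* stand-in for CIFS_AIO_KMALLOC_LIMIT */

static const char *pick_allocator(size_t max_pages, size_t elem_size)
{
    size_t bytes;

    /* refuse obviously overflowing requests, as array_size() would */
    if (elem_size && max_pages > SIZE_MAX / elem_size)
        return "reject (overflow)";
    bytes = max_pages * elem_size;
    return bytes <= KMALLOC_LIMIT ? "kmalloc_array" : "vmalloc";
}

int main(void)
{
    printf("%s\n", pick_allocator(64,     sizeof(void *))); /* kmalloc_array */
    printf("%s\n", pick_allocator(100000, sizeof(void *))); /* vmalloc */
    return 0;
}
```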
|
/Linux-v5.10/fs/fuse/

file.c
    953  unsigned int i, max_pages, nr_pages = 0;  in fuse_readahead() (local)
    958  max_pages = min_t(unsigned int, fc->max_pages,  in fuse_readahead()
    966  if (nr_pages > max_pages)  in fuse_readahead()
    967  nr_pages = max_pages;  in fuse_readahead()
    1129  unsigned int max_pages)  in fuse_fill_write_pages() (argument)
    1186  ap->num_pages < max_pages && offset == 0);  in fuse_fill_write_pages()
    1192  unsigned int max_pages)  in fuse_wr_pages() (argument)
    1197  max_pages);  in fuse_wr_pages()
    1218  fc->max_pages);  in fuse_perform_write()
    1354  unsigned int max_pages)  in fuse_get_user_pages() (argument)
    [all …]
|
/Linux-v5.10/fs/cramfs/

inode.c
    359  unsigned int pages, max_pages, offset;  in cramfs_physmem_mmap() (local)
    378  max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;  in cramfs_physmem_mmap()
    380  if (pgoff >= max_pages)  in cramfs_physmem_mmap()
    382  pages = min(vma_pages(vma), max_pages - pgoff);  in cramfs_physmem_mmap()
    394  if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {  in cramfs_physmem_mmap()
    461  unsigned int pages, block_pages, max_pages, offset;  in cramfs_physmem_get_unmapped_area() (local)
    464  max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;  in cramfs_physmem_get_unmapped_area()
    465  if (pgoff >= max_pages || pages > max_pages - pgoff)  in cramfs_physmem_get_unmapped_area()
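cramfs_physmem_mmap() rounds the file size up to whole pages, rejects mappings that start past the end, and trims in-range mappings so they never extend beyond the last page. A sketch of those bounds checks, assuming the usual 4 KiB page size; the helper name is invented for illustration:

```c
/* Bounds-check sketch; pages_to_map() is a made-up name, 4 KiB pages assumed. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static long pages_to_map(unsigned long i_size, unsigned long pgoff,
                         unsigned long vma_pages)
{
    /* file size rounded up to whole pages */
    unsigned long max_pages = (i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

    if (pgoff >= max_pages)
        return -1;                       /* mapping starts past EOF */
    if (vma_pages > max_pages - pgoff)
        vma_pages = max_pages - pgoff;   /* trim to the file's last page */
    return (long)vma_pages;
}

int main(void)
{
    /* 3-page file, mapping from page 1, 8 pages requested -> 2 pages mapped */
    printf("%ld\n", pages_to_map(10000, 1, 8)); /* prints 2 */
    return 0;
}
```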
|
/Linux-v5.10/net/rds/

ib_rdma.c
    184  iinfo->rdma_mr_size = pool_1m->max_pages;  in rds_ib_get_mr_info()
    194  iinfo6->rdma_mr_size = pool_1m->max_pages;  in rds6_ib_get_mr_info()
    663  pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;  in rds_ib_create_mr_pool()
    667  pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;  in rds_ib_create_mr_pool()
    671  pool->max_free_pinned = pool->max_items * pool->max_pages / 4;  in rds_ib_create_mr_pool()
|
ib_mr.h
    107  unsigned int max_pages;  (member)
|
ib_frmr.c
    79  pool->max_pages);  in rds_ib_alloc_frmr()
    243  if (frmr->dma_npages > ibmr->pool->max_pages) {  in rds_ib_map_frmr()
|
/Linux-v5.10/fs/nilfs2/

segbuf.c
    25  int max_pages;  (member)
    365  wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);  in nilfs_segbuf_submit_bio()
    406  wi->max_pages = BIO_MAX_PAGES;  in nilfs_segbuf_prepare_write()
    407  wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);  in nilfs_segbuf_prepare_write()
|
/Linux-v5.10/drivers/md/bcache/

bcache.h
    763  unsigned int n, max_pages;  in meta_bucket_pages() (local)
    765  max_pages = min_t(unsigned int,  in meta_bucket_pages()
    770  if (n > max_pages)  in meta_bucket_pages()
    771  n = max_pages;  in meta_bucket_pages()
|
/Linux-v5.10/drivers/infiniband/hw/mlx4/

mr.c
    552  int max_pages)  in mlx4_alloc_priv_pages() (argument)
    561  mr->page_map_size = roundup(max_pages * sizeof(u64),  in mlx4_alloc_priv_pages()
    669  mr->max_pages = max_num_sg;  in mlx4_ib_alloc_mr()
    693  if (unlikely(mr->npages == mr->max_pages))  in mlx4_set_page()
|
/Linux-v5.10/include/drm/

drm_prime.h
    108  dma_addr_t *addrs, int max_pages);
|
/Linux-v5.10/drivers/infiniband/sw/rxe/

rxe_mr.c
    206  int max_pages, struct rxe_mem *mem)  in rxe_mem_init_fast() (argument)
    215  err = rxe_mem_alloc(mem, max_pages);  in rxe_mem_init_fast()
    220  mem->max_buf = max_pages;  in rxe_mem_init_fast()
|
/Linux-v5.10/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_mr.c
    252  mr->max_pages = max_num_sg;  in pvrdma_alloc_mr()
    306  if (mr->npages == mr->max_pages)  in pvrdma_set_page()
|
/Linux-v5.10/drivers/infiniband/core/

rw.c
    62  u32 max_pages;  in rdma_rw_fr_page_list_len() (local)
    65  max_pages = dev->attrs.max_pi_fast_reg_page_list_len;  in rdma_rw_fr_page_list_len()
    67  max_pages = dev->attrs.max_fast_reg_page_list_len;  in rdma_rw_fr_page_list_len()
    70  return min_t(u32, max_pages, 256);  in rdma_rw_fr_page_list_len()
|
/Linux-v5.10/drivers/gpu/drm/i915/selftests/

i915_vma.c
    504  const unsigned int max_pages = 64;  in igt_vma_rotate_remap() (local)
    511  obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE);  in igt_vma_rotate_remap()
    523  GEM_BUG_ON(max_offset > max_pages);  in igt_vma_rotate_remap()
    524  max_offset = max_pages - max_offset;  in igt_vma_rotate_remap()
|
intel_memory_region.c
    53  unsigned long max_pages;  in igt_mock_fill() (local)
    59  max_pages = div64_u64(total, page_size);  in igt_mock_fill()
    62  for_each_prime_number_from(page_num, 1, max_pages) {  in igt_mock_fill()
|
/Linux-v5.10/fs/ceph/

addr.c
    903  unsigned i, pvec_pages, max_pages, locked_pages = 0;  in ceph_writepages_start() (local)
    910  max_pages = wsize >> PAGE_SHIFT;  in ceph_writepages_start()
    918  for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {  in ceph_writepages_start()
    1001  max_pages = calc_pages_for(0, (u64)len);  in ceph_writepages_start()
    1002  pages = kmalloc_array(max_pages,  in ceph_writepages_start()
    1060  locked_pages < max_pages) {  in ceph_writepages_start()
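ceph_writepages_start() derives its per-write page budget from the RPC write size (wsize >> PAGE_SHIFT) and uses calc_pages_for() to turn a byte length back into a page count before allocating the pages array. A sketch of both computations; calc_pages_for() is re-implemented locally here and may differ in detail from the Ceph helper:

```c
/* Illustrative userspace sketch; wsize value and helper body are assumptions. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* pages needed to cover 'len' bytes starting at byte offset 'off' */
static unsigned long calc_pages_for(unsigned long off, unsigned long len)
{
    return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) - (off >> PAGE_SHIFT);
}

int main(void)
{
    unsigned long wsize     = 64UL * 1024 * 1024;  /* example 64 MiB write size */
    unsigned long max_pages = wsize >> PAGE_SHIFT; /* per-write page budget */

    printf("%lu %lu\n", max_pages, calc_pages_for(0, 5 * PAGE_SIZE + 1));
    /* prints "16384 6" */
    return 0;
}
```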