Searched refs:max_pages (Results 1 – 25 of 48) sorted by relevance

/Linux-v5.4/mm/
readahead.c
227 unsigned long max_pages; in force_page_cache_readahead() local
236 max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages); in force_page_cache_readahead()
237 nr_to_read = min(nr_to_read, max_pages); in force_page_cache_readahead()
388 unsigned long max_pages = ra->ra_pages; in ondemand_readahead() local
396 if (req_size > max_pages && bdi->io_pages > max_pages) in ondemand_readahead()
397 max_pages = min(req_size, bdi->io_pages); in ondemand_readahead()
412 ra->size = get_next_ra_size(ra, max_pages); in ondemand_readahead()
427 start = page_cache_next_miss(mapping, offset + 1, max_pages); in ondemand_readahead()
430 if (!start || start - offset > max_pages) in ondemand_readahead()
436 ra->size = get_next_ra_size(ra, max_pages); in ondemand_readahead()
[all …]
swap_state.c
464 int max_pages, in __swapin_nr_pages() argument
490 if (pages > max_pages) in __swapin_nr_pages()
491 pages = max_pages; in __swapin_nr_pages()
504 unsigned int hits, pages, max_pages; in swapin_nr_pages() local
507 max_pages = 1 << READ_ONCE(page_cluster); in swapin_nr_pages()
508 if (max_pages <= 1) in swapin_nr_pages()
512 pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages, in swapin_nr_pages()
swap_cgroup.c
167 int swap_cgroup_swapon(int type, unsigned long max_pages) in swap_cgroup_swapon() argument
177 length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); in swap_cgroup_swapon()
/Linux-v5.4/include/drm/ttm/
ttm_page_alloc.h
37 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
81 int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
99 unsigned max_pages) in ttm_dma_page_alloc_init() argument
/Linux-v5.4/drivers/gpu/drm/
ati_pcigart.c
66 int max_pages; in drm_ati_pcigart_cleanup() local
76 max_pages = (gart_info->table_size / sizeof(u32)); in drm_ati_pcigart_cleanup()
77 pages = (entry->pages <= max_pages) in drm_ati_pcigart_cleanup()
78 ? entry->pages : max_pages; in drm_ati_pcigart_cleanup()
/Linux-v5.4/include/linux/
swap_cgroup.h
14 extern int swap_cgroup_swapon(int type, unsigned long max_pages);
33 swap_cgroup_swapon(int type, unsigned long max_pages) in swap_cgroup_swapon() argument
pagevec.h
48 xa_mark_t tag, unsigned max_pages);
/Linux-v5.4/fs/nfs/
pnfs_dev.c
103 int max_pages; in nfs4_get_device_info() local
114 max_pages = nfs_page_array_len(0, max_resp_sz); in nfs4_get_device_info()
116 __func__, server, max_resp_sz, max_pages); in nfs4_get_device_info()
122 pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); in nfs4_get_device_info()
126 for (i = 0; i < max_pages; i++) { in nfs4_get_device_info()
155 for (i = 0; i < max_pages; i++) in nfs4_get_device_info()
/Linux-v5.4/fs/cifs/
misc.c
849 unsigned int max_pages = iov_iter_npages(iter, INT_MAX); in setup_aio_ctx_iter() local
860 if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT) in setup_aio_ctx_iter()
861 bv = kmalloc_array(max_pages, sizeof(struct bio_vec), in setup_aio_ctx_iter()
865 bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec))); in setup_aio_ctx_iter()
870 if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT) in setup_aio_ctx_iter()
871 pages = kmalloc_array(max_pages, sizeof(struct page *), in setup_aio_ctx_iter()
875 pages = vmalloc(array_size(max_pages, sizeof(struct page *))); in setup_aio_ctx_iter()
884 while (count && npages < max_pages) { in setup_aio_ctx_iter()
885 rc = iov_iter_get_pages(iter, pages, count, max_pages, &start); in setup_aio_ctx_iter()
902 if (npages + cur_npages > max_pages) { in setup_aio_ctx_iter()
[all …]
/Linux-v5.4/fs/fuse/
file.c
907 unsigned int max_pages; member
921 (ap->num_pages == fc->max_pages || in fuse_readpages_fill()
924 data->max_pages = min_t(unsigned int, data->nr_pages, in fuse_readpages_fill()
925 fc->max_pages); in fuse_readpages_fill()
927 data->ia = ia = fuse_io_alloc(NULL, data->max_pages); in fuse_readpages_fill()
935 if (WARN_ON(ap->num_pages >= data->max_pages)) { in fuse_readpages_fill()
964 data.max_pages = min_t(unsigned int, nr_pages, fc->max_pages); in fuse_readpages()
966 data.ia = fuse_io_alloc(NULL, data.max_pages); in fuse_readpages()
1124 unsigned int max_pages) in fuse_fill_write_pages() argument
1181 ap->num_pages < max_pages && offset == 0); in fuse_fill_write_pages()
[all …]
/Linux-v5.4/arch/x86/xen/
setup.c
559 unsigned long max_pages, limit; in xen_get_max_pages() local
564 max_pages = limit; in xen_get_max_pages()
578 max_pages = ret; in xen_get_max_pages()
581 return min(max_pages, limit); in xen_get_max_pages()
747 unsigned long max_pages; in xen_memory_setup() local
796 max_pages = xen_get_max_pages(); in xen_memory_setup()
799 max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages); in xen_memory_setup()
801 if (max_pages > max_pfn) in xen_memory_setup()
802 extra_pages += max_pages - max_pfn; in xen_memory_setup()
819 extra_pages, max_pages - max_pfn); in xen_memory_setup()
/Linux-v5.4/fs/cramfs/
inode.c
359 unsigned int pages, max_pages, offset; in cramfs_physmem_mmap() local
378 max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; in cramfs_physmem_mmap()
380 if (pgoff >= max_pages) in cramfs_physmem_mmap()
382 pages = min(vma_pages(vma), max_pages - pgoff); in cramfs_physmem_mmap()
394 if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) { in cramfs_physmem_mmap()
461 unsigned int pages, block_pages, max_pages, offset; in cramfs_physmem_get_unmapped_area() local
464 max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; in cramfs_physmem_get_unmapped_area()
465 if (pgoff >= max_pages || pages > max_pages - pgoff) in cramfs_physmem_get_unmapped_area()
/Linux-v5.4/drivers/infiniband/core/
fmr_pool.c
87 int max_pages; member
244 pool->max_pages = params->max_pages_per_fmr; in ib_create_fmr_pool()
265 .max_pages = params->max_pages_per_fmr, in ib_create_fmr_pool()
402 if (list_len < 1 || list_len > pool->max_pages) in ib_fmr_pool_map_phys()
rw.c
57 u32 max_pages; in rdma_rw_fr_page_list_len() local
60 max_pages = dev->attrs.max_pi_fast_reg_page_list_len; in rdma_rw_fr_page_list_len()
62 max_pages = dev->attrs.max_fast_reg_page_list_len; in rdma_rw_fr_page_list_len()
65 return min_t(u32, max_pages, 256); in rdma_rw_fr_page_list_len()
/Linux-v5.4/net/rds/
ib_rdma.c
177 iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages; in rds_ib_get_mr_info()
187 iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages; in rds6_ib_get_mr_info()
599 pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1; in rds_ib_create_mr_pool()
603 pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1; in rds_ib_create_mr_pool()
607 pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4; in rds_ib_create_mr_pool()
ib_frmr.c
79 pool->fmr_attr.max_pages); in rds_ib_alloc_frmr()
243 if (frmr->dma_npages > ibmr->pool->fmr_attr.max_pages) { in rds_ib_map_frmr()
/Linux-v5.4/fs/nilfs2/
segbuf.c
25 int max_pages; member
365 wi->nr_vecs = min(wi->max_pages, wi->rest_blocks); in nilfs_segbuf_submit_bio()
406 wi->max_pages = BIO_MAX_PAGES; in nilfs_segbuf_prepare_write()
407 wi->nr_vecs = min(wi->max_pages, wi->rest_blocks); in nilfs_segbuf_prepare_write()
/Linux-v5.4/drivers/infiniband/hw/mlx4/
mr.c
552 int max_pages) in mlx4_alloc_priv_pages() argument
561 mr->page_map_size = roundup(max_pages * sizeof(u64), in mlx4_alloc_priv_pages()
681 mr->max_pages = max_num_sg; in mlx4_ib_alloc_mr()
713 fmr_attr->max_pages, fmr_attr->max_maps, in mlx4_ib_fmr_alloc()
798 if (unlikely(mr->npages == mr->max_pages)) in mlx4_set_page()
/Linux-v5.4/include/drm/
drm_prime.h
107 dma_addr_t *addrs, int max_pages);
/Linux-v5.4/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
252 mr->max_pages = max_num_sg; in pvrdma_alloc_mr()
305 if (mr->npages == mr->max_pages) in pvrdma_set_page()
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/
mr.c
974 if (npages > fmr->max_pages) in mlx4_check_fmr()
1043 int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, in mlx4_fmr_alloc() argument
1056 if (max_pages * sizeof(*fmr->mtts) > PAGE_SIZE) in mlx4_fmr_alloc()
1060 fmr->max_pages = max_pages; in mlx4_fmr_alloc()
1064 err = mlx4_mr_alloc(dev, pd, 0, 0, access, max_pages, in mlx4_fmr_alloc()
/Linux-v5.4/drivers/infiniband/sw/rxe/
rxe_mr.c
239 int max_pages, struct rxe_mem *mem) in rxe_mem_init_fast() argument
248 err = rxe_mem_alloc(mem, max_pages); in rxe_mem_init_fast()
253 mem->max_buf = max_pages; in rxe_mem_init_fast()
/Linux-v5.4/drivers/gpu/drm/i915/selftests/
i915_vma.c
501 const unsigned int max_pages = 64; in igt_vma_rotate_remap() local
508 obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE); in igt_vma_rotate_remap()
520 GEM_BUG_ON(max_offset > max_pages); in igt_vma_rotate_remap()
521 max_offset = max_pages - max_offset; in igt_vma_rotate_remap()
i915_gem_gtt.c
349 const unsigned long max_pages = in fill_hole() local
351 const unsigned long max_step = max(int_sqrt(max_pages), 2UL); in fill_hole()
364 for (npages = 1; npages <= max_pages; npages *= prime) { in fill_hole()
560 const unsigned long max_pages = in walk_hole() local
571 for_each_prime_number_from(size, 1, max_pages) { in walk_hole()
/Linux-v5.4/fs/ceph/
addr.c
860 unsigned i, pvec_pages, max_pages, locked_pages = 0; in ceph_writepages_start() local
867 max_pages = wsize >> PAGE_SHIFT; in ceph_writepages_start()
872 max_pages - locked_pages); in ceph_writepages_start()
876 for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) { in ceph_writepages_start()
959 max_pages = calc_pages_for(0, (u64)len); in ceph_writepages_start()
960 pages = kmalloc_array(max_pages, in ceph_writepages_start()
1018 locked_pages < max_pages) { in ceph_writepages_start()
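The common idiom across these hits is clamping a caller's requested page count to a per-context max_pages limit before allocating or submitting I/O (e.g. nr_to_read = min(nr_to_read, max_pages) in mm/readahead.c). A minimal, self-contained sketch of that idiom is below; the names demo_ctx and demo_clamp_request are illustrative only and do not exist anywhere in the kernel tree.

#include <stdio.h>

/* Hypothetical per-device/per-mount context carrying a page-count limit. */
struct demo_ctx {
	unsigned long max_pages;
};

/* Clamp a requested page count to the context limit, mirroring the
 * min(nr_to_read, max_pages)-style checks seen in the results above. */
static unsigned long demo_clamp_request(const struct demo_ctx *ctx,
					unsigned long nr_requested)
{
	return nr_requested < ctx->max_pages ? nr_requested : ctx->max_pages;
}

int main(void)
{
	struct demo_ctx ctx = { .max_pages = 32 };

	printf("%lu\n", demo_clamp_request(&ctx, 128));	/* prints 32 */
	printf("%lu\n", demo_clamp_request(&ctx, 8));	/* prints 8 */
	return 0;
}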
