Searched for refs:npages (results 1 – 25 of 272), sorted by relevance

/Linux-v5.4/drivers/gpu/drm/i915/selftests/
scatterlist.c
53 unsigned int npages = npages_fn(n, pt->st.nents, rnd); in expect_pfn_sg() local
61 if (sg->length != npages * PAGE_SIZE) { in expect_pfn_sg()
63 __func__, who, npages * PAGE_SIZE, sg->length); in expect_pfn_sg()
70 pfn += npages; in expect_pfn_sg()
209 unsigned long npages) in page_contiguous() argument
211 return first + npages == last; in page_contiguous()
238 unsigned long npages = npages_fn(n, count, rnd); in alloc_table() local
242 pfn_to_page(pfn + npages), in alloc_table()
243 npages)) { in alloc_table()
250 sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0); in alloc_table()
[all …]
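
The i915 selftest hits above check that each scatterlist entry built by alloc_table() spans exactly npages pages and that the page frame numbers run contiguously. A minimal standalone sketch of the same arithmetic (PAGE_SHIFT is fixed at 12 here purely for illustration; the real value comes from the kernel headers):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Mirrors page_contiguous() above: a run of npages starting at
     * pfn `first` is contiguous iff it ends exactly at `last`. */
    static bool page_contiguous(unsigned long first, unsigned long last,
                                unsigned long npages)
    {
            return first + npages == last;
    }

    int main(void)
    {
            unsigned long pfn = 0x1000, npages = 4;
            unsigned long expected_len = npages * PAGE_SIZE; /* sg->length check */

            printf("len=%lu contiguous=%d\n", expected_len,
                   page_contiguous(pfn, pfn + npages, npages));
            return 0;
    }
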
/Linux-v5.4/arch/sparc/kernel/
iommu.c
158 unsigned long npages) in alloc_npages() argument
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages()
204 int npages, nid; in dma_4u_alloc_coherent() local
233 npages = size >> IO_PAGE_SHIFT; in dma_4u_alloc_coherent()
235 while (npages--) { in dma_4u_alloc_coherent()
251 unsigned long order, npages; in dma_4u_free_coherent() local
253 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; in dma_4u_free_coherent()
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4u_free_coherent()
271 unsigned long flags, npages, oaddr; in dma_4u_map_page() local
283 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); in dma_4u_map_page()
[all …]
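
These sparc IOMMU hits all derive npages from a byte size: dma_4u_free_coherent() rounds the size up to whole IO pages, while dma_4u_map_page() must also cover an unaligned start address. A runnable sketch of both conversions (IO_PAGE_SHIFT of 13 matches sparc64's 8 KiB IOMMU pages; the macros are redefined locally for illustration):

    #include <stdio.h>

    #define IO_PAGE_SHIFT 13
    #define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

    int main(void)
    {
            unsigned long size = 0x5000, oaddr = 0x2345, sz = 0x3000;

            /* dma_4u_free_coherent(): whole-page count for a buffer. */
            unsigned long npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;

            /* dma_4u_map_page(): span from the rounded-down start to
             * the rounded-up end of an arbitrary virtual range. */
            unsigned long map_npages =
                    (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK))
                            >> IO_PAGE_SHIFT;

            printf("free: %lu pages, map: %lu pages\n", npages, map_npages);
            return 0;
    }
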
pci_sun4v.c
59 unsigned long npages; /* Number of pages in list. */ member
73 p->npages = 0; in iommu_batch_start()
90 unsigned long npages = p->npages; in iommu_batch_flush() local
99 while (npages != 0) { in iommu_batch_flush()
103 npages, in iommu_batch_flush()
111 npages, prot, __pa(pglist), in iommu_batch_flush()
116 index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), in iommu_batch_flush()
134 npages -= num; in iommu_batch_flush()
139 p->npages = 0; in iommu_batch_flush()
148 if (p->entry + p->npages == entry) in iommu_batch_new_entry()
[all …]
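
pci_sun4v batches up to npages entries in a pglist and flushes them with a hypervisor call that may map fewer entries than requested, so iommu_batch_flush() loops, advancing by the returned count. A runnable simulation of that partial-progress loop (hv_map() here is a made-up stand-in for the sun4v mapping hypercall):

    #include <stdio.h>

    /* Hypothetical stand-in: pretend the hypervisor maps at most
     * 8 entries per call; the real limit is the hypervisor's choice. */
    static long hv_map(unsigned long entry, unsigned long npages)
    {
            return npages > 8 ? 8 : (long)npages;
    }

    int main(void)
    {
            unsigned long entry = 0, npages = 21;

            /* Same shape as iommu_batch_flush(): consume what the
             * hypervisor accepted and retry until npages reaches 0. */
            while (npages != 0) {
                    long num = hv_map(entry, npages);
                    if (num < 0)
                            return 1;       /* mapping error: bail out */
                    entry += num;
                    npages -= num;
            }
            printf("batch fully mapped\n");
            return 0;
    }
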
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
52 s32 npages; member
137 s32 *npages, int boot) in mlx5_cmd_query_pages() argument
153 *npages = MLX5_GET(query_pages_out, out, num_pages); in mlx5_cmd_query_pages()
275 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in give_pages() argument
285 inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); in give_pages()
293 for (i = 0; i < npages; i++) { in give_pages()
310 MLX5_SET(manage_pages_in, in, input_num_entries, npages); in give_pages()
316 func_id, npages, err); in give_pages()
320 dev->priv.fw_pages += npages; in give_pages()
322 dev->priv.vfs_pages += npages; in give_pages()
[all …]
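
In mlx5, give_pages() sizes the MANAGE_PAGES command buffer dynamically, one 64-bit physical address per page on top of a fixed header, then bumps the driver's fw_pages accounting on success. A runnable sketch of just the sizing arithmetic (the header size below is illustrative, not the real mlx5 layout):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            size_t hdr = 0x30;              /* fixed part, illustrative */
            int npages = 512;

            /* give_pages(): inlen grows by one PAS entry per page. */
            size_t inlen = hdr + (size_t)npages * sizeof(uint64_t);

            printf("inbox %zu bytes for %d pages\n", inlen, npages);
            return 0;
    }
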
/Linux-v5.4/drivers/infiniband/hw/hfi1/
user_pages.c
72 u32 nlocked, u32 npages) in hfi1_can_pin_pages() argument
97 if (pinned + npages >= ulimit && !can_lock) in hfi1_can_pin_pages()
100 return ((nlocked + npages) <= size) || can_lock; in hfi1_can_pin_pages()
103 int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages, in hfi1_acquire_user_pages() argument
109 ret = get_user_pages_fast(vaddr, npages, gup_flags, pages); in hfi1_acquire_user_pages()
119 size_t npages, bool dirty) in hfi1_release_user_pages() argument
121 put_user_pages_dirty_lock(p, npages, dirty); in hfi1_release_user_pages()
124 atomic64_sub(npages, &mm->pinned_vm); in hfi1_release_user_pages()
user_exp_rcv.c
57 static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
61 u16 pageidx, unsigned int npages);
196 unsigned int npages, in unpin_rcv_pages() argument
209 hfi1_release_user_pages(fd->mm, pages, npages, mapped); in unpin_rcv_pages()
210 fd->tid_n_pinned -= npages; in unpin_rcv_pages()
219 unsigned int npages; in pin_rcv_pages() local
225 npages = num_user_pages(vaddr, tidbuf->length); in pin_rcv_pages()
226 if (!npages) in pin_rcv_pages()
229 if (npages > fd->uctxt->expected_count) { in pin_rcv_pages()
236 npages * PAGE_SIZE)) { in pin_rcv_pages()
[all …]
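
hfi1_can_pin_pages() gates a request to pin npages more pages on two budgets at once, the process's pinned-memory ulimit and the TID cache size, either of which the lock capability can override. A runnable restatement of that predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the checks quoted above from hfi1_can_pin_pages(). */
    static bool can_pin(unsigned long pinned, unsigned long nlocked,
                        unsigned long npages, unsigned long ulimit,
                        unsigned long size, bool can_lock)
    {
            if (pinned + npages >= ulimit && !can_lock)
                    return false;
            return (nlocked + npages) <= size || can_lock;
    }

    int main(void)
    {
            printf("%d\n", can_pin(100, 10, 5, 200, 32, false)); /* 1 */
            printf("%d\n", can_pin(199, 10, 5, 200, 32, false)); /* 0 */
            return 0;
    }
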
/Linux-v5.4/drivers/gpu/drm/ttm/
ttm_page_alloc.c
74 unsigned npages; member
247 static void ttm_pages_put(struct page *pages[], unsigned npages, in ttm_pages_put() argument
253 if (ttm_set_pages_array_wb(pages, npages)) in ttm_pages_put()
254 pr_err("Failed to set %d pages to wb!\n", npages); in ttm_pages_put()
257 for (i = 0; i < npages; ++i) { in ttm_pages_put()
269 pool->npages -= freed_pages; in ttm_pool_update_free_locked()
423 count += (pool->npages << pool->order); in ttm_pool_shrink_count()
495 unsigned npages = 1 << order; in ttm_alloc_new_pages() local
537 for (j = 0; j < npages; ++j) { in ttm_alloc_new_pages()
591 && count > pool->npages) { in ttm_page_pool_fill_locked()
[all …]
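
The TTM pool stores compound allocations, so its npages counter is in units of 1 << order pages; ttm_pool_shrink_count() converts back to base pages when reporting to the shrinker. A short sketch of that accounting:

    #include <stdio.h>

    int main(void)
    {
            unsigned int order = 2;                 /* 4-page chunks */
            unsigned int pool_npages = 12;          /* chunks in pool */
            unsigned int chunk_pages = 1u << order; /* ttm_alloc_new_pages() */

            unsigned long count = (unsigned long)pool_npages << order;
            printf("%u chunks of %u pages = %lu base pages\n",
                   pool_npages, chunk_pages, count);
            return 0;
    }
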
/Linux-v5.4/drivers/gpu/drm/i915/gem/selftests/
mock_dmabuf.c
21 err = sg_alloc_table(st, mock->npages, GFP_KERNEL); in mock_map_dma_buf()
26 for (i = 0; i < mock->npages; i++) { in mock_map_dma_buf()
59 for (i = 0; i < mock->npages; i++) in mock_dmabuf_release()
69 return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL); in mock_dmabuf_vmap()
76 vm_unmap_ram(vaddr, mock->npages); in mock_dmabuf_vunmap()
109 static struct dma_buf *mock_dmabuf(int npages) in mock_dmabuf() argument
116 mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *), in mock_dmabuf()
121 mock->npages = npages; in mock_dmabuf()
122 for (i = 0; i < npages; i++) { in mock_dmabuf()
129 exp_info.size = npages * PAGE_SIZE; in mock_dmabuf()
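
mock_dmabuf() allocates its header and a trailing array of npages page pointers in a single kmalloc(). A userspace analogue of the same flexible-array sizing (in current kernels, struct_size() is the overflow-safe way to write the multiplication):

    #include <stdio.h>
    #include <stdlib.h>

    struct mock_dmabuf {
            int npages;
            void *pages[];          /* one slot per page, trailing */
    };

    int main(void)
    {
            int npages = 16;
            struct mock_dmabuf *mock =
                    malloc(sizeof(*mock) + npages * sizeof(mock->pages[0]));
            if (!mock)
                    return 1;
            mock->npages = npages;
            printf("%zu bytes\n",
                   sizeof(*mock) + npages * sizeof(mock->pages[0]));
            free(mock);
            return 0;
    }
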
/Linux-v5.4/arch/powerpc/kernel/
iommu.c
165 unsigned long npages, in iommu_range_alloc() argument
172 int largealloc = npages > 15; in iommu_range_alloc()
185 if (unlikely(npages == 0)) { in iommu_range_alloc()
246 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
272 end = n + npages; in iommu_range_alloc()
294 void *page, unsigned int npages, in iommu_alloc() argument
303 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
312 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
322 __iommu_free(tbl, ret, npages); in iommu_alloc()
337 unsigned int npages) in iommu_free_check() argument
[all …]
/Linux-v5.4/arch/x86/mm/
cpu_entry_area.c
58 unsigned int npages; in percpu_setup_debug_store() local
65 npages = sizeof(struct debug_store) / PAGE_SIZE; in percpu_setup_debug_store()
67 cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, in percpu_setup_debug_store()
75 npages = sizeof(struct debug_store_buffers) / PAGE_SIZE; in percpu_setup_debug_store()
76 for (; npages; npages--, cea += PAGE_SIZE) in percpu_setup_debug_store()
84 npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
86 estacks->name## _stack, npages, PAGE_KERNEL); \
93 unsigned int npages; in percpu_setup_exception_stacks() local
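
percpu_setup_debug_store() derives npages directly from a structure size, which only works because those structures are whole multiples of PAGE_SIZE by construction, then maps that many pages one cea slot at a time. A runnable sketch of the size-to-pages step (4 KiB pages and the stand-in struct are assumptions for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct debug_store_like {       /* stand-in; the real struct is
                                     * deliberately page-multiple sized */
            unsigned char buf[8 * 4096];
    };

    int main(void)
    {
            unsigned int npages = sizeof(struct debug_store_like) / PAGE_SIZE;

            /* cea_map_percpu_pages()-style walk: one page per step. */
            for (unsigned long cea = 0; npages; npages--, cea += PAGE_SIZE)
                    ;                /* map_page(cea, ...) would go here */

            printf("struct covers %zu bytes\n",
                   sizeof(struct debug_store_like));
            return 0;
    }
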
/Linux-v5.4/drivers/fpga/
dfl-afu-dma-region.c
19 static void put_all_pages(struct page **pages, int npages) in put_all_pages() argument
23 for (i = 0; i < npages; i++) in put_all_pages()
46 int npages = region->length >> PAGE_SHIFT; in afu_dma_pin_pages() local
50 ret = account_locked_vm(current->mm, npages, true); in afu_dma_pin_pages()
54 region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL); in afu_dma_pin_pages()
60 pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE, in afu_dma_pin_pages()
65 } else if (pinned != npages) { in afu_dma_pin_pages()
79 account_locked_vm(current->mm, npages, false); in afu_dma_pin_pages()
94 long npages = region->length >> PAGE_SHIFT; in afu_dma_unpin_pages() local
97 put_all_pages(region->pages, npages); in afu_dma_unpin_pages()
[all …]
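
afu_dma_pin_pages() shows the pinning protocol several of these drivers share: charge the locked-memory accounting first, allocate the pointer array, pin with get_user_pages_fast(), and unwind (including a partial pin) on failure. A condensed kernel-style sketch of that order of operations, reusing the driver's own fields and its put_all_pages() helper; this is a fragment, not runnable outside the kernel:

    /* Sketch only: error paths compressed, fields as in dfl-afu. */
    int npages = region->length >> PAGE_SHIFT;
    int ret, pinned;

    ret = account_locked_vm(current->mm, npages, true);
    if (ret)
            return ret;

    region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
    if (!region->pages) {
            ret = -ENOMEM;
            goto unlock_vm;
    }

    pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
                                 region->pages);
    if (pinned < 0) {
            ret = pinned;
            goto free_pages;
    } else if (pinned != npages) {
            ret = -EFAULT;          /* partial pin: release what we got */
            goto put_pages;
    }
    return 0;

    put_pages:
            put_all_pages(region->pages, pinned);
    free_pages:
            kfree(region->pages);
    unlock_vm:
            account_locked_vm(current->mm, npages, false);
            return ret;
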
/Linux-v5.4/drivers/infiniband/core/
umem.c
75 unsigned long npages, in ib_umem_add_sg_table() argument
91 while (i != npages) { in ib_umem_add_sg_table()
100 for (len = 0; i != npages && in ib_umem_add_sg_table()
200 unsigned long npages; in ib_umem_get() local
247 npages = ib_umem_num_pages(umem); in ib_umem_get()
248 if (npages == 0 || npages > UINT_MAX) { in ib_umem_get()
255 new_pinned = atomic64_add_return(npages, &mm->pinned_vm); in ib_umem_get()
257 atomic64_sub(npages, &mm->pinned_vm); in ib_umem_get()
264 ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); in ib_umem_get()
273 while (npages) { in ib_umem_get()
[all …]
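
ib_umem_get() does its pinned-memory bookkeeping with a lock-free atomic: add npages up front, compare the new total against the rlimit, and subtract again if over. A runnable restatement with C11 atomics standing in for the kernel's atomic64_t (the capability override in the real check is omitted, and the limit value is illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic int64_t pinned_vm;

    /* Same pattern as ib_umem_get(): optimistic add, roll back on
     * failure so concurrent callers always see a consistent total. */
    static bool charge_pinned(int64_t npages, int64_t lock_limit)
    {
            int64_t new_pinned =
                    atomic_fetch_add(&pinned_vm, npages) + npages;
            if (new_pinned > lock_limit) {
                    atomic_fetch_sub(&pinned_vm, npages);
                    return false;
            }
            return true;
    }

    int main(void)
    {
            printf("%d\n", charge_pinned(16, 64));  /* 1: fits */
            printf("%d\n", charge_pinned(64, 64));  /* 0: over */
            return 0;
    }
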
/Linux-v5.4/drivers/gpu/drm/lima/
lima_object.c
17 int i, npages = bo->gem.size >> PAGE_SHIFT; in lima_bo_destroy() local
19 for (i = 0; i < npages; i++) { in lima_bo_destroy()
63 size_t npages; in lima_bo_create() local
70 npages = bo->gem.size >> PAGE_SHIFT; in lima_bo_create()
72 bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL); in lima_bo_create()
81 bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL); in lima_bo_create()
88 sgt, bo->pages, bo->pages_dma_addr, npages); in lima_bo_create()
102 for (i = 0; i < npages; i++) { in lima_bo_create()
/Linux-v5.4/drivers/infiniband/hw/mthca/
mthca_memfree.c
69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
81 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, in mthca_alloc_icm() argument
157 while (npages > 0) { in mthca_alloc_icm()
165 chunk->npages = 0; in mthca_alloc_icm()
170 while (1 << cur_order > npages) in mthca_alloc_icm()
175 &chunk->mem[chunk->npages], in mthca_alloc_icm()
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], in mthca_alloc_icm()
182 ++chunk->npages; in mthca_alloc_icm()
[all …]
mthca_allocator.c
200 int npages, shift; in mthca_buf_alloc() local
207 npages = 1; in mthca_buf_alloc()
219 npages *= 2; in mthca_buf_alloc()
222 dma_list = kmalloc_array(npages, sizeof(*dma_list), in mthca_buf_alloc()
227 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_buf_alloc()
234 dma_list = kmalloc_array(npages, sizeof(*dma_list), in mthca_buf_alloc()
239 buf->page_list = kmalloc_array(npages, in mthca_buf_alloc()
245 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
248 for (i = 0; i < npages; ++i) { in mthca_buf_alloc()
[all …]
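
mthca_alloc_icm() tries the largest allocation order first and steps cur_order down both when the remaining npages no longer justify it and when a high-order allocation fails, so the ICM lands in the biggest physically contiguous chunks available. A runnable sketch of that descent (alloc_chunk() is a made-up stand-in that refuses anything above order 3):

    #include <stdio.h>

    /* Hypothetical allocator: nothing larger than 8 pages succeeds. */
    static int alloc_chunk(int order) { return order <= 3 ? 0 : -1; }

    int main(void)
    {
            int npages = 21, cur_order = 5;

            while (npages > 0) {
                    /* Don't over-allocate past what is still needed. */
                    while ((1 << cur_order) > npages)
                            --cur_order;
                    if (alloc_chunk(cur_order) < 0) {
                            if (--cur_order < 0)
                                    return 1;       /* out of memory */
                            continue;
                    }
                    npages -= 1 << cur_order;
                    printf("chunk of %d pages, %d left\n",
                           1 << cur_order, npages);
            }
            return 0;
    }
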
/Linux-v5.4/arch/x86/kernel/
pci-calgary_64.c
191 unsigned long start_addr, unsigned int npages) in iommu_range_reserve() argument
203 end = index + npages; in iommu_range_reserve()
209 bitmap_set(tbl->it_map, index, npages); in iommu_range_reserve()
216 unsigned int npages) in iommu_range_alloc() argument
225 BUG_ON(npages == 0); in iommu_range_alloc()
230 npages, 0, boundary_size, 0); in iommu_range_alloc()
235 npages, 0, boundary_size, 0); in iommu_range_alloc()
246 tbl->it_hint = offset + npages; in iommu_range_alloc()
255 void *vaddr, unsigned int npages, int direction) in iommu_alloc() argument
260 entry = iommu_range_alloc(dev, tbl, npages); in iommu_alloc()
[all …]
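
The Calgary IOMMU tracks its translation table with a bitmap: iommu_range_reserve() marks [index, index + npages) busy with bitmap_set(), and iommu_range_alloc() searches that map for a free run. A runnable miniature using a plain word-array bitmap (set_bits() is a simplified stand-in for the kernel's bitmap_set()):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))
    static unsigned long it_map[4];     /* small table for illustration */

    /* Simplified bitmap_set(): mark npages entries in use. */
    static void set_bits(unsigned long start, unsigned long npages)
    {
            for (unsigned long i = start; i < start + npages; i++)
                    it_map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
    }

    int main(void)
    {
            unsigned long index = 5, npages = 4;

            set_bits(index, npages);    /* as in iommu_range_reserve() */
            printf("reserved [%lu, %lu)\n", index, index + npages);
            return 0;
    }
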
/Linux-v5.4/drivers/infiniband/hw/hns/
hns_roce_mr.c
221 int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift, in hns_roce_mtt_init() argument
228 if (!npages) { in hns_roce_mtt_init()
238 for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages; in hns_roce_mtt_init()
350 static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages, in pbl_1hop_alloc() argument
355 if (npages > pbl_bt_sz / 8) { in pbl_1hop_alloc()
357 npages); in pbl_1hop_alloc()
360 mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, in pbl_1hop_alloc()
366 mr->pbl_size = npages; in pbl_1hop_alloc()
376 static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages, in pbl_2hop_alloc() argument
386 pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); in pbl_2hop_alloc()
[all …]
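
The hns_roce PBL code sizes its page-table levels from the fact that each entry is an 8-byte DMA address: a one-hop PBL holds at most pbl_bt_sz / 8 pages, and the two-hop variant needs enough leaf tables to cover npages, rounded up. A runnable sketch of that capacity math (the pbl_bt_sz value is illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned long pbl_bt_sz = 4096;          /* illustrative */
            unsigned long per_table = pbl_bt_sz / 8; /* 8-byte entries */
            unsigned long npages = 1500;

            if (npages <= per_table) {
                    /* pbl_1hop_alloc(): one flat table suffices. */
                    printf("1-hop PBL, %lu bytes\n", npages * 8);
            } else {
                    /* pbl_2hop_alloc(): round up to whole leaf tables. */
                    unsigned long nleaf =
                            (npages + per_table - 1) / per_table;
                    printf("2-hop PBL, %lu leaf tables\n", nleaf);
            }
            return 0;
    }
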
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/
mr.c
194 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, in mlx4_mtt_init() argument
199 if (!npages) { in mlx4_mtt_init()
206 for (mtt->order = 0, i = 1; i < npages; i <<= 1) in mlx4_mtt_init()
418 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved() argument
428 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc_reserved()
528 int npages, int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc() argument
538 access, npages, page_shift, mr); in mlx4_mr_alloc()
590 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write() argument
595 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_rereg_mem_write()
693 int start_index, int npages, u64 *page_list) in mlx4_write_mtt_chunk() argument
[all …]
icm.c
60 dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages, in mlx4_free_icm_pages()
63 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
132 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, in mlx4_alloc_icm() argument
159 while (npages > 0) { in mlx4_alloc_icm()
179 while (1 << cur_order > npages) in mlx4_alloc_icm()
188 &chunk->buf[chunk->npages], in mlx4_alloc_icm()
191 ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages], in mlx4_alloc_icm()
202 ++chunk->npages; in mlx4_alloc_icm()
206 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { in mlx4_alloc_icm()
[all …]
/Linux-v5.4/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_misc.c
53 u64 npages, bool alloc_pages) in pvrdma_page_dir_init() argument
57 if (npages > PVRDMA_PAGE_DIR_MAX_PAGES) in pvrdma_page_dir_init()
67 pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1; in pvrdma_page_dir_init()
81 pdir->npages = npages; in pvrdma_page_dir_init()
84 pdir->pages = kcalloc(npages, sizeof(*pdir->pages), in pvrdma_page_dir_init()
89 for (i = 0; i < pdir->npages; i++) { in pvrdma_page_dir_init()
127 for (i = 0; i < pdir->npages && pdir->pages[i]; i++) { in pvrdma_page_dir_cleanup_pages()
173 if (idx >= pdir->npages) in pvrdma_page_dir_insert_dma()
189 if (offset >= pdir->npages) in pvrdma_page_dir_insert_umem()
213 if (num_pages > pdir->npages) in pvrdma_page_dir_insert_page_list()
pvrdma_mr.c
122 int ret, npages; in pvrdma_reg_user_mr() local
136 npages = ib_umem_num_pages(umem); in pvrdma_reg_user_mr()
137 if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) { in pvrdma_reg_user_mr()
139 npages); in pvrdma_reg_user_mr()
154 ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false); in pvrdma_reg_user_mr()
171 cmd->nchunks = npages; in pvrdma_reg_user_mr()
305 if (mr->npages == mr->max_pages) in pvrdma_set_page()
308 mr->pages[mr->npages++] = addr; in pvrdma_set_page()
319 mr->npages = 0; in pvrdma_map_mr_sg()
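
pvrdma_page_dir_init() sizes a two-level page directory from npages, and every later insert validates its index against pdir->npages before touching the tables. A runnable sketch of the directory sizing plus the bounds check (the entries-per-table constant is an assumption standing in for PVRDMA_PAGE_DIR_TABLE()):

    #include <stdbool.h>
    #include <stdio.h>

    #define ENTRIES_PER_TABLE 512UL          /* illustrative */

    static unsigned long pdir_npages;

    /* As in pvrdma_page_dir_insert_dma(): reject out-of-range slots. */
    static bool pdir_insert_ok(unsigned long idx)
    {
            return idx < pdir_npages;
    }

    int main(void)
    {
            unsigned long npages = 1030;
            pdir_npages = npages;

            /* pvrdma_page_dir_init(): tables covering pages 0..npages-1. */
            unsigned long ntables = (npages - 1) / ENTRIES_PER_TABLE + 1;

            printf("%lu tables, insert@1029 ok=%d, insert@1030 ok=%d\n",
                   ntables, pdir_insert_ok(1029), pdir_insert_ok(1030));
            return 0;
    }
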
/Linux-v5.4/arch/powerpc/include/asm/
ultravisor.h
34 static inline int uv_share_page(u64 pfn, u64 npages) in uv_share_page() argument
36 return ucall_norets(UV_SHARE_PAGE, pfn, npages); in uv_share_page()
39 static inline int uv_unshare_page(u64 pfn, u64 npages) in uv_unshare_page() argument
41 return ucall_norets(UV_UNSHARE_PAGE, pfn, npages); in uv_unshare_page()
/Linux-v5.4/drivers/gpu/drm/etnaviv/
etnaviv_gem_prime.c
17 int npages = obj->size >> PAGE_SHIFT; in etnaviv_gem_prime_get_sg_table() local
22 return drm_prime_pages_to_sg(etnaviv_obj->pages, npages); in etnaviv_gem_prime_get_sg_table()
111 int ret, npages; in etnaviv_gem_prime_import_sg_table() local
120 npages = size / PAGE_SIZE; in etnaviv_gem_prime_import_sg_table()
123 etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); in etnaviv_gem_prime_import_sg_table()
130 NULL, npages); in etnaviv_gem_prime_import_sg_table()
/Linux-v5.4/arch/powerpc/sysdev/
dart_iommu.c
173 long npages, unsigned long uaddr, in dart_build() argument
181 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); in dart_build()
188 l = npages; in dart_build()
196 dart_cache_sync(orig_dp, npages); in dart_build()
200 while (npages--) in dart_build()
209 static void dart_free(struct iommu_table *tbl, long index, long npages) in dart_free() argument
212 long orig_npages = npages; in dart_free()
219 DBG("dart: free at: %lx, %lx\n", index, npages); in dart_free()
223 while (npages--) in dart_free()
/Linux-v5.4/drivers/infiniband/hw/cxgb3/
iwch_mem.c
78 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) in iwch_alloc_pbl() argument
81 npages << 3); in iwch_alloc_pbl()
86 mhp->attr.pbl_size = npages; in iwch_alloc_pbl()
97 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) in iwch_write_pbl() argument
100 mhp->attr.pbl_addr + (offset << 3), npages); in iwch_write_pbl()
