/Linux-v4.19/drivers/gpu/drm/i915/selftests/
D | scatterlist.c |
      52  unsigned int npages = npages_fn(n, pt->st.nents, rnd);  in expect_pfn_sg() local
      60  if (sg->length != npages * PAGE_SIZE) {  in expect_pfn_sg()
      62  __func__, who, npages * PAGE_SIZE, sg->length);  in expect_pfn_sg()
      69  pfn += npages;  in expect_pfn_sg()
     208  unsigned long npages)  in page_contiguous() argument
     210  return first + npages == last;  in page_contiguous()
     237  unsigned long npages = npages_fn(n, count, rnd);  in alloc_table() local
     241  pfn_to_page(pfn + npages),  in alloc_table()
     242  npages)) {  in alloc_table()
     249  sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);  in alloc_table()
     [all …]
D | mock_dmabuf.c |
      39  err = sg_alloc_table(st, mock->npages, GFP_KERNEL);  in mock_map_dma_buf()
      44  for (i = 0; i < mock->npages; i++) {  in mock_map_dma_buf()
      77  for (i = 0; i < mock->npages; i++)  in mock_dmabuf_release()
      87  return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);  in mock_dmabuf_vmap()
      94  vm_unmap_ram(vaddr, mock->npages);  in mock_dmabuf_vunmap()
     127  static struct dma_buf *mock_dmabuf(int npages)  in mock_dmabuf() argument
     134  mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),  in mock_dmabuf()
     139  mock->npages = npages;  in mock_dmabuf()
     140  for (i = 0; i < npages; i++) {  in mock_dmabuf()
     147  exp_info.size = npages * PAGE_SIZE;  in mock_dmabuf()
/Linux-v4.19/arch/sparc/kernel/ |
D | iommu.c |
     159  unsigned long npages)  in alloc_npages() argument
     163  entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,  in alloc_npages()
     205  int npages, nid;  in dma_4u_alloc_coherent() local
     234  npages = size >> IO_PAGE_SHIFT;  in dma_4u_alloc_coherent()
     236  while (npages--) {  in dma_4u_alloc_coherent()
     252  unsigned long order, npages;  in dma_4u_free_coherent() local
     254  npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;  in dma_4u_free_coherent()
     257  iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);  in dma_4u_free_coherent()
     272  unsigned long flags, npages, oaddr;  in dma_4u_map_page() local
     284  npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);  in dma_4u_map_page()
     [all …]
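The dma_4u_map_page() match at source line 284 is cut off by the listing: in the kernel the byte span computed there is shifted right by IO_PAGE_SHIFT on the following line to become a page count. A minimal user-space sketch of that arithmetic, with IO_PAGE_SHIFT fixed at 13 (the sparc64 8 KiB IOMMU page) purely for illustration:

    #include <stdio.h>

    #define IO_PAGE_SHIFT 13
    #define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(a) (((a) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

    /* Number of IOMMU pages touched by the buffer [oaddr, oaddr + sz). */
    static unsigned long span_pages(unsigned long oaddr, unsigned long sz)
    {
        return (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK))
                >> IO_PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%lu\n", span_pages(0x1fff, 2));    /* straddles a boundary: 2 */
        printf("%lu\n", span_pages(0x2000, 8192)); /* exactly one page: 1 */
        return 0;
    }

Rounding the end up and the start down is what makes a 2-byte buffer that crosses a page boundary cost two IOMMU entries.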
D | pci_sun4v.c |
      59  unsigned long npages; /* Number of pages in list. */  member
      73  p->npages = 0;  in iommu_batch_start()
      85  unsigned long npages = p->npages;  in iommu_batch_flush() local
      94  while (npages != 0) {  in iommu_batch_flush()
      98  npages,  in iommu_batch_flush()
     106  npages, prot, __pa(pglist),  in iommu_batch_flush()
     111  index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),  in iommu_batch_flush()
     129  npages -= num;  in iommu_batch_flush()
     134  p->npages = 0;  in iommu_batch_flush()
     143  if (p->entry + p->npages == entry)  in iommu_batch_new_entry()
     [all …]
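iommu_batch_flush() hands npages entries to the hypervisor, which may map fewer than requested per call, so the loop resubmits the remainder until npages reaches zero. A user-space sketch of that retry shape; map_some() and its 8-page cap are invented stand-ins for the sun4v mapping hypercall:

    #include <stdio.h>

    static long map_some(unsigned long entry, unsigned long npages)
    {
        (void)entry;
        return npages > 8 ? 8 : (long)npages;  /* pretend partial success */
    }

    static void batch_flush(unsigned long entry, unsigned long npages)
    {
        while (npages != 0) {
            long num = map_some(entry, npages);
            if (num < 0)
                return;            /* the real code reports the failure */
            entry  += num;
            npages -= num;
        }
    }

    int main(void)
    {
        batch_flush(0, 20);        /* three calls: 8 + 8 + 4 */
        puts("flushed");
        return 0;
    }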
/Linux-v4.19/drivers/fpga/ |
D | dfl-afu-dma-region.c |
      18  static void put_all_pages(struct page **pages, int npages)  in put_all_pages() argument
      22  for (i = 0; i < npages; i++)  in put_all_pages()
      45  static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)  in afu_dma_adjust_locked_vm() argument
      57  locked = current->mm->locked_vm + npages;  in afu_dma_adjust_locked_vm()
      63  current->mm->locked_vm += npages;  in afu_dma_adjust_locked_vm()
      65  if (WARN_ON_ONCE(npages > current->mm->locked_vm))  in afu_dma_adjust_locked_vm()
      66  npages = current->mm->locked_vm;  in afu_dma_adjust_locked_vm()
      67  current->mm->locked_vm -= npages;  in afu_dma_adjust_locked_vm()
      71  incr ? '+' : '-', npages << PAGE_SHIFT,  in afu_dma_adjust_locked_vm()
      91  int npages = region->length >> PAGE_SHIFT;  in afu_dma_pin_pages() local
     [all …]
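afu_dma_adjust_locked_vm() shows the usual pinning-accounting pattern: on increase, refuse if the new locked_vm total would exceed the memlock budget; on decrease, clamp rather than underflow (the WARN_ON_ONCE path at lines 65-66). A stand-alone sketch, with plain globals standing in for current->mm->locked_vm and the RLIMIT_MEMLOCK-derived limit:

    #include <stdio.h>

    static long locked_vm;                /* stands in for current->mm->locked_vm */
    static const long lock_limit = 1024;  /* pages; invented budget */

    static int adjust_locked_vm(long npages, int incr)
    {
        if (incr) {
            if (locked_vm + npages > lock_limit)
                return -1;            /* -ENOMEM in the driver */
            locked_vm += npages;
        } else {
            if (npages > locked_vm)
                npages = locked_vm;   /* clamp, as the WARN_ON_ONCE path does */
            locked_vm -= npages;
        }
        return 0;
    }

    int main(void)
    {
        adjust_locked_vm(512, 1);
        adjust_locked_vm(512, 0);
        printf("locked_vm = %ld\n", locked_vm);  /* 0 */
        return 0;
    }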
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/ |
D | pagealloc.c |
      50  s32 npages;  member
     135  s32 *npages, int boot)  in mlx5_cmd_query_pages() argument
     150  *npages = MLX5_GET(query_pages_out, out, num_pages);  in mlx5_cmd_query_pages()
     271  static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,  in give_pages() argument
     281  inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);  in give_pages()
     289  for (i = 0; i < npages; i++) {  in give_pages()
     306  MLX5_SET(manage_pages_in, in, input_num_entries, npages);  in give_pages()
     311  func_id, npages, err);  in give_pages()
     315  dev->priv.fw_pages += npages;  in give_pages()
     317  dev->priv.vfs_pages += npages;  in give_pages()
     [all …]
/Linux-v4.19/drivers/gpu/drm/ttm/ |
D | ttm_page_alloc.c |
      74  unsigned npages;  member
     247  static void ttm_pages_put(struct page *pages[], unsigned npages,  in ttm_pages_put() argument
     253  if (ttm_set_pages_array_wb(pages, npages))  in ttm_pages_put()
     254  pr_err("Failed to set %d pages to wb!\n", npages);  in ttm_pages_put()
     257  for (i = 0; i < npages; ++i) {  in ttm_pages_put()
     269  pool->npages -= freed_pages;  in ttm_pool_update_free_locked()
     423  count += (pool->npages << pool->order);  in ttm_pool_shrink_count()
     495  unsigned npages = 1 << order;  in ttm_alloc_new_pages() local
     537  for (j = 0; j < npages; ++j) {  in ttm_alloc_new_pages()
     591  && count > pool->npages) {  in ttm_page_pool_fill_locked()
     [all …]
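Note the shrinker count at line 423: each entry in a higher-order pool is a compound allocation of 2^order base pages, so the pool contributes npages << order to the total. A trivial sketch of that accounting, with invented pool contents:

    #include <stdio.h>

    struct pool { unsigned npages, order; };

    int main(void)
    {
        /* invented: 100 single pages plus 10 order-2 (4-page) allocations */
        struct pool pools[] = { { 100, 0 }, { 10, 2 } };
        unsigned long count = 0;

        for (unsigned i = 0; i < sizeof(pools) / sizeof(pools[0]); i++)
            count += (unsigned long)pools[i].npages << pools[i].order;
        printf("shrinkable base pages: %lu\n", count);  /* 140 */
        return 0;
    }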
/Linux-v4.19/drivers/infiniband/hw/hfi1/ |
D | user_pages.c |
      72  u32 nlocked, u32 npages)  in hfi1_can_pin_pages() argument
      99  if (pinned + npages >= ulimit && !can_lock)  in hfi1_can_pin_pages()
     102  return ((nlocked + npages) <= size) || can_lock;  in hfi1_can_pin_pages()
     105  int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,  in hfi1_acquire_user_pages() argument
     110  ret = get_user_pages_fast(vaddr, npages, writable, pages);  in hfi1_acquire_user_pages()
     122  size_t npages, bool dirty)  in hfi1_release_user_pages() argument
     126  for (i = 0; i < npages; i++) {  in hfi1_release_user_pages()
     134  mm->pinned_vm -= npages;  in hfi1_release_user_pages()
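hfi1_can_pin_pages() encodes a two-level admission policy: the pin is refused if it would cross the per-process ulimit (unless the caller can lock memory), and otherwise allowed only while the driver-wide cache stays under its size cap. The two quoted conditions, lifted into a runnable sketch (all limit values here are invented):

    #include <stdbool.h>
    #include <stdio.h>

    static bool can_pin_pages(unsigned long pinned, unsigned long ulimit,
                              unsigned long nlocked, unsigned long npages,
                              unsigned long size, bool can_lock)
    {
        if (pinned + npages >= ulimit && !can_lock)
            return false;
        return ((nlocked + npages) <= size) || can_lock;
    }

    int main(void)
    {
        printf("%d\n", can_pin_pages(100, 1024, 10, 50, 256, false));  /* 1 */
        printf("%d\n", can_pin_pages(1020, 1024, 10, 50, 256, false)); /* 0 */
        return 0;
    }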
D | user_exp_rcv.c |
      57  static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
      61  u16 pageidx, unsigned int npages);
     196  unsigned int npages,  in unpin_rcv_pages() argument
     209  hfi1_release_user_pages(fd->mm, pages, npages, mapped);  in unpin_rcv_pages()
     210  fd->tid_n_pinned -= npages;  in unpin_rcv_pages()
     219  unsigned int npages;  in pin_rcv_pages() local
     225  npages = num_user_pages(vaddr, tidbuf->length);  in pin_rcv_pages()
     226  if (!npages)  in pin_rcv_pages()
     229  if (npages > fd->uctxt->expected_count) {  in pin_rcv_pages()
     236  npages * PAGE_SIZE)) {  in pin_rcv_pages()
     [all …]
D | trace_rx.h |
     134  u32 npages, unsigned long va, unsigned long pa,
     136  TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
     141  __field(u32, npages)
     150  __entry->npages = npages;
     159  __entry->npages,
     168  TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
     170  TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma));
     174  TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
     176  TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma));
     208  u32 npages, dma_addr_t dma),
     [all …]
/Linux-v4.19/arch/powerpc/kernel/ |
D | iommu.c |
     177  unsigned long npages,  in iommu_range_alloc() argument
     184  int largealloc = npages > 15;  in iommu_range_alloc()
     197  if (unlikely(npages == 0)) {  in iommu_range_alloc()
     258  n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,  in iommu_range_alloc()
     284  end = n + npages;  in iommu_range_alloc()
     306  void *page, unsigned int npages,  in iommu_alloc() argument
     315  entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);  in iommu_alloc()
     324  build_fail = tbl->it_ops->set(tbl, entry, npages,  in iommu_alloc()
     334  __iommu_free(tbl, ret, npages);  in iommu_alloc()
     349  unsigned int npages)  in iommu_free_check() argument
     [all …]
/Linux-v4.19/drivers/infiniband/core/ |
D | umem.c |
      55  umem->npages,  in __ib_umem_release()
      58  for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {  in __ib_umem_release()
      89  unsigned long npages;  in ib_umem_get() local
     146  npages = ib_umem_num_pages(umem);  in ib_umem_get()
     151  current->mm->pinned_vm += npages;  in ib_umem_get()
     161  if (npages == 0 || npages > UINT_MAX) {  in ib_umem_get()
     166  ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);  in ib_umem_get()
     176  while (npages) {  in ib_umem_get()
     178  min_t(unsigned long, npages,  in ib_umem_get()
     186  umem->npages += ret;  in ib_umem_get()
     [all …]
/Linux-v4.19/drivers/infiniband/hw/hns/ |
D | hns_roce_mr.c |
     208  int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,  in hns_roce_mtt_init() argument
     215  if (!npages) {  in hns_roce_mtt_init()
     225  for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;  in hns_roce_mtt_init()
     318  static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,  in hns_roce_mhop_alloc() argument
     334  pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);  in hns_roce_mhop_alloc()
     341  if (npages > pbl_bt_sz / 8) {  in hns_roce_mhop_alloc()
     343  npages);  in hns_roce_mhop_alloc()
     346  mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,  in hns_roce_mhop_alloc()
     352  mr->pbl_size = npages;  in hns_roce_mhop_alloc()
     399  size = (npages - npages_allocated) * 8;  in hns_roce_mhop_alloc()
     [all …]
/Linux-v4.19/drivers/infiniband/hw/mthca/ |
D | mthca_memfree.c |
      69  pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,  in mthca_free_icm_pages()
      72  for (i = 0; i < chunk->npages; ++i)  in mthca_free_icm_pages()
      81  for (i = 0; i < chunk->npages; ++i) {  in mthca_free_icm_coherent()
     137  struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,  in mthca_alloc_icm() argument
     157  while (npages > 0) {  in mthca_alloc_icm()
     165  chunk->npages = 0;  in mthca_alloc_icm()
     170  while (1 << cur_order > npages)  in mthca_alloc_icm()
     175  &chunk->mem[chunk->npages],  in mthca_alloc_icm()
     178  ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],  in mthca_alloc_icm()
     182  ++chunk->npages;  in mthca_alloc_icm()
     [all …]
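mthca_alloc_icm() (and its mlx4 twin further down) builds the ICM out of power-of-two chunks, lowering cur_order whenever a chunk of 1 << cur_order pages would overshoot what is still needed (line 170). A sketch of just that order-stepping loop; the real code also drops the order when an allocation attempt fails:

    #include <stdio.h>

    int main(void)
    {
        int npages = 37;       /* pages still to allocate */
        int cur_order = 5;     /* preferred chunk: 32 pages */

        while (npages > 0) {
            while (1 << cur_order > npages)
                --cur_order;
            printf("alloc chunk of %d pages\n", 1 << cur_order);
            npages -= 1 << cur_order;
        }
        return 0;
    }

For 37 pages this allocates chunks of 32, 4, and 1 pages, so large requests are satisfied with few big allocations and a short power-of-two tail.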
D | mthca_allocator.c |
     200  int npages, shift;  in mthca_buf_alloc() local
     207  npages = 1;  in mthca_buf_alloc()
     221  npages *= 2;  in mthca_buf_alloc()
     224  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
     229  for (i = 0; i < npages; ++i)  in mthca_buf_alloc()
     233  npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;  in mthca_buf_alloc()
     236  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
     241  buf->page_list = kmalloc_array(npages,  in mthca_buf_alloc()
     247  for (i = 0; i < npages; ++i)  in mthca_buf_alloc()
     250  for (i = 0; i < npages; ++i) {  in mthca_buf_alloc()
     [all …]
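mthca_buf_alloc() computes npages two ways: the indirect path (line 233) is a plain round-up division of the byte size, while on the direct path npages doubles (line 221) each time the mapping's effective page size is halved. A loose user-space rendering of both; the 16 KiB starting size and the forced halvings are invented for the example:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        /* direct path: halving the effective page size doubles npages */
        unsigned long npages = 1;
        int shift = 14;              /* pretend we start at one 16 KiB "page" */
        while (shift > 12) {         /* pretend alignment forces 4 KiB */
            --shift;
            npages *= 2;
        }
        printf("direct:   %lu pages of 2^%d bytes\n", npages, shift);  /* 4 */

        /* indirect path: round the byte size up to whole pages */
        unsigned long size = 10000;
        npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        printf("indirect: %lu pages\n", npages);                       /* 3 */
        return 0;
    }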
/Linux-v4.19/arch/x86/kernel/ |
D | pci-calgary_64.c |
     207  unsigned long start_addr, unsigned int npages)  in iommu_range_reserve() argument
     219  end = index + npages;  in iommu_range_reserve()
     225  bitmap_set(tbl->it_map, index, npages);  in iommu_range_reserve()
     232  unsigned int npages)  in iommu_range_alloc() argument
     241  BUG_ON(npages == 0);  in iommu_range_alloc()
     246  npages, 0, boundary_size, 0);  in iommu_range_alloc()
     251  npages, 0, boundary_size, 0);  in iommu_range_alloc()
     262  tbl->it_hint = offset + npages;  in iommu_range_alloc()
     271  void *vaddr, unsigned int npages, int direction)  in iommu_alloc() argument
     276  entry = iommu_range_alloc(dev, tbl, npages);  in iommu_alloc()
     [all …]
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx4/ |
D | mr.c |
     194  int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,  in mlx4_mtt_init() argument
     199  if (!npages) {  in mlx4_mtt_init()
     206  for (mtt->order = 0, i = 1; i < npages; i <<= 1)  in mlx4_mtt_init()
     417  u64 iova, u64 size, u32 access, int npages,  in mlx4_mr_alloc_reserved() argument
     427  return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_alloc_reserved()
     527  int npages, int page_shift, struct mlx4_mr *mr)  in mlx4_mr_alloc() argument
     537  access, npages, page_shift, mr);  in mlx4_mr_alloc()
     589  u64 iova, u64 size, int npages,  in mlx4_mr_rereg_mem_write() argument
     594  err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);  in mlx4_mr_rereg_mem_write()
     692  int start_index, int npages, u64 *page_list)  in mlx4_write_mtt_chunk() argument
     [all …]
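The loop at line 206 derives the MTT order by counting doublings until a power of two covers npages, i.e. ceil(log2(npages)). Extracted into a runnable form:

    #include <stdio.h>

    int main(void)
    {
        int npages = 1000;
        int order, i;

        for (order = 0, i = 1; i < npages; i <<= 1)
            ++order;
        printf("npages=%d -> order=%d (covers %d)\n",
               npages, order, 1 << order);   /* order=10, covers 1024 */
        return 0;
    }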
D | icm.c |
      60  pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,  in mlx4_free_icm_pages()
      63  for (i = 0; i < chunk->npages; ++i)  in mlx4_free_icm_pages()
      72  for (i = 0; i < chunk->npages; ++i)  in mlx4_free_icm_coherent()
     133  struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,  in mlx4_alloc_icm() argument
     160  while (npages > 0) {  in mlx4_alloc_icm()
     175  chunk->npages = 0;  in mlx4_alloc_icm()
     180  while (1 << cur_order > npages)  in mlx4_alloc_icm()
     189  &chunk->mem[chunk->npages],  in mlx4_alloc_icm()
     192  ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],  in mlx4_alloc_icm()
     203  ++chunk->npages;  in mlx4_alloc_icm()
     [all …]
/Linux-v4.19/arch/sparc/mm/ |
D | iommu.c |
     178  static u32 iommu_get_one(struct device *dev, struct page *page, int npages)  in iommu_get_one() argument
     187  ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));  in iommu_get_one()
     195  for (i = 0; i < npages; i++) {  in iommu_get_one()
     203  iommu_flush_iotlb(iopte0, npages);  in iommu_get_one()
     211  int npages;  in iommu_get_scsi_one() local
     216  npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;  in iommu_get_scsi_one()
     218  busa = iommu_get_one(dev, page, npages);  in iommu_get_scsi_one()
     284  static void iommu_release_one(struct device *dev, u32 busa, int npages)  in iommu_release_one() argument
     292  for (i = 0; i < npages; i++) {  in iommu_release_one()
     297  bit_map_clear(&iommu->usemap, ioptex, npages);  in iommu_release_one()
     [all …]
/Linux-v4.19/arch/powerpc/include/asm/ |
D | kvm_book3s_64.h |
     389  return !(memslot->base_gfn & mask) && !(memslot->npages & mask);  in slot_is_aligned()
     455  unsigned long npages)  in set_dirty_bits() argument
     458  if (npages >= 8)  in set_dirty_bits()
     459  memset((char *)map + i / 8, 0xff, npages / 8);  in set_dirty_bits()
     461  for (; npages; ++i, --npages)  in set_dirty_bits()
     466  unsigned long npages)  in set_dirty_bits_atomic() argument
     468  if (npages >= 8)  in set_dirty_bits_atomic()
     469  memset((char *)map + i / 8, 0xff, npages / 8);  in set_dirty_bits_atomic()
     471  for (; npages; ++i, --npages)  in set_dirty_bits_atomic()
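set_dirty_bits() fills whole bytes of the dirty bitmap with memset() when npages is at least 8, and otherwise marks the pages bit by bit. The listing elides the statement under the for loop, so the bit-set line below is a plausible reconstruction rather than a quote; a user-space sketch (using a byte array where the kernel uses unsigned long words and __set_bit):

    #include <stdio.h>
    #include <string.h>

    /* Assumes i and npages are multiples of 8 whenever npages >= 8,
     * which is what makes the plain memset() safe. */
    static void set_dirty_bits(unsigned char *map, unsigned long i,
                               unsigned long npages)
    {
        if (npages >= 8)
            memset(map + i / 8, 0xff, npages / 8);
        else
            for (; npages; ++i, --npages)
                map[i / 8] |= 1u << (i % 8);
    }

    int main(void)
    {
        unsigned char map[4] = { 0 };

        set_dirty_bits(map, 8, 16);   /* whole bytes 1 and 2 */
        set_dirty_bits(map, 25, 3);   /* three stray bits in byte 3 */
        printf("%02x %02x %02x %02x\n", map[0], map[1], map[2], map[3]);
        /* prints: 00 ff ff 0e */
        return 0;
    }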
/Linux-v4.19/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_misc.c |
      53  u64 npages, bool alloc_pages)  in pvrdma_page_dir_init() argument
      57  if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)  in pvrdma_page_dir_init()
      67  pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;  in pvrdma_page_dir_init()
      81  pdir->npages = npages;  in pvrdma_page_dir_init()
      84  pdir->pages = kcalloc(npages, sizeof(*pdir->pages),  in pvrdma_page_dir_init()
      89  for (i = 0; i < pdir->npages; i++) {  in pvrdma_page_dir_init()
     127  for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {  in pvrdma_page_dir_cleanup_pages()
     173  if (idx >= pdir->npages)  in pvrdma_page_dir_insert_dma()
     190  if (offset >= pdir->npages)  in pvrdma_page_dir_insert_umem()
     218  if (num_pages > pdir->npages)  in pvrdma_page_dir_insert_page_list()
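At line 67, ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1 is the classic (n - 1)/k + 1 round-up: find which table the last page lands in, then count one more. A sketch in which PVRDMA_PAGE_DIR_TABLE is treated as a plain division, with an assumed 512 entries per table:

    #include <stdio.h>

    #define ENTRIES_PER_TABLE 512           /* assumed value for the example */
    #define TABLE(x) ((x) / ENTRIES_PER_TABLE)

    int main(void)
    {
        unsigned long npages = 1025;
        unsigned long ntables = TABLE(npages - 1) + 1;

        printf("ntables = %lu\n", ntables); /* 3: 512 + 512 + 1 pages */
        return 0;
    }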
/Linux-v4.19/arch/powerpc/sysdev/ |
D | dart_iommu.c |
     187  long npages, unsigned long uaddr,  in dart_build() argument
     195  DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);  in dart_build()
     202  l = npages;  in dart_build()
     210  dart_cache_sync(orig_dp, npages);  in dart_build()
     214  while (npages--)  in dart_build()
     223  static void dart_free(struct iommu_table *tbl, long index, long npages)  in dart_free() argument
     226  long orig_npages = npages;  in dart_free()
     233  DBG("dart: free at: %lx, %lx\n", index, npages);  in dart_free()
     237  while (npages--)  in dart_free()
/Linux-v4.19/drivers/infiniband/hw/cxgb3/ |
D | iwch_mem.c |
      78  int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)  in iwch_alloc_pbl() argument
      81  npages << 3);  in iwch_alloc_pbl()
      86  mhp->attr.pbl_size = npages;  in iwch_alloc_pbl()
      97  int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)  in iwch_write_pbl() argument
     100  mhp->attr.pbl_addr + (offset << 3), npages);  in iwch_write_pbl()
/Linux-v4.19/drivers/gpu/drm/etnaviv/ |
D | etnaviv_gem_prime.c |
      15  int npages = obj->size >> PAGE_SHIFT;  in etnaviv_gem_prime_get_sg_table() local
      20  return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);  in etnaviv_gem_prime_get_sg_table()
     109  int ret, npages;  in etnaviv_gem_prime_import_sg_table() local
     119  npages = size / PAGE_SIZE;  in etnaviv_gem_prime_import_sg_table()
     122  etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);  in etnaviv_gem_prime_import_sg_table()
     129  NULL, npages);  in etnaviv_gem_prime_import_sg_table()
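Unlike the IOMMU paths above, these conversions carry no rounding term: GEM object sizes are page-aligned by construction, so npages is a plain shift (or exact division). A minimal sketch of that assumption:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long obj_size = 16 * PAGE_SIZE;  /* page-aligned by construction */
        int npages = obj_size >> PAGE_SHIFT;

        printf("npages = %d\n", npages);          /* 16 */
        return 0;
    }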
/Linux-v4.19/arch/powerpc/kvm/ |
D | book3s_64_vio.c |
     229  unsigned long i, npages = kvmppc_tce_pages(stt->size);  in release_spapr_tce_table() local
     231  for (i = 0; i < npages; i++)  in release_spapr_tce_table()
     298  unsigned long npages, size = args->size;  in kvm_vm_ioctl_create_spapr_tce() local
     306  npages = kvmppc_tce_pages(size);  in kvm_vm_ioctl_create_spapr_tce()
     307  ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);  in kvm_vm_ioctl_create_spapr_tce()
     312  stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),  in kvm_vm_ioctl_create_spapr_tce()
     324  for (i = 0; i < npages; i++) {  in kvm_vm_ioctl_create_spapr_tce()
     356  for (i = 0; i < npages; i++)  in kvm_vm_ioctl_create_spapr_tce()
     362  kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);  in kvm_vm_ioctl_create_spapr_tce()
     557  unsigned long tce_list, unsigned long npages)  in kvmppc_h_put_tce_indirect() argument
     [all …]
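The kzalloc at line 312 sizes one allocation as sizeof(*stt) + npages * sizeof(struct page *), the trailing-array idiom also visible in mock_dmabuf.c near the top of this listing: the page-pointer array lives at the end of the struct, so a single allocation (and a single free) covers both. A stand-alone sketch with an invented struct layout:

    #include <stdio.h>
    #include <stdlib.h>

    struct tce_table {
        unsigned long size;
        void *pages[];        /* flexible array member, npages entries */
    };

    int main(void)
    {
        unsigned long npages = 8;
        struct tce_table *stt =
            calloc(1, sizeof(*stt) + npages * sizeof(stt->pages[0]));

        if (!stt)
            return 1;
        stt->size = npages;
        printf("one allocation, %lu trailing slots\n", npages);
        free(stt);            /* one free releases struct and array together */
        return 0;
    }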