/Linux-v4.19/drivers/infiniband/hw/mthca/

D | mthca_allocator.c
    122  if (array->page_list[p].page)  in mthca_array_get()
    123  return array->page_list[p].page[index & MTHCA_ARRAY_MASK];  in mthca_array_get()
    133  if (!array->page_list[p].page)  in mthca_array_set()
    134  array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC);  in mthca_array_set()
    136  if (!array->page_list[p].page)  in mthca_array_set()
    139  array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value;  in mthca_array_set()
    140  ++array->page_list[p].used;  in mthca_array_set()
    149  if (--array->page_list[p].used == 0) {  in mthca_array_clear()
    150  free_page((unsigned long) array->page_list[p].page);  in mthca_array_clear()
    151  array->page_list[p].page = NULL;  in mthca_array_clear()
    [all …]

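The mthca_array helpers above are a two-level lookup: a fixed top-level table whose entries each point to a lazily allocated page of pointer slots, plus a per-page use count so an empty slot page can be handed back to free_page(). A minimal userspace sketch of the same pattern, with hypothetical names (toy_array, TOY_SLOTS_PER_PAGE) and calloc()/free() standing in for get_zeroed_page()/free_page():

    #include <stdlib.h>

    #define TOY_PAGE_SIZE       4096
    #define TOY_SLOTS_PER_PAGE  (TOY_PAGE_SIZE / sizeof(void *))
    #define TOY_ARRAY_MASK      (TOY_SLOTS_PER_PAGE - 1)

    struct toy_array {
            struct {
                    void **page;    /* lazily allocated block of slots */
                    int    used;    /* live entries in this block */
            } *page_list;
    };

    static void *toy_array_get(struct toy_array *array, unsigned int index)
    {
            unsigned int p = index / TOY_SLOTS_PER_PAGE;

            if (array->page_list[p].page)
                    return array->page_list[p].page[index & TOY_ARRAY_MASK];
            return NULL;
    }

    static int toy_array_set(struct toy_array *array, unsigned int index, void *value)
    {
            unsigned int p = index / TOY_SLOTS_PER_PAGE;

            /* allocate the backing block on first use, like get_zeroed_page() */
            if (!array->page_list[p].page)
                    array->page_list[p].page = calloc(TOY_SLOTS_PER_PAGE, sizeof(void *));
            if (!array->page_list[p].page)
                    return -1;

            array->page_list[p].page[index & TOY_ARRAY_MASK] = value;
            ++array->page_list[p].used;
            return 0;
    }

    static void toy_array_clear(struct toy_array *array, unsigned int index)
    {
            unsigned int p = index / TOY_SLOTS_PER_PAGE;

            array->page_list[p].page[index & TOY_ARRAY_MASK] = NULL;
            if (--array->page_list[p].used == 0) {
                    /* last entry gone: give the whole slot page back */
                    free(array->page_list[p].page);
                    array->page_list[p].page = NULL;
            }
    }
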
D | mthca_eq.c
    231  return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;  in get_eqe()
    482  eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),  in mthca_create_eq()
    484  if (!eq->page_list)  in mthca_create_eq()
    488  eq->page_list[i].buf = NULL;  in mthca_create_eq()
    500  eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,  in mthca_create_eq()
    502  if (!eq->page_list[i].buf)  in mthca_create_eq()
    506  dma_unmap_addr_set(&eq->page_list[i], mapping, t);  in mthca_create_eq()
    508  clear_page(eq->page_list[i].buf);  in mthca_create_eq()
    572  if (eq->page_list[i].buf)  in mthca_create_eq()
    574  eq->page_list[i].buf,  in mthca_create_eq()
    [all …]

/Linux-v4.19/mm/

D | dmapool.c
    46  struct list_head page_list;  member
    57  struct list_head page_list;  member
    89  list_for_each_entry(page, &pool->page_list, page_list) {  in show_pools()
    166  INIT_LIST_HEAD(&retval->page_list);  in dma_pool_create()
    258  list_del(&page->page_list);  in pool_free_page()
    287  while (!list_empty(&pool->page_list)) {  in dma_pool_destroy()
    289  page = list_entry(pool->page_list.next,  in dma_pool_destroy()
    290  struct dma_page, page_list);  in dma_pool_destroy()
    300  list_del(&page->page_list);  in dma_pool_destroy()
    331  list_for_each_entry(page, &pool->page_list, page_list) {  in dma_pool_alloc()
    [all …]

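dmapool threads every dma_page onto a list_head embedded in both the pool and the page, so allocation, freeing and teardown are all list walks. A condensed kernel-style sketch of that ownership structure (simplified, not the real dmapool internals; locking and the DMA allocations themselves are omitted):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct toy_pool {
            struct list_head page_list;     /* all pages owned by this pool */
    };

    struct toy_page {
            struct list_head page_list;     /* links this page into the pool */
            void *vaddr;
    };

    static void toy_pool_init(struct toy_pool *pool)
    {
            INIT_LIST_HEAD(&pool->page_list);
    }

    static void toy_pool_add_page(struct toy_pool *pool, struct toy_page *page)
    {
            list_add(&page->page_list, &pool->page_list);
    }

    static void toy_pool_destroy(struct toy_pool *pool)
    {
            /* same shape as dma_pool_destroy(): pop pages until the list is empty */
            while (!list_empty(&pool->page_list)) {
                    struct toy_page *page = list_entry(pool->page_list.next,
                                                       struct toy_page, page_list);

                    list_del(&page->page_list);
                    kfree(page);
            }
    }
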
/Linux-v4.19/drivers/infiniband/hw/hns/

D | hns_roce_alloc.c
    170  if (buf->page_list[i].buf)  in hns_roce_buf_free()
    172  buf->page_list[i].buf,  in hns_roce_buf_free()
    173  buf->page_list[i].map);  in hns_roce_buf_free()
    174  kfree(buf->page_list);  in hns_roce_buf_free()
    215  buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),  in hns_roce_buf_alloc()
    218  if (!buf->page_list)  in hns_roce_buf_alloc()
    222  buf->page_list[i].buf = dma_zalloc_coherent(dev,  in hns_roce_buf_alloc()
    226  if (!buf->page_list[i].buf)  in hns_roce_buf_alloc()
    229  buf->page_list[i].map = t;  in hns_roce_buf_alloc()

D | hns_roce_mr.c
    705  u32 npages, u64 *page_list)  in hns_roce_write_mtt_chunk() argument
    741  mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);  in hns_roce_write_mtt_chunk()
    743  mtts[i] = cpu_to_le64(page_list[i]);  in hns_roce_write_mtt_chunk()
    751  u32 npages, u64 *page_list)  in hns_roce_write_mtt() argument
    769  page_list);  in hns_roce_write_mtt()
    775  page_list += chunk;  in hns_roce_write_mtt()
    784  u64 *page_list;  in hns_roce_buf_write_mtt() local
    788  page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);  in hns_roce_buf_write_mtt()
    789  if (!page_list)  in hns_roce_buf_write_mtt()
    794  page_list[i] = buf->direct.map + (i << buf->page_shift);  in hns_roce_buf_write_mtt()
    [all …]

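hns_roce_write_mtt() pushes a long page_list to the hardware in fixed-size chunks, advancing the start index and the page_list pointer after each write; hns_roce_buf_write_mtt() first fills the page_list from the buffer's DMA map. A hedged sketch of the chunking loop, where toy_write_chunk() and TOY_MAX_CHUNK are hypothetical stand-ins for the device-specific chunk writer and its per-command limit:

    #include <linux/kernel.h>
    #include <linux/types.h>

    #define TOY_MAX_CHUNK   64      /* assumed per-command limit, not the real value */

    /* hypothetical low-level writer: programs one chunk of translation entries */
    int toy_write_chunk(u32 start_index, u32 npages, const u64 *page_list);

    int toy_write_mtt(u32 start_index, u32 npages, const u64 *page_list)
    {
            while (npages > 0) {
                    u32 chunk = min_t(u32, npages, TOY_MAX_CHUNK);
                    int err = toy_write_chunk(start_index, chunk, page_list);

                    if (err)
                            return err;

                    /* advance to the next chunk of the caller's page_list */
                    npages      -= chunk;
                    start_index += chunk;
                    page_list   += chunk;
            }
            return 0;
    }
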
/Linux-v4.19/drivers/infiniband/core/

D | fmr_pool.c
    116  u64 *page_list,  in ib_fmr_cache_lookup() argument
    126  bucket = pool->cache_bucket + ib_fmr_hash(*page_list);  in ib_fmr_cache_lookup()
    131  !memcmp(page_list, fmr->page_list,  in ib_fmr_cache_lookup()
    132  page_list_len * sizeof *page_list))  in ib_fmr_cache_lookup()
    399  u64 *page_list,  in ib_fmr_pool_map_phys() argument
    413  page_list,  in ib_fmr_pool_map_phys()
    438  result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,  in ib_fmr_pool_map_phys()
    457  memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));  in ib_fmr_pool_map_phys()
    461  pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));  in ib_fmr_pool_map_phys()

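ib_fmr_cache_lookup() hashes the first page address to pick a cache bucket and then memcmp()s the whole page list against each cached FMR, so an identical mapping can be reused instead of re-registered. A simplified sketch of that lookup; toy_fmr, toy_hash and the bucket count are assumptions, and the real lookup also matches other fields such as the I/O virtual address:

    #include <linux/list.h>
    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/types.h>

    #define TOY_HASH_BUCKETS        64      /* hypothetical bucket count */

    struct toy_fmr {
            struct hlist_node cache_node;
            int               page_list_len;
            u64               page_list[];  /* pages this mapping currently covers */
    };

    static unsigned int toy_hash(u64 first_page)
    {
            return (first_page >> PAGE_SHIFT) % TOY_HASH_BUCKETS;
    }

    /* find a cached mapping that covers exactly the same pages */
    static struct toy_fmr *toy_cache_lookup(struct hlist_head *cache_bucket,
                                            u64 *page_list, int page_list_len)
    {
            struct hlist_head *bucket = cache_bucket + toy_hash(*page_list);
            struct toy_fmr *fmr;

            hlist_for_each_entry(fmr, bucket, cache_node)
                    if (fmr->page_list_len == page_list_len &&
                        !memcmp(page_list, fmr->page_list,
                                page_list_len * sizeof(*page_list)))
                            return fmr;

            return NULL;
    }
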
D | umem_odp.c
    303  odp_data->page_list =  in ib_alloc_odp_umem()
    304  vzalloc(array_size(pages, sizeof(*odp_data->page_list)));  in ib_alloc_odp_umem()
    305  if (!odp_data->page_list) {  in ib_alloc_odp_umem()
    332  vfree(odp_data->page_list);  in ib_alloc_odp_umem()
    391  umem->odp_data->page_list =  in ib_umem_odp_get()
    392  vzalloc(array_size(sizeof(*umem->odp_data->page_list),  in ib_umem_odp_get()
    394  if (!umem->odp_data->page_list) {  in ib_umem_odp_get()
    463  vfree(umem->odp_data->page_list);  in ib_umem_odp_get()
    533  vfree(umem->odp_data->page_list);  in ib_umem_odp_release()
    588  umem->odp_data->page_list[page_index] = page;  in ib_umem_odp_map_dma_single_page()
    [all …]

D | umem.c
    85  struct page **page_list;  in ib_umem_get() local
    132  page_list = (struct page **) __get_free_page(GFP_KERNEL);  in ib_umem_get()
    133  if (!page_list) {  in ib_umem_get()
    180  gup_flags, page_list, vma_list);  in ib_umem_get()
    194  sg_set_page(sg, page_list[i], PAGE_SIZE, 0);  in ib_umem_get()
    225  free_page((unsigned long) page_list);  in ib_umem_get()

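ib_umem_get() uses a single free page as a scratch array of struct page pointers, pins user memory into it batch by batch, and hands each pinned page to the scatterlist with sg_set_page(). A reduced kernel-style sketch of pinning one batch (this assumes the v4.19 get_user_pages() calling convention and that the caller holds mmap_sem for read; toy_pin_batch is hypothetical):

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /*
     * Pin up to one page's worth of user pages starting at 'start'.
     * Returns the number pinned, or a negative errno.  The pinned pages
     * must eventually be released with put_page().
     */
    static long toy_pin_batch(unsigned long start, unsigned long npages,
                              struct page ***out_pages)
    {
            struct page **page_list;
            long ret;

            /* one free page holds PAGE_SIZE / sizeof(struct page *) pointers */
            page_list = (struct page **) __get_free_page(GFP_KERNEL);
            if (!page_list)
                    return -ENOMEM;

            npages = min_t(unsigned long, npages,
                           PAGE_SIZE / sizeof(struct page *));

            ret = get_user_pages(start, npages, FOLL_WRITE, page_list, NULL);
            if (ret < 0) {
                    free_page((unsigned long) page_list);
                    return ret;
            }

            *out_pages = page_list; /* caller put_page()s each entry, then frees */
            return ret;
    }
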
/Linux-v4.19/arch/powerpc/kernel/

D | machine_kexec_32.c
    35  unsigned long page_list;  in default_machine_kexec() local
    46  page_list = image->head;  in default_machine_kexec()
    63  (*rnk)(page_list, reboot_code_buffer_phys, image->start);  in default_machine_kexec()

/Linux-v4.19/drivers/misc/genwqe/

D | card_utils.c
    250  struct page **page_list, int num_pages,  in genwqe_map_pages() argument
    261  daddr = pci_map_page(pci_dev, page_list[i],  in genwqe_map_pages()
    535  static int genwqe_free_user_pages(struct page **page_list,  in genwqe_free_user_pages() argument
    541  if (page_list[i] != NULL) {  in genwqe_free_user_pages()
    543  set_page_dirty_lock(page_list[i]);  in genwqe_free_user_pages()
    544  put_page(page_list[i]);  in genwqe_free_user_pages()
    592  m->page_list = kcalloc(m->nr_pages,  in genwqe_user_vmap()
    595  if (!m->page_list) {  in genwqe_user_vmap()
    602  m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);  in genwqe_user_vmap()
    608  m->page_list);  /* ptrs to pages */  in genwqe_user_vmap()
    [all …]

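genwqe keeps the pinned pages and their bus addresses in parallel arrays: genwqe_map_pages() runs pci_map_page() over page_list, and genwqe_free_user_pages() dirties and releases each page when the buffer is torn down. A kernel-style sketch of that pairing (hypothetical toy_* names; unmapping and full error unwinding are omitted for brevity):

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/pci.h>

    /* Map each pinned user page for DMA; mirrors the shape of genwqe_map_pages(). */
    static int toy_map_pages(struct pci_dev *pci_dev, struct page **page_list,
                             int num_pages, dma_addr_t *dma_list)
    {
            int i;

            for (i = 0; i < num_pages; i++) {
                    dma_addr_t daddr = pci_map_page(pci_dev, page_list[i], 0,
                                                    PAGE_SIZE,
                                                    PCI_DMA_BIDIRECTIONAL);

                    if (pci_dma_mapping_error(pci_dev, daddr))
                            return -EIO;    /* unwinding of earlier mappings omitted */
                    dma_list[i] = daddr;
            }
            return 0;
    }

    /* Release pinned pages; dirty them first if the device may have written them. */
    static void toy_free_user_pages(struct page **page_list, int nr_pages, bool dirty)
    {
            int i;

            for (i = 0; i < nr_pages; i++) {
                    if (page_list[i]) {
                            if (dirty)
                                    set_page_dirty_lock(page_list[i]);
                            put_page(page_list[i]);
                    }
            }
    }
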
/Linux-v4.19/drivers/infiniband/hw/usnic/

D | usnic_uiom.c
    53  ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
    54  ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
    55  (void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
    89  for_each_sg(chunk->page_list, sg, chunk->nents, i) {  in usnic_uiom_put_pages()
    104  struct page **page_list;  in usnic_uiom_get_pages() local
    133  page_list = (struct page **) __get_free_page(GFP_KERNEL);  in usnic_uiom_get_pages()
    134  if (!page_list)  in usnic_uiom_get_pages()
    160  gup_flags, page_list, NULL);  in usnic_uiom_get_pages()
    179  sg_init_table(chunk->page_list, chunk->nents);  in usnic_uiom_get_pages()
    180  for_each_sg(chunk->page_list, sg, chunk->nents, i) {  in usnic_uiom_get_pages()
    [all …]

/Linux-v4.19/include/linux/

D | dm-io.h
    23  struct page_list {  struct
    24  struct page_list *next;  argument
    43  struct page_list *pl;  argument

/Linux-v4.19/arch/x86/kernel/

D | machine_kexec_32.c
    177  unsigned long page_list[PAGES_NR];  in machine_kexec() local
    215  page_list[PA_CONTROL_PAGE] = __pa(control_page);  in machine_kexec()
    216  page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;  in machine_kexec()
    217  page_list[PA_PGD] = __pa(image->arch.pgd);  in machine_kexec()
    220  page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)  in machine_kexec()
    243  (unsigned long)page_list,  in machine_kexec()

D | machine_kexec_64.c
    280  unsigned long page_list[PAGES_NR];  in machine_kexec() local
    311  page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);  in machine_kexec()
    312  page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;  in machine_kexec()
    313  page_list[PA_TABLE_PAGE] =  in machine_kexec()
    317  page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)  in machine_kexec()
    340  (unsigned long)page_list,  in machine_kexec()

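In both x86 machine_kexec() variants, page_list is not a linked list but a small array indexed by an enum (PA_CONTROL_PAGE, VA_CONTROL_PAGE, ...), holding the handful of addresses the relocation stub still needs once the old page tables are gone. A sketch of the shape of that hand-off, using hypothetical names (toy_relocate, TOY_PAGES_NR) rather than the real relocate_kernel interface:

    #include <linux/mm.h>
    #include <linux/types.h>

    enum {
            TOY_PA_CONTROL_PAGE,    /* physical address of the control page */
            TOY_VA_CONTROL_PAGE,    /* virtual address of the same page */
            TOY_PA_PGD,             /* physical address of the identity page tables */
            TOY_PAGES_NR
    };

    /* hypothetical relocation stub: runs from the control page */
    void toy_relocate(unsigned long indirection_page,
                      const unsigned long *page_list,
                      unsigned long start_address);

    static void toy_kexec(void *control_page, void *pgd,
                          unsigned long head, unsigned long start)
    {
            unsigned long page_list[TOY_PAGES_NR];

            page_list[TOY_PA_CONTROL_PAGE] = __pa(control_page);
            page_list[TOY_VA_CONTROL_PAGE] = (unsigned long)control_page;
            page_list[TOY_PA_PGD]          = __pa(pgd);

            toy_relocate(head, page_list, start);
    }
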
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx4/

D | mr.c
    692  int start_index, int npages, u64 *page_list)  in mlx4_write_mtt_chunk() argument
    709  mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);  in mlx4_write_mtt_chunk()
    718  int start_index, int npages, u64 *page_list)  in __mlx4_write_mtt() argument
    733  err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);  in __mlx4_write_mtt()
    738  page_list += chunk;  in __mlx4_write_mtt()
    746  int start_index, int npages, u64 *page_list)  in mlx4_write_mtt() argument
    769  inbox[i + 2] = cpu_to_be64(page_list[i] |  in mlx4_write_mtt()
    779  page_list += chunk;  in mlx4_write_mtt()
    785  return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);  in mlx4_write_mtt()
    792  u64 *page_list;  in mlx4_buf_write_mtt() local
    [all …]

D | alloc.c
    620  buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),  in mlx4_buf_alloc()
    622  if (!buf->page_list)  in mlx4_buf_alloc()
    626  buf->page_list[i].buf =  in mlx4_buf_alloc()
    629  if (!buf->page_list[i].buf)  in mlx4_buf_alloc()
    632  buf->page_list[i].map = t;  in mlx4_buf_alloc()
    654  if (buf->page_list[i].buf)  in mlx4_buf_free()
    657  buf->page_list[i].buf,  in mlx4_buf_free()
    658  buf->page_list[i].map);  in mlx4_buf_free()
    659  kfree(buf->page_list);  in mlx4_buf_free()

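mlx4_buf_alloc() (like hns_roce_buf_alloc() and mthca_create_eq() above) fills page_list with one DMA-coherent page per entry, keeping the CPU address in .buf and the bus address in .map, and mlx4_buf_free() releases each entry before freeing the array. A trimmed kernel-style sketch of that allocate/free pairing (simplified; the real code can also fall back to a single contiguous allocation):

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct toy_buf_page {
            void       *buf;    /* CPU address of one page */
            dma_addr_t  map;    /* bus address of the same page */
    };

    struct toy_buf {
            int                  nbufs;
            struct toy_buf_page *page_list;
    };

    static int toy_buf_alloc(struct device *dev, struct toy_buf *buf, int nbufs)
    {
            int i;

            buf->nbufs = nbufs;
            buf->page_list = kcalloc(nbufs, sizeof(*buf->page_list), GFP_KERNEL);
            if (!buf->page_list)
                    return -ENOMEM;

            for (i = 0; i < nbufs; i++) {
                    buf->page_list[i].buf = dma_alloc_coherent(dev, PAGE_SIZE,
                                                               &buf->page_list[i].map,
                                                               GFP_KERNEL);
                    if (!buf->page_list[i].buf)
                            goto err_free;
                    memset(buf->page_list[i].buf, 0, PAGE_SIZE);
            }
            return 0;

    err_free:
            while (--i >= 0)
                    dma_free_coherent(dev, PAGE_SIZE, buf->page_list[i].buf,
                                      buf->page_list[i].map);
            kfree(buf->page_list);
            return -ENOMEM;
    }

    static void toy_buf_free(struct device *dev, struct toy_buf *buf)
    {
            int i;

            for (i = 0; i < buf->nbufs; i++)
                    if (buf->page_list[i].buf)
                            dma_free_coherent(dev, PAGE_SIZE, buf->page_list[i].buf,
                                              buf->page_list[i].map);
            kfree(buf->page_list);
    }
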
D | eq.c
    118  return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % …  in get_eqe()
    988  eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),  in mlx4_create_eq()
    990  if (!eq->page_list)  in mlx4_create_eq()
    994  eq->page_list[i].buf = NULL;  in mlx4_create_eq()
    1006  eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->  in mlx4_create_eq()
    1010  if (!eq->page_list[i].buf)  in mlx4_create_eq()
    1014  eq->page_list[i].map = t;  in mlx4_create_eq()
    1016  memset(eq->page_list[i].buf, 0, PAGE_SIZE);  in mlx4_create_eq()
    1074  if (eq->page_list[i].buf)  in mlx4_create_eq()
    1076  eq->page_list[i].buf,  in mlx4_create_eq()
    [all …]

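get_eqe() converts an entry index into a byte offset and then picks page_list[offset / PAGE_SIZE], using the remainder as the offset within that page, so the event queue can span many non-contiguous pages. A small sketch of that address calculation with toy names (the real mlx4 code additionally biases the offset by MLX4_EQ_ENTRY_SIZE when eqe_factor is set):

    #include <linux/mm.h>
    #include <linux/types.h>

    struct toy_eq_page {
            void *buf;                      /* one PAGE_SIZE DMA-coherent chunk */
    };

    struct toy_eq {
            struct toy_eq_page *page_list;
            unsigned int        entry_size; /* bytes per event queue entry */
            unsigned int        nent;       /* number of entries, power of two */
    };

    /* Return a pointer to entry 'index', wrapping around the queue. */
    static void *toy_get_eqe(struct toy_eq *eq, u32 index)
    {
            unsigned long off = (index & (eq->nent - 1)) * eq->entry_size;

            return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
    }
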
/Linux-v4.19/drivers/md/

D | dm-kcopyd.c
    41  struct page_list *pages;
    73  static struct page_list zero_page_list;
    194  static struct page_list *alloc_pl(gfp_t gfp)  in alloc_pl()
    196  struct page_list *pl;  in alloc_pl()
    211  static void free_pl(struct page_list *pl)  in free_pl()
    221  static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)  in kcopyd_put_pages()
    223  struct page_list *next;  in kcopyd_put_pages()
    241  unsigned int nr, struct page_list **pages)  in kcopyd_get_pages()
    243  struct page_list *pl;  in kcopyd_get_pages()
    272  static void drop_pages(struct page_list *pl)  in drop_pages()
    [all …]

/Linux-v4.19/drivers/gpu/drm/ttm/

D | ttm_page_alloc_dma.c
    123  struct list_head page_list;  member
    362  list_del(&d_page->page_list);  in ttm_dma_page_put()
    372  list_for_each_entry_safe(d_page, tmp, d_pages, page_list)  in ttm_dma_pages_put()
    384  list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {  in ttm_dma_pages_put()
    385  list_del(&d_page->page_list);  in ttm_dma_pages_put()
    438  page_list) {  in ttm_dma_page_pool_free()
    443  list_move(&dma_p->page_list, &d_pages);  in ttm_dma_page_pool_free()
    681  list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {  in ttm_dma_handle_caching_state_failure()
    685  list_del(&d_page->page_list);  in ttm_dma_handle_caching_state_failure()
    747  list_add(&dma_p->page_list, d_pages);  in ttm_dma_pool_alloc_new_pages()
    [all …]

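The TTM DMA pool keeps its pages on list_heads and tears batches down with list_for_each_entry_safe(), which caches the next node so list_del() on the current entry is safe mid-walk. A generic sketch of that unlink-and-free walk (kernel-style; toy_dma_page is a hypothetical stand-in for dma_page):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct toy_dma_page {
            struct list_head page_list;     /* links the page into a pool or batch */
            void *vaddr;
    };

    /* Unlink and free every page on 'pages'; _safe() allows list_del() mid-walk. */
    static void toy_pages_put(struct list_head *pages)
    {
            struct toy_dma_page *d_page, *tmp;

            list_for_each_entry_safe(d_page, tmp, pages, page_list) {
                    list_del(&d_page->page_list);
                    kfree(d_page);
            }
    }
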
/Linux-v4.19/arch/arm/kernel/

D | machine_kexec.c
    160  unsigned long page_list, reboot_entry_phys;  in machine_kexec() local
    172  page_list = image->head & PAGE_MASK;  in machine_kexec()
    179  kexec_indirection_page = page_list;  in machine_kexec()

/Linux-v4.19/include/rdma/

D | ib_fmr_pool.h
    76  u64 page_list[0];  member
    87  u64 *page_list,

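In ib_fmr_pool.h, struct ib_pool_fmr ends with "u64 page_list[0]": a zero-length (flexible) trailing array, so a single allocation holds the header plus room for max_pages page addresses. A minimal sketch of allocating such a structure (toy_pool_fmr and toy_fmr_alloc are hypothetical):

    #include <linux/slab.h>
    #include <linux/types.h>

    struct toy_pool_fmr {
            int remap_count;
            int page_list_len;
            u64 page_list[0];       /* trailing storage sized at allocation time */
    };

    static struct toy_pool_fmr *toy_fmr_alloc(int max_pages)
    {
            struct toy_pool_fmr *fmr;

            /* header plus max_pages u64 page addresses in one allocation */
            fmr = kmalloc(sizeof(*fmr) + max_pages * sizeof(fmr->page_list[0]),
                          GFP_KERNEL);
            if (!fmr)
                    return NULL;

            fmr->remap_count = 0;
            fmr->page_list_len = 0;
            return fmr;
    }
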
/Linux-v4.19/arch/sh/kernel/

D | machine_kexec.c
    75  unsigned long page_list;  in machine_kexec() local
    105  page_list = image->head;  in machine_kexec()
    122  (*rnk)(page_list, reboot_code_buffer,  in machine_kexec()

/Linux-v4.19/drivers/staging/comedi/

D | comedi_buf.c
    29  if (bm->page_list) {  in comedi_buf_map_kref_release()
    31  buf = &bm->page_list[i];  in comedi_buf_map_kref_release()
    45  vfree(bm->page_list);  in comedi_buf_map_kref_release()
    102  bm->page_list = vzalloc(sizeof(*buf) * n_pages);  in __comedi_buf_alloc()
    103  if (bm->page_list)  in __comedi_buf_alloc()
    110  buf = &bm->page_list[i];  in __comedi_buf_alloc()
    165  void *b = bm->page_list[pg].virt_addr + pgoff;  in comedi_buf_map_access()

/Linux-v4.19/Documentation/device-mapper/

D | dm-io.txt
    24  struct page_list {
    25  struct page_list *next;
    30  struct page_list *pl, unsigned int offset,
    33  struct page_list *pl, unsigned int offset,

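dm-io (and dm-kcopyd above) describes memory for I/O as a singly linked chain of struct page_list nodes, each carrying one struct page. A small kernel-style sketch of building and tearing down such a chain; toy_alloc_pl() and toy_drop_pages() are simplified stand-ins for dm-kcopyd's alloc_pl() and drop_pages():

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    struct toy_page_list {
            struct toy_page_list *next;
            struct page          *page;
    };

    /* allocate one chain node plus its backing page */
    static struct toy_page_list *toy_alloc_pl(gfp_t gfp)
    {
            struct toy_page_list *pl;

            pl = kmalloc(sizeof(*pl), gfp);
            if (!pl)
                    return NULL;

            pl->page = alloc_page(gfp);
            if (!pl->page) {
                    kfree(pl);
                    return NULL;
            }

            pl->next = NULL;
            return pl;
    }

    /* walk the chain, freeing each node and its page */
    static void toy_drop_pages(struct toy_page_list *pl)
    {
            while (pl) {
                    struct toy_page_list *next = pl->next;

                    __free_page(pl->page);
                    kfree(pl);
                    pl = next;
            }
    }
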
/Linux-v4.19/include/uapi/linux/

D | vbox_vmmdev_types.h
    175  } page_list;  member
    198  } page_list;  member
