/Linux-v5.4/drivers/infiniband/hw/mthca/

mthca_allocator.c
  201  u64 *dma_list = NULL;  in mthca_buf_alloc() local
  222  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
  224  if (!dma_list)  in mthca_buf_alloc()
  228  dma_list[i] = t + i * (1 << shift);  in mthca_buf_alloc()
  234  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
  236  if (!dma_list)  in mthca_buf_alloc()
  255  dma_list[i] = t;  in mthca_buf_alloc()
  263  dma_list, shift, npages,  in mthca_buf_alloc()
  271  kfree(dma_list);  in mthca_buf_alloc()
  279  kfree(dma_list);  in mthca_buf_alloc()

mthca_eq.c
  471  u64 *dma_list = NULL;  in mthca_create_eq() local
  490  dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);  in mthca_create_eq()
  491  if (!dma_list)  in mthca_create_eq()
  505  dma_list[i] = t;  in mthca_create_eq()
  519  dma_list, PAGE_SHIFT, npages,  in mthca_create_eq()
  551  kfree(dma_list);  in mthca_create_eq()
  582  kfree(dma_list);  in mthca_create_eq()
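
Both mthca call sites follow the same shape: kmalloc_array() a temporary u64 list, record one bus address per page, hand the list to the firmware MTT-write path, and kfree() it on both the success and error exits. A minimal sketch of that shape, assuming a coherent allocation; build_page_dma_list() and its parameters are illustrative, not the mthca API:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper mirroring the mthca_buf_alloc() pattern: allocate
 * a coherent buffer and record one bus address per (1 << shift)-byte
 * page.  The caller passes the list on and kfree()s it afterwards. */
static u64 *build_page_dma_list(struct device *dev, int npages, int shift,
				void **buf, dma_addr_t *handle)
{
	u64 *dma_list;
	dma_addr_t t;
	int i;

	dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		return NULL;

	*buf = dma_alloc_coherent(dev, (size_t)npages << shift, &t, GFP_KERNEL);
	if (!*buf) {
		kfree(dma_list);	/* freed on the error path, as in mthca */
		return NULL;
	}
	*handle = t;

	/* one entry per page, matching dma_list[i] = t + i * (1 << shift) */
	for (i = 0; i < npages; ++i)
		dma_list[i] = t + i * (1ULL << shift);

	return dma_list;
}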

/Linux-v5.4/drivers/misc/genwqe/

card_utils.c
  226  static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,  in genwqe_unmap_pages() argument
  232  for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {  in genwqe_unmap_pages()
  233  pci_unmap_page(pci_dev, dma_list[i],  in genwqe_unmap_pages()
  235  dma_list[i] = 0x0;  in genwqe_unmap_pages()
  241  dma_addr_t *dma_list)  in genwqe_map_pages() argument
  250  dma_list[i] = 0x0;  in genwqe_map_pages()
  263  dma_list[i] = daddr;  in genwqe_map_pages()
  268  genwqe_unmap_pages(cd, dma_list, num_pages);  in genwqe_map_pages()
  372  dma_addr_t *dma_list)  in genwqe_setup_sgl() argument
  407  daddr = dma_list[p] + map_offs;  in genwqe_setup_sgl()
  [all …]

card_base.h
  171  dma_addr_t *dma_list;  /* list of dma addresses per page */  member
  372  dma_addr_t *dma_list);

card_dev.c
  956  &m->dma_list[page_offs]);  in ddcb_cmd_fixups()
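
genwqe keeps one dma_addr_t per mapped page, uses 0 as a "not mapped" sentinel, and unwinds partial mappings through the same unmap loop that tears down a full mapping. A hedged sketch of that map/unmap pattern with illustrative names (the real code is genwqe_map_pages()/genwqe_unmap_pages() in card_utils.c):

#include <linux/pci.h>
#include <linux/errno.h>

/* Map pages one by one, recording each bus address; on failure, walk
 * the list until the first 0 sentinel and undo what was mapped. */
static int map_pages_sketch(struct pci_dev *pci_dev, struct page **pages,
			    int num_pages, dma_addr_t *dma_list)
{
	int i;

	for (i = 0; i < num_pages; i++)
		dma_list[i] = 0x0;	/* 0 marks "not mapped" */

	for (i = 0; i < num_pages; i++) {
		dma_addr_t daddr = pci_map_page(pci_dev, pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(pci_dev, daddr))
			goto err;
		dma_list[i] = daddr;
	}
	return 0;

err:	/* the 0 sentinel stops this loop after the last mapped page */
	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
		pci_unmap_page(pci_dev, dma_list[i], PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		dma_list[i] = 0x0;
	}
	return -EIO;
}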

/Linux-v5.4/arch/powerpc/platforms/cell/spufs/

spu_utils.h
  38  struct dma_list_elem dma_list[15] __attribute__ ((aligned(8)));  variable
  92  dma_list[i].size = 16384;  in build_dma_list()
  93  dma_list[i].ea_low = ea_low;  in build_dma_list()

spu_save.c
  50  unsigned int list = (unsigned int)&dma_list[0];  in save_upper_240kb()
  51  unsigned int size = sizeof(dma_list);  in save_upper_240kb()

spu_restore.c
  50  unsigned int list = (unsigned int)&dma_list[0];  in restore_upper_240kb()
  51  unsigned int size = sizeof(dma_list);  in restore_upper_240kb()
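
Here dma_list is a fixed 15-element MFC list (15 x 16 KB = the upper 240 KB of SPU local store) shared by the save and restore paths. A sketch reconstructing build_dma_list() from the fragments above; the two-field element layout is an assumption consistent with the spu_utils.h hits:

/* Per spu_utils.h: one element per 16 KB chunk, 8-byte aligned. */
struct dma_list_elem {
	unsigned int size;
	unsigned int ea_low;
};

struct dma_list_elem dma_list[15] __attribute__ ((aligned(8)));

static void build_dma_list_sketch(unsigned int ea_low)
{
	int i;

	/* 15 entries of 16384 bytes cover the upper 240 KB in one
	 * MFC list DMA, as used by save_upper_240kb()/restore_upper_240kb(). */
	for (i = 0; i < 15; i++) {
		dma_list[i].size = 16384;
		dma_list[i].ea_low = ea_low;
		ea_low += 16384;
	}
}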

/Linux-v5.4/drivers/infiniband/core/

umem_odp.c
  247  umem_odp->dma_list = kvcalloc(  in ib_init_umem_odp()
  248  pages, sizeof(*umem_odp->dma_list), GFP_KERNEL);  in ib_init_umem_odp()
  249  if (!umem_odp->dma_list) {  in ib_init_umem_odp()
  277  kvfree(umem_odp->dma_list);  in ib_init_umem_odp()
  458  kvfree(umem_odp->dma_list);  in ib_umem_odp_release()
  523  if (!(umem_odp->dma_list[page_index])) {  in ib_umem_odp_map_dma_single_page()
  531  umem_odp->dma_list[page_index] = dma_addr | access_mask;  in ib_umem_odp_map_dma_single_page()
  535  umem_odp->dma_list[page_index] |= access_mask;  in ib_umem_odp_map_dma_single_page()
  737  dma_addr_t dma = umem_odp->dma_list[idx];  in ib_umem_odp_unmap_dma_pages()
  759  umem_odp->dma_list[idx] = 0;  in ib_umem_odp_unmap_dma_pages()
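
ODP's dma_list holds one dma_addr_t per page and encodes access flags in the low bits, which is safe because mapped addresses are page aligned; a zero entry means "not mapped". A small sketch of that encoding with illustrative flag names (the real masks, e.g. ODP_READ_ALLOWED_BIT, live alongside ib_umem_odp.h):

#include <linux/types.h>
#include <linux/bits.h>

/* Illustrative flag bits stored in the page-offset bits of each entry. */
#define ODP_READ_BIT	BIT(0)
#define ODP_WRITE_BIT	BIT(1)
#define ODP_ADDR_MASK	(~(u64)(ODP_READ_BIT | ODP_WRITE_BIT))

static void odp_set_entry(dma_addr_t *dma_list, size_t page_index,
			  dma_addr_t dma_addr, u64 access_mask)
{
	if (!dma_list[page_index])
		dma_list[page_index] = dma_addr | access_mask; /* first fault */
	else
		dma_list[page_index] |= access_mask; /* e.g. read -> write upgrade */
}

static dma_addr_t odp_entry_addr(dma_addr_t entry)
{
	return entry & ODP_ADDR_MASK;	/* strip flag bits before unmapping */
}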

/Linux-v5.4/drivers/vme/

vme.c
  944  struct vme_dma_list *dma_list;  in vme_new_dma_list() local
  951  dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);  in vme_new_dma_list()
  952  if (!dma_list)  in vme_new_dma_list()
  955  INIT_LIST_HEAD(&dma_list->entries);  in vme_new_dma_list()
  956  dma_list->parent = list_entry(resource->entry,  in vme_new_dma_list()
  959  mutex_init(&dma_list->mtx);  in vme_new_dma_list()
  961  return dma_list;  in vme_new_dma_list()
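
vme_new_dma_list() is part of the public VME DMA API: a driver requests a DMA resource, builds a list on it, and frees both when done. A hedged usage sketch (error handling trimmed; the route flag is just an example):

#include <linux/vme.h>
#include <linux/errno.h>

static int vme_dma_example(struct vme_dev *vdev)
{
	struct vme_resource *res;
	struct vme_dma_list *list;
	int ret = 0;

	res = vme_dma_request(vdev, VME_DMA_VME_TO_MEM);	/* example route */
	if (!res)
		return -ENODEV;

	list = vme_new_dma_list(res);	/* kmalloc + INIT_LIST_HEAD, as shown above */
	if (!list) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... vme_dma_list_add() / vme_dma_list_exec() would go here ... */

	vme_dma_list_free(list);
out:
	vme_dma_free(res);
	return ret;
}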

/Linux-v5.4/drivers/block/rsxx/

dma.c
  672  struct list_head dma_list[RSXX_MAX_TARGETS];  in rsxx_dma_queue_bio() local
  690  INIT_LIST_HEAD(&dma_list[i]);  in rsxx_dma_queue_bio()
  701  st = rsxx_queue_discard(card, &dma_list[tgt], laddr,  in rsxx_dma_queue_bio()
  723  st = rsxx_queue_dma(card, &dma_list[tgt],  in rsxx_dma_queue_bio()
  741  if (!list_empty(&dma_list[i])) {  in rsxx_dma_queue_bio()
  744  list_splice_tail(&dma_list[i], &card->ctrl[i].queue);  in rsxx_dma_queue_bio()
  756  rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],  in rsxx_dma_queue_bio()
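
rsxx stages DMAs on an on-stack array of per-target list_heads and then splices each non-empty list into the matching controller queue, so the shared queue lock is held only for the splice. A sketch of that fan-out pattern; the struct, names, and lock flavor are illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>

#define NR_TARGETS 8	/* stand-in for RSXX_MAX_TARGETS */

struct target_queue {
	spinlock_t lock;
	struct list_head queue;
};

/* Splice each non-empty staged list into its target's shared queue. */
static void splice_staged(struct target_queue *tgts,
			  struct list_head *staged)
{
	int i;

	for (i = 0; i < NR_TARGETS; i++) {
		if (list_empty(&staged[i]))
			continue;
		spin_lock_bh(&tgts[i].lock);
		list_splice_tail(&staged[i], &tgts[i].queue);
		spin_unlock_bh(&tgts[i].lock);
	}
}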

/Linux-v5.4/drivers/vfio/

vfio_iommu_type1.c
  68  struct rb_root dma_list;  member
  137  struct rb_node *node = iommu->dma_list.rb_node;  in vfio_find_dma()
  155  struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;  in vfio_link_dma()
  169  rb_insert_color(&new->node, &iommu->dma_list);  in vfio_link_dma()
  174  rb_erase(&old->node, &iommu->dma_list);  in vfio_unlink_dma()
  1197  n = rb_first(&iommu->dma_list);  in vfio_iommu_replay()
  1858  while ((node = rb_first(&iommu->dma_list)))  in vfio_iommu_unmap_unpin_all()
  1866  n = rb_first(&iommu->dma_list);  in vfio_iommu_unmap_unpin_reaccount()
  1889  n = rb_first(&iommu->dma_list);  in vfio_sanity_check_pfn_list()
  2081  iommu->dma_list = RB_ROOT;  in vfio_iommu_type1_open()
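
Unlike the drivers above, vfio's dma_list is not an array at all: it is an rb-tree of IOVA ranges, initialized to RB_ROOT and searched and linked by interval. A simplified sketch of the lookup and insert logic mirrored from vfio_find_dma()/vfio_link_dma(); the struct is illustrative:

#include <linux/rbtree.h>
#include <linux/types.h>

struct dma_mapping {
	struct rb_node node;
	dma_addr_t iova;
	size_t size;
};

/* Return any mapping whose [iova, iova+size) range overlaps the query. */
static struct dma_mapping *find_dma(struct rb_root *root,
				    dma_addr_t start, size_t size)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct dma_mapping *dma =
			rb_entry(node, struct dma_mapping, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;	/* ranges overlap */
	}
	return NULL;
}

static void link_dma(struct rb_root *root, struct dma_mapping *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct dma_mapping *dma =
			rb_entry(*link, struct dma_mapping, node);

		parent = *link;
		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);
}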

/Linux-v5.4/include/rdma/

ib_umem_odp.h
  56  dma_addr_t *dma_list;  member

/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/

eq.c
  976  u64 *dma_list = NULL;  in mlx4_create_eq() local
  997  dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);  in mlx4_create_eq()
  998  if (!dma_list)  in mlx4_create_eq()
  1014  dma_list[i] = t;  in mlx4_create_eq()
  1032  err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);  in mlx4_create_eq()
  1052  kfree(dma_list);  in mlx4_create_eq()
  1082  kfree(dma_list);  in mlx4_create_eq()
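
mlx4_create_eq() mirrors the mthca pattern but shows the consumption step explicitly: the collected page addresses are written into the MTT via mlx4_write_mtt(), and the temporary list is freed on every exit path. A minimal sketch of that step; the wrapper itself is illustrative:

#include <linux/slab.h>
#include <linux/mlx4/device.h>

/* Write the per-page bus addresses into the MTT, then drop the
 * temporary list, as eq.c does on both success and failure. */
static int write_eq_pages(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			  int npages, u64 *dma_list)
{
	int err;

	err = mlx4_write_mtt(dev, mtt, 0, npages, dma_list);
	kfree(dma_list);
	return err;
}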

/Linux-v5.4/drivers/infiniband/hw/mlx5/

mem.c
  147  to_ib_umem_odp(umem)->dma_list[offset + i];  in __mlx5_ib_populate_pas()

odp.c
  288  if (umem_odp->dma_list[idx] &  in mlx5_ib_invalidate_range()

/Linux-v5.4/sound/pci/

maestro3.c
  745  struct m3_list dma_list;  member
  1743  s->index_list[2] = &chip->dma_list;  in snd_m3_substream_open()
  2170  chip->dma_list.curlen = 0;  in snd_m3_assp_init()
  2171  chip->dma_list.mem_addr = KDATA_DMA_XFER0;  in snd_m3_assp_init()
  2172  chip->dma_list.max = MAX_VIRTUAL_DMA_CHANNELS;  in snd_m3_assp_init()