/Linux-v5.10/include/rdma/ |
D | ib_umem.h |
    50  static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,  in __rdma_umem_block_iter_start() argument
    54  __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);  in __rdma_umem_block_iter_start()
    68  #define rdma_umem_for_each_dma_block(umem, biter, pgsz) \  argument
    69  for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
    70  __rdma_block_iter_next(biter);)
|
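Note: the hits above define the umem-level block iterator, and the driver hits further down all consume it the same way. A minimal usage sketch, assuming an already pinned struct ib_umem and a caller-provided array large enough for every block (fill_dma_page_list and its parameters are illustrative names, not taken from any file in this listing):

    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    static void fill_dma_page_list(struct ib_umem *umem, u64 *page_list,
                                   unsigned long pgsz)
    {
            struct ib_block_iter biter;
            int i = 0;

            /* visit each pgsz-sized DMA block covered by the umem */
            rdma_umem_for_each_dma_block(umem, &biter, pgsz)
                    page_list[i++] = rdma_block_iter_dma_address(&biter);
    }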
D | ib_verbs.h |
    2795  void __rdma_block_iter_start(struct ib_block_iter *biter,
    2799  bool __rdma_block_iter_next(struct ib_block_iter *biter);
    2807  rdma_block_iter_dma_address(struct ib_block_iter *biter)  in rdma_block_iter_dma_address() argument
    2809  return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);  in rdma_block_iter_dma_address()
    2822  #define rdma_for_each_block(sglist, biter, nents, pgsz) \  argument
    2823  for (__rdma_block_iter_start(biter, sglist, nents, \
    2825  __rdma_block_iter_next(biter);)
|
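These are the scatterlist-level declarations behind the umem wrapper; the mask at line 2809 strips the low __pg_bit bits, so every address the iterator reports is aligned to the chosen block size. A sketch of walking a DMA-mapped scatterlist directly, as efa_verbs.c does further down (walk_sgl_blocks and its parameters are illustrative):

    #include <linux/scatterlist.h>
    #include <rdma/ib_verbs.h>

    static void walk_sgl_blocks(struct scatterlist *sglist, unsigned int nents,
                                unsigned long pgsz, u64 *out)
    {
            struct ib_block_iter biter;
            int i = 0;

            /* nents is the number of DMA-mapped entries in sglist */
            rdma_for_each_block(sglist, &biter, nents, pgsz)
                    out[i++] = rdma_block_iter_dma_address(&biter);
    }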
/Linux-v5.10/drivers/infiniband/core/ |
D | verbs.c |
    2893  void __rdma_block_iter_start(struct ib_block_iter *biter,  in __rdma_block_iter_start() argument
    2897  memset(biter, 0, sizeof(struct ib_block_iter));  in __rdma_block_iter_start()
    2898  biter->__sg = sglist;  in __rdma_block_iter_start()
    2899  biter->__sg_nents = nents;  in __rdma_block_iter_start()
    2902  biter->__pg_bit = __fls(pgsz);  in __rdma_block_iter_start()
    2906  bool __rdma_block_iter_next(struct ib_block_iter *biter)  in __rdma_block_iter_next() argument
    2910  if (!biter->__sg_nents || !biter->__sg)  in __rdma_block_iter_next()
    2913  biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;  in __rdma_block_iter_next()
    2914  block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);  in __rdma_block_iter_next()
    2915  biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;  in __rdma_block_iter_next()
    [all …]
|
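The stepping logic is visible at lines 2913-2915: each successful __rdma_block_iter_next() records the current DMA address and then advances __sg_advance by BIT_ULL(__pg_bit) - block_offset, which lands exactly on the next block boundary; the remainder elided as "[all …]" handles moving on to the next scatterlist entry. A small illustrative helper plus worked values, assuming 4 KiB blocks (__pg_bit = 12) and that the SGE is not yet exhausted:

    #include <linux/bits.h>
    #include <linux/types.h>

    /* same mask rdma_block_iter_dma_address() applies (ib_verbs.h:2809) */
    static inline u64 block_aligned(u64 dma_addr, unsigned int pg_bit)
    {
            return dma_addr & ~(BIT_ULL(pg_bit) - 1);
    }

    /*
     * Example: one SGE with sg_dma_address() = 0x10000200 and
     * sg_dma_len() = 0x3000. Successive iterations see
     * __dma_addr = 0x10000200, 0x10001000, 0x10002000, 0x10003000,
     * so block_aligned() reports 0x10000000, 0x10001000, 0x10002000,
     * 0x10003000: one entry per 4 KiB block the SGE touches.
     */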
/Linux-v5.10/drivers/infiniband/hw/hns/ |
D | hns_roce_alloc.c |
    259  struct ib_block_iter biter;  in hns_roce_get_umem_bufs() local
    271  rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {  in hns_roce_get_umem_bufs()
    272  addr = rdma_block_iter_dma_address(&biter);  in hns_roce_get_umem_bufs()
|
/Linux-v5.10/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_misc.c |
    185  struct ib_block_iter biter;  in pvrdma_page_dir_insert_umem() local
    192  rdma_umem_for_each_dma_block (umem, &biter, PAGE_SIZE) {  in pvrdma_page_dir_insert_umem()
    194  pdir, i, rdma_block_iter_dma_address(&biter));  in pvrdma_page_dir_insert_umem()
|
/Linux-v5.10/drivers/dma/ |
D | fsl-edma-common.c |
    284  * le16_to_cpu(edesc->tcd[i].vtcd->biter);  in fsl_edma_desc_residue()
    297  * le16_to_cpu(edesc->tcd[i].vtcd->biter);  in fsl_edma_desc_residue()
    370  edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);  in fsl_edma_set_tcd_regs()
    382  u16 biter, u16 doff, u32 dlast_sga, bool major_int,  in fsl_edma_fill_tcd() argument
    408  tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));  in fsl_edma_fill_tcd()
|
D | mpc512x_dma.c |
    165  u32 biter:9; /* Beginning "major" iteration count */  member
    663  tcd->biter = 1;  in mpc_dma_prep_memcpy()
    770  tcd->biter = 1;  in mpc_dma_prep_slave_sg()
    784  tcd->biter = iter & 0x1ff;  in mpc_dma_prep_slave_sg()
    786  tcd->citer = tcd->biter;  in mpc_dma_prep_slave_sg()
|
D | fsl-edma-common.h |
    81  __le16 biter;  member
|
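The /drivers/dma/ hits are unrelated to the RDMA block iterator: here biter is the eDMA transfer control descriptor's beginning "major" iteration count (see the mpc512x comment at line 165). It is programmed equal to citer at setup so the hardware can count citer down, and the residue calculation at lines 284/297 multiplies by biter (roughly nbytes times the iteration count). A minimal sketch of that setup, assuming a simplified, hypothetical TCD layout; example_tcd and example_fill_tcd are not from the drivers above, and fsl-edma additionally stores the field little-endian via cpu_to_le16(EDMA_TCD_BITER_BITER(...)):

    #include <linux/types.h>

    /*
     * Hypothetical, simplified TCD for illustration only; the real
     * layouts are in fsl-edma-common.h (__le16 biter) and
     * mpc512x_dma.c (u32 biter:9) above.
     */
    struct example_tcd {
            u32 nbytes;     /* bytes transferred per minor loop */
            u16 citer;      /* current major iteration count */
            u16 biter;      /* beginning major iteration count */
    };

    static void example_fill_tcd(struct example_tcd *tcd, u32 len, u32 nbytes)
    {
            /*
             * Major loop count: how many minor loops make up the
             * transfer (assumes len is a multiple of nbytes).
             */
            u16 iter = len / nbytes;

            tcd->nbytes = nbytes;
            tcd->biter = iter;
            /* start citer equal to biter, as mpc_dma_prep_slave_sg() does */
            tcd->citer = tcd->biter;
    }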
/Linux-v5.10/drivers/infiniband/hw/bnxt_re/ |
D | qplib_res.c |
    93  struct ib_block_iter biter;  in bnxt_qplib_fill_user_dma_pages() local
    96  rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {  in bnxt_qplib_fill_user_dma_pages()
    97  pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);  in bnxt_qplib_fill_user_dma_pages()
|
D | ib_verbs.c |
    3778  struct ib_block_iter biter;  in fill_umem_pbl_tbl() local
    3780  rdma_umem_for_each_dma_block(umem, &biter, page_size)  in fill_umem_pbl_tbl()
    3781  *pbl_tbl++ = rdma_block_iter_dma_address(&biter);  in fill_umem_pbl_tbl()
|
/Linux-v5.10/drivers/infiniband/hw/efa/ |
D | efa_verbs.c |
    1150  struct ib_block_iter biter;  in umem_to_page_list() local
    1156  rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))  in umem_to_page_list()
    1157  page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);  in umem_to_page_list()
    1199  struct ib_block_iter biter;  in pbl_chunk_list_create() local
    1233  rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,  in pbl_chunk_list_create()
    1236  rdma_block_iter_dma_address(&biter);  in pbl_chunk_list_create()
|
/Linux-v5.10/drivers/infiniband/hw/cxgb4/ |
D | mem.c |
    513  struct ib_block_iter biter;  in c4iw_reg_user_mr() local
    564  rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {  in c4iw_reg_user_mr()
    565  pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));  in c4iw_reg_user_mr()
|
/Linux-v5.10/drivers/infiniband/hw/mthca/ |
D | mthca_provider.c |
    856  struct ib_block_iter biter;  in mthca_reg_user_mr() local
    905  rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {  in mthca_reg_user_mr()
    906  pages[i++] = rdma_block_iter_dma_address(&biter);  in mthca_reg_user_mr()
|
/Linux-v5.10/drivers/infiniband/hw/i40iw/ |
D | i40iw_verbs.c |
    1304  struct ib_block_iter biter;  in i40iw_copy_user_pgaddrs() local
    1312  rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {  in i40iw_copy_user_pgaddrs()
    1313  *pbl = rdma_block_iter_dma_address(&biter);  in i40iw_copy_user_pgaddrs()
|
/Linux-v5.10/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c |
    817  struct ib_block_iter biter;  in build_user_pbes() local
    828  rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {  in build_user_pbes()
    830  pg_addr = rdma_block_iter_dma_address(&biter);  in build_user_pbes()
|
/Linux-v5.10/drivers/infiniband/hw/qedr/ |
D | verbs.c |
    625  struct ib_block_iter biter;  in qedr_populate_pbls() local
    647  rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {  in qedr_populate_pbls()
    648  u64 pg_addr = rdma_block_iter_dma_address(&biter);  in qedr_populate_pbls()
|