/Linux-v5.4/net/xdp/ |
D | xdp_umem.c |
    26  void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)  in xdp_add_sk_umem() argument
    33  spin_lock_irqsave(&umem->xsk_list_lock, flags);  in xdp_add_sk_umem()
    34  list_add_rcu(&xs->list, &umem->xsk_list);  in xdp_add_sk_umem()
    35  spin_unlock_irqrestore(&umem->xsk_list_lock, flags);  in xdp_add_sk_umem()
    38  void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)  in xdp_del_sk_umem() argument
    45  spin_lock_irqsave(&umem->xsk_list_lock, flags);  in xdp_del_sk_umem()
    47  spin_unlock_irqrestore(&umem->xsk_list_lock, flags);  in xdp_del_sk_umem()
    54  static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,  in xdp_reg_umem_at_qid() argument
    63  dev->_rx[queue_id].umem = umem;  in xdp_reg_umem_at_qid()
    65  dev->_tx[queue_id].umem = umem;  in xdp_reg_umem_at_qid()
    [all …]
|
D | xsk.c |
    36  return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&  in xsk_is_setup_for_bpf_map()
    37  READ_ONCE(xs->umem->fq);  in xsk_is_setup_for_bpf_map()
    40  bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)  in xsk_umem_has_addrs() argument
    42  return xskq_has_addrs(umem->fq, cnt);  in xsk_umem_has_addrs()
    46  u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)  in xsk_umem_peek_addr() argument
    48  return xskq_peek_addr(umem->fq, addr, umem);  in xsk_umem_peek_addr()
    52  void xsk_umem_discard_addr(struct xdp_umem *umem)  in xsk_umem_discard_addr() argument
    54  xskq_discard_addr(umem->fq);  in xsk_umem_discard_addr()
    58  void xsk_set_rx_need_wakeup(struct xdp_umem *umem)  in xsk_set_rx_need_wakeup() argument
    60  if (umem->need_wakeup & XDP_WAKEUP_RX)  in xsk_set_rx_need_wakeup()
    [all …]
|
D | xsk_diag.c |
    49  struct xdp_umem *umem = xs->umem;  in xsk_diag_put_umem() local
    53  if (!umem)  in xsk_diag_put_umem()
    56  du.id = umem->id;  in xsk_diag_put_umem()
    57  du.size = umem->size;  in xsk_diag_put_umem()
    58  du.num_pages = umem->npgs;  in xsk_diag_put_umem()
    59  du.chunk_size = umem->chunk_size_nohr + umem->headroom;  in xsk_diag_put_umem()
    60  du.headroom = umem->headroom;  in xsk_diag_put_umem()
    61  du.ifindex = umem->dev ? umem->dev->ifindex : 0;  in xsk_diag_put_umem()
    62  du.queue_id = umem->queue_id;  in xsk_diag_put_umem()
    64  if (umem->zc)  in xsk_diag_put_umem()
    [all …]
|
D | xdp_umem.h |
    11  int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
    13  void xdp_umem_clear_dev(struct xdp_umem *umem);
    14  bool xdp_umem_validate_queues(struct xdp_umem *umem);
    15  void xdp_get_umem(struct xdp_umem *umem);
    16  void xdp_put_umem(struct xdp_umem *umem);
    17  void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
    18  void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
|
D | xsk_queue.h |
    137  static inline bool xskq_crosses_non_contig_pg(struct xdp_umem *umem, u64 addr,  in xskq_crosses_non_contig_pg() argument
    142  (unsigned long)umem->pages[(addr >> PAGE_SHIFT)].addr &  in xskq_crosses_non_contig_pg()
    160  struct xdp_umem *umem)  in xskq_is_valid_addr_unaligned() argument
    166  xskq_crosses_non_contig_pg(umem, addr, length)) {  in xskq_is_valid_addr_unaligned()
    175  struct xdp_umem *umem)  in xskq_validate_addr() argument
    183  if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {  in xskq_validate_addr()
    185  umem->chunk_size_nohr,  in xskq_validate_addr()
    186  umem))  in xskq_validate_addr()
    202  struct xdp_umem *umem)  in xskq_peek_addr() argument
    213  return xskq_validate_addr(q, addr, umem);  in xskq_peek_addr()
    [all …]
|
D | xsk_queue.c |
    87  struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,  in xsk_reuseq_swap() argument
    90  struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;  in xsk_reuseq_swap()
    93  umem->fq_reuse = newq;  in xsk_reuseq_swap()
    104  umem->fq_reuse = newq;  in xsk_reuseq_swap()
    115  void xsk_reuseq_destroy(struct xdp_umem *umem)  in xsk_reuseq_destroy() argument
    117  xsk_reuseq_free(umem->fq_reuse);  in xsk_reuseq_destroy()
    118  umem->fq_reuse = NULL;  in xsk_reuseq_destroy()
|
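xsk_reuseq_swap() and xsk_reuseq_destroy() above manage the per-umem frame reuse queue. A minimal sketch of how a zero-copy driver installs one at setup time — the same pattern the ixgbe/i40e entries later in these results use; my_install_reuseq() and rx_ring_size are placeholders, not kernel symbols:

```c
#include <net/xdp_sock.h>

/* Sketch: size the reuse queue to the RX ring and install it on the umem.
 * xsk_reuseq_swap() returns the previously installed queue, which must be
 * freed by the caller. */
static int my_install_reuseq(struct xdp_umem *umem, u32 rx_ring_size)
{
	struct xdp_umem_fq_reuse *reuseq;

	reuseq = xsk_reuseq_prepare(rx_ring_size);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
	return 0;
}
```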
/Linux-v5.4/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/ |
D | umem.c |
    40  struct nvkm_umem *umem;  in nvkm_umem_search() local
    46  list_for_each_entry(umem, &master->umem, head) {  in nvkm_umem_search()
    47  if (umem->object.object == handle) {  in nvkm_umem_search()
    48  memory = nvkm_memory_ref(umem->memory);  in nvkm_umem_search()
    55  umem = nvkm_umem(object);  in nvkm_umem_search()
    56  if (!umem->priv || client->super)  in nvkm_umem_search()
    57  memory = nvkm_memory_ref(umem->memory);  in nvkm_umem_search()
    66  struct nvkm_umem *umem = nvkm_umem(object);  in nvkm_umem_unmap() local
    68  if (!umem->map)  in nvkm_umem_unmap()
    71  if (umem->io) {  in nvkm_umem_unmap()
    [all …]
|
/Linux-v5.4/drivers/infiniband/core/ |
D | umem.c |
    46  static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)  in __ib_umem_release() argument
    51  if (umem->nmap > 0)  in __ib_umem_release()
    52  ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,  in __ib_umem_release()
    55  for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {  in __ib_umem_release()
    57  put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);  in __ib_umem_release()
    60  sg_free_table(&umem->sg_head);  in __ib_umem_release()
    144  unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,  in ib_umem_find_best_pgsz() argument
    160  mask = roundup_pow_of_two(umem->length);  in ib_umem_find_best_pgsz()
    162  pgoff = umem->address & ~PAGE_MASK;  in ib_umem_find_best_pgsz()
    164  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {  in ib_umem_find_best_pgsz()
    [all …]
|
D | umem_odp.c |
    99  umem_odp->umem.ibdev->ops.invalidate_range(  in ib_umem_notifier_release()
    112  item->umem.ibdev->ops.invalidate_range(item, start, end);  in invalidate_range_start_trampoline()
    213  umem_odp->umem.is_odp = 1;  in ib_init_umem_odp()
    219  ALIGN_DOWN(umem_odp->umem.address, page_size);  in ib_init_umem_odp()
    220  if (check_add_overflow(umem_odp->umem.address,  in ib_init_umem_odp()
    221  (unsigned long)umem_odp->umem.length,  in ib_init_umem_odp()
    255  mn = mmu_notifier_get(&ib_umem_notifiers, umem_odp->umem.owning_mm);  in ib_init_umem_odp()
    272  mmgrab(umem_odp->umem.owning_mm);  in ib_init_umem_odp()
    299  struct ib_umem *umem;  in ib_umem_odp_alloc_implicit() local
    314  umem = &umem_odp->umem;  in ib_umem_odp_alloc_implicit()
    [all …]
|
/Linux-v5.4/include/net/ |
D | xdp_sock.h |
    84  struct xdp_umem *umem;  member
    116  bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
    117  u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
    118  void xsk_umem_discard_addr(struct xdp_umem *umem);
    119  void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
    120  bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
    121  void xsk_umem_consume_tx_done(struct xdp_umem *umem);
    123  struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
    127  void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
    128  void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
    [all …]
|
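The declarations above are the driver-facing side of the AF_XDP umem API. A minimal sketch of how a zero-copy driver might drain the TX ring with it, modeled on the mlx5e and i40e entries elsewhere in these results; my_hw_xmit() and my_xsk_tx() are placeholders, not kernel symbols:

```c
#include <net/xdp_sock.h>

/* Placeholder for a driver's descriptor-post routine; not a kernel symbol. */
static void my_hw_xmit(dma_addr_t dma, void *data, u32 len) { }

/* Sketch of an AF_XDP zero-copy TX poll loop built on the API above. */
static bool my_xsk_tx(struct xdp_umem *umem, unsigned int budget)
{
	struct xdp_desc desc;
	unsigned int sent = 0;

	while (sent < budget && xsk_umem_consume_tx(umem, &desc)) {
		/* Translate the umem-relative address into DMA and CPU views. */
		dma_addr_t dma = xdp_umem_get_dma(umem, desc.addr);
		void *data = xdp_umem_get_data(umem, desc.addr);

		my_hw_xmit(dma, data, desc.len);
		sent++;
	}

	if (sent)
		xsk_umem_consume_tx_done(umem);	/* let the TX ring know */

	return sent == budget;
}
```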
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
D | umem.c |
    10  struct xdp_umem *umem)  in mlx5e_xsk_map_umem() argument
    15  for (i = 0; i < umem->npgs; i++) {  in mlx5e_xsk_map_umem()
    16  dma_addr_t dma = dma_map_page(dev, umem->pgs[i], 0, PAGE_SIZE,  in mlx5e_xsk_map_umem()
    21  umem->pages[i].dma = dma;  in mlx5e_xsk_map_umem()
    28  dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,  in mlx5e_xsk_map_umem()
    30  umem->pages[i].dma = 0;  in mlx5e_xsk_map_umem()
    37  struct xdp_umem *umem)  in mlx5e_xsk_unmap_umem() argument
    42  for (i = 0; i < umem->npgs; i++) {  in mlx5e_xsk_unmap_umem()
    43  dma_unmap_page(dev, umem->pages[i].dma, PAGE_SIZE,  in mlx5e_xsk_unmap_umem()
    45  umem->pages[i].dma = 0;  in mlx5e_xsk_unmap_umem()
    [all …]
|
D | rx.c |
    15  return xsk_umem_has_addrs_rq(rq->umem, count);  in mlx5e_xsk_pages_enough_umem()
    21  struct xdp_umem *umem = rq->umem;  in mlx5e_xsk_page_alloc_umem() local
    24  if (!xsk_umem_peek_addr_rq(umem, &handle))  in mlx5e_xsk_page_alloc_umem()
    27  dma_info->xsk.handle = xsk_umem_adjust_offset(umem, handle,  in mlx5e_xsk_page_alloc_umem()
    29  dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle);  in mlx5e_xsk_page_alloc_umem()
    36  dma_info->addr = xdp_umem_get_dma(umem, handle);  in mlx5e_xsk_page_alloc_umem()
    38  xsk_umem_discard_addr_rq(umem);  in mlx5e_xsk_page_alloc_umem()
    48  xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask);  in mlx5e_xsk_recycle_frame()
|
D | tx.c |
    66  struct xdp_umem *umem = sq->umem;  in mlx5e_xsk_tx() local
    83  if (!xsk_umem_consume_tx(umem, &desc)) {  in mlx5e_xsk_tx()
    92  xdptxd.dma_addr = xdp_umem_get_dma(umem, desc.addr);  in mlx5e_xsk_tx()
    93  xdptxd.data = xdp_umem_get_data(umem, desc.addr);  in mlx5e_xsk_tx()
    114  xsk_umem_consume_tx_done(umem);  in mlx5e_xsk_tx()
|
/Linux-v5.4/include/rdma/ |
D | ib_umem.h |
    58  static inline int ib_umem_offset(struct ib_umem *umem)  in ib_umem_offset() argument
    60  return umem->address & ~PAGE_MASK;  in ib_umem_offset()
    63  static inline size_t ib_umem_num_pages(struct ib_umem *umem)  in ib_umem_num_pages() argument
    65  return (ALIGN(umem->address + umem->length, PAGE_SIZE) -  in ib_umem_num_pages()
    66  ALIGN_DOWN(umem->address, PAGE_SIZE)) >>  in ib_umem_num_pages()
    74  void ib_umem_release(struct ib_umem *umem);
    75  int ib_umem_page_count(struct ib_umem *umem);
    76  int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
    78  unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
    92  static inline void ib_umem_release(struct ib_umem *umem) { }  in ib_umem_release() argument
    [all …]
|
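These helpers wrap the RDMA user-memory pinning API. A minimal sketch of the pin/inspect/release pattern that the pvrdma and mlx4 entries in these results follow; my_pin_user_buf() is a hypothetical helper, and the assumption is that we are inside a driver's reg_user_mr path with a valid ib_udata:

```c
#include <linux/err.h>
#include <rdma/ib_umem.h>

/* Sketch: pin a user buffer and report how many pages it spans. */
static struct ib_umem *my_pin_user_buf(struct ib_udata *udata,
				       u64 start, u64 length, int access)
{
	struct ib_umem *umem;
	size_t npages;

	/* v5.4 form of ib_umem_get(); the last argument is the legacy
	 * dmasync flag, as seen in the doorbell.c entries below. */
	umem = ib_umem_get(udata, start, length, access, 0);
	if (IS_ERR(umem))
		return umem;

	npages = ib_umem_num_pages(umem);	/* pages spanned by the buffer */
	if (!npages) {
		ib_umem_release(umem);
		return ERR_PTR(-EINVAL);
	}

	/* Device page tables would be built from umem->sg_head here; the
	 * byte offset of 'start' in its first page is ib_umem_offset(umem). */
	return umem;	/* the caller pairs this with ib_umem_release() */
}
```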
/Linux-v5.4/tools/lib/bpf/ |
D | xsk.c |
    60  struct xsk_umem *umem;  member
    76  int xsk_umem__fd(const struct xsk_umem *umem)  in xsk_umem__fd() argument
    78  return umem ? umem->fd : -EINVAL;  in xsk_umem__fd()
    143  struct xsk_umem *umem;  in xsk_umem__create_v0_0_4() local
    153  umem = calloc(1, sizeof(*umem));  in xsk_umem__create_v0_0_4()
    154  if (!umem)  in xsk_umem__create_v0_0_4()
    157  umem->fd = socket(AF_XDP, SOCK_RAW, 0);  in xsk_umem__create_v0_0_4()
    158  if (umem->fd < 0) {  in xsk_umem__create_v0_0_4()
    163  umem->umem_area = umem_area;  in xsk_umem__create_v0_0_4()
    164  xsk_set_umem_config(&umem->config, usr_config);  in xsk_umem__create_v0_0_4()
    [all …]
|
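From user space the umem is set up through libbpf's xsk API shown above. A hedged sketch of creating one with the v0.0.4 entry point (header path and frame count are illustrative; real code would check the errno-style return value):

```c
#include <stdlib.h>
#include <unistd.h>
#include <bpf/xsk.h>	/* libbpf's AF_XDP helpers (tools/lib/bpf/xsk.h) */

#define NUM_FRAMES 4096

/* Sketch: allocate a page-aligned buffer and register it as an AF_XDP umem.
 * Returns NULL on failure. */
static struct xsk_umem *create_umem(struct xsk_ring_prod *fq,
				    struct xsk_ring_cons *cq)
{
	size_t size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	struct xsk_umem *umem = NULL;
	void *buffer = NULL;

	if (posix_memalign(&buffer, getpagesize(), size))
		return NULL;

	/* A NULL config selects the defaults filled in by xsk_set_umem_config(). */
	if (xsk_umem__create(&umem, buffer, size, fq, cq, NULL)) {
		free(buffer);
		return NULL;
	}
	return umem;	/* teardown: xsk_umem__delete(umem); free(buffer); */
}
```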
/Linux-v5.4/samples/bpf/ |
D | xdpsock_user.c |
    83  struct xsk_umem *umem;  member
    90  struct xsk_umem_info *umem;  member
    198  struct xsk_umem *umem = xsks[0]->umem->umem;  in int_exit() local
    204  (void)xsk_umem__delete(umem);  in int_exit()
    276  static size_t gen_eth_frame(struct xsk_umem_info *umem, u64 addr)  in gen_eth_frame() argument
    278  memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,  in gen_eth_frame()
    285  struct xsk_umem_info *umem;  in xsk_configure_umem() local
    296  umem = calloc(1, sizeof(*umem));  in xsk_configure_umem()
    297  if (!umem)  in xsk_configure_umem()
    300  ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,  in xsk_configure_umem()
    [all …]
|
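After the umem exists, a sample like the one above still has to seed the fill ring so the kernel has frames to receive into. A short sketch of that step with the libbpf ring helpers; populate_fill_ring() is a hypothetical name, and the flat "frame index × frame size" addressing is an assumption borrowed from the sample's layout:

```c
#include <bpf/xsk.h>

/* Sketch: hand every frame of the umem to the kernel via the fill ring.
 * 'fq' is the fill ring that was passed to xsk_umem__create(); frame_size
 * is the chunk size the umem was configured with. */
static int populate_fill_ring(struct xsk_ring_prod *fq,
			      unsigned int num_frames, __u32 frame_size)
{
	__u32 idx;
	unsigned int i;

	if (xsk_ring_prod__reserve(fq, num_frames, &idx) != num_frames)
		return -1;	/* ring smaller than requested */

	for (i = 0; i < num_frames; i++)
		*xsk_ring_prod__fill_addr(fq, idx++) = (__u64)i * frame_size;

	xsk_ring_prod__submit(fq, num_frames);
	return 0;
}
```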
/Linux-v5.4/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_xsk.c |
    24  struct xdp_umem *umem)  in ixgbe_xsk_umem_dma_map() argument
    30  for (i = 0; i < umem->npgs; i++) {  in ixgbe_xsk_umem_dma_map()
    31  dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,  in ixgbe_xsk_umem_dma_map()
    36  umem->pages[i].dma = dma;  in ixgbe_xsk_umem_dma_map()
    43  dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,  in ixgbe_xsk_umem_dma_map()
    45  umem->pages[i].dma = 0;  in ixgbe_xsk_umem_dma_map()
    52  struct xdp_umem *umem)  in ixgbe_xsk_umem_dma_unmap() argument
    57  for (i = 0; i < umem->npgs; i++) {  in ixgbe_xsk_umem_dma_unmap()
    58  dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,  in ixgbe_xsk_umem_dma_unmap()
    61  umem->pages[i].dma = 0;  in ixgbe_xsk_umem_dma_unmap()
    [all …]
|
/Linux-v5.4/drivers/net/ethernet/intel/i40e/ |
D | i40e_xsk.c |
    19  static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)  in i40e_xsk_umem_dma_map() argument
    27  for (i = 0; i < umem->npgs; i++) {  in i40e_xsk_umem_dma_map()
    28  dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,  in i40e_xsk_umem_dma_map()
    33  umem->pages[i].dma = dma;  in i40e_xsk_umem_dma_map()
    40  dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,  in i40e_xsk_umem_dma_map()
    42  umem->pages[i].dma = 0;  in i40e_xsk_umem_dma_map()
    53  static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)  in i40e_xsk_umem_dma_unmap() argument
    61  for (i = 0; i < umem->npgs; i++) {  in i40e_xsk_umem_dma_unmap()
    62  dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,  in i40e_xsk_umem_dma_unmap()
    65  umem->pages[i].dma = 0;  in i40e_xsk_umem_dma_unmap()
    [all …]
|
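The ixgbe and i40e entries above (and the mlx5e one earlier) share the same shape: each page backing the umem gets a streaming DMA mapping whose address is stored back in umem->pages[i].dma for the data path. A hedged, generic sketch of the map routine with error unwinding; my_xsk_umem_dma_map() and MY_DMA_ATTRS are placeholders standing in for the driver-specific names and attribute set (e.g. skip-CPU-sync plus weak ordering):

```c
#include <linux/dma-mapping.h>
#include <net/xdp_sock.h>

#define MY_DMA_ATTRS (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Sketch: map every umem page for device DMA, unwinding on failure. */
static int my_xsk_umem_dma_map(struct device *dev, struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
						    PAGE_SIZE,
						    DMA_BIDIRECTIONAL,
						    MY_DMA_ATTRS);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;
		umem->pages[i].dma = dma;
	}
	return 0;

out_unmap:
	while (i--) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, MY_DMA_ATTRS);
		umem->pages[i].dma = 0;
	}
	return -ENOMEM;
}
```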
/Linux-v5.4/drivers/infiniband/sw/siw/ |
D | siw_mem.c |
    69  void siw_umem_release(struct siw_umem *umem, bool dirty)  in siw_umem_release() argument
    71  struct mm_struct *mm_s = umem->owning_mm;  in siw_umem_release()
    72  int i, num_pages = umem->num_pages;  in siw_umem_release()
    77  siw_free_plist(&umem->page_chunk[i], to_free,  in siw_umem_release()
    78  umem->writable && dirty);  in siw_umem_release()
    79  kfree(umem->page_chunk[i].plist);  in siw_umem_release()
    82  atomic64_sub(umem->num_pages, &mm_s->pinned_vm);  in siw_umem_release()
    85  kfree(umem->page_chunk);  in siw_umem_release()
    86  kfree(umem);  in siw_umem_release()
    148  siw_umem_release(mem->umem, true);  in siw_free_mem()
    [all …]
|
/Linux-v5.4/drivers/infiniband/hw/mlx5/ |
D | mem.c |
    46  void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,  in mlx5_ib_cont_pages() argument
    65  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in mlx5_ib_cont_pages()
    128  void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,  in __mlx5_ib_populate_pas() argument
    141  if (umem->is_odp) {  in __mlx5_ib_populate_pas()
    147  to_ib_umem_odp(umem)->dma_list[offset + i];  in __mlx5_ib_populate_pas()
    155  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in __mlx5_ib_populate_pas()
    192  void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,  in mlx5_ib_populate_pas() argument
    195  return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,  in mlx5_ib_populate_pas()
    196  ib_umem_num_pages(umem), pas,  in mlx5_ib_populate_pas()
|
D | doorbell.c |
    41  struct ib_umem *umem;  member
    67  page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);  in mlx5_ib_db_map_user()
    68  if (IS_ERR(page->umem)) {  in mlx5_ib_db_map_user()
    69  err = PTR_ERR(page->umem);  in mlx5_ib_db_map_user()
    77  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);  in mlx5_ib_db_map_user()
    93  ib_umem_release(db->u.user_page->umem);  in mlx5_ib_db_unmap_user()
|
/Linux-v5.4/drivers/infiniband/hw/mlx4/ |
D | mr.c |
    77  mr->umem = NULL;  in mlx4_ib_get_dma_mr()
    183  struct ib_umem *umem)  in mlx4_ib_umem_write_mtt() argument
    203  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {  in mlx4_ib_umem_write_mtt()
    257  int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,  in mlx4_ib_umem_calc_optimal_mtt_size() argument
    274  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {  in mlx4_ib_umem_calc_optimal_mtt_size()
    418  mr->umem = mlx4_get_umem_mr(udata, start, length, access_flags);  in mlx4_ib_reg_user_mr()
    419  if (IS_ERR(mr->umem)) {  in mlx4_ib_reg_user_mr()
    420  err = PTR_ERR(mr->umem);  in mlx4_ib_reg_user_mr()
    424  n = ib_umem_page_count(mr->umem);  in mlx4_ib_reg_user_mr()
    425  shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);  in mlx4_ib_reg_user_mr()
    [all …]
|
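The mlx4 and mlx5 entries above all walk the umem's scatterlist to build device page tables (MTT/PAS entries). A hedged sketch of that walk; my_fill_page_list() is a hypothetical helper, and it assumes ib_umem_get() has already DMA-mapped the SG list:

```c
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Sketch: expand a DMA-mapped umem into a flat array of PAGE_SIZE DMA
 * addresses. Returns the number of entries written, or -ENOMEM if the
 * caller's array is too small. */
static int my_fill_page_list(struct ib_umem *umem, u64 *pages, int max_pages)
{
	struct scatterlist *sg;
	unsigned int len, off;
	int i, n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		len = sg_dma_len(sg);
		for (off = 0; off < len; off += PAGE_SIZE) {
			if (n == max_pages)
				return -ENOMEM;
			pages[n++] = sg_dma_address(sg) + off;
		}
	}
	return n;
}
```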
D | doorbell.c |
    40  struct ib_umem *umem;  member
    67  page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);  in mlx4_ib_db_map_user()
    68  if (IS_ERR(page->umem)) {  in mlx4_ib_db_map_user()
    69  err = PTR_ERR(page->umem);  in mlx4_ib_db_map_user()
    77  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);  in mlx4_ib_db_map_user()
    93  ib_umem_release(db->u.user_page->umem);  in mlx4_ib_db_unmap_user()
|
/Linux-v5.4/lib/ |
D | test_user_copy.c |
    47  static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)  in test_check_nonzero_user() argument
    65  umem += start;  in test_check_nonzero_user()
    87  ret |= test(copy_to_user(umem, kmem, size),  in test_check_nonzero_user()
    93  int retval = check_zeroed_user(umem + start, len);  in test_check_nonzero_user()
    105  static int test_copy_struct_from_user(char *kmem, char __user *umem,  in test_copy_struct_from_user() argument
    124  ret |= test(copy_to_user(umem, umem_src, size),  in test_copy_struct_from_user()
    134  ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),  in test_copy_struct_from_user()
    147  ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),  in test_copy_struct_from_user()
    157  ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,  in test_copy_struct_from_user()
    165  ret |= test(clear_user(umem + ksize, usize - ksize),  in test_copy_struct_from_user()
    [all …]
|
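The test above exercises copy_struct_from_user(), the helper for extensible structs passed from user space: it copies min(ksize, usize) bytes, zero-fills the rest of the kernel struct, and returns -E2BIG if user space supplied a larger struct whose trailing bytes are non-zero. A minimal sketch of a caller; struct my_args and my_ioctl_get_args() are illustrative, not kernel symbols:

```c
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative extensible argument struct; newer user space may pass a
 * larger version with extra fields appended at the end. */
struct my_args {
	__u32 flags;
	__u64 addr;
};

static int my_ioctl_get_args(struct my_args *args,
			     const void __user *uarg, size_t usize)
{
	/* Copies min(sizeof(*args), usize) bytes, zeroes the remainder of
	 * *args, and rejects non-zero trailing user bytes with -E2BIG. */
	return copy_struct_from_user(args, sizeof(*args), uarg, usize);
}
```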
/Linux-v5.4/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_mr.c |
    117  struct ib_umem *umem;  in pvrdma_reg_user_mr() local
    129  umem = ib_umem_get(udata, start, length, access_flags, 0);  in pvrdma_reg_user_mr()
    130  if (IS_ERR(umem)) {  in pvrdma_reg_user_mr()
    133  return ERR_CAST(umem);  in pvrdma_reg_user_mr()
    136  npages = ib_umem_num_pages(umem);  in pvrdma_reg_user_mr()
    152  mr->umem = umem;  in pvrdma_reg_user_mr()
    161  ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);  in pvrdma_reg_user_mr()
    190  ib_umem_release(umem);  in pvrdma_reg_user_mr()
    257  mr->umem = NULL;  in pvrdma_alloc_mr()
    293  ib_umem_release(mr->umem);  in pvrdma_dereg_mr()
|