Searched refs:umem (Results 1 – 25 of 73) sorted by relevance

/Linux-v4.19/net/xdp/
xdp_umem.c
22 void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) in xdp_add_sk_umem() argument
26 spin_lock_irqsave(&umem->xsk_list_lock, flags); in xdp_add_sk_umem()
27 list_add_rcu(&xs->list, &umem->xsk_list); in xdp_add_sk_umem()
28 spin_unlock_irqrestore(&umem->xsk_list_lock, flags); in xdp_add_sk_umem()
31 void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) in xdp_del_sk_umem() argument
36 spin_lock_irqsave(&umem->xsk_list_lock, flags); in xdp_del_sk_umem()
38 spin_unlock_irqrestore(&umem->xsk_list_lock, flags); in xdp_del_sk_umem()
40 if (umem->zc) in xdp_del_sk_umem()
57 return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem; in xdp_umem_query()
60 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, in xdp_umem_assign_dev() argument
[all …]
xsk.c
40 return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) && in xsk_is_setup_for_bpf_map()
41 READ_ONCE(xs->umem->fq); in xsk_is_setup_for_bpf_map()
44 u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) in xsk_umem_peek_addr() argument
46 return xskq_peek_addr(umem->fq, addr); in xsk_umem_peek_addr()
50 void xsk_umem_discard_addr(struct xdp_umem *umem) in xsk_umem_discard_addr() argument
52 xskq_discard_addr(umem->fq); in xsk_umem_discard_addr()
62 if (!xskq_peek_addr(xs->umem->fq, &addr) || in __xsk_rcv()
63 len > xs->umem->chunk_size_nohr) { in __xsk_rcv()
68 addr += xs->umem->headroom; in __xsk_rcv()
70 buffer = xdp_umem_get_data(xs->umem, addr); in __xsk_rcv()
[all …]
xdp_umem.h
11 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) in xdp_umem_get_data() argument
13 return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); in xdp_umem_get_data()
16 static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) in xdp_umem_get_dma() argument
18 return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); in xdp_umem_get_dma()
21 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
23 bool xdp_umem_validate_queues(struct xdp_umem *umem);
24 void xdp_get_umem(struct xdp_umem *umem);
25 void xdp_put_umem(struct xdp_umem *umem);
26 void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
27 void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
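The helpers above are the core of the AF_XDP umem addressing scheme: xdp_umem_get_data() splits a 64-bit umem address into a page index (addr >> PAGE_SHIFT) and an in-page offset (addr & (PAGE_SIZE - 1)), while __xsk_rcv() in xsk.c first checks the frame length against chunk_size_nohr and then skips the configured headroom before resolving the buffer pointer. The sketch below reproduces that arithmetic in user space; the mock_* names, the malloc-backed four-page umem, and the fixed PAGE_SHIFT of 12 are assumptions made for the demo, not the kernel's data structures.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SHIFT 12                 /* assumed 4K pages for this demo */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define NPAGES     4

struct mock_umem_page { char *addr; };

struct mock_umem {
	struct mock_umem_page pages[NPAGES];
	uint32_t headroom;            /* bytes reserved in front of each frame */
	uint32_t chunk_size_nohr;     /* usable bytes per chunk, headroom excluded */
};

/* Same page/offset split as xdp_umem_get_data() above. */
static char *mock_umem_get_data(struct mock_umem *umem, uint64_t addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

/* Roughly the checks __xsk_rcv() performs before copying a frame in. */
static int mock_rcv(struct mock_umem *umem, uint64_t addr,
		    const void *pkt, uint32_t len)
{
	if (len > umem->chunk_size_nohr)
		return -1;            /* frame does not fit in one chunk */

	addr += umem->headroom;       /* skip the reserved headroom */
	memcpy(mock_umem_get_data(umem, addr), pkt, len);
	return 0;
}

int main(void)
{
	struct mock_umem umem = { .headroom = 256, .chunk_size_nohr = 2048 };
	const char pkt[] = "hello";
	uint64_t addr = 0x1000;       /* chunk base: page 1, offset 0 */
	int i;

	for (i = 0; i < NPAGES; i++)
		umem.pages[i].addr = malloc(PAGE_SIZE);

	if (mock_rcv(&umem, addr, pkt, sizeof(pkt)) == 0)
		printf("payload lands at page %llu offset %llu: %s\n",
		       (unsigned long long)((addr + umem.headroom) >> PAGE_SHIFT),
		       (unsigned long long)((addr + umem.headroom) & (PAGE_SIZE - 1)),
		       mock_umem_get_data(&umem, addr + umem.headroom));

	for (i = 0; i < NPAGES; i++)
		free(umem.pages[i].addr);
	return 0;
}
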
/Linux-v4.19/drivers/infiniband/core/
umem.c
47 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) in __ib_umem_release() argument
53 if (umem->nmap > 0) in __ib_umem_release()
54 ib_dma_unmap_sg(dev, umem->sg_head.sgl, in __ib_umem_release()
55 umem->npages, in __ib_umem_release()
58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { in __ib_umem_release()
61 if (!PageDirty(page) && umem->writable && dirty) in __ib_umem_release()
66 sg_free_table(&umem->sg_head); in __ib_umem_release()
84 struct ib_umem *umem; in ib_umem_get() local
110 umem = kzalloc(sizeof *umem, GFP_KERNEL); in ib_umem_get()
111 if (!umem) in ib_umem_get()
[all …]
umem_odp.c
61 return ib_umem_start(umem_odp->umem); in node_start()
74 return ib_umem_end(umem_odp->umem) - 1; in node_last()
278 struct ib_umem *umem; in ib_alloc_odp_umem() local
283 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in ib_alloc_odp_umem()
284 if (!umem) in ib_alloc_odp_umem()
287 umem->context = context; in ib_alloc_odp_umem()
288 umem->length = size; in ib_alloc_odp_umem()
289 umem->address = addr; in ib_alloc_odp_umem()
290 umem->page_shift = PAGE_SHIFT; in ib_alloc_odp_umem()
291 umem->writable = 1; in ib_alloc_odp_umem()
[all …]
/Linux-v4.19/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c
40 struct nvkm_umem *umem; in nvkm_umem_search() local
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
55 umem = nvkm_umem(object); in nvkm_umem_search()
56 if (!umem->priv || client->super) in nvkm_umem_search()
57 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
66 struct nvkm_umem *umem = nvkm_umem(object); in nvkm_umem_unmap() local
68 if (!umem->map) in nvkm_umem_unmap()
71 if (umem->io) { in nvkm_umem_unmap()
[all …]
/Linux-v4.19/include/rdma/
ib_umem.h
60 static inline int ib_umem_offset(struct ib_umem *umem) in ib_umem_offset() argument
62 return umem->address & (BIT(umem->page_shift) - 1); in ib_umem_offset()
66 static inline unsigned long ib_umem_start(struct ib_umem *umem) in ib_umem_start() argument
68 return umem->address - ib_umem_offset(umem); in ib_umem_start()
72 static inline unsigned long ib_umem_end(struct ib_umem *umem) in ib_umem_end() argument
74 return ALIGN(umem->address + umem->length, BIT(umem->page_shift)); in ib_umem_end()
77 static inline size_t ib_umem_num_pages(struct ib_umem *umem) in ib_umem_num_pages() argument
79 return (ib_umem_end(umem) - ib_umem_start(umem)) >> umem->page_shift; in ib_umem_num_pages()
86 void ib_umem_release(struct ib_umem *umem);
87 int ib_umem_page_count(struct ib_umem *umem);
[all …]
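Taken together, these inline helpers define how the RDMA core turns an arbitrary user registration into page-aligned bounds: ib_umem_offset() is the offset of the start address inside its page, ib_umem_start() and ib_umem_end() round outward to page boundaries, and ib_umem_num_pages() counts the pages in between. Below is a stand-alone restatement of that math, using a reduced struct and a made-up registration so it can be run in user space; the mock_ib_umem type, the local BIT/ALIGN macros, and the sample address/length are all illustrative.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)      (1UL << (n))
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct mock_ib_umem {
	unsigned long address;    /* user VA passed to ib_umem_get() */
	size_t length;            /* registration length in bytes */
	unsigned int page_shift;  /* usually PAGE_SHIFT, 12 for 4K pages */
};

static unsigned long umem_offset(const struct mock_ib_umem *u)
{
	return u->address & (BIT(u->page_shift) - 1);
}

static unsigned long umem_start(const struct mock_ib_umem *u)
{
	return u->address - umem_offset(u);
}

static unsigned long umem_end(const struct mock_ib_umem *u)
{
	return ALIGN(u->address + u->length, BIT(u->page_shift));
}

static size_t umem_num_pages(const struct mock_ib_umem *u)
{
	return (umem_end(u) - umem_start(u)) >> u->page_shift;
}

int main(void)
{
	/* An unaligned 10000-byte registration starting 0x123 into a page. */
	struct mock_ib_umem u = {
		.address = 0x7f0000000123UL,
		.length = 10000,
		.page_shift = 12,
	};

	printf("offset=%lu start=0x%lx end=0x%lx pages=%zu\n",
	       umem_offset(&u), umem_start(&u), umem_end(&u),
	       umem_num_pages(&u));
	return 0;
}
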
ib_umem_odp.h
75 struct ib_umem *umem; member
87 int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
93 void ib_umem_odp_release(struct ib_umem *umem);
108 int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
111 void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
158 struct ib_umem *umem, in ib_umem_odp_get() argument
171 static inline void ib_umem_odp_release(struct ib_umem *umem) {} in ib_umem_odp_release() argument
/Linux-v4.19/drivers/infiniband/hw/mlx5/
mem.c
46 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, in mlx5_ib_cont_pages() argument
58 unsigned long page_shift = umem->page_shift; in mlx5_ib_cont_pages()
60 if (umem->odp_data) { in mlx5_ib_cont_pages()
61 *ncont = ib_umem_page_count(umem); in mlx5_ib_cont_pages()
76 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in mlx5_ib_cont_pages()
141 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, in __mlx5_ib_populate_pas() argument
145 unsigned long umem_page_shift = umem->page_shift; in __mlx5_ib_populate_pas()
155 const bool odp = umem->odp_data != NULL; in __mlx5_ib_populate_pas()
162 dma_addr_t pa = umem->odp_data->dma_list[offset + i]; in __mlx5_ib_populate_pas()
171 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in __mlx5_ib_populate_pas()
[all …]
odp.c
67 struct ib_ucontext *ctx = odp->umem->context; in odp_next()
105 if (ib_umem_start(odp->umem) > start + length) in odp_lookup()
140 if (odp && odp->umem->address == va) { in mlx5_odp_populate_klm()
156 int idx = ib_umem_start(odp->umem) >> MLX5_IMR_MTT_SHIFT; in mr_leaf_free_action()
162 ib_umem_release(odp->umem); in mr_leaf_free_action()
173 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, in mlx5_ib_invalidate_range() argument
183 if (!umem || !umem->odp_data) { in mlx5_ib_invalidate_range()
188 mr = umem->odp_data->private; in mlx5_ib_invalidate_range()
193 start = max_t(u64, ib_umem_start(umem), start); in mlx5_ib_invalidate_range()
194 end = min_t(u64, ib_umem_end(umem), end); in mlx5_ib_invalidate_range()
[all …]
doorbell.c
41 struct ib_umem *umem; member
66 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx5_ib_db_map_user()
68 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
69 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
77 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx5_ib_db_map_user()
93 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
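The doorbell mapping above illustrates a pattern that recurs in several of these drivers (mlx4 and hns_roce do the same further down): the driver pins the whole page containing the user's doorbell address by passing virt & PAGE_MASK to ib_umem_get(), then re-applies the in-page offset, virt & ~PAGE_MASK, to the page's DMA address. A tiny sketch of that split follows; the doorbell VA and DMA base used here are made up, and PAGE_SIZE is assumed to be 4096.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))   /* same shape as the kernel's definition */

int main(void)
{
	uint64_t virt = 0x7ffd1234abcdULL;    /* hypothetical doorbell VA */
	uint64_t page_dma = 0x100000000ULL;   /* hypothetical sg_dma_address() result */

	uint64_t page_base = virt & PAGE_MASK;   /* what gets passed to ib_umem_get() */
	uint64_t offset    = virt & ~PAGE_MASK;  /* offset re-applied to the DMA address */

	printf("virt=0x%llx page=0x%llx offset=0x%llx db.dma=0x%llx\n",
	       (unsigned long long)virt,
	       (unsigned long long)page_base,
	       (unsigned long long)offset,
	       (unsigned long long)(page_dma + offset));
	return 0;
}
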
mr.c
101 if (mr->umem->odp_data) { in update_odp_mr()
110 mr->umem->odp_data->private = mr; in update_odp_mr()
819 mr->umem = NULL; in mlx5_ib_get_dma_mr()
851 int access_flags, struct ib_umem **umem, in mr_umem_get() argument
859 *umem = NULL; in mr_umem_get()
876 *umem = u; in mr_umem_get()
928 struct ib_pd *pd, struct ib_umem *umem, in alloc_mr_from_cache() argument
953 mr->umem = umem; in alloc_mr_from_cache()
968 struct ib_umem *umem = mr->umem; in populate_xlt() local
977 npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx); in populate_xlt()
[all …]
devx.c
29 struct ib_umem *umem; member
868 obj->umem = ib_umem_get(ucontext, addr, size, access, 0); in devx_umem_get()
869 if (IS_ERR(obj->umem)) in devx_umem_get()
870 return PTR_ERR(obj->umem); in devx_umem_get()
872 mlx5_ib_cont_pages(obj->umem, obj->umem->address, in devx_umem_get()
877 ib_umem_release(obj->umem); in devx_umem_get()
882 obj->page_offset = obj->umem->address & page_mask; in devx_umem_get()
901 void *umem; in devx_umem_reg_cmd_build() local
904 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem); in devx_umem_reg_cmd_build()
905 mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt); in devx_umem_reg_cmd_build()
[all …]
/Linux-v4.19/samples/bpf/
xdpsock_user.c
113 struct xdp_umem *umem; member
279 return &xsk->umem->frames[addr]; in xq_get_data()
411 struct xdp_umem *umem; in xdp_umem_configure() local
415 umem = calloc(1, sizeof(*umem)); in xdp_umem_configure()
416 lassert(umem); in xdp_umem_configure()
436 umem->fq.map = mmap(0, off.fr.desc + in xdp_umem_configure()
441 lassert(umem->fq.map != MAP_FAILED); in xdp_umem_configure()
443 umem->fq.mask = FQ_NUM_DESCS - 1; in xdp_umem_configure()
444 umem->fq.size = FQ_NUM_DESCS; in xdp_umem_configure()
445 umem->fq.producer = umem->fq.map + off.fr.producer; in xdp_umem_configure()
[all …]
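In the sample above, the fill queue is sized to a power of two and fq.mask is set to FQ_NUM_DESCS - 1, so a free-running producer index can be reduced to a ring slot with a single AND instead of a modulo. The sketch below shows just that indexing, with a plain array standing in for the mmap'ed fill ring the sample maps via off.fr; FQ_NUM_DESCS is set to 16 here only to keep the demo small, and the slot-availability check a real producer would do is omitted.

#include <stdint.h>
#include <stdio.h>

#define FQ_NUM_DESCS 16   /* must stay a power of two for the mask trick */

struct mock_fill_queue {
	uint64_t ring[FQ_NUM_DESCS];  /* frame addresses handed to the kernel */
	uint32_t mask;                /* FQ_NUM_DESCS - 1 */
	uint32_t size;
	uint32_t cached_prod;         /* local producer index, free-running */
};

static void fq_enqueue(struct mock_fill_queue *fq, uint64_t frame_addr)
{
	/* The free-running index is reduced to a ring slot with '& mask'. */
	fq->ring[fq->cached_prod++ & fq->mask] = frame_addr;
}

int main(void)
{
	struct mock_fill_queue fq = {
		.mask = FQ_NUM_DESCS - 1,
		.size = FQ_NUM_DESCS,
	};
	uint32_t i;

	/* Enqueue more descriptors than slots to show the wrap-around. */
	for (i = 0; i < FQ_NUM_DESCS + 4; i++)
		fq_enqueue(&fq, (uint64_t)i * 2048);

	printf("slot 0 now holds frame addr %llu (index %d wrapped to 0)\n",
	       (unsigned long long)fq.ring[0], FQ_NUM_DESCS);
	return 0;
}
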
/Linux-v4.19/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
117 struct ib_umem *umem; in pvrdma_reg_user_mr() local
129 umem = ib_umem_get(pd->uobject->context, start, in pvrdma_reg_user_mr()
131 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
134 return ERR_CAST(umem); in pvrdma_reg_user_mr()
137 if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) { in pvrdma_reg_user_mr()
139 umem->npages); in pvrdma_reg_user_mr()
152 mr->umem = umem; in pvrdma_reg_user_mr()
154 ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false); in pvrdma_reg_user_mr()
161 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
171 cmd->nchunks = umem->npages; in pvrdma_reg_user_mr()
[all …]
pvrdma_srq.c
156 srq->umem = ib_umem_get(pd->uobject->context, in pvrdma_create_srq()
159 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
160 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
164 srq->npages = ib_umem_page_count(srq->umem); in pvrdma_create_srq()
180 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
218 ib_umem_release(srq->umem); in pvrdma_create_srq()
239 ib_umem_release(srq->umem); in pvrdma_free_srq()
/Linux-v4.19/drivers/infiniband/hw/mlx4/
mr.c
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
183 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
203 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) { in mlx4_ib_umem_write_mtt()
257 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, in mlx4_ib_umem_calc_optimal_mtt_size() argument
261 u64 min_shift = umem->page_shift; in mlx4_ib_umem_calc_optimal_mtt_size()
274 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) { in mlx4_ib_umem_calc_optimal_mtt_size()
298 (start_va & (~(((u64)(BIT(umem->page_shift))) - 1ULL))) in mlx4_ib_umem_calc_optimal_mtt_size()
418 mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length, in mlx4_ib_reg_user_mr()
420 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
421 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
[all …]
doorbell.c
39 struct ib_umem *umem; member
64 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in mlx4_ib_db_map_user()
66 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
67 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
75 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx4_ib_db_map_user()
91 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
/Linux-v4.19/include/net/
xdp_sock.h
55 struct xdp_umem *umem; member
77 u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
78 void xsk_umem_discard_addr(struct xdp_umem *umem);
79 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
80 bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
81 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
/Linux-v4.19/drivers/infiniband/sw/rxe/
rxe_mr.c
99 if (mem->umem) in rxe_mem_cleanup()
100 ib_umem_release(mem->umem); in rxe_mem_cleanup()
168 struct ib_umem *umem; in rxe_mem_init_user() local
174 umem = ib_umem_get(pd->ibpd.uobject->context, start, length, access, 0); in rxe_mem_init_user()
175 if (IS_ERR(umem)) { in rxe_mem_init_user()
177 (int)PTR_ERR(umem)); in rxe_mem_init_user()
182 mem->umem = umem; in rxe_mem_init_user()
183 num_buf = umem->nmap; in rxe_mem_init_user()
190 ib_umem_release(umem); in rxe_mem_init_user()
194 mem->page_shift = umem->page_shift; in rxe_mem_init_user()
[all …]
/Linux-v4.19/drivers/infiniband/hw/hns/
hns_roce_mr.c
551 npages = ib_umem_page_count(mr->umem); in hns_roce_mhop_free()
639 npages = ib_umem_page_count(mr->umem); in hns_roce_mr_free()
869 mr->umem = NULL; in hns_roce_get_dma_mr()
882 struct hns_roce_mtt *mtt, struct ib_umem *umem) in hns_roce_ib_umem_write_mtt() argument
906 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in hns_roce_ib_umem_write_mtt()
910 sg_dma_address(sg) + (k << umem->page_shift); in hns_roce_ib_umem_write_mtt()
942 struct ib_umem *umem) in hns_roce_ib_umem_write_mr() argument
955 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in hns_roce_ib_umem_write_mr()
959 (k << umem->page_shift); in hns_roce_ib_umem_write_mr()
1002 mr->umem = ib_umem_get(pd->uobject->context, start, length, in hns_roce_reg_user_mr()
[all …]
hns_roce_db.c
31 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, in hns_roce_db_map_user()
33 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
34 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
42 db->dma = sg_dma_address(page->umem->sg_head.sgl) + in hns_roce_db_map_user()
44 page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK; in hns_roce_db_map_user()
45 db->virt_addr = sg_virt(page->umem->sg_head.sgl); in hns_roce_db_map_user()
64 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()
hns_roce_cq.c
220 struct ib_umem **umem, u64 buf_addr, int cqe) in hns_roce_ib_get_cq_umem() argument
226 *umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz, in hns_roce_ib_get_cq_umem()
228 if (IS_ERR(*umem)) in hns_roce_ib_get_cq_umem()
229 return PTR_ERR(*umem); in hns_roce_ib_get_cq_umem()
237 npages = (ib_umem_page_count(*umem) + in hns_roce_ib_get_cq_umem()
244 ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem), in hns_roce_ib_get_cq_umem()
245 (*umem)->page_shift, in hns_roce_ib_get_cq_umem()
251 ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem); in hns_roce_ib_get_cq_umem()
261 ib_umem_release(*umem); in hns_roce_ib_get_cq_umem()
351 &hr_cq->umem, ucmd.buf_addr, in hns_roce_ib_create_cq()
[all …]
/Linux-v4.19/drivers/vhost/
vhost.c
327 vq->umem = NULL; in vhost_vq_reset()
425 dev->umem = NULL; in vhost_dev_init()
542 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem) in vhost_dev_reset_owner() argument
549 INIT_LIST_HEAD(&umem->umem_list); in vhost_dev_reset_owner()
550 dev->umem = umem; in vhost_dev_reset_owner()
555 dev->vqs[i]->umem = umem; in vhost_dev_reset_owner()
572 static void vhost_umem_free(struct vhost_umem *umem, in vhost_umem_free() argument
575 vhost_umem_interval_tree_remove(node, &umem->umem_tree); in vhost_umem_free()
578 umem->numem--; in vhost_umem_free()
581 static void vhost_umem_clean(struct vhost_umem *umem) in vhost_umem_clean() argument
[all …]
/Linux-v4.19/drivers/infiniband/sw/rdmavt/
mr.c
383 struct ib_umem *umem; in rvt_reg_user_mr() local
391 umem = ib_umem_get(pd->uobject->context, start, length, in rvt_reg_user_mr()
393 if (IS_ERR(umem)) in rvt_reg_user_mr()
394 return (void *)umem; in rvt_reg_user_mr()
396 n = umem->nmap; in rvt_reg_user_mr()
407 mr->mr.offset = ib_umem_offset(umem); in rvt_reg_user_mr()
409 mr->umem = umem; in rvt_reg_user_mr()
411 mr->mr.page_shift = umem->page_shift; in rvt_reg_user_mr()
414 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in rvt_reg_user_mr()
423 mr->mr.map[m]->segs[n].length = BIT(umem->page_shift); in rvt_reg_user_mr()
[all …]
