
Searched refs:umem (Results 1 – 25 of 87) sorted by relevance


/Linux-v5.10/net/xdp/
xdp_umem.c
26 static void xdp_umem_unpin_pages(struct xdp_umem *umem) in xdp_umem_unpin_pages() argument
28 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
30 kfree(umem->pgs); in xdp_umem_unpin_pages()
31 umem->pgs = NULL; in xdp_umem_unpin_pages()
34 static void xdp_umem_unaccount_pages(struct xdp_umem *umem) in xdp_umem_unaccount_pages() argument
36 if (umem->user) { in xdp_umem_unaccount_pages()
37 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
38 free_uid(umem->user); in xdp_umem_unaccount_pages()
42 static void xdp_umem_addr_unmap(struct xdp_umem *umem) in xdp_umem_addr_unmap() argument
44 vunmap(umem->addrs); in xdp_umem_addr_unmap()
[all …]
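
The two teardown helpers above undo UMEM setup: drop the page pins taken at registration, then uncharge the pages from the owning user's locked_vm budget. A minimal sketch of that pattern, using hypothetical demo_* names rather than the kernel's own symbols:

#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/slab.h>

/* Illustrative only: mirrors the unpin + unaccount pattern above. */
struct demo_umem {
	struct page **pgs;        /* pages pinned at setup */
	u32 npgs;                 /* number of pinned pages */
	struct user_struct *user; /* user charged for the locked memory */
};

static void demo_umem_release(struct demo_umem *um)
{
	/* mark the pages dirty and drop the pins */
	unpin_user_pages_dirty_lock(um->pgs, um->npgs, true);
	kfree(um->pgs);
	um->pgs = NULL;

	if (um->user) {
		/* uncharge the pages from the user's locked_vm budget */
		atomic_long_sub(um->npgs, &um->user->locked_vm);
		free_uid(um->user);
	}
}
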
xsk_buff_pool.c
45 struct xdp_umem *umem) in xp_create_and_assign_umem() argument
51 pool = kvzalloc(struct_size(pool, free_heads, umem->chunks), in xp_create_and_assign_umem()
56 pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL); in xp_create_and_assign_umem()
60 pool->chunk_mask = ~((u64)umem->chunk_size - 1); in xp_create_and_assign_umem()
61 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
62 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
63 pool->free_heads_cnt = umem->chunks; in xp_create_and_assign_umem()
64 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
65 pool->chunk_size = umem->chunk_size; in xp_create_and_assign_umem()
66 pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; in xp_create_and_assign_umem()
[all …]
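
A detail worth calling out in xp_create_and_assign_umem(): in aligned mode chunk_size is a power of two, so chunk_mask = ~(chunk_size - 1) rounds any UMEM address down to its chunk start with a single AND. A standalone sketch with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk_size = 2048;              /* power of two, as required */
	uint64_t chunk_mask = ~(chunk_size - 1); /* 0xffff...f800 */
	uint64_t addr = 5000;                    /* lands inside chunk 2 */

	/* addr & chunk_mask == 4096, the base of the chunk containing addr */
	printf("chunk base: %llu\n", (unsigned long long)(addr & chunk_mask));
	return 0;
}
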
xsk_diag.c
50 struct xdp_umem *umem = xs->umem; in xsk_diag_put_umem() local
54 if (!umem) in xsk_diag_put_umem()
57 du.id = umem->id; in xsk_diag_put_umem()
58 du.size = umem->size; in xsk_diag_put_umem()
59 du.num_pages = umem->npgs; in xsk_diag_put_umem()
60 du.chunk_size = umem->chunk_size; in xsk_diag_put_umem()
61 du.headroom = umem->headroom; in xsk_diag_put_umem()
65 if (umem->zc) in xsk_diag_put_umem()
67 du.refs = refcount_read(&umem->users); in xsk_diag_put_umem()
xdp_umem.h
11 void xdp_get_umem(struct xdp_umem *umem);
12 void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup);
/Linux-v5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c
40 struct nvkm_umem *umem; in nvkm_umem_search() local
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
55 umem = nvkm_umem(object); in nvkm_umem_search()
56 if (!umem->priv || client->super) in nvkm_umem_search()
57 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
66 struct nvkm_umem *umem = nvkm_umem(object); in nvkm_umem_unmap() local
68 if (!umem->map) in nvkm_umem_unmap()
71 if (umem->io) { in nvkm_umem_unmap()
[all …]
/Linux-v5.10/drivers/infiniband/core/
umem.c
47 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) in __ib_umem_release() argument
52 if (umem->nmap > 0) in __ib_umem_release()
53 ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents, in __ib_umem_release()
56 for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) { in __ib_umem_release()
58 unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty); in __ib_umem_release()
61 sg_free_table(&umem->sg_head); in __ib_umem_release()
78 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, in ib_umem_find_best_pgsz() argument
97 umem->iova = va = virt; in ib_umem_find_best_pgsz()
104 bits_per((umem->length - 1 + virt) ^ virt)); in ib_umem_find_best_pgsz()
106 pgoff = umem->address & ~PAGE_MASK; in ib_umem_find_best_pgsz()
[all …]
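
In ib_umem_find_best_pgsz(), the expression bits_per((umem->length - 1 + virt) ^ virt) caps the candidate page size: XORing the first and last virtual addresses of the range exposes the highest bit in which they differ, which bounds the largest page size that can map the range as same-offset blocks. A standalone sketch of the arithmetic (hypothetical values; bits_per() is a rough stand-in for the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* number of bits needed to represent x (stand-in for the kernel's bits_per()) */
static unsigned int bits_per(uint64_t x)
{
	unsigned int n = 1;

	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	uint64_t virt   = 0x201000; /* start of the mapping */
	uint64_t length = 0x3000;   /* 12 KiB */
	uint64_t last   = virt + length - 1;

	/* highest differing bit between first and last address caps the page size */
	printf("page-size bits capped at: %u\n", bits_per(last ^ virt)); /* 14 */
	return 0;
}
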
umem_odp.c
57 umem_odp->umem.is_odp = 1; in ib_init_umem_odp()
66 start = ALIGN_DOWN(umem_odp->umem.address, page_size); in ib_init_umem_odp()
67 if (check_add_overflow(umem_odp->umem.address, in ib_init_umem_odp()
68 (unsigned long)umem_odp->umem.length, in ib_init_umem_odp()
93 umem_odp->umem.owning_mm, in ib_init_umem_odp()
121 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
131 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
132 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
133 umem->writable = ib_access_writable(access); in ib_umem_odp_alloc_implicit()
134 umem->owning_mm = current->mm; in ib_umem_odp_alloc_implicit()
[all …]
/Linux-v5.10/include/rdma/
ib_umem.h
32 static inline int ib_umem_offset(struct ib_umem *umem) in ib_umem_offset() argument
34 return umem->address & ~PAGE_MASK; in ib_umem_offset()
37 static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem, in ib_umem_num_dma_blocks() argument
40 return (size_t)((ALIGN(umem->iova + umem->length, pgsz) - in ib_umem_num_dma_blocks()
41 ALIGN_DOWN(umem->iova, pgsz))) / in ib_umem_num_dma_blocks()
45 static inline size_t ib_umem_num_pages(struct ib_umem *umem) in ib_umem_num_pages() argument
47 return ib_umem_num_dma_blocks(umem, PAGE_SIZE); in ib_umem_num_pages()
51 struct ib_umem *umem, in __rdma_umem_block_iter_start() argument
54 __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz); in __rdma_umem_block_iter_start()
68 #define rdma_umem_for_each_dma_block(umem, biter, pgsz) \ argument
[all …]
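
ib_umem_num_dma_blocks() counts the pgsz-aligned blocks touched by [iova, iova + length): align the end up, the start down, and divide by pgsz, so a region straddling a boundary still counts every block it touches. A standalone worked example with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t iova = 0x1f00, length = 0x300, pgsz = 0x1000;

	/* [0x1f00, 0x2200) touches the 4 KiB blocks at 0x1000 and 0x2000 */
	uint64_t blocks = (ALIGN_UP(iova + length, pgsz) -
			   ALIGN_DOWN(iova, pgsz)) / pgsz;
	printf("dma blocks: %llu\n", (unsigned long long)blocks); /* 2 */
	return 0;
}
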
ib_umem_odp.h
13 struct ib_umem umem; member
47 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) in to_ib_umem_odp() argument
49 return container_of(umem, struct ib_umem_odp, umem); in to_ib_umem_odp()
/Linux-v5.10/tools/lib/bpf/
xsk.c
63 struct xsk_umem *umem; member
102 int xsk_umem__fd(const struct xsk_umem *umem) in xsk_umem__fd() argument
104 return umem ? umem->fd : -EINVAL; in xsk_umem__fd()
214 static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, in xsk_create_umem_rings() argument
223 &umem->config.fill_size, in xsk_create_umem_rings()
224 sizeof(umem->config.fill_size)); in xsk_create_umem_rings()
229 &umem->config.comp_size, in xsk_create_umem_rings()
230 sizeof(umem->config.comp_size)); in xsk_create_umem_rings()
238 map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), in xsk_create_umem_rings()
244 fill->mask = umem->config.fill_size - 1; in xsk_create_umem_rings()
[all …]
xsk.h
186 LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
216 LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
221 LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
226 LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
233 struct xsk_umem *umem,
240 __u32 queue_id, struct xsk_umem *umem,
248 LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
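
Taken together, these prototypes form the libbpf 0.x UMEM lifecycle: create a UMEM over a page-aligned buffer together with its fill and completion rings, hand it to one or more sockets, and delete it when done. A hedged usage sketch (error handling trimmed; demo_create_umem() is illustrative, while the xsk_* names and defaults come from xsk.h):

#include <stdlib.h>
#include <unistd.h>
#include <bpf/xsk.h>

#define NUM_FRAMES 4096

static int demo_create_umem(struct xsk_umem **umem,
			    struct xsk_ring_prod *fq, struct xsk_ring_cons *cq)
{
	size_t size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	void *buf;

	/* the UMEM area must be page aligned */
	if (posix_memalign(&buf, getpagesize(), size))
		return -1;

	/* NULL config selects libbpf's default fill/comp ring sizes */
	return xsk_umem__create(umem, buf, size, fq, cq, NULL);
}
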
/Linux-v5.10/drivers/infiniband/sw/siw/
siw_mem.c
69 void siw_umem_release(struct siw_umem *umem, bool dirty) in siw_umem_release() argument
71 struct mm_struct *mm_s = umem->owning_mm; in siw_umem_release()
72 int i, num_pages = umem->num_pages; in siw_umem_release()
77 siw_free_plist(&umem->page_chunk[i], to_free, in siw_umem_release()
78 umem->writable && dirty); in siw_umem_release()
79 kfree(umem->page_chunk[i].plist); in siw_umem_release()
82 atomic64_sub(umem->num_pages, &mm_s->pinned_vm); in siw_umem_release()
85 kfree(umem->page_chunk); in siw_umem_release()
86 kfree(umem); in siw_umem_release()
148 siw_umem_release(mem->umem, true); in siw_free_mem()
[all …]
siw_mem.h
10 void siw_umem_release(struct siw_umem *umem, bool dirty);
63 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
65 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
69 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
70 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
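
siw_get_upage() splits a linear page index into a chunk index and an index within the chunk, because the pinned page pointers are stored in fixed-size chunk arrays. A standalone sketch of that split, assuming a hypothetical 512-pages-per-chunk layout:

#include <stdint.h>
#include <stdio.h>

#define CHUNK_SHIFT 9                        /* hypothetical: 512 pages/chunk */
#define CHUNK_MASK  ((1u << CHUNK_SHIFT) - 1)
#define PAGE_SHIFT_ 12                       /* 4 KiB pages */

int main(void)
{
	uint64_t fp_addr = 0x7f0000000000;       /* first page of the MR */
	uint64_t addr    = fp_addr + (700ull << PAGE_SHIFT_);

	unsigned int page_idx      = (addr - fp_addr) >> PAGE_SHIFT_; /* 700 */
	unsigned int chunk_idx     = page_idx >> CHUNK_SHIFT;         /* 1 */
	unsigned int page_in_chunk = page_idx & CHUNK_MASK;           /* 188 */

	printf("chunk %u, page %u\n", chunk_idx, page_in_chunk);
	return 0;
}
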
/Linux-v5.10/drivers/infiniband/hw/mlx4/
mr.c
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
183 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
203 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) { in mlx4_ib_umem_write_mtt()
257 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, in mlx4_ib_umem_calc_optimal_mtt_size() argument
274 *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in mlx4_ib_umem_calc_optimal_mtt_size()
276 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) { in mlx4_ib_umem_calc_optimal_mtt_size()
420 mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags); in mlx4_ib_reg_user_mr()
421 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
422 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
426 shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); in mlx4_ib_reg_user_mr()
[all …]
doorbell.c
40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx4_ib_db_map_user()
94 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
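
mlx4_ib_db_map_user() pins the whole page containing the user's doorbell (virt & PAGE_MASK) and then restores the sub-page offset by adding (virt & ~PAGE_MASK) to the page's DMA address from the scatterlist. A standalone sketch of the offset math with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK_ (~((uint64_t)4096 - 1))

int main(void)
{
	uint64_t virt     = 0x7f12345678c8; /* user doorbell address */
	uint64_t page_dma = 0xabcd0000;     /* sg_dma_address() of the page */

	/* the mapping covered virt & PAGE_MASK; re-add the in-page offset */
	uint64_t db_dma = page_dma + (virt & ~PAGE_MASK_);
	printf("doorbell dma: 0x%llx\n", (unsigned long long)db_dma); /* 0xabcd08c8 */
	return 0;
}
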
srq.c
113 srq->umem = in mlx4_ib_create_srq()
115 if (IS_ERR(srq->umem)) in mlx4_ib_create_srq()
116 return PTR_ERR(srq->umem); in mlx4_ib_create_srq()
119 dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE), in mlx4_ib_create_srq()
124 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); in mlx4_ib_create_srq()
209 if (!srq->umem) in mlx4_ib_create_srq()
211 ib_umem_release(srq->umem); in mlx4_ib_create_srq()
285 ib_umem_release(msrq->umem); in mlx4_ib_destroy_srq()
/Linux-v5.10/samples/bpf/
xdpsock_user.c
139 struct xsk_umem *umem; member
146 struct xsk_umem_info *umem; member
483 struct xsk_umem *umem = xsks[0]->umem->umem; in xdpsock_cleanup() local
489 (void)xsk_umem__delete(umem); in xdpsock_cleanup()
785 static void gen_eth_frame(struct xsk_umem_info *umem, u64 addr) in gen_eth_frame() argument
787 memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data, in gen_eth_frame()
793 struct xsk_umem_info *umem; in xsk_configure_umem() local
812 umem = calloc(1, sizeof(*umem)); in xsk_configure_umem()
813 if (!umem) in xsk_configure_umem()
816 ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq, in xsk_configure_umem()
[all …]
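
gen_eth_frame() relies on xsk_umem__get_data(), which resolves a UMEM-relative address to a pointer into the mapped buffer (effectively buffer + addr). A hedged sketch of the same move; demo_fill_frame() is illustrative:

#include <string.h>
#include <bpf/xsk.h>

/* Write a prebuilt frame at UMEM-relative address addr; buffer is the
 * base of the UMEM mapping. */
static void demo_fill_frame(void *buffer, __u64 addr,
			    const void *pkt, size_t len)
{
	/* xsk_umem__get_data(buffer, addr) is simply buffer + addr */
	memcpy(xsk_umem__get_data(buffer, addr), pkt, len);
}
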
/Linux-v5.10/drivers/infiniband/hw/mlx5/
doorbell.c
41 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK); in mlx5_ib_db_map_user()
94 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
mem.c
47 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, in mlx5_ib_cont_pages() argument
66 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in mlx5_ib_cont_pages()
117 void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, in __mlx5_ib_populate_pas() argument
131 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { in __mlx5_ib_populate_pas()
168 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, in mlx5_ib_populate_pas() argument
171 return __mlx5_ib_populate_pas(dev, umem, page_shift, 0, in mlx5_ib_populate_pas()
172 ib_umem_num_dma_blocks(umem, PAGE_SIZE), in mlx5_ib_populate_pas()
/Linux-v5.10/lib/
test_user_copy.c
47 static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size) in test_check_nonzero_user() argument
65 umem += start; in test_check_nonzero_user()
87 ret |= test(copy_to_user(umem, kmem, size), in test_check_nonzero_user()
93 int retval = check_zeroed_user(umem + start, len); in test_check_nonzero_user()
105 static int test_copy_struct_from_user(char *kmem, char __user *umem, in test_copy_struct_from_user() argument
124 ret |= test(copy_to_user(umem, umem_src, size), in test_copy_struct_from_user()
134 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
147 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
157 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG, in test_copy_struct_from_user()
165 ret |= test(clear_user(umem + ksize, usize - ksize), in test_copy_struct_from_user()
[all …]
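
The copy_struct_from_user() cases above probe its extensibility contract: it copies min(ksize, usize) bytes, zero-fills any kernel-side remainder, and fails with -E2BIG when usize > ksize and the extra user bytes are not all zero. A hedged sketch of a typical call site, with illustrative demo_* names:

#include <linux/uaccess.h>
#include <linux/types.h>

struct demo_args {
	__u64 flags;
	__u64 addr;
};

static int demo_parse_args(struct demo_args *args,
			   const void __user *uptr, size_t usize)
{
	/* copies min(sizeof(*args), usize) bytes and zero-fills the rest;
	 * returns -E2BIG if usize > sizeof(*args) and the trailing user
	 * bytes are not all zero */
	return copy_struct_from_user(args, sizeof(*args), uptr, usize);
}
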
/Linux-v5.10/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
117 struct ib_umem *umem; in pvrdma_reg_user_mr() local
129 umem = ib_umem_get(pd->device, start, length, access_flags); in pvrdma_reg_user_mr()
130 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
133 return ERR_CAST(umem); in pvrdma_reg_user_mr()
136 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
152 mr->umem = umem; in pvrdma_reg_user_mr()
161 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
190 ib_umem_release(umem); in pvrdma_reg_user_mr()
257 mr->umem = NULL; in pvrdma_alloc_mr()
294 ib_umem_release(mr->umem); in pvrdma_dereg_mr()
pvrdma_srq.c
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
171 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
209 ib_umem_release(srq->umem); in pvrdma_create_srq()
229 ib_umem_release(srq->umem); in pvrdma_free_srq()
/Linux-v5.10/drivers/infiniband/sw/rxe/
rxe_mr.c
67 ib_umem_release(mem->umem); in rxe_mem_cleanup()
132 struct ib_umem *umem; in rxe_mem_init_user() local
138 umem = ib_umem_get(pd->ibpd.device, start, length, access); in rxe_mem_init_user()
139 if (IS_ERR(umem)) { in rxe_mem_init_user()
141 (int)PTR_ERR(umem)); in rxe_mem_init_user()
146 mem->umem = umem; in rxe_mem_init_user()
147 num_buf = ib_umem_num_pages(umem); in rxe_mem_init_user()
154 ib_umem_release(umem); in rxe_mem_init_user()
166 for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) { in rxe_mem_init_user()
176 ib_umem_release(umem); in rxe_mem_init_user()
[all …]
/Linux-v5.10/drivers/infiniband/hw/hns/
hns_roce_db.c
34 page->umem = ib_umem_get(context->ibucontext.device, page_addr, in hns_roce_db_map_user()
36 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
37 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
46 db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset; in hns_roce_db_map_user()
47 db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset; in hns_roce_db_map_user()
65 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()
/Linux-v5.10/include/net/
xdp_sock_drv.h
55 struct xdp_umem *umem = pool->umem; in xsk_pool_dma_map() local
57 return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs); in xsk_pool_dma_map()
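
xsk_pool_dma_map() is the driver-facing wrapper: it forwards the UMEM's pinned page array to xp_dma_map() so every page gets a DMA mapping for the device. A hedged sketch of where a zero-copy driver might call it; demo_enable_zc() is illustrative, and real drivers pass their own DMA device (e.g. &pdev->dev) and DMA attributes:

#include <net/xdp_sock_drv.h>

static int demo_enable_zc(struct device *dma_dev, struct xsk_buff_pool *pool)
{
	/* map every pinned UMEM page for DMA on this device */
	return xsk_pool_dma_map(pool, dma_dev, 0);
}
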
