Searched refs: umem (Results 1 – 25 of 105) sorted by relevance


/Linux-v6.6/net/xdp/
xdp_umem.c
24 static void xdp_umem_unpin_pages(struct xdp_umem *umem) in xdp_umem_unpin_pages() argument
26 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
28 kvfree(umem->pgs); in xdp_umem_unpin_pages()
29 umem->pgs = NULL; in xdp_umem_unpin_pages()
32 static void xdp_umem_unaccount_pages(struct xdp_umem *umem) in xdp_umem_unaccount_pages() argument
34 if (umem->user) { in xdp_umem_unaccount_pages()
35 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
36 free_uid(umem->user); in xdp_umem_unaccount_pages()
40 static void xdp_umem_addr_unmap(struct xdp_umem *umem) in xdp_umem_addr_unmap() argument
42 vunmap(umem->addrs); in xdp_umem_addr_unmap()
[all …]
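
The three helpers above make up the teardown path for an XDP UMEM: unmap the kernel view of the chunks, unpin the user pages (marking them dirty, since the NIC may have written to them), and return the pinned-page quota to the owning user. A condensed sketch of how they compose, assuming the release ordering suggested by the snippets:

static void my_umem_destroy(struct xdp_umem *umem)
{
	/* Drop the contiguous kernel mapping of the chunks first. */
	vunmap(umem->addrs);
	umem->addrs = NULL;

	/* Release the page pins; 'true' marks the pages dirty. */
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
	kvfree(umem->pgs);
	umem->pgs = NULL;

	/* Give the locked-memory quota back to the owning user. */
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}
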
xsk_buff_pool.c
56 struct xdp_umem *umem) in xp_create_and_assign_umem() argument
58 bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; in xp_create_and_assign_umem()
63 entries = unaligned ? umem->chunks : 0; in xp_create_and_assign_umem()
68 pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL); in xp_create_and_assign_umem()
76 pool->chunk_mask = ~((u64)umem->chunk_size - 1); in xp_create_and_assign_umem()
77 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
78 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
79 pool->free_heads_cnt = umem->chunks; in xp_create_and_assign_umem()
80 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
81 pool->chunk_size = umem->chunk_size; in xp_create_and_assign_umem()
[all …]
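
The pool mostly copies the UMEM's geometry; the one derived value is chunk_mask, which relies on aligned chunk sizes being powers of two, so ~(chunk_size - 1) rounds any address down to its chunk base. A standalone arithmetic check (values made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk_size = 2048;              /* power of two */
	uint64_t chunk_mask = ~(chunk_size - 1); /* 0xfffffffffffff800 */
	uint64_t addr = 5000;

	/* 5000 & ~2047 == 4096: the start of addr's chunk. */
	printf("%llu\n", (unsigned long long)(addr & chunk_mask));
	return 0;
}
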
xsk_diag.c
50 struct xdp_umem *umem = xs->umem; in xsk_diag_put_umem() local
54 if (!umem) in xsk_diag_put_umem()
57 du.id = umem->id; in xsk_diag_put_umem()
58 du.size = umem->size; in xsk_diag_put_umem()
59 du.num_pages = umem->npgs; in xsk_diag_put_umem()
60 du.chunk_size = umem->chunk_size; in xsk_diag_put_umem()
61 du.headroom = umem->headroom; in xsk_diag_put_umem()
65 if (umem->zc) in xsk_diag_put_umem()
67 du.refs = refcount_read(&umem->users); in xsk_diag_put_umem()
/Linux-v6.6/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c
40 struct nvkm_umem *umem; in nvkm_umem_search() local
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
55 umem = nvkm_umem(object); in nvkm_umem_search()
56 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
65 struct nvkm_umem *umem = nvkm_umem(object); in nvkm_umem_unmap() local
67 if (!umem->map) in nvkm_umem_unmap()
70 if (umem->io) { in nvkm_umem_unmap()
71 if (!IS_ERR(umem->bar)) { in nvkm_umem_unmap()
[all …]
/Linux-v6.6/drivers/infiniband/core/
umem.c
48 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) in __ib_umem_release() argument
50 bool make_dirty = umem->writable && dirty; in __ib_umem_release()
55 ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt, in __ib_umem_release()
58 for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) in __ib_umem_release()
62 sg_free_append_table(&umem->sgt_append); in __ib_umem_release()
79 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, in ib_umem_find_best_pgsz() argument
88 umem->iova = va = virt; in ib_umem_find_best_pgsz()
90 if (umem->is_odp) { in ib_umem_find_best_pgsz()
91 unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); in ib_umem_find_best_pgsz()
111 bits_per((umem->length - 1 + virt) ^ virt)); in ib_umem_find_best_pgsz()
[all …]
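
ib_umem_find_best_pgsz() is what drivers call to pick the largest DMA page size that can cover the pinned range at the given user virtual address; it returns 0 if nothing in the supplied bitmap fits. A hedged sketch of the call pattern, with an illustrative (not device-specific) bitmap:

/* The SZ_* set below is an assumption, not any particular HCA's. */
static int my_pick_page_size(struct ib_umem *umem, u64 virt,
			     unsigned long *pgsz)
{
	*pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_64K | SZ_2M | SZ_1G,
				       virt);
	return *pgsz ? 0 : -EINVAL; /* 0: no supported size fits */
}
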
umem_odp.c
55 umem_odp->umem.is_odp = 1; in ib_init_umem_odp()
64 start = ALIGN_DOWN(umem_odp->umem.address, page_size); in ib_init_umem_odp()
65 if (check_add_overflow(umem_odp->umem.address, in ib_init_umem_odp()
66 (unsigned long)umem_odp->umem.length, in ib_init_umem_odp()
91 umem_odp->umem.owning_mm, in ib_init_umem_odp()
119 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
129 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
130 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
131 umem->writable = ib_access_writable(access); in ib_umem_odp_alloc_implicit()
132 umem->owning_mm = current->mm; in ib_umem_odp_alloc_implicit()
[all …]
umem_dmabuf.c
36 start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE); in ib_umem_dmabuf_map_pages()
37 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, in ib_umem_dmabuf_map_pages()
62 umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg; in ib_umem_dmabuf_map_pages()
63 umem_dmabuf->umem.sgt_append.sgt.nents = nmap; in ib_umem_dmabuf_map_pages()
120 struct ib_umem *umem; in ib_umem_dmabuf_get() local
143 umem = &umem_dmabuf->umem; in ib_umem_dmabuf_get()
144 umem->ibdev = device; in ib_umem_dmabuf_get()
145 umem->length = size; in ib_umem_dmabuf_get()
146 umem->address = offset; in ib_umem_dmabuf_get()
147 umem->writable = ib_access_writable(access); in ib_umem_dmabuf_get()
[all …]
/Linux-v6.6/include/rdma/
ib_umem.h
32 struct ib_umem umem; member
43 static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem) in to_ib_umem_dmabuf() argument
45 return container_of(umem, struct ib_umem_dmabuf, umem); in to_ib_umem_dmabuf()
49 static inline int ib_umem_offset(struct ib_umem *umem) in ib_umem_offset() argument
51 return umem->address & ~PAGE_MASK; in ib_umem_offset()
54 static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem, in ib_umem_dma_offset() argument
57 return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) & in ib_umem_dma_offset()
61 static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem, in ib_umem_num_dma_blocks() argument
64 return (size_t)((ALIGN(umem->iova + umem->length, pgsz) - in ib_umem_num_dma_blocks()
65 ALIGN_DOWN(umem->iova, pgsz))) / in ib_umem_num_dma_blocks()
[all …]
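
ib_umem_num_dma_blocks() rounds the [iova, iova + length) interval out to pgsz boundaries and counts the blocks, so an unaligned range can cost one extra block. A standalone restatement of that arithmetic (values made up):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
#define ALIGN_UP(x, a)   ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
	uint64_t iova = 0x1f000, length = 0x3000, pgsz = 0x10000; /* 64 KiB */
	uint64_t blocks =
		(ALIGN_UP(iova + length, pgsz) - ALIGN_DOWN(iova, pgsz)) / pgsz;

	printf("%llu DMA blocks\n", (unsigned long long)blocks); /* 2 */
	return 0;
}
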
ib_umem_odp.h
13 struct ib_umem umem; member
47 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) in to_ib_umem_odp() argument
49 return container_of(umem, struct ib_umem_odp, umem); in to_ib_umem_odp()
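
to_ib_umem_odp() (like to_ib_umem_dmabuf() above) is the standard container_of embedding pattern: struct ib_umem is embedded in the larger struct, and the helper walks back from the inner pointer. A generic user-space sketch with made-up types:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int flags; };
struct derived {
	struct base base; /* embedded, as ib_umem is in ib_umem_odp */
	int extra;
};

static struct derived *to_derived(struct base *b)
{
	return container_of(b, struct derived, base);
}
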
/Linux-v6.6/tools/testing/selftests/bpf/
xsk.c
72 struct xsk_umem *umem; member
92 int xsk_umem__fd(const struct xsk_umem *umem) in xsk_umem__fd() argument
94 return umem ? umem->fd : -EINVAL; in xsk_umem__fd()
161 static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, in xsk_create_umem_rings() argument
170 &umem->config.fill_size, in xsk_create_umem_rings()
171 sizeof(umem->config.fill_size)); in xsk_create_umem_rings()
176 &umem->config.comp_size, in xsk_create_umem_rings()
177 sizeof(umem->config.comp_size)); in xsk_create_umem_rings()
185 map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), in xsk_create_umem_rings()
191 fill->mask = umem->config.fill_size - 1; in xsk_create_umem_rings()
[all …]
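
xsk_create_umem_rings() follows the standard AF_XDP sequence: size the fill and completion rings with setsockopt(), fetch the mmap offsets, then map each ring's descriptor array. A condensed user-space sketch of the fill-ring half (error paths trimmed; fd is an AF_XDP socket):

#include <linux/if_xdp.h>
#include <sys/mman.h>
#include <sys/socket.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

static int setup_fill_ring(int fd, __u32 fill_size)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen = sizeof(off);
	void *map;

	if (setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
		       &fill_size, sizeof(fill_size)))
		return -1;
	if (getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen))
		return -1;

	map = mmap(NULL, off.fr.desc + fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		   fd, XDP_UMEM_PGOFF_FILL_RING);
	return map == MAP_FAILED ? -1 : 0;
}
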
xskxceiver.c
166 return !!ifobj->umem->umem; in is_umem_valid()
174 static u64 umem_size(struct xsk_umem_info *umem) in umem_size() argument
176 return umem->num_frames * umem->frame_size; in umem_size()
179 static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, in xsk_configure_umem() argument
185 .frame_size = umem->frame_size, in xsk_configure_umem()
186 .frame_headroom = umem->frame_headroom, in xsk_configure_umem()
191 if (umem->unaligned_mode) in xsk_configure_umem()
194 ret = xsk_umem__create(&umem->umem, buffer, size, in xsk_configure_umem()
195 &umem->fq, &umem->cq, &cfg); in xsk_configure_umem()
199 umem->buffer = buffer; in xsk_configure_umem()
[all …]
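
xsk_configure_umem() above is a thin wrapper around xsk_umem__create() from libxdp (formerly in libbpf). A hedged sketch of the same call, assuming buffer is a page-aligned area of size bytes already mapped by the caller; the header path varies by distribution:

#include <xdp/xsk.h> /* libxdp; older setups use bpf/xsk.h */

static int create_umem(void *buffer, __u64 size, struct xsk_umem **umem,
		       struct xsk_ring_prod *fq, struct xsk_ring_cons *cq)
{
	struct xsk_umem_config cfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
	};

	/* Returns 0 or a negative errno; *umem then owns buffer. */
	return xsk_umem__create(umem, buffer, size, fq, cq, &cfg);
}
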
/Linux-v6.6/drivers/infiniband/sw/siw/
siw_mem.c
69 void siw_umem_release(struct siw_umem *umem, bool dirty) in siw_umem_release() argument
71 struct mm_struct *mm_s = umem->owning_mm; in siw_umem_release()
72 int i, num_pages = umem->num_pages; in siw_umem_release()
77 siw_free_plist(&umem->page_chunk[i], to_free, in siw_umem_release()
78 umem->writable && dirty); in siw_umem_release()
79 kfree(umem->page_chunk[i].plist); in siw_umem_release()
82 atomic64_sub(umem->num_pages, &mm_s->pinned_vm); in siw_umem_release()
85 kfree(umem->page_chunk); in siw_umem_release()
86 kfree(umem); in siw_umem_release()
148 siw_umem_release(mem->umem, true); in siw_free_mem()
[all …]
siw_mem.h
10 void siw_umem_release(struct siw_umem *umem, bool dirty);
58 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
60 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
64 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
65 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
/Linux-v6.6/drivers/infiniband/hw/mana/
wq.c
16 struct ib_umem *umem; in mana_ib_create_wq() local
35 umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size, in mana_ib_create_wq()
37 if (IS_ERR(umem)) { in mana_ib_create_wq()
38 err = PTR_ERR(umem); in mana_ib_create_wq()
44 wq->umem = umem; in mana_ib_create_wq()
49 err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region); in mana_ib_create_wq()
66 ib_umem_release(umem); in mana_ib_create_wq()
90 ib_umem_release(wq->umem); in mana_ib_destroy_wq()
cq.c
35 cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, in mana_ib_create_cq()
37 if (IS_ERR(cq->umem)) { in mana_ib_create_cq()
38 err = PTR_ERR(cq->umem); in mana_ib_create_cq()
44 err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region); in mana_ib_create_cq()
63 ib_umem_release(cq->umem); in mana_ib_create_cq()
76 ib_umem_release(cq->umem); in mana_ib_destroy_cq()
mr.c
128 mr->umem = ib_umem_get(ibdev, start, length, access_flags); in mana_ib_reg_user_mr()
129 if (IS_ERR(mr->umem)) { in mana_ib_reg_user_mr()
130 err = PTR_ERR(mr->umem); in mana_ib_reg_user_mr()
136 err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle); in mana_ib_reg_user_mr()
171 ib_umem_release(mr->umem); in mana_ib_reg_user_mr()
191 if (mr->umem) in mana_ib_dereg_mr()
192 ib_umem_release(mr->umem); in mana_ib_dereg_mr()
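
The mana results show the registration pattern that recurs across the pvrdma and hns entries below: pin the user range with ib_umem_get(), run the device-specific setup, and release the umem on failure and again at destroy time. A sketch with hypothetical my_mr/my_dev_program_mr names standing in for the driver-specific parts:

static int my_reg_user_mr(struct ib_pd *pd, struct my_mr *mr,
			  u64 start, u64 length, int access_flags)
{
	struct ib_umem *umem;
	int err;

	umem = ib_umem_get(pd->device, start, length, access_flags);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	err = my_dev_program_mr(pd->device, umem); /* hypothetical HW step */
	if (err) {
		ib_umem_release(umem);
		return err;
	}
	mr->umem = umem; /* released again in the dereg path */
	return 0;
}
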
/Linux-v6.6/drivers/infiniband/hw/mlx4/
mr.c
77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
183 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
203 for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { in mlx4_ib_umem_write_mtt()
257 int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va, in mlx4_ib_umem_calc_optimal_mtt_size() argument
274 *num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in mlx4_ib_umem_calc_optimal_mtt_size()
276 for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { in mlx4_ib_umem_calc_optimal_mtt_size()
420 mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags); in mlx4_ib_reg_user_mr()
421 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
422 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
426 shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); in mlx4_ib_reg_user_mr()
[all …]
doorbell.c
40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx4_ib_db_map_user()
95 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
/Linux-v6.6/drivers/infiniband/hw/mlx5/
doorbell.c
42 struct ib_umem *umem; member
69 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
71 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
72 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
82 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx5_ib_db_map_user()
100 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
mr.c
55 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
1096 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1138 static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem, in mlx5_umem_dmabuf_default_pgsz() argument
1145 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
1150 struct ib_umem *umem, u64 iova, in alloc_cacheable_mr() argument
1161 if (umem->is_dmabuf) in alloc_cacheable_mr()
1162 page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova); in alloc_cacheable_mr()
1164 page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, in alloc_cacheable_mr()
1169 rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size); in alloc_cacheable_mr()
1170 rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags); in alloc_cacheable_mr()
[all …]
/Linux-v6.6/lib/
test_user_copy.c
47 static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size) in test_check_nonzero_user() argument
65 umem += start; in test_check_nonzero_user()
87 ret |= test(copy_to_user(umem, kmem, size), in test_check_nonzero_user()
93 int retval = check_zeroed_user(umem + start, len); in test_check_nonzero_user()
105 static int test_copy_struct_from_user(char *kmem, char __user *umem, in test_copy_struct_from_user() argument
124 ret |= test(copy_to_user(umem, umem_src, size), in test_copy_struct_from_user()
134 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
147 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
157 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG, in test_copy_struct_from_user()
165 ret |= test(clear_user(umem + ksize, usize - ksize), in test_copy_struct_from_user()
[all …]
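
Here umem is just a raw user pointer, and the test exercises copy_struct_from_user()'s versioned-struct semantics: a shorter user struct has its missing tail zero-filled, while a longer one succeeds only if the trailing bytes are zero (otherwise -E2BIG). A hedged kernel-side sketch with a made-up struct:

struct my_args_v2 {
	__u64 flags;
	__u64 addr;
	__u64 newer_field; /* absent from v1 user space headers */
};

static int parse_args(void __user *uarg, size_t usize,
		      struct my_args_v2 *args)
{
	/* usize < sizeof(*args): missing tail is zeroed.
	 * usize > sizeof(*args): fails unless extra bytes are zero. */
	return copy_struct_from_user(args, sizeof(*args), uarg, usize);
}
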
/Linux-v6.6/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
117 struct ib_umem *umem; in pvrdma_reg_user_mr() local
129 umem = ib_umem_get(pd->device, start, length, access_flags); in pvrdma_reg_user_mr()
130 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
133 return ERR_CAST(umem); in pvrdma_reg_user_mr()
136 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
152 mr->umem = umem; in pvrdma_reg_user_mr()
161 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
190 ib_umem_release(umem); in pvrdma_reg_user_mr()
257 mr->umem = NULL; in pvrdma_alloc_mr()
294 ib_umem_release(mr->umem); in pvrdma_dereg_mr()
pvrdma_srq.c
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
171 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
209 ib_umem_release(srq->umem); in pvrdma_create_srq()
229 ib_umem_release(srq->umem); in pvrdma_free_srq()
/Linux-v6.6/drivers/infiniband/hw/hns/
hns_roce_db.c
32 page->umem = ib_umem_get(context->ibucontext.device, page_addr, in hns_roce_db_map_user()
34 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
35 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
44 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
45 db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
63 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()
/Linux-v6.6/drivers/vdpa/vdpa_user/
vduse_dev.c
115 struct vduse_umem *umem; member
1001 if (!dev->umem) in vduse_dev_dereg_umem()
1008 if (dev->umem->iova != iova || size != dev->domain->bounce_size) in vduse_dev_dereg_umem()
1012 unpin_user_pages_dirty_lock(dev->umem->pages, in vduse_dev_dereg_umem()
1013 dev->umem->npages, true); in vduse_dev_dereg_umem()
1014 atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm); in vduse_dev_dereg_umem()
1015 mmdrop(dev->umem->mm); in vduse_dev_dereg_umem()
1016 vfree(dev->umem->pages); in vduse_dev_dereg_umem()
1017 kfree(dev->umem); in vduse_dev_dereg_umem()
1018 dev->umem = NULL; in vduse_dev_dereg_umem()
[all …]
