Lines Matching +full:dma +full:- +full:mem

1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
27 int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length) in mem_check_range() argument
29 switch (mem->type) { in mem_check_range()
35 if (iova < mem->iova || in mem_check_range()
36 length > mem->length || in mem_check_range()
37 iova > mem->iova + mem->length - length) in mem_check_range()
38 return -EFAULT; in mem_check_range()
42 return -EFAULT; in mem_check_range()
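
The mem_check_range() fragments above are the bounds test for a registered region: an access of length bytes at iova must fall entirely inside [mem->iova, mem->iova + mem->length). A minimal userspace sketch of the same three-part test, assuming nothing about the driver beyond those lines (the struct and function names here are hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct region {
	uint64_t iova;		/* first virtual address covered */
	size_t length;		/* bytes covered */
};

/* Return 0 when [iova, iova + length) lies inside the region, -1 otherwise:
 * reject accesses that start before the region, are longer than the region,
 * or run past its end.
 */
static int check_range(const struct region *r, uint64_t iova, size_t length)
{
	if (iova < r->iova ||
	    length > r->length ||
	    iova > r->iova + r->length - length)
		return -1;
	return 0;
}

int main(void)
{
	struct region r = { .iova = 0x1000, .length = 0x2000 };

	printf("%d\n", check_range(&r, 0x1000, 0x2000));	/* 0: exact fit */
	printf("%d\n", check_range(&r, 0x2fff, 2));		/* -1: runs past the end */
	return 0;
}
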
50 static void rxe_mem_init(int access, struct rxe_mem *mem) in rxe_mem_init() argument
52 u32 lkey = mem->pelem.index << 8 | rxe_get_key(); in rxe_mem_init()
55 mem->ibmr.lkey = lkey; in rxe_mem_init()
56 mem->ibmr.rkey = rkey; in rxe_mem_init()
57 mem->state = RXE_MEM_STATE_INVALID; in rxe_mem_init()
58 mem->type = RXE_MEM_TYPE_NONE; in rxe_mem_init()
59 mem->map_shift = ilog2(RXE_BUF_PER_MAP); in rxe_mem_init()
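
rxe_mem_init() above derives the lkey by packing the object's pool index into the upper bits and an 8-bit value from rxe_get_key() into the low byte; lookup_mem() near the end of this listing recovers that index from a caller-supplied key before calling rxe_pool_get_index(). A small sketch of the packing, with illustrative helper names only:

#include <stdint.h>
#include <stdio.h>

/* Pack a pool index and an 8-bit key byte into one 32-bit lkey/rkey:
 * the upper bits locate the object in its pool, the low byte is the
 * per-registration key value.
 */
static uint32_t make_key(uint32_t pool_index, uint8_t key_byte)
{
	return pool_index << 8 | key_byte;
}

static uint32_t key_to_index(uint32_t key)
{
	return key >> 8;	/* drop the key byte, keep the pool index */
}

int main(void)
{
	uint32_t lkey = make_key(42, 0xa7);

	printf("lkey=0x%x index=%u\n", (unsigned)lkey,
	       (unsigned)key_to_index(lkey));	/* 0x2aa7, 42 */
	return 0;
}
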
64 struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem); in rxe_mem_cleanup() local
67 ib_umem_release(mem->umem); in rxe_mem_cleanup()
69 if (mem->map) { in rxe_mem_cleanup()
70 for (i = 0; i < mem->num_map; i++) in rxe_mem_cleanup()
71 kfree(mem->map[i]); in rxe_mem_cleanup()
73 kfree(mem->map); in rxe_mem_cleanup()
77 static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf) in rxe_mem_alloc() argument
81 struct rxe_map **map = mem->map; in rxe_mem_alloc()
83 num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP; in rxe_mem_alloc()
85 mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL); in rxe_mem_alloc()
86 if (!mem->map) in rxe_mem_alloc()
90 mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL); in rxe_mem_alloc()
91 if (!mem->map[i]) in rxe_mem_alloc()
97 mem->map_shift = ilog2(RXE_BUF_PER_MAP); in rxe_mem_alloc()
98 mem->map_mask = RXE_BUF_PER_MAP - 1; in rxe_mem_alloc()
100 mem->num_buf = num_buf; in rxe_mem_alloc()
101 mem->num_map = num_map; in rxe_mem_alloc()
102 mem->max_buf = num_map * RXE_BUF_PER_MAP; in rxe_mem_alloc()
107 for (i--; i >= 0; i--) in rxe_mem_alloc()
108 kfree(mem->map[i]); in rxe_mem_alloc()
110 kfree(mem->map); in rxe_mem_alloc()
112 return -ENOMEM; in rxe_mem_alloc()
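
rxe_mem_alloc() above builds a two-level table: a kmalloc_array() of ceil(num_buf / RXE_BUF_PER_MAP) map pointers, each then pointing at a separately allocated block of buffer descriptors, with the error path freeing whatever was already allocated. A userspace sketch of that pattern; the constants and type names are stand-ins, not the driver's:

#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>

#define BUFS_PER_MAP 256	/* stand-in for RXE_BUF_PER_MAP */

struct buf_desc { uint64_t addr; size_t size; };
struct map_block { struct buf_desc buf[BUFS_PER_MAP]; };

/* Allocate the pointer array plus ceil(num_buf / BUFS_PER_MAP) map blocks.
 * On failure, free the blocks allocated so far and then the array,
 * mirroring the unwind at the end of rxe_mem_alloc().
 */
static struct map_block **alloc_map(int num_buf, int *num_map_out)
{
	int num_map = (num_buf + BUFS_PER_MAP - 1) / BUFS_PER_MAP;
	struct map_block **map = calloc(num_map, sizeof(*map));
	int i;

	if (!map)
		return NULL;

	for (i = 0; i < num_map; i++) {
		map[i] = malloc(sizeof(*map[i]));
		if (!map[i])
			goto err;
	}

	*num_map_out = num_map;
	return map;

err:
	for (i--; i >= 0; i--)
		free(map[i]);
	free(map);
	return NULL;
}

int main(void)
{
	int num_map = 0;
	struct map_block **map = alloc_map(1000, &num_map);	/* 1000 bufs -> 4 blocks */

	if (map) {
		for (int i = 0; i < num_map; i++)
			free(map[i]);
		free(map);
	}
	return 0;
}
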
116 int access, struct rxe_mem *mem) in rxe_mem_init_dma() argument
118 rxe_mem_init(access, mem); in rxe_mem_init_dma()
120 mem->ibmr.pd = &pd->ibpd; in rxe_mem_init_dma()
121 mem->access = access; in rxe_mem_init_dma()
122 mem->state = RXE_MEM_STATE_VALID; in rxe_mem_init_dma()
123 mem->type = RXE_MEM_TYPE_DMA; in rxe_mem_init_dma()
128 struct rxe_mem *mem) in rxe_mem_init_user() argument
138 umem = ib_umem_get(pd->ibpd.device, start, length, access); in rxe_mem_init_user()
142 err = -EINVAL; in rxe_mem_init_user()
146 mem->umem = umem; in rxe_mem_init_user()
149 rxe_mem_init(access, mem); in rxe_mem_init_user()
151 err = rxe_mem_alloc(mem, num_buf); in rxe_mem_init_user()
158 mem->page_shift = PAGE_SHIFT; in rxe_mem_init_user()
159 mem->page_mask = PAGE_SIZE - 1; in rxe_mem_init_user()
162 map = mem->map; in rxe_mem_init_user()
164 buf = map[0]->buf; in rxe_mem_init_user()
166 for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) { in rxe_mem_init_user()
169 buf = map[0]->buf; in rxe_mem_init_user()
177 err = -ENOMEM; in rxe_mem_init_user()
181 buf->addr = (uintptr_t)vaddr; in rxe_mem_init_user()
182 buf->size = PAGE_SIZE; in rxe_mem_init_user()
189 mem->ibmr.pd = &pd->ibpd; in rxe_mem_init_user()
190 mem->umem = umem; in rxe_mem_init_user()
191 mem->access = access; in rxe_mem_init_user()
192 mem->length = length; in rxe_mem_init_user()
193 mem->iova = iova; in rxe_mem_init_user()
194 mem->va = start; in rxe_mem_init_user()
195 mem->offset = ib_umem_offset(umem); in rxe_mem_init_user()
196 mem->state = RXE_MEM_STATE_VALID; in rxe_mem_init_user()
197 mem->type = RXE_MEM_TYPE_MR; in rxe_mem_init_user()
206 int max_pages, struct rxe_mem *mem) in rxe_mem_init_fast() argument
210 rxe_mem_init(0, mem); in rxe_mem_init_fast()
213 mem->ibmr.rkey = mem->ibmr.lkey; in rxe_mem_init_fast()
215 err = rxe_mem_alloc(mem, max_pages); in rxe_mem_init_fast()
219 mem->ibmr.pd = &pd->ibpd; in rxe_mem_init_fast()
220 mem->max_buf = max_pages; in rxe_mem_init_fast()
221 mem->state = RXE_MEM_STATE_FREE; in rxe_mem_init_fast()
222 mem->type = RXE_MEM_TYPE_MR; in rxe_mem_init_fast()
231 struct rxe_mem *mem, in lookup_iova() argument
237 size_t offset = iova - mem->iova + mem->offset; in lookup_iova()
242 if (likely(mem->page_shift)) { in lookup_iova()
243 *offset_out = offset & mem->page_mask; in lookup_iova()
244 offset >>= mem->page_shift; in lookup_iova()
245 *n_out = offset & mem->map_mask; in lookup_iova()
246 *m_out = offset >> mem->map_shift; in lookup_iova()
251 length = mem->map[map_index]->buf[buf_index].size; in lookup_iova()
254 offset -= length; in lookup_iova()
261 length = mem->map[map_index]->buf[buf_index].size; in lookup_iova()
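
lookup_iova() above converts a byte offset into the region (iova - mem->iova + mem->offset) into three coordinates. On the fast path, when page_shift/page_mask are set because every buffer is one page, the low bits give the offset inside the buffer, the next bits select the buffer within a map block, and the remaining high bits select the map block; otherwise the fallback walks buffer sizes one at a time. A worked sketch of the fast-path arithmetic, with hypothetical constants (4 KiB pages, 256 buffers per map block):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT_EX 12	/* 4 KiB pages */
#define PAGE_MASK_EX ((1u << PAGE_SHIFT_EX) - 1)
#define MAP_SHIFT_EX 8		/* 256 buffers per map block */
#define MAP_MASK_EX ((1u << MAP_SHIFT_EX) - 1)

/* Split a byte offset into (map block m, buffer n, offset within buffer)
 * using the same shift/mask steps as the page_shift branch of lookup_iova().
 */
static void split_offset(uint64_t offset, int *m, int *n, size_t *in_buf)
{
	*in_buf = offset & PAGE_MASK_EX;
	offset >>= PAGE_SHIFT_EX;	/* now a buffer (page) index */
	*n = offset & MAP_MASK_EX;	/* buffer within its map block */
	*m = offset >> MAP_SHIFT_EX;	/* which map block */
}

int main(void)
{
	int m, n;
	size_t in_buf;

	/* byte 5 of page 300: page 300 is buffer 44 of map block 1 */
	split_offset(300ull * 4096 + 5, &m, &n, &in_buf);
	printf("m=%d n=%d offset=%zu\n", m, n, in_buf);	/* m=1 n=44 offset=5 */
	return 0;
}
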
270 void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length) in iova_to_vaddr() argument
276 if (mem->state != RXE_MEM_STATE_VALID) { in iova_to_vaddr()
277 pr_warn("mem not in valid state\n"); in iova_to_vaddr()
282 if (!mem->map) { in iova_to_vaddr()
287 if (mem_check_range(mem, iova, length)) { in iova_to_vaddr()
293 lookup_iova(mem, iova, &m, &n, &offset); in iova_to_vaddr()
295 if (offset + length > mem->map[m]->buf[n].size) { in iova_to_vaddr()
301 addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset; in iova_to_vaddr()
307 /* copy data from a range (vaddr, vaddr+length-1) to or from
308 * a mem object starting at iova. Compute incremental value of
309 * crc32 if crcp is not zero. caller must hold a reference to mem
311 int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length, in rxe_mem_copy() argument
327 if (mem->type == RXE_MEM_TYPE_DMA) { in rxe_mem_copy()
339 *crcp = rxe_crc32(to_rdev(mem->ibmr.device), in rxe_mem_copy()
345 WARN_ON_ONCE(!mem->map); in rxe_mem_copy()
347 err = mem_check_range(mem, iova, length); in rxe_mem_copy()
349 err = -EFAULT; in rxe_mem_copy()
353 lookup_iova(mem, iova, &m, &i, &offset); in rxe_mem_copy()
355 map = mem->map + m; in rxe_mem_copy()
356 buf = map[0]->buf + i; in rxe_mem_copy()
361 va = (u8 *)(uintptr_t)buf->addr + offset; in rxe_mem_copy()
365 bytes = buf->size - offset; in rxe_mem_copy()
373 crc = rxe_crc32(to_rdev(mem->ibmr.device), in rxe_mem_copy()
376 length -= bytes; in rxe_mem_copy()
386 buf = map[0]->buf; in rxe_mem_copy()
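
rxe_mem_copy() above handles RXE_MEM_TYPE_DMA with a direct memcpy, and otherwise resolves the start position with lookup_iova() and copies piecewise: each pass moves at most the bytes remaining in the current buffer (buf->size - offset), then steps to the next buffer and, when a map block is exhausted, to the next map. A simplified userspace version of that walk, without the CRC update and relying on the caller to have range-checked first; the types and names are stand-ins:

#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define BUFS_PER_MAP 256

struct buf_desc { uint64_t addr; size_t size; };
struct map_block { struct buf_desc buf[BUFS_PER_MAP]; };

/* Copy length bytes out of the two-level table, starting offset bytes into
 * buffer n of map block m, crossing buffer and map-block boundaries as the
 * while loop in rxe_mem_copy() does.
 */
static void copy_from_bufs(struct map_block **map, int m, int n,
			   size_t offset, void *dest, size_t length)
{
	struct map_block **cur = map + m;
	struct buf_desc *buf = cur[0]->buf + n;
	char *out = dest;

	while (length > 0) {
		size_t bytes = buf->size - offset;	/* room left in this buffer */

		if (bytes > length)
			bytes = length;

		memcpy(out, (char *)(uintptr_t)buf->addr + offset, bytes);
		out += bytes;
		length -= bytes;
		offset = 0;	/* later buffers are read from their start */

		if (++buf == cur[0]->buf + BUFS_PER_MAP) {	/* end of this block */
			cur++;
			buf = cur[0]->buf;
		}
	}
}

int main(void)
{
	static struct map_block blk;
	static char p0[4] = "ABC", p1[4] = "DEF";
	struct map_block *map[1] = { &blk };
	char out[8] = { 0 };

	blk.buf[0] = (struct buf_desc){ (uintptr_t)p0, sizeof(p0) };
	blk.buf[1] = (struct buf_desc){ (uintptr_t)p1, sizeof(p1) };

	/* 5 bytes starting 2 bytes into buffer 0 span both buffers */
	copy_from_bufs(map, 0, 0, 2, out, 5);
	return 0;
}
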
400 * under the control of a dma descriptor
405 struct rxe_dma_info *dma, in copy_data() argument
412 struct rxe_sge *sge = &dma->sge[dma->cur_sge]; in copy_data()
413 int offset = dma->sge_offset; in copy_data()
414 int resid = dma->resid; in copy_data()
415 struct rxe_mem *mem = NULL; in copy_data() local
423 err = -EINVAL; in copy_data()
427 if (sge->length && (offset < sge->length)) { in copy_data()
428 mem = lookup_mem(pd, access, sge->lkey, lookup_local); in copy_data()
429 if (!mem) { in copy_data()
430 err = -EINVAL; in copy_data()
438 if (offset >= sge->length) { in copy_data()
439 if (mem) { in copy_data()
440 rxe_drop_ref(mem); in copy_data()
441 mem = NULL; in copy_data()
444 dma->cur_sge++; in copy_data()
447 if (dma->cur_sge >= dma->num_sge) { in copy_data()
448 err = -ENOSPC; in copy_data()
452 if (sge->length) { in copy_data()
453 mem = lookup_mem(pd, access, sge->lkey, in copy_data()
455 if (!mem) { in copy_data()
456 err = -EINVAL; in copy_data()
464 if (bytes > sge->length - offset) in copy_data()
465 bytes = sge->length - offset; in copy_data()
468 iova = sge->addr + offset; in copy_data()
470 err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp); in copy_data()
475 resid -= bytes; in copy_data()
476 length -= bytes; in copy_data()
481 dma->sge_offset = offset; in copy_data()
482 dma->resid = resid; in copy_data()
484 if (mem) in copy_data()
485 rxe_drop_ref(mem); in copy_data()
490 if (mem) in copy_data()
491 rxe_drop_ref(mem); in copy_data()
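
copy_data() above moves payload between a flat buffer and a work request's scatter/gather list. The dma descriptor tracks the current SGE (cur_sge), the offset already consumed within it (sge_offset) and the payload bytes still outstanding (resid); when one SGE is used up, the loop drops its MR reference and looks up the MR for the next SGE, and advance_dma_data() just below performs the same walk without copying anything. A userspace sketch of the bookkeeping alone, with the MR lookup and rxe_mem_copy() replaced by a plain memcpy; the types and names are simplified stand-ins:

#include <string.h>
#include <stddef.h>

struct sge_ex { void *addr; unsigned int length; };

struct dma_state {
	struct sge_ex *sge;		/* scatter/gather list */
	int num_sge;
	int cur_sge;			/* SGE currently being consumed */
	unsigned int sge_offset;	/* bytes already consumed in that SGE */
	unsigned int resid;		/* payload bytes still to transfer */
};

/* Copy length bytes from the SGE list into dest, advancing cur_sge,
 * sge_offset and resid the way copy_data() does; returns -1 when the
 * list runs out of space (the driver returns -ENOSPC there).
 */
static int copy_out(struct dma_state *dma, void *dest, unsigned int length)
{
	char *out = dest;

	if (length > dma->resid)
		return -1;

	while (length > 0) {
		struct sge_ex *sge = &dma->sge[dma->cur_sge];

		if (dma->sge_offset >= sge->length) {	/* SGE exhausted */
			if (++dma->cur_sge >= dma->num_sge)
				return -1;
			dma->sge_offset = 0;
			continue;
		}

		unsigned int bytes = sge->length - dma->sge_offset;

		if (bytes > length)
			bytes = length;

		memcpy(out, (char *)sge->addr + dma->sge_offset, bytes);
		out += bytes;
		dma->sge_offset += bytes;
		dma->resid -= bytes;
		length -= bytes;
	}
	return 0;
}

int main(void)
{
	char a[3] = "ab", b[3] = "cd", out[8] = { 0 };
	struct sge_ex list[2] = { { a, 2 }, { b, 2 } };
	struct dma_state dma = { list, 2, 0, 0, 4 };

	return copy_out(&dma, out, 3);	/* copies "abc", one byte of resid left */
}
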
496 int advance_dma_data(struct rxe_dma_info *dma, unsigned int length) in advance_dma_data() argument
498 struct rxe_sge *sge = &dma->sge[dma->cur_sge]; in advance_dma_data()
499 int offset = dma->sge_offset; in advance_dma_data()
500 int resid = dma->resid; in advance_dma_data()
505 if (offset >= sge->length) { in advance_dma_data()
507 dma->cur_sge++; in advance_dma_data()
509 if (dma->cur_sge >= dma->num_sge) in advance_dma_data()
510 return -ENOSPC; in advance_dma_data()
515 if (bytes > sge->length - offset) in advance_dma_data()
516 bytes = sge->length - offset; in advance_dma_data()
519 resid -= bytes; in advance_dma_data()
520 length -= bytes; in advance_dma_data()
523 dma->sge_offset = offset; in advance_dma_data()
524 dma->resid = resid; in advance_dma_data()
529 /* (1) find the mem (mr or mw) corresponding to lkey/rkey
531 * (2) verify that the (qp) pd matches the mem pd
532 * (3) verify that the mem can support the requested access
533 * (4) verify that mem state is valid
538 struct rxe_mem *mem; in lookup_mem() local
539 struct rxe_dev *rxe = to_rdev(pd->ibpd.device); in lookup_mem()
542 mem = rxe_pool_get_index(&rxe->mr_pool, index); in lookup_mem()
543 if (!mem) in lookup_mem()
546 if (unlikely((type == lookup_local && mr_lkey(mem) != key) || in lookup_mem()
547 (type == lookup_remote && mr_rkey(mem) != key) || in lookup_mem()
548 mr_pd(mem) != pd || in lookup_mem()
549 (access && !(access & mem->access)) || in lookup_mem()
550 mem->state != RXE_MEM_STATE_VALID)) { in lookup_mem()
551 rxe_drop_ref(mem); in lookup_mem()
552 mem = NULL; in lookup_mem()
555 return mem; in lookup_mem()
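
lookup_mem() above implements the four-step comment: fetch the candidate from the MR pool by its index, then drop the reference and return NULL unless the lkey or rkey matches the caller's key, the protection domain matches, the requested access is covered, and the state is RXE_MEM_STATE_VALID. A compact sketch of that validation order against a hypothetical descriptor; none of these names are the driver's API:

#include <stdint.h>
#include <stddef.h>

enum mr_state { MR_INVALID, MR_FREE, MR_VALID };
enum lookup_kind { LOOKUP_LOCAL, LOOKUP_REMOTE };

struct mr_desc {
	uint32_t lkey, rkey;
	const void *pd;		/* owning protection domain */
	int access;		/* bitmask of allowed access flags */
	enum mr_state state;
};

/* Return mr only if it passes the checks listed in the lookup_mem() comment:
 * right key for the lookup type, right pd, access allowed, state valid.
 * (The driver also drops its pool reference before returning NULL.)
 */
static struct mr_desc *check_mr(struct mr_desc *mr, const void *pd,
				int access, uint32_t key, enum lookup_kind type)
{
	if ((type == LOOKUP_LOCAL && mr->lkey != key) ||
	    (type == LOOKUP_REMOTE && mr->rkey != key) ||
	    mr->pd != pd ||
	    (access && !(access & mr->access)) ||
	    mr->state != MR_VALID)
		return NULL;

	return mr;
}

int main(void)
{
	static const int a_pd;
	struct mr_desc mr = { 0x2aa7, 0x2aa7, &a_pd, 0x1, MR_VALID };

	return check_mr(&mr, &a_pd, 0x1, 0x2aa7, LOOKUP_REMOTE) ? 0 : 1;
}
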