/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;

struct ib_umem {
	struct ib_device *ibdev;	/* device the memory is registered against */
	struct mm_struct *owning_mm;	/* mm the pages were pinned against */
	u64 iova;			/* device virtual address of the mapping */
	size_t length;			/* length of the mapping, in bytes */
	unsigned long address;		/* userspace virtual address of the start */
	u32 writable : 1;		/* pages are mapped for write access */
	u32 is_odp : 1;			/* on-demand paging umem */
	struct work_struct work;
	struct sg_table sg_head;	/* scatter/gather table of pinned pages */
	int nmap;			/* number of DMA-mapped SG entries */
	unsigned int sg_nents;		/* number of SG entries */
};

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}
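
/*
 * A worked example, with illustrative values only: assuming PAGE_SIZE is
 * 4096, a umem starting at address 0x10234 yields ib_umem_offset() == 0x234,
 * the byte offset of the start within its first page.
 */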

/*
 * Number of pgsz-aligned, pgsz-sized DMA blocks needed to cover the
 * range [iova, iova + length).
 */
static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
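
/*
 * Worked example, with illustrative values only: iova = 0x2100,
 * length = 0x1000, pgsz = 0x1000.  ALIGN(0x3100, 0x1000) = 0x4000 and
 * ALIGN_DOWN(0x2100, 0x1000) = 0x2000, so the umem spans
 * (0x4000 - 0x2000) / 0x1000 = 2 DMA blocks.
 */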

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
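
/*
 * Typical usage, as a minimal sketch; page_size, dma_list and i are
 * hypothetical driver-side names, not part of this header:
 *
 *	struct ib_block_iter biter;
 *	unsigned int i = 0;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, page_size)
 *		dma_list[i++] = rdma_block_iter_dma_address(&biter);
 *
 * rdma_block_iter_dma_address() is declared in <rdma/ib_verbs.h>.
 */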

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
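
/*
 * Typical lifecycle in a driver's memory-registration path, as a minimal
 * sketch; mr_addr, mr_length and the surrounding error handling are
 * hypothetical:
 *
 *	struct ib_umem *umem;
 *
 *	umem = ib_umem_get(device, mr_addr, mr_length, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *	...
 *	ib_umem_release(umem);
 */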

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EINVAL);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem,
				    size_t offset, size_t length)
{
	return -EINVAL;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}

#endif /* CONFIG_INFINIBAND_USER_MEM */

#endif /* IB_UMEM_H */