/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_QUEUE_H
#define RXE_QUEUE_H

/* for definition of shared struct rxe_queue_buf */
#include <uapi/rdma/rdma_user_rxe.h>

/* Implements a simple circular buffer that can optionally be
 * shared between user space and the kernel and can be resized.
 * The requested element size is rounded up to a power of 2,
 * and the number of elements in the buffer is also rounded up
 * to a power of 2. Since the queue is empty when the producer
 * and consumer indices match, the maximum capacity of the
 * queue is one less than the number of element slots.
 *
 * Notes:
 * - Kernel space indices are always masked off to q->index_mask
 *   before storing, so they do not need to be checked on reads.
 * - User space indices may be out of range and must be
 *   masked before use when read.
 * - The kernel indices for shared queues must not be written
 *   by user space, so a local copy is used and a shared copy is
 *   stored when the local copy changes.
 * - By passing the type in the parameter list separately from q,
 *   the compiler can eliminate the switch statement when the
 *   actual queue type is known at the call site. This is done in
 *   the performance path; less critical paths just pass q->type.
 */

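/* A worked illustration of the rounding rules above (an example, not
 * code from this driver): asking for 5 elements yields 8 slots (the
 * next power of 2), index_mask == 7 and a usable capacity of 7; an
 * index that reaches 8 wraps back to 0 once masked.
 */
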
/* type of queue */
enum queue_type {
	QUEUE_TYPE_KERNEL,
	QUEUE_TYPE_TO_USER,
	QUEUE_TYPE_FROM_USER,
};

struct rxe_queue {
	struct rxe_dev *rxe;
	struct rxe_queue_buf *buf;
	struct rxe_mmap_info *ip;
	size_t buf_size;
	size_t elem_size;
	unsigned int log2_elem_size;
	u32 index_mask;
	enum queue_type type;
	/* private copy of index for shared queues between
	 * kernel space and user space. Kernel reads and writes
	 * this copy and then replicates to rxe_queue_buf
	 * for read access by user space.
	 */
	u32 index;
};

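/* Note: for a QUEUE_TYPE_TO_USER queue the kernel is the producer, so
 * advance_producer() below bumps the private q->index first and only
 * then replicates it to q->buf->producer_index for user space to read;
 * the mirrored field is never trusted on the way back in.
 */
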
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p);

void rxe_queue_reset(struct rxe_queue *q);

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
				 unsigned int elem_size, enum queue_type type);

int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf,
		     /* Protect producers while resizing queue */
		     spinlock_t *producer_lock,
		     /* Protect consumers while resizing queue */
		     spinlock_t *consumer_lock);

void rxe_queue_cleanup(struct rxe_queue *queue);

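/* A sketch of creating a kernel-only queue with the functions above;
 * the element type, the requested depth and the error handling are
 * assumptions of the example, not part of this header:
 *
 *	int num_elem = 100;
 *	struct rxe_queue *q;
 *
 *	q = rxe_queue_init(rxe, &num_elem, sizeof(struct rxe_send_wqe),
 *			   QUEUE_TYPE_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *
 * num_elem is passed by pointer so that the rounded-up capacity can be
 * reported back to the caller.
 */
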
static inline int next_index(struct rxe_queue *q, int index)
{
	return (index + 1) & q->buf->index_mask;
}

static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
{
	u32 prod;
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		/* protect user space index */
		prod = smp_load_acquire(&q->buf->producer_index);
		cons = q->index;
		break;
	case QUEUE_TYPE_TO_USER:
		prod = q->index;
		/* protect user space index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	case QUEUE_TYPE_KERNEL:
		prod = q->buf->producer_index;
		cons = q->buf->consumer_index;
		break;
	}

	return ((prod - cons) & q->index_mask) == 0;
}

static inline int queue_full(struct rxe_queue *q, enum queue_type type)
{
	u32 prod;
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		/* protect user space index */
		prod = smp_load_acquire(&q->buf->producer_index);
		cons = q->index;
		break;
	case QUEUE_TYPE_TO_USER:
		prod = q->index;
		/* protect user space index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	case QUEUE_TYPE_KERNEL:
		prod = q->buf->producer_index;
		cons = q->buf->consumer_index;
		break;
	}

	return ((prod + 1 - cons) & q->index_mask) == 0;
}

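/* E.g. with index_mask == 7, prod == 6 and cons == 7: the ring holds
 * (6 - 7) & 7 == 7 elements, its maximum, and the test above gives
 * (6 + 1 - 7) & 7 == 0, i.e. full.
 */
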
static inline unsigned int queue_count(const struct rxe_queue *q,
				       enum queue_type type)
{
	u32 prod;
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		/* protect user space index */
		prod = smp_load_acquire(&q->buf->producer_index);
		cons = q->index;
		break;
	case QUEUE_TYPE_TO_USER:
		prod = q->index;
		/* protect user space index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		break;
	case QUEUE_TYPE_KERNEL:
		prod = q->buf->producer_index;
		cons = q->buf->consumer_index;
		break;
	}

	return (prod - cons) & q->index_mask;
}

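/* The unsigned subtraction handles index wraparound: with
 * index_mask == 7, prod == 2 and cons == 6, (2 - 6) & 7 == 4
 * elements are queued.
 */
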
static inline void advance_producer(struct rxe_queue *q, enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		pr_warn_once("Normally kernel should not write user space index\n");
		/* protect user space index */
		prod = smp_load_acquire(&q->buf->producer_index);
		prod = (prod + 1) & q->index_mask;
		/* same protection on the store */
		smp_store_release(&q->buf->producer_index, prod);
		break;
	case QUEUE_TYPE_TO_USER:
		prod = q->index;
		q->index = (prod + 1) & q->index_mask;
		q->buf->producer_index = q->index;
		break;
	case QUEUE_TYPE_KERNEL:
		prod = q->buf->producer_index;
		q->buf->producer_index = (prod + 1) & q->index_mask;
		break;
	}
}

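/* A sketch of the kernel producer path built from the helpers in this
 * file; the locking, the wqe pointer and the error handling are
 * assumptions of the example:
 *
 *	if (queue_full(q, QUEUE_TYPE_KERNEL))
 *		return -ENOMEM;
 *	wqe = producer_addr(q, QUEUE_TYPE_KERNEL);
 *	memcpy(wqe, src, q->elem_size);
 *	advance_producer(q, QUEUE_TYPE_KERNEL);
 */
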
static inline void advance_consumer(struct rxe_queue *q, enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		cons = q->index;
		q->index = (cons + 1) & q->index_mask;
		q->buf->consumer_index = q->index;
		break;
	case QUEUE_TYPE_TO_USER:
		pr_warn_once("Normally kernel should not write user space index\n");
		/* protect user space index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		cons = (cons + 1) & q->index_mask;
		/* same protection on the store */
		smp_store_release(&q->buf->consumer_index, cons);
		break;
	case QUEUE_TYPE_KERNEL:
		cons = q->buf->consumer_index;
		q->buf->consumer_index = (cons + 1) & q->index_mask;
		break;
	}
}

static inline void *producer_addr(struct rxe_queue *q, enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		/* protect user space index */
		prod = smp_load_acquire(&q->buf->producer_index);
		prod &= q->index_mask;
		break;
	case QUEUE_TYPE_TO_USER:
		prod = q->index;
		break;
	case QUEUE_TYPE_KERNEL:
		prod = q->buf->producer_index;
		break;
	}

	return q->buf->data + (prod << q->log2_elem_size);
}

static inline void *consumer_addr(struct rxe_queue *q, enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		cons = q->index;
		break;
	case QUEUE_TYPE_TO_USER:
		/* protect user space index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		cons &= q->index_mask;
		break;
	case QUEUE_TYPE_KERNEL:
		cons = q->buf->consumer_index;
		break;
	}

	return q->buf->data + (cons << q->log2_elem_size);
}

static inline unsigned int producer_index(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 prod;

	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		/* protect user space index */
		prod = smp_load_acquire(&q->buf->producer_index);
		prod &= q->index_mask;
		break;
	case QUEUE_TYPE_TO_USER:
		prod = q->index;
		break;
	case QUEUE_TYPE_KERNEL:
		prod = q->buf->producer_index;
		break;
	}

	return prod;
}

static inline unsigned int consumer_index(struct rxe_queue *q,
					  enum queue_type type)
{
	u32 cons;

	switch (type) {
	case QUEUE_TYPE_FROM_USER:
		cons = q->index;
		break;
	case QUEUE_TYPE_TO_USER:
		/* protect user space index */
		cons = smp_load_acquire(&q->buf->consumer_index);
		cons &= q->index_mask;
		break;
	case QUEUE_TYPE_KERNEL:
		cons = q->buf->consumer_index;
		break;
	}

	return cons;
}

static inline void *addr_from_index(struct rxe_queue *q,
				    unsigned int index)
{
	return q->buf->data + ((index & q->index_mask)
				<< q->buf->log2_elem_size);
}

static inline unsigned int index_from_addr(const struct rxe_queue *q,
					   const void *addr)
{
	return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
			& q->index_mask;
}

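/* The two helpers above are inverses up to masking: assuming
 * q->log2_elem_size and q->buf->log2_elem_size are kept in sync by
 * rxe_queue_init(), index_from_addr(q, addr_from_index(q, i)) ==
 * (i & q->index_mask) for any index i.
 */
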
static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
{
	return queue_empty(q, type) ? NULL : consumer_addr(q, type);
}

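/* A sketch of draining a queue from the kernel side; the elem pointer
 * and the process() callback are assumptions of the example:
 *
 *	while ((elem = queue_head(q, QUEUE_TYPE_KERNEL))) {
 *		process(elem);
 *		advance_consumer(q, QUEUE_TYPE_KERNEL);
 *	}
 */
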
#endif /* RXE_QUEUE_H */