1 /*
2 * Copyright(c) 2016 - 2019 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48 #include <linux/hash.h>
49 #include <linux/bitops.h>
50 #include <linux/lockdep.h>
51 #include <linux/vmalloc.h>
52 #include <linux/slab.h>
53 #include <rdma/ib_verbs.h>
54 #include <rdma/ib_hdrs.h>
55 #include <rdma/opa_addr.h>
56 #include <rdma/uverbs_ioctl.h>
57 #include "qp.h"
58 #include "vt.h"
59 #include "trace.h"
60
61 #define RVT_RWQ_COUNT_THRESHOLD 16
62
63 static void rvt_rc_timeout(struct timer_list *t);
64
65 /*
66 * Convert the AETH RNR timeout code into the number of microseconds.
67 */
68 static const u32 ib_rvt_rnr_table[32] = {
69 655360, /* 00: 655.36 */
70 10, /* 01: .01 */
20, /* 02: .02 */
72 30, /* 03: .03 */
73 40, /* 04: .04 */
74 60, /* 05: .06 */
75 80, /* 06: .08 */
76 120, /* 07: .12 */
77 160, /* 08: .16 */
78 240, /* 09: .24 */
79 320, /* 0A: .32 */
80 480, /* 0B: .48 */
81 640, /* 0C: .64 */
82 960, /* 0D: .96 */
83 1280, /* 0E: 1.28 */
84 1920, /* 0F: 1.92 */
85 2560, /* 10: 2.56 */
86 3840, /* 11: 3.84 */
87 5120, /* 12: 5.12 */
88 7680, /* 13: 7.68 */
89 10240, /* 14: 10.24 */
90 15360, /* 15: 15.36 */
91 20480, /* 16: 20.48 */
92 30720, /* 17: 30.72 */
93 40960, /* 18: 40.96 */
94 61440, /* 19: 61.44 */
95 81920, /* 1A: 81.92 */
96 122880, /* 1B: 122.88 */
97 163840, /* 1C: 163.84 */
98 245760, /* 1D: 245.76 */
99 327680, /* 1E: 327.68 */
100 491520 /* 1F: 491.52 */
101 };
102
103 /*
104 * Note that it is OK to post send work requests in the SQE and ERR
105 * states; rvt_do_send() will process them and generate error
106 * completions as per IB 1.2 C10-96.
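*
* ib_rvt_state_ops[] maps each QP state to the set of RVT_*_OK flags
* describing which post/process operations are permitted in that state.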
107 */
108 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
109 [IB_QPS_RESET] = 0,
110 [IB_QPS_INIT] = RVT_POST_RECV_OK,
111 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
112 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
113 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
114 RVT_PROCESS_NEXT_SEND_OK,
115 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
116 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
117 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
118 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
119 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
120 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
121 };
122 EXPORT_SYMBOL(ib_rvt_state_ops);
123
124 /* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
126 {
127 /* assume that the boot CPU value is universal for all CPUs */
128 return boot_cpu_data.x86_cache_size;
129 }
130
131 /* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
133 {
134 /*
135 * Use the only available X64 cacheless copy. Add a __user cast
* to quiet sparse. The src argument is already in the kernel so
137 * there are no security issues. The extra fault recovery machinery
138 * is not invoked.
139 */
140 __copy_user_nocache(dst, (void __user *)src, n, 0);
141 }
142
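/**
 * rvt_wss_exit - free the working set structures used for adaptive SGE copy
 * @rdi: rvt dev struct
 */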
void rvt_wss_exit(struct rvt_dev_info *rdi)
144 {
145 struct rvt_wss *wss = rdi->wss;
146
147 if (!wss)
148 return;
149
150 /* coded to handle partially initialized and repeat callers */
151 kfree(wss->entries);
152 wss->entries = NULL;
153 kfree(rdi->wss);
154 rdi->wss = NULL;
155 }
156
157 /**
158 * rvt_wss_init - Init wss data structures
* @rdi: rvt dev struct
*
160 * Return: 0 on success
161 */
int rvt_wss_init(struct rvt_dev_info *rdi)
163 {
164 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
165 unsigned int wss_threshold = rdi->dparms.wss_threshold;
166 unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
167 long llc_size;
168 long llc_bits;
169 long table_size;
170 long table_bits;
171 struct rvt_wss *wss;
172 int node = rdi->dparms.node;
173
174 if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
175 rdi->wss = NULL;
176 return 0;
177 }
178
179 rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
180 if (!rdi->wss)
181 return -ENOMEM;
182 wss = rdi->wss;
183
184 /* check for a valid percent range - default to 80 if none or invalid */
185 if (wss_threshold < 1 || wss_threshold > 100)
186 wss_threshold = 80;
187
188 /* reject a wildly large period */
189 if (wss_clean_period > 1000000)
190 wss_clean_period = 256;
191
192 /* reject a zero period */
193 if (wss_clean_period == 0)
194 wss_clean_period = 1;
195
196 /*
197 * Calculate the table size - the next power of 2 larger than the
198 * LLC size. LLC size is in KiB.
199 */
200 llc_size = rvt_wss_llc_size() * 1024;
201 table_size = roundup_pow_of_two(llc_size);
202
203 /* one bit per page in rounded up table */
204 llc_bits = llc_size / PAGE_SIZE;
205 table_bits = table_size / PAGE_SIZE;
206 wss->pages_mask = table_bits - 1;
207 wss->num_entries = table_bits / BITS_PER_LONG;
208
209 wss->threshold = (llc_bits * wss_threshold) / 100;
210 if (wss->threshold == 0)
211 wss->threshold = 1;
212
213 wss->clean_period = wss_clean_period;
214 atomic_set(&wss->clean_counter, wss_clean_period);
215
216 wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
217 GFP_KERNEL, node);
218 if (!wss->entries) {
219 rvt_wss_exit(rdi);
220 return -ENOMEM;
221 }
222
223 return 0;
224 }
225
226 /*
227 * Advance the clean counter. When the clean period has expired,
228 * clean an entry.
229 *
230 * This is implemented in atomics to avoid locking. Because multiple
231 * variables are involved, it can be racy which can lead to slightly
232 * inaccurate information. Since this is only a heuristic, this is
* OK. Any inaccuracies will clean themselves out as the counter
234 * advances. That said, it is unlikely the entry clean operation will
235 * race - the next possible racer will not start until the next clean
236 * period.
237 *
238 * The clean counter is implemented as a decrement to zero. When zero
239 * is reached an entry is cleaned.
240 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
242 {
243 int entry;
244 int weight;
245 unsigned long bits;
246
247 /* become the cleaner if we decrement the counter to zero */
248 if (atomic_dec_and_test(&wss->clean_counter)) {
249 /*
250 * Set, not add, the clean period. This avoids an issue
251 * where the counter could decrement below the clean period.
252 * Doing a set can result in lost decrements, slowing the
* clean advance. Since this is a heuristic, this possible
254 * slowdown is OK.
255 *
256 * An alternative is to loop, advancing the counter by a
257 * clean period until the result is > 0. However, this could
258 * lead to several threads keeping another in the clean loop.
259 * This could be mitigated by limiting the number of times
260 * we stay in the loop.
261 */
262 atomic_set(&wss->clean_counter, wss->clean_period);
263
264 /*
265 * Uniquely grab the entry to clean and move to next.
266 * The current entry is always the lower bits of
267 * wss.clean_entry. The table size, wss.num_entries,
268 * is always a power-of-2.
269 */
270 entry = (atomic_inc_return(&wss->clean_entry) - 1)
271 & (wss->num_entries - 1);
272
273 /* clear the entry and count the bits */
274 bits = xchg(&wss->entries[entry], 0);
275 weight = hweight64((u64)bits);
276 /* only adjust the contended total count if needed */
277 if (weight)
278 atomic_sub(weight, &wss->total_count);
279 }
280 }
281
282 /*
283 * Insert the given address into the working set array.
284 */
static void wss_insert(struct rvt_wss *wss, void *address)
286 {
287 u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
288 u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
289 u32 nr = page & (BITS_PER_LONG - 1);
290
291 if (!test_and_set_bit(nr, &wss->entries[entry]))
292 atomic_inc(&wss->total_count);
293
294 wss_advance_clean_counter(wss);
295 }
296
297 /*
298 * Is the working set larger than the threshold?
299 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
301 {
302 return atomic_read(&wss->total_count) >= wss->threshold;
303 }
304
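/*
 * get_map_page - allocate and install a zeroed page for a QPN bitmap.
 * If another thread raced with us and installed a page first, free ours
 * and keep the existing one.
 */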
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
307 {
308 unsigned long page = get_zeroed_page(GFP_KERNEL);
309
310 /*
311 * Free the page if someone raced with us installing it.
312 */
313
314 spin_lock(&qpt->lock);
315 if (map->page)
316 free_page(page);
317 else
318 map->page = (void *)page;
319 spin_unlock(&qpt->lock);
320 }
321
322 /**
* init_qpn_table - initialize the QP number table for a device
* @rdi: rvt device info structure
* @qpt: the QPN table
325 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
327 {
328 u32 offset, i;
329 struct rvt_qpn_map *map;
330 int ret = 0;
331
332 if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
333 return -EINVAL;
334
335 spin_lock_init(&qpt->lock);
336
337 qpt->last = rdi->dparms.qpn_start;
338 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
339
340 /*
* Drivers may want some QPs beyond what we need for verbs; let them use
* our QPN table rather than keeping their own. Go ahead and mark the
* bitmaps for those. The reserved range must be *after* the range which
* verbs will pick from.
345 */
346
347 /* Figure out number of bit maps needed before reserved range */
348 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
349
350 /* This should always be zero */
351 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
352
353 /* Starting with the first reserved bit map */
354 map = &qpt->map[qpt->nmaps];
355
356 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
357 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
358 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
359 if (!map->page) {
360 get_map_page(qpt, map);
361 if (!map->page) {
362 ret = -ENOMEM;
363 break;
364 }
365 }
366 set_bit(offset, map->page);
367 offset++;
368 if (offset == RVT_BITS_PER_PAGE) {
369 /* next page */
370 qpt->nmaps++;
371 map++;
372 offset = 0;
373 }
374 }
375 return ret;
376 }
377
378 /**
379 * free_qpn_table - free the QP number table for a device
380 * @qpt: the QPN table
381 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
383 {
384 int i;
385
386 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
387 free_page((unsigned long)qpt->map[i].page);
388 }
389
390 /**
391 * rvt_driver_qp_init - Init driver qp resources
* @rdi: rvt dev structure
393 *
394 * Return: 0 on success
395 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
397 {
398 int i;
399 int ret = -ENOMEM;
400
401 if (!rdi->dparms.qp_table_size)
402 return -EINVAL;
403
404 /*
405 * If driver is not doing any QP allocation then make sure it is
406 * providing the necessary QP functions.
407 */
408 if (!rdi->driver_f.free_all_qps ||
409 !rdi->driver_f.qp_priv_alloc ||
410 !rdi->driver_f.qp_priv_free ||
411 !rdi->driver_f.notify_qp_reset ||
412 !rdi->driver_f.notify_restart_rc)
413 return -EINVAL;
414
415 /* allocate parent object */
416 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
417 rdi->dparms.node);
418 if (!rdi->qp_dev)
419 return -ENOMEM;
420
421 /* allocate hash table */
422 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
423 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
424 rdi->qp_dev->qp_table =
425 kmalloc_array_node(rdi->qp_dev->qp_table_size,
426 sizeof(*rdi->qp_dev->qp_table),
427 GFP_KERNEL, rdi->dparms.node);
428 if (!rdi->qp_dev->qp_table)
429 goto no_qp_table;
430
431 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
432 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
433
434 spin_lock_init(&rdi->qp_dev->qpt_lock);
435
436 /* initialize qpn map */
437 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
438 goto fail_table;
439
440 spin_lock_init(&rdi->n_qps_lock);
441
442 return 0;
443
444 fail_table:
445 kfree(rdi->qp_dev->qp_table);
446 free_qpn_table(&rdi->qp_dev->qpn_table);
447
448 no_qp_table:
449 kfree(rdi->qp_dev);
450
451 return ret;
452 }
453
454 /**
* rvt_free_all_qps - check for QPs still in use
456 * @rdi: rvt device info structure
457 *
458 * There should not be any QPs still in use.
459 * Free memory for table.
460 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
462 {
463 unsigned long flags;
464 struct rvt_qp *qp;
465 unsigned n, qp_inuse = 0;
466 spinlock_t *ql; /* work around too long line below */
467
468 if (rdi->driver_f.free_all_qps)
469 qp_inuse = rdi->driver_f.free_all_qps(rdi);
470
471 qp_inuse += rvt_mcast_tree_empty(rdi);
472
473 if (!rdi->qp_dev)
474 return qp_inuse;
475
476 ql = &rdi->qp_dev->qpt_lock;
477 spin_lock_irqsave(ql, flags);
478 for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
479 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
480 lockdep_is_held(ql));
481 RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
482
483 for (; qp; qp = rcu_dereference_protected(qp->next,
484 lockdep_is_held(ql)))
485 qp_inuse++;
486 }
487 spin_unlock_irqrestore(ql, flags);
488 synchronize_rcu();
489 return qp_inuse;
490 }
491
492 /**
493 * rvt_qp_exit - clean up qps on device exit
494 * @rdi: rvt dev structure
495 *
496 * Check for qp leaks and free resources.
497 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
499 {
500 u32 qps_inuse = rvt_free_all_qps(rdi);
501
502 if (qps_inuse)
503 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
504 qps_inuse);
505 if (!rdi->qp_dev)
506 return;
507
508 kfree(rdi->qp_dev->qp_table);
509 free_qpn_table(&rdi->qp_dev->qpn_table);
510 kfree(rdi->qp_dev);
511 }
512
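/*
 * mk_qpn - convert a bitmap page index and bit offset back into a QPN.
 * Each map page covers RVT_BITS_PER_PAGE QP numbers.
 */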
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
515 {
516 return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
517 }
518
519 /**
520 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
521 * IB_QPT_SMI/IB_QPT_GSI
522 * @rdi: rvt device info structure
* @qpt: queue pair number table pointer
* @type: the QP type
* @port_num: IB port number, 1 based, comes from core
525 *
526 * Return: The queue pair number
527 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num)
530 {
531 u32 i, offset, max_scan, qpn;
532 struct rvt_qpn_map *map;
533 u32 ret;
534
535 if (rdi->driver_f.alloc_qpn)
536 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
537
538 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
539 unsigned n;
540
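/*
 * QPN 0 is reserved for SMI and QPN 1 for GSI, one pair per port;
 * qpt->flags tracks which of these special QPNs are already in use.
 */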
541 ret = type == IB_QPT_GSI;
542 n = 1 << (ret + 2 * (port_num - 1));
543 spin_lock(&qpt->lock);
544 if (qpt->flags & n)
545 ret = -EINVAL;
546 else
547 qpt->flags |= n;
548 spin_unlock(&qpt->lock);
549 goto bail;
550 }
551
552 qpn = qpt->last + qpt->incr;
553 if (qpn >= RVT_QPN_MAX)
554 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
555 /* offset carries bit 0 */
556 offset = qpn & RVT_BITS_PER_PAGE_MASK;
557 map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
558 max_scan = qpt->nmaps - !offset;
559 for (i = 0;;) {
560 if (unlikely(!map->page)) {
561 get_map_page(qpt, map);
562 if (unlikely(!map->page))
563 break;
564 }
565 do {
566 if (!test_and_set_bit(offset, map->page)) {
567 qpt->last = qpn;
568 ret = qpn;
569 goto bail;
570 }
571 offset += qpt->incr;
572 /*
* This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
* That is OK. It gets re-assigned below.
575 */
576 qpn = mk_qpn(qpt, map, offset);
577 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
578 /*
579 * In order to keep the number of pages allocated to a
* minimum, we scan all the existing pages before increasing
581 * the size of the bitmap table.
582 */
583 if (++i > max_scan) {
584 if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
585 break;
586 map = &qpt->map[qpt->nmaps++];
587 /* start at incr with current bit 0 */
588 offset = qpt->incr | (offset & 1);
589 } else if (map < &qpt->map[qpt->nmaps]) {
590 ++map;
591 /* start at incr with current bit 0 */
592 offset = qpt->incr | (offset & 1);
593 } else {
594 map = &qpt->map[0];
595 /* wrap to first map page, invert bit 0 */
596 offset = qpt->incr | ((offset & 1) ^ 1);
597 }
598 /* there can be no set bits in low-order QoS bits */
599 WARN_ON(rdi->dparms.qos_shift > 1 &&
600 offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
601 qpn = mk_qpn(qpt, map, offset);
602 }
603
604 ret = -ENOMEM;
605
606 bail:
607 return ret;
608 }
609
610 /**
* rvt_clear_mr_refs - Drop held mr refs
* @qp: rvt qp data structure
* @clr_sends: Whether to clear the send side or not
614 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
616 {
617 unsigned n;
618 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
619
620 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
621 rvt_put_ss(&qp->s_rdma_read_sge);
622
623 rvt_put_ss(&qp->r_sge);
624
625 if (clr_sends) {
626 while (qp->s_last != qp->s_head) {
627 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
628
629 rvt_put_qp_swqe(qp, wqe);
630 if (++qp->s_last >= qp->s_size)
631 qp->s_last = 0;
632 smp_wmb(); /* see qp_set_savail */
633 }
634 if (qp->s_rdma_mr) {
635 rvt_put_mr(qp->s_rdma_mr);
636 qp->s_rdma_mr = NULL;
637 }
638 }
639
640 for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
641 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
642
643 if (e->rdma_sge.mr) {
644 rvt_put_mr(e->rdma_sge.mr);
645 e->rdma_sge.mr = NULL;
646 }
647 }
648 }
649
650 /**
651 * rvt_swqe_has_lkey - return true if lkey is used by swqe
* @wqe: the send wqe
* @lkey: the lkey
654 *
655 * Test the swqe for using lkey
656 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
658 {
659 int i;
660
661 for (i = 0; i < wqe->wr.num_sge; i++) {
662 struct rvt_sge *sge = &wqe->sg_list[i];
663
664 if (rvt_mr_has_lkey(sge->mr, lkey))
665 return true;
666 }
667 return false;
668 }
669
670 /**
* rvt_qp_sends_has_lkey - return true if qp sends use lkey
* @qp: the rvt_qp
* @lkey: the lkey
674 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
676 {
677 u32 s_last = qp->s_last;
678
679 while (s_last != qp->s_head) {
680 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
681
682 if (rvt_swqe_has_lkey(wqe, lkey))
683 return true;
684
685 if (++s_last >= qp->s_size)
686 s_last = 0;
687 }
688 if (qp->s_rdma_mr)
689 if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
690 return true;
691 return false;
692 }
693
694 /**
695 * rvt_qp_acks_has_lkey - return true if acks have lkey
* @qp: the qp
* @lkey: the lkey
698 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
700 {
701 int i;
702 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
703
704 for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
705 struct rvt_ack_entry *e = &qp->s_ack_queue[i];
706
707 if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
708 return true;
709 }
710 return false;
711 }
712
713 /*
714 * rvt_qp_mr_clean - clean up remote ops for lkey
* @qp: the qp
* @lkey: the lkey that is being de-registered
717 *
718 * This routine checks if the lkey is being used by
719 * the qp.
720 *
* If so, the qp is put into an error state to eliminate
722 * any references from the qp.
723 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
725 {
726 bool lastwqe = false;
727
728 if (qp->ibqp.qp_type == IB_QPT_SMI ||
729 qp->ibqp.qp_type == IB_QPT_GSI)
730 /* avoid special QPs */
731 return;
732 spin_lock_irq(&qp->r_lock);
733 spin_lock(&qp->s_hlock);
734 spin_lock(&qp->s_lock);
735
736 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
737 goto check_lwqe;
738
739 if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
740 rvt_qp_sends_has_lkey(qp, lkey) ||
741 rvt_qp_acks_has_lkey(qp, lkey))
742 lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
743 check_lwqe:
744 spin_unlock(&qp->s_lock);
745 spin_unlock(&qp->s_hlock);
746 spin_unlock_irq(&qp->r_lock);
747 if (lastwqe) {
748 struct ib_event ev;
749
750 ev.device = qp->ibqp.device;
751 ev.element.qp = &qp->ibqp;
752 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
753 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
754 }
755 }
756
757 /**
* rvt_remove_qp - remove qp from table
759 * @rdi: rvt dev struct
760 * @qp: qp to remove
761 *
762 * Remove the QP from the table so it can't be found asynchronously by
763 * the receive routine.
764 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
766 {
767 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
768 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
769 unsigned long flags;
770 int removed = 1;
771
772 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
773
774 if (rcu_dereference_protected(rvp->qp[0],
775 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
776 RCU_INIT_POINTER(rvp->qp[0], NULL);
777 } else if (rcu_dereference_protected(rvp->qp[1],
778 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
779 RCU_INIT_POINTER(rvp->qp[1], NULL);
780 } else {
781 struct rvt_qp *q;
782 struct rvt_qp __rcu **qpp;
783
784 removed = 0;
785 qpp = &rdi->qp_dev->qp_table[n];
786 for (; (q = rcu_dereference_protected(*qpp,
787 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
788 qpp = &q->next) {
789 if (q == qp) {
790 RCU_INIT_POINTER(*qpp,
791 rcu_dereference_protected(qp->next,
792 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
793 removed = 1;
794 trace_rvt_qpremove(qp, n);
795 break;
796 }
797 }
798 }
799
800 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
801 if (removed) {
802 synchronize_rcu();
803 rvt_put_qp(qp);
804 }
805 }
806
807 /**
808 * rvt_alloc_rq - allocate memory for user or kernel buffer
809 * @rq: receive queue data structure
810 * @size: number of request queue entries
811 * @node: The NUMA node
* @udata: non-NULL if the queue is for a user context, NULL for kernel
*
* This function is used by both shared receive queues and non-shared
* receive queues to allocate memory.
*
* Return: 0 on success, -ENOMEM if memory allocation failed.
818 */
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
		 struct ib_udata *udata)
821 {
822 if (udata) {
823 rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
824 if (!rq->wq)
825 goto bail;
826 /* need kwq with no buffers */
827 rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
828 if (!rq->kwq)
829 goto bail;
830 rq->kwq->curr_wq = rq->wq->wq;
831 } else {
832 /* need kwq with buffers */
833 rq->kwq =
834 vzalloc_node(sizeof(struct rvt_krwq) + size, node);
835 if (!rq->kwq)
836 goto bail;
837 rq->kwq->curr_wq = rq->kwq->wq;
838 }
839
840 spin_lock_init(&rq->kwq->p_lock);
841 spin_lock_init(&rq->kwq->c_lock);
842 return 0;
843 bail:
844 rvt_free_rq(rq);
845 return -ENOMEM;
846 }
847
848 /**
849 * rvt_init_qp - initialize the QP state to the reset state
* @rdi: rvt dev struct
* @qp: the QP to init or reinit
851 * @type: the QP type
852 *
853 * This function is called from both rvt_create_qp() and
* rvt_reset_qp(). The difference is that the reset path
* holds the necessary locks to protect against concurrent
* access.
857 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
860 {
861 qp->remote_qpn = 0;
862 qp->qkey = 0;
863 qp->qp_access_flags = 0;
864 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
865 qp->s_hdrwords = 0;
866 qp->s_wqe = NULL;
867 qp->s_draining = 0;
868 qp->s_next_psn = 0;
869 qp->s_last_psn = 0;
870 qp->s_sending_psn = 0;
871 qp->s_sending_hpsn = 0;
872 qp->s_psn = 0;
873 qp->r_psn = 0;
874 qp->r_msn = 0;
875 if (type == IB_QPT_RC) {
876 qp->s_state = IB_OPCODE_RC_SEND_LAST;
877 qp->r_state = IB_OPCODE_RC_SEND_LAST;
878 } else {
879 qp->s_state = IB_OPCODE_UC_SEND_LAST;
880 qp->r_state = IB_OPCODE_UC_SEND_LAST;
881 }
882 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
883 qp->r_nak_state = 0;
884 qp->r_aflags = 0;
885 qp->r_flags = 0;
886 qp->s_head = 0;
887 qp->s_tail = 0;
888 qp->s_cur = 0;
889 qp->s_acked = 0;
890 qp->s_last = 0;
891 qp->s_ssn = 1;
892 qp->s_lsn = 0;
893 qp->s_mig_state = IB_MIG_MIGRATED;
894 qp->r_head_ack_queue = 0;
895 qp->s_tail_ack_queue = 0;
896 qp->s_acked_ack_queue = 0;
897 qp->s_num_rd_atomic = 0;
898 if (qp->r_rq.kwq)
899 qp->r_rq.kwq->count = qp->r_rq.size;
900 qp->r_sge.num_sge = 0;
901 atomic_set(&qp->s_reserved_used, 0);
902 }
903
904 /**
905 * rvt_reset_qp - initialize the QP state to the reset state
* @rdi: rvt dev struct
* @qp: the QP to reset
907 * @type: the QP type
908 *
909 * r_lock, s_hlock, and s_lock are required to be held by the caller
910 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
913 __must_hold(&qp->s_lock)
914 __must_hold(&qp->s_hlock)
915 __must_hold(&qp->r_lock)
916 {
917 lockdep_assert_held(&qp->r_lock);
918 lockdep_assert_held(&qp->s_hlock);
919 lockdep_assert_held(&qp->s_lock);
920 if (qp->state != IB_QPS_RESET) {
921 qp->state = IB_QPS_RESET;
922
923 /* Let drivers flush their waitlist */
924 rdi->driver_f.flush_qp_waiters(qp);
925 rvt_stop_rc_timers(qp);
926 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
927 spin_unlock(&qp->s_lock);
928 spin_unlock(&qp->s_hlock);
929 spin_unlock_irq(&qp->r_lock);
930
931 /* Stop the send queue and the retry timer */
932 rdi->driver_f.stop_send_queue(qp);
933 rvt_del_timers_sync(qp);
934 /* Wait for things to stop */
935 rdi->driver_f.quiesce_qp(qp);
936
937 /* take qp out the hash and wait for it to be unused */
938 rvt_remove_qp(rdi, qp);
939
940 /* grab the lock b/c it was locked at call time */
941 spin_lock_irq(&qp->r_lock);
942 spin_lock(&qp->s_hlock);
943 spin_lock(&qp->s_lock);
944
945 rvt_clear_mr_refs(qp, 1);
946 /*
947 * Let the driver do any tear down or re-init it needs to for
948 * a qp that has been reset
949 */
950 rdi->driver_f.notify_qp_reset(qp);
951 }
952 rvt_init_qp(rdi, qp, type);
953 lockdep_assert_held(&qp->r_lock);
954 lockdep_assert_held(&qp->s_hlock);
955 lockdep_assert_held(&qp->s_lock);
956 }
957
/**
* rvt_free_qpn - Free a qpn from the bit map
959 * @qpt: QP table
960 * @qpn: queue pair number to free
961 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
963 {
964 struct rvt_qpn_map *map;
965
966 map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
967 if (map->page)
968 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
969 }
970
971 /**
972 * get_allowed_ops - Given a QP type return the appropriate allowed OP
973 * @type: valid, supported, QP type
974 */
static u8 get_allowed_ops(enum ib_qp_type type)
976 {
977 return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
978 IB_OPCODE_UC : IB_OPCODE_UD;
979 }
980
981 /**
982 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
983 * @qp: Valid QP with allowed_ops set
984 *
985 * The rvt_swqe data structure being used is a union, so this is
986 * only valid for UD QPs.
987 */
static void free_ud_wq_attr(struct rvt_qp *qp)
989 {
990 struct rvt_swqe *wqe;
991 int i;
992
993 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
994 wqe = rvt_get_swqe_ptr(qp, i);
995 kfree(wqe->ud_wr.attr);
996 wqe->ud_wr.attr = NULL;
997 }
998 }
999
1000 /**
1001 * alloc_ud_wq_attr - AH attribute cache for UD QPs
1002 * @qp: Valid QP with allowed_ops set
1003 * @node: Numa node for allocation
1004 *
1005 * The rvt_swqe data structure being used is a union, so this is
1006 * only valid for UD QPs.
1007 */
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
1009 {
1010 struct rvt_swqe *wqe;
1011 int i;
1012
1013 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
1014 wqe = rvt_get_swqe_ptr(qp, i);
1015 wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
1016 GFP_KERNEL, node);
1017 if (!wqe->ud_wr.attr) {
1018 free_ud_wq_attr(qp);
1019 return -ENOMEM;
1020 }
1021 }
1022
1023 return 0;
1024 }
1025
1026 /**
1027 * rvt_create_qp - create a queue pair for a device
* @ibpd: the protection domain whose device we create the queue pair for
1029 * @init_attr: the attributes of the queue pair
1030 * @udata: user data for libibverbs.so
1031 *
1032 * Queue pair creation is mostly an rvt issue. However, drivers have their own
* unique idea of what queue pair numbers mean. For instance, there is a reserved
1034 * range for PSM.
1035 *
1036 * Return: the queue pair on success, otherwise returns an errno.
1037 *
1038 * Called by the ib_create_qp() core verbs function.
1039 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
1043 {
1044 struct rvt_qp *qp;
1045 int err;
1046 struct rvt_swqe *swq = NULL;
1047 size_t sz;
1048 size_t sg_list_sz;
1049 struct ib_qp *ret = ERR_PTR(-ENOMEM);
1050 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
1051 void *priv = NULL;
1052 size_t sqsize;
1053
1054 if (!rdi)
1055 return ERR_PTR(-EINVAL);
1056
1057 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
1058 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
1059 init_attr->create_flags)
1060 return ERR_PTR(-EINVAL);
1061
1062 /* Check receive queue parameters if no SRQ is specified. */
1063 if (!init_attr->srq) {
1064 if (init_attr->cap.max_recv_sge >
1065 rdi->dparms.props.max_recv_sge ||
1066 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
1067 return ERR_PTR(-EINVAL);
1068
1069 if (init_attr->cap.max_send_sge +
1070 init_attr->cap.max_send_wr +
1071 init_attr->cap.max_recv_sge +
1072 init_attr->cap.max_recv_wr == 0)
1073 return ERR_PTR(-EINVAL);
1074 }
1075 sqsize =
1076 init_attr->cap.max_send_wr + 1 +
1077 rdi->dparms.reserved_operations;
1078 switch (init_attr->qp_type) {
1079 case IB_QPT_SMI:
1080 case IB_QPT_GSI:
1081 if (init_attr->port_num == 0 ||
1082 init_attr->port_num > ibpd->device->phys_port_cnt)
1083 return ERR_PTR(-EINVAL);
1084 /* fall through */
1085 case IB_QPT_UC:
1086 case IB_QPT_RC:
1087 case IB_QPT_UD:
1088 sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
1089 swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
1090 if (!swq)
1091 return ERR_PTR(-ENOMEM);
1092
1093 sz = sizeof(*qp);
1094 sg_list_sz = 0;
1095 if (init_attr->srq) {
1096 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
1097
1098 if (srq->rq.max_sge > 1)
1099 sg_list_sz = sizeof(*qp->r_sg_list) *
1100 (srq->rq.max_sge - 1);
1101 } else if (init_attr->cap.max_recv_sge > 1)
1102 sg_list_sz = sizeof(*qp->r_sg_list) *
1103 (init_attr->cap.max_recv_sge - 1);
1104 qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
1105 rdi->dparms.node);
1106 if (!qp)
1107 goto bail_swq;
1108 qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
1109
1110 RCU_INIT_POINTER(qp->next, NULL);
1111 if (init_attr->qp_type == IB_QPT_RC) {
1112 qp->s_ack_queue =
1113 kcalloc_node(rvt_max_atomic(rdi),
1114 sizeof(*qp->s_ack_queue),
1115 GFP_KERNEL,
1116 rdi->dparms.node);
1117 if (!qp->s_ack_queue)
1118 goto bail_qp;
1119 }
1120 /* initialize timers needed for rc qp */
1121 timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
1122 hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
1123 HRTIMER_MODE_REL);
1124 qp->s_rnr_timer.function = rvt_rc_rnr_retry;
1125
1126 /*
* Driver needs to set up its private QP structure and do any
1128 * initialization that is needed.
1129 */
1130 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
1131 if (IS_ERR(priv)) {
1132 ret = priv;
1133 goto bail_qp;
1134 }
1135 qp->priv = priv;
1136 qp->timeout_jiffies =
1137 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1138 1000UL);
1139 if (init_attr->srq) {
1140 sz = 0;
1141 } else {
1142 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1143 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1144 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1145 sizeof(struct rvt_rwqe);
1146 err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
1147 rdi->dparms.node, udata);
1148 if (err) {
1149 ret = ERR_PTR(err);
1150 goto bail_driver_priv;
1151 }
1152 }
1153
1154 /*
1155 * ib_create_qp() will initialize qp->ibqp
1156 * except for qp->ibqp.qp_num.
1157 */
1158 spin_lock_init(&qp->r_lock);
1159 spin_lock_init(&qp->s_hlock);
1160 spin_lock_init(&qp->s_lock);
1161 atomic_set(&qp->refcount, 0);
1162 atomic_set(&qp->local_ops_pending, 0);
1163 init_waitqueue_head(&qp->wait);
1164 INIT_LIST_HEAD(&qp->rspwait);
1165 qp->state = IB_QPS_RESET;
1166 qp->s_wq = swq;
1167 qp->s_size = sqsize;
1168 qp->s_avail = init_attr->cap.max_send_wr;
1169 qp->s_max_sge = init_attr->cap.max_send_sge;
1170 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1171 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
1172 err = alloc_ud_wq_attr(qp, rdi->dparms.node);
1173 if (err) {
1174 ret = (ERR_PTR(err));
1175 goto bail_driver_priv;
1176 }
1177
1178 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
1179 init_attr->qp_type,
1180 init_attr->port_num);
1181 if (err < 0) {
1182 ret = ERR_PTR(err);
1183 goto bail_rq_wq;
1184 }
1185 qp->ibqp.qp_num = err;
1186 qp->port_num = init_attr->port_num;
1187 rvt_init_qp(rdi, qp, init_attr->qp_type);
1188 if (rdi->driver_f.qp_priv_init) {
1189 err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
1190 if (err) {
1191 ret = ERR_PTR(err);
1192 goto bail_rq_wq;
1193 }
1194 }
1195 break;
1196
1197 default:
1198 /* Don't support raw QPs */
1199 return ERR_PTR(-EINVAL);
1200 }
1201
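/* rdmavt does not support inline data; report zero capacity to the caller */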
1202 init_attr->cap.max_inline_data = 0;
1203
1204 /*
1205 * Return the address of the RWQ as the offset to mmap.
1206 * See rvt_mmap() for details.
1207 */
1208 if (udata && udata->outlen >= sizeof(__u64)) {
1209 if (!qp->r_rq.wq) {
1210 __u64 offset = 0;
1211
1212 err = ib_copy_to_udata(udata, &offset,
1213 sizeof(offset));
1214 if (err) {
1215 ret = ERR_PTR(err);
1216 goto bail_qpn;
1217 }
1218 } else {
1219 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
1220
1221 qp->ip = rvt_create_mmap_info(rdi, s, udata,
1222 qp->r_rq.wq);
1223 if (!qp->ip) {
1224 ret = ERR_PTR(-ENOMEM);
1225 goto bail_qpn;
1226 }
1227
1228 err = ib_copy_to_udata(udata, &qp->ip->offset,
1229 sizeof(qp->ip->offset));
1230 if (err) {
1231 ret = ERR_PTR(err);
1232 goto bail_ip;
1233 }
1234 }
1235 qp->pid = current->pid;
1236 }
1237
1238 spin_lock(&rdi->n_qps_lock);
1239 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
1240 spin_unlock(&rdi->n_qps_lock);
1241 ret = ERR_PTR(-ENOMEM);
1242 goto bail_ip;
1243 }
1244
1245 rdi->n_qps_allocated++;
1246 /*
1247 * Maintain a busy_jiffies variable that will be added to the timeout
1248 * period in mod_retry_timer and add_retry_timer. This busy jiffies
1249 * is scaled by the number of rc qps created for the device to reduce
1250 * the number of timeouts occurring when there is a large number of
1251 * qps. busy_jiffies is incremented every rc qp scaling interval.
1252 * The scaling interval is selected based on extensive performance
1253 * evaluation of targeted workloads.
1254 */
1255 if (init_attr->qp_type == IB_QPT_RC) {
1256 rdi->n_rc_qps++;
1257 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1258 }
1259 spin_unlock(&rdi->n_qps_lock);
1260
1261 if (qp->ip) {
1262 spin_lock_irq(&rdi->pending_lock);
1263 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
1264 spin_unlock_irq(&rdi->pending_lock);
1265 }
1266
1267 ret = &qp->ibqp;
1268
1269 return ret;
1270
1271 bail_ip:
1272 if (qp->ip)
1273 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1274
1275 bail_qpn:
1276 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1277
1278 bail_rq_wq:
1279 rvt_free_rq(&qp->r_rq);
1280 free_ud_wq_attr(qp);
1281
1282 bail_driver_priv:
1283 rdi->driver_f.qp_priv_free(rdi, qp);
1284
1285 bail_qp:
1286 kfree(qp->s_ack_queue);
1287 kfree(qp);
1288
1289 bail_swq:
1290 vfree(swq);
1291
1292 return ret;
1293 }
1294
1295 /**
1296 * rvt_error_qp - put a QP into the error state
1297 * @qp: the QP to put into the error state
1298 * @err: the receive completion error to signal if a RWQE is active
1299 *
1300 * Flushes both send and receive work queues.
1301 *
1302 * Return: true if last WQE event should be generated.
1303 * The QP r_lock and s_lock should be held and interrupts disabled.
1304 * If we are already in error state, just return.
1305 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
1307 {
1308 struct ib_wc wc;
1309 int ret = 0;
1310 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1311
1312 lockdep_assert_held(&qp->r_lock);
1313 lockdep_assert_held(&qp->s_lock);
1314 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
1315 goto bail;
1316
1317 qp->state = IB_QPS_ERR;
1318
1319 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1320 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
1321 del_timer(&qp->s_timer);
1322 }
1323
1324 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
1325 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
1326
1327 rdi->driver_f.notify_error_qp(qp);
1328
1329 /* Schedule the sending tasklet to drain the send work queue. */
1330 if (READ_ONCE(qp->s_last) != qp->s_head)
1331 rdi->driver_f.schedule_send(qp);
1332
1333 rvt_clear_mr_refs(qp, 0);
1334
1335 memset(&wc, 0, sizeof(wc));
1336 wc.qp = &qp->ibqp;
1337 wc.opcode = IB_WC_RECV;
1338
1339 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
1340 wc.wr_id = qp->r_wr_id;
1341 wc.status = err;
1342 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1343 }
1344 wc.status = IB_WC_WR_FLUSH_ERR;
1345
1346 if (qp->r_rq.kwq) {
1347 u32 head;
1348 u32 tail;
1349 struct rvt_rwq *wq = NULL;
1350 struct rvt_krwq *kwq = NULL;
1351
1352 spin_lock(&qp->r_rq.kwq->c_lock);
1353 /* qp->ip used to validate if there is a user buffer mmaped */
1354 if (qp->ip) {
1355 wq = qp->r_rq.wq;
1356 head = RDMA_READ_UAPI_ATOMIC(wq->head);
1357 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
1358 } else {
1359 kwq = qp->r_rq.kwq;
1360 head = kwq->head;
1361 tail = kwq->tail;
1362 }
1363 /* sanity check pointers before trusting them */
1364 if (head >= qp->r_rq.size)
1365 head = 0;
1366 if (tail >= qp->r_rq.size)
1367 tail = 0;
1368 while (tail != head) {
1369 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1370 if (++tail >= qp->r_rq.size)
1371 tail = 0;
1372 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1373 }
1374 if (qp->ip)
1375 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
1376 else
1377 kwq->tail = tail;
1378 spin_unlock(&qp->r_rq.kwq->c_lock);
1379 } else if (qp->ibqp.event_handler) {
1380 ret = 1;
1381 }
1382
1383 bail:
1384 return ret;
1385 }
1386 EXPORT_SYMBOL(rvt_error_qp);
1387
1388 /*
1389 * Put the QP into the hash table.
1390 * The hash table holds a reference to the QP.
1391 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1393 {
1394 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1395 unsigned long flags;
1396
1397 rvt_get_qp(qp);
1398 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1399
1400 if (qp->ibqp.qp_num <= 1) {
1401 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1402 } else {
1403 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1404
1405 qp->next = rdi->qp_dev->qp_table[n];
1406 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1407 trace_rvt_qpinsert(qp, n);
1408 }
1409
1410 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1411 }
1412
1413 /**
1414 * rvt_modify_qp - modify the attributes of a queue pair
* @ibqp: the queue pair whose attributes we're modifying
1416 * @attr: the new attributes
1417 * @attr_mask: the mask of attributes to modify
1418 * @udata: user data for libibverbs.so
1419 *
1420 * Return: 0 on success, otherwise returns an errno.
1421 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
1424 {
1425 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1426 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1427 enum ib_qp_state cur_state, new_state;
1428 struct ib_event ev;
1429 int lastwqe = 0;
1430 int mig = 0;
1431 int pmtu = 0; /* for gcc warning only */
1432 int opa_ah;
1433
1434 spin_lock_irq(&qp->r_lock);
1435 spin_lock(&qp->s_hlock);
1436 spin_lock(&qp->s_lock);
1437
1438 cur_state = attr_mask & IB_QP_CUR_STATE ?
1439 attr->cur_qp_state : qp->state;
1440 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
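/* opa_ah selects whether OPA or IB multicast DLID limits apply to the AV checks below */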
1441 opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
1442
1443 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1444 attr_mask))
1445 goto inval;
1446
1447 if (rdi->driver_f.check_modify_qp &&
1448 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1449 goto inval;
1450
1451 if (attr_mask & IB_QP_AV) {
1452 if (opa_ah) {
1453 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1454 opa_get_mcast_base(OPA_MCAST_NR))
1455 goto inval;
1456 } else {
1457 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1458 be16_to_cpu(IB_MULTICAST_LID_BASE))
1459 goto inval;
1460 }
1461
1462 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1463 goto inval;
1464 }
1465
1466 if (attr_mask & IB_QP_ALT_PATH) {
1467 if (opa_ah) {
1468 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1469 opa_get_mcast_base(OPA_MCAST_NR))
1470 goto inval;
1471 } else {
1472 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1473 be16_to_cpu(IB_MULTICAST_LID_BASE))
1474 goto inval;
1475 }
1476
1477 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1478 goto inval;
1479 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1480 goto inval;
1481 }
1482
1483 if (attr_mask & IB_QP_PKEY_INDEX)
1484 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1485 goto inval;
1486
1487 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1488 if (attr->min_rnr_timer > 31)
1489 goto inval;
1490
1491 if (attr_mask & IB_QP_PORT)
1492 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1493 qp->ibqp.qp_type == IB_QPT_GSI ||
1494 attr->port_num == 0 ||
1495 attr->port_num > ibqp->device->phys_port_cnt)
1496 goto inval;
1497
1498 if (attr_mask & IB_QP_DEST_QPN)
1499 if (attr->dest_qp_num > RVT_QPN_MASK)
1500 goto inval;
1501
1502 if (attr_mask & IB_QP_RETRY_CNT)
1503 if (attr->retry_cnt > 7)
1504 goto inval;
1505
1506 if (attr_mask & IB_QP_RNR_RETRY)
1507 if (attr->rnr_retry > 7)
1508 goto inval;
1509
1510 /*
* Don't allow invalid path_mtu values. It is OK to set it greater
* than the active mtu (or even the max_cap, if we have tuned that
* to a small mtu). We'll set qp->path_mtu to the lesser of the
* requested attribute mtu and the active mtu, for packetizing
* messages.
* Note that the QP port has to be set in INIT and the MTU in RTR.
1517 */
1518 if (attr_mask & IB_QP_PATH_MTU) {
1519 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1520 if (pmtu < 0)
1521 goto inval;
1522 }
1523
1524 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1525 if (attr->path_mig_state == IB_MIG_REARM) {
1526 if (qp->s_mig_state == IB_MIG_ARMED)
1527 goto inval;
1528 if (new_state != IB_QPS_RTS)
1529 goto inval;
1530 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1531 if (qp->s_mig_state == IB_MIG_REARM)
1532 goto inval;
1533 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1534 goto inval;
1535 if (qp->s_mig_state == IB_MIG_ARMED)
1536 mig = 1;
1537 } else {
1538 goto inval;
1539 }
1540 }
1541
1542 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1543 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1544 goto inval;
1545
1546 switch (new_state) {
1547 case IB_QPS_RESET:
1548 if (qp->state != IB_QPS_RESET)
1549 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1550 break;
1551
1552 case IB_QPS_RTR:
1553 /* Allow event to re-trigger if QP set to RTR more than once */
1554 qp->r_flags &= ~RVT_R_COMM_EST;
1555 qp->state = new_state;
1556 break;
1557
1558 case IB_QPS_SQD:
1559 qp->s_draining = qp->s_last != qp->s_cur;
1560 qp->state = new_state;
1561 break;
1562
1563 case IB_QPS_SQE:
1564 if (qp->ibqp.qp_type == IB_QPT_RC)
1565 goto inval;
1566 qp->state = new_state;
1567 break;
1568
1569 case IB_QPS_ERR:
1570 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1571 break;
1572
1573 default:
1574 qp->state = new_state;
1575 break;
1576 }
1577
1578 if (attr_mask & IB_QP_PKEY_INDEX)
1579 qp->s_pkey_index = attr->pkey_index;
1580
1581 if (attr_mask & IB_QP_PORT)
1582 qp->port_num = attr->port_num;
1583
1584 if (attr_mask & IB_QP_DEST_QPN)
1585 qp->remote_qpn = attr->dest_qp_num;
1586
1587 if (attr_mask & IB_QP_SQ_PSN) {
1588 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1589 qp->s_psn = qp->s_next_psn;
1590 qp->s_sending_psn = qp->s_next_psn;
1591 qp->s_last_psn = qp->s_next_psn - 1;
1592 qp->s_sending_hpsn = qp->s_last_psn;
1593 }
1594
1595 if (attr_mask & IB_QP_RQ_PSN)
1596 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1597
1598 if (attr_mask & IB_QP_ACCESS_FLAGS)
1599 qp->qp_access_flags = attr->qp_access_flags;
1600
1601 if (attr_mask & IB_QP_AV) {
1602 rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
1603 qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1604 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1605 }
1606
1607 if (attr_mask & IB_QP_ALT_PATH) {
1608 rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1609 qp->s_alt_pkey_index = attr->alt_pkey_index;
1610 }
1611
1612 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1613 qp->s_mig_state = attr->path_mig_state;
1614 if (mig) {
1615 qp->remote_ah_attr = qp->alt_ah_attr;
1616 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1617 qp->s_pkey_index = qp->s_alt_pkey_index;
1618 }
1619 }
1620
1621 if (attr_mask & IB_QP_PATH_MTU) {
1622 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1623 qp->log_pmtu = ilog2(qp->pmtu);
1624 }
1625
1626 if (attr_mask & IB_QP_RETRY_CNT) {
1627 qp->s_retry_cnt = attr->retry_cnt;
1628 qp->s_retry = attr->retry_cnt;
1629 }
1630
1631 if (attr_mask & IB_QP_RNR_RETRY) {
1632 qp->s_rnr_retry_cnt = attr->rnr_retry;
1633 qp->s_rnr_retry = attr->rnr_retry;
1634 }
1635
1636 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1637 qp->r_min_rnr_timer = attr->min_rnr_timer;
1638
1639 if (attr_mask & IB_QP_TIMEOUT) {
1640 qp->timeout = attr->timeout;
1641 qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1642 }
1643
1644 if (attr_mask & IB_QP_QKEY)
1645 qp->qkey = attr->qkey;
1646
1647 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1648 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1649
1650 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1651 qp->s_max_rd_atomic = attr->max_rd_atomic;
1652
1653 if (rdi->driver_f.modify_qp)
1654 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1655
1656 spin_unlock(&qp->s_lock);
1657 spin_unlock(&qp->s_hlock);
1658 spin_unlock_irq(&qp->r_lock);
1659
1660 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1661 rvt_insert_qp(rdi, qp);
1662
1663 if (lastwqe) {
1664 ev.device = qp->ibqp.device;
1665 ev.element.qp = &qp->ibqp;
1666 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1667 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1668 }
1669 if (mig) {
1670 ev.device = qp->ibqp.device;
1671 ev.element.qp = &qp->ibqp;
1672 ev.event = IB_EVENT_PATH_MIG;
1673 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1674 }
1675 return 0;
1676
1677 inval:
1678 spin_unlock(&qp->s_lock);
1679 spin_unlock(&qp->s_hlock);
1680 spin_unlock_irq(&qp->r_lock);
1681 return -EINVAL;
1682 }
1683
1684 /**
1685 * rvt_destroy_qp - destroy a queue pair
1686 * @ibqp: the queue pair to destroy
1687 *
1688 * Note that this can be called while the QP is actively sending or
1689 * receiving!
1690 *
1691 * Return: 0 on success.
1692 */
int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1694 {
1695 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1696 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1697
1698 spin_lock_irq(&qp->r_lock);
1699 spin_lock(&qp->s_hlock);
1700 spin_lock(&qp->s_lock);
1701 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1702 spin_unlock(&qp->s_lock);
1703 spin_unlock(&qp->s_hlock);
1704 spin_unlock_irq(&qp->r_lock);
1705
1706 wait_event(qp->wait, !atomic_read(&qp->refcount));
1707 /* qpn is now available for use again */
1708 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1709
1710 spin_lock(&rdi->n_qps_lock);
1711 rdi->n_qps_allocated--;
1712 if (qp->ibqp.qp_type == IB_QPT_RC) {
1713 rdi->n_rc_qps--;
1714 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1715 }
1716 spin_unlock(&rdi->n_qps_lock);
1717
1718 if (qp->ip)
1719 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1720 kvfree(qp->r_rq.kwq);
1721 rdi->driver_f.qp_priv_free(rdi, qp);
1722 kfree(qp->s_ack_queue);
1723 rdma_destroy_ah_attr(&qp->remote_ah_attr);
1724 rdma_destroy_ah_attr(&qp->alt_ah_attr);
1725 free_ud_wq_attr(qp);
1726 vfree(qp->s_wq);
1727 kfree(qp);
1728 return 0;
1729 }
1730
1731 /**
* rvt_query_qp - query an ibqp
1733 * @ibqp: IB qp to query
1734 * @attr: attr struct to fill in
1735 * @attr_mask: attr mask ignored
1736 * @init_attr: struct to fill in
1737 *
1738 * Return: always 0
1739 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
1742 {
1743 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1744 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1745
1746 attr->qp_state = qp->state;
1747 attr->cur_qp_state = attr->qp_state;
1748 attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1749 attr->path_mig_state = qp->s_mig_state;
1750 attr->qkey = qp->qkey;
1751 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1752 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1753 attr->dest_qp_num = qp->remote_qpn;
1754 attr->qp_access_flags = qp->qp_access_flags;
1755 attr->cap.max_send_wr = qp->s_size - 1 -
1756 rdi->dparms.reserved_operations;
1757 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1758 attr->cap.max_send_sge = qp->s_max_sge;
1759 attr->cap.max_recv_sge = qp->r_rq.max_sge;
1760 attr->cap.max_inline_data = 0;
1761 attr->ah_attr = qp->remote_ah_attr;
1762 attr->alt_ah_attr = qp->alt_ah_attr;
1763 attr->pkey_index = qp->s_pkey_index;
1764 attr->alt_pkey_index = qp->s_alt_pkey_index;
1765 attr->en_sqd_async_notify = 0;
1766 attr->sq_draining = qp->s_draining;
1767 attr->max_rd_atomic = qp->s_max_rd_atomic;
1768 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1769 attr->min_rnr_timer = qp->r_min_rnr_timer;
1770 attr->port_num = qp->port_num;
1771 attr->timeout = qp->timeout;
1772 attr->retry_cnt = qp->s_retry_cnt;
1773 attr->rnr_retry = qp->s_rnr_retry_cnt;
1774 attr->alt_port_num =
1775 rdma_ah_get_port_num(&qp->alt_ah_attr);
1776 attr->alt_timeout = qp->alt_timeout;
1777
1778 init_attr->event_handler = qp->ibqp.event_handler;
1779 init_attr->qp_context = qp->ibqp.qp_context;
1780 init_attr->send_cq = qp->ibqp.send_cq;
1781 init_attr->recv_cq = qp->ibqp.recv_cq;
1782 init_attr->srq = qp->ibqp.srq;
1783 init_attr->cap = attr->cap;
1784 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1785 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1786 else
1787 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1788 init_attr->qp_type = qp->ibqp.qp_type;
1789 init_attr->port_num = qp->port_num;
1790 return 0;
1791 }
1792
1793 /**
* rvt_post_recv - post a receive on a QP
1795 * @ibqp: the QP to post the receive on
1796 * @wr: the WR to post
1797 * @bad_wr: the first bad WR is put here
1798 *
1799 * This may be called from interrupt context.
1800 *
1801 * Return: 0 on success otherwise errno
1802 */
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		  const struct ib_recv_wr **bad_wr)
1805 {
1806 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1807 struct rvt_krwq *wq = qp->r_rq.kwq;
1808 unsigned long flags;
1809 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1810 !qp->ibqp.srq;
1811
1812 /* Check that state is OK to post receive. */
1813 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1814 *bad_wr = wr;
1815 return -EINVAL;
1816 }
1817
1818 for (; wr; wr = wr->next) {
1819 struct rvt_rwqe *wqe;
1820 u32 next;
1821 int i;
1822
1823 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1824 *bad_wr = wr;
1825 return -EINVAL;
1826 }
1827
1828 spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
1829 next = wq->head + 1;
1830 if (next >= qp->r_rq.size)
1831 next = 0;
1832 if (next == READ_ONCE(wq->tail)) {
1833 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1834 *bad_wr = wr;
1835 return -ENOMEM;
1836 }
1837 if (unlikely(qp_err_flush)) {
1838 struct ib_wc wc;
1839
1840 memset(&wc, 0, sizeof(wc));
1841 wc.qp = &qp->ibqp;
1842 wc.opcode = IB_WC_RECV;
1843 wc.wr_id = wr->wr_id;
1844 wc.status = IB_WC_WR_FLUSH_ERR;
1845 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1846 } else {
1847 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1848 wqe->wr_id = wr->wr_id;
1849 wqe->num_sge = wr->num_sge;
1850 for (i = 0; i < wr->num_sge; i++) {
1851 wqe->sg_list[i].addr = wr->sg_list[i].addr;
1852 wqe->sg_list[i].length = wr->sg_list[i].length;
1853 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
1854 }
1855 /*
1856 * Make sure queue entry is written
1857 * before the head index.
1858 */
1859 smp_store_release(&wq->head, next);
1860 }
1861 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1862 }
1863 return 0;
1864 }
1865
1866 /**
1867 * rvt_qp_valid_operation - validate post send wr request
* @qp: the qp
* @post_parms: the post send table for the driver
* @wr: the work request
*
* The routine validates the operation based on the
* validation table and returns the length of the operation
* which can extend beyond the ib_send_wr. Operation
1875 * dependent flags key atomic operation validation.
1876 *
1877 * There is an exception for UD qps that validates the pd and
1878 * overrides the length to include the additional UD specific
1879 * length.
1880 *
1881 * Returns a negative error or the length of the work request
1882 * for building the swqe.
1883 */
1884 static inline int rvt_qp_valid_operation(
1885 struct rvt_qp *qp,
1886 const struct rvt_operation_params *post_parms,
1887 const struct ib_send_wr *wr)
1888 {
1889 int len;
1890
1891 if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1892 return -EINVAL;
1893 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1894 return -EINVAL;
1895 if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1896 ibpd_to_rvtpd(qp->ibqp.pd)->user)
1897 return -EINVAL;
1898 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1899 (wr->num_sge == 0 ||
1900 wr->sg_list[0].length < sizeof(u64) ||
1901 wr->sg_list[0].addr & (sizeof(u64) - 1)))
1902 return -EINVAL;
1903 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1904 !qp->s_max_rd_atomic)
1905 return -EINVAL;
1906 len = post_parms[wr->opcode].length;
1907 /* UD specific */
1908 if (qp->ibqp.qp_type != IB_QPT_UC &&
1909 qp->ibqp.qp_type != IB_QPT_RC) {
1910 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1911 return -EINVAL;
1912 len = sizeof(struct ib_ud_wr);
1913 }
1914 return len;
1915 }
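/*
 * For reference, a driver's post_parms[] table consulted above is built
 * from entries roughly like the sketch below (hedged; see the hfi1 and
 * qib drivers for the authoritative tables).  The .length covers the
 * opcode-specific ib_*_wr wrapper, .qpt_support is a bit mask of allowed
 * QP types, and .flags keys extra checks such as the atomic SGE
 * validation performed in this routine.
 *
 *	[IB_WR_ATOMIC_CMP_AND_SWP] = {
 *		.length = sizeof(struct ib_atomic_wr),
 *		.qpt_support = BIT(IB_QPT_RC),
 *		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
 *	},
 */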
1916
1917 /**
1918 * rvt_qp_is_avail - determine queue capacity
1919 * @qp: the qp
1920 * @rdi: the rdmavt device
1921 * @reserved_op: true if this is a reserved operation
1922 *
1923 * This assumes the s_hlock is held but the s_last
1924 * qp variable is uncontrolled.
1925 *
1926 * For non-reserved operations, the qp->s_avail
1927 * may be changed.
1928 *
1929 * The return value is zero or -ENOMEM.
1930 */
1931 static inline int rvt_qp_is_avail(
1932 struct rvt_qp *qp,
1933 struct rvt_dev_info *rdi,
1934 bool reserved_op)
1935 {
1936 u32 slast;
1937 u32 avail;
1938 u32 reserved_used;
1939
1940 /* see rvt_qp_wqe_unreserve() */
1941 smp_mb__before_atomic();
1942 if (unlikely(reserved_op)) {
1943 /* see rvt_qp_wqe_unreserve() */
1944 reserved_used = atomic_read(&qp->s_reserved_used);
1945 if (reserved_used >= rdi->dparms.reserved_operations)
1946 return -ENOMEM;
1947 return 0;
1948 }
1949 /* non-reserved operations */
1950 if (likely(qp->s_avail))
1951 return 0;
1952 /* See rvt_qp_complete_swqe() */
1953 slast = smp_load_acquire(&qp->s_last);
1954 if (qp->s_head >= slast)
1955 avail = qp->s_size - (qp->s_head - slast);
1956 else
1957 avail = slast - qp->s_head;
1958
1959 reserved_used = atomic_read(&qp->s_reserved_used);
1960 avail = avail - 1 -
1961 (rdi->dparms.reserved_operations - reserved_used);
1962 /* ensure we don't assign a negative s_avail */
1963 if ((s32)avail <= 0)
1964 return -ENOMEM;
1965 qp->s_avail = avail;
1966 if (WARN_ON(qp->s_avail >
1967 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1968 rvt_pr_err(rdi,
1969 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1970 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1971 qp->s_head, qp->s_tail, qp->s_cur,
1972 qp->s_acked, qp->s_last);
1973 return 0;
1974 }
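/*
 * Worked example of the availability math above (illustrative numbers):
 * with s_size = 16, s_head = 10, s_last = 3, reserved_operations = 2 and
 * reserved_used = 1, the ring holds 16 - (10 - 3) = 9 free slots; one
 * slot is always kept open to distinguish full from empty and one
 * reserved slot is still unclaimed, so s_avail becomes 9 - 1 - (2 - 1) = 7.
 */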
1975
1976 /**
1977 * rvt_post_one_wr - post one RC, UC, or UD send work request
1978 * @qp: the QP to post on
1979 * @wr: the work request to send
1980 */
1981 static int rvt_post_one_wr(struct rvt_qp *qp,
1982 const struct ib_send_wr *wr,
1983 bool *call_send)
1984 {
1985 struct rvt_swqe *wqe;
1986 u32 next;
1987 int i;
1988 int j;
1989 int acc;
1990 struct rvt_lkey_table *rkt;
1991 struct rvt_pd *pd;
1992 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1993 u8 log_pmtu;
1994 int ret;
1995 size_t cplen;
1996 bool reserved_op;
1997 int local_ops_delayed = 0;
1998
1999 BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
2000
2001 /* IB spec says that num_sge == 0 is OK. */
2002 if (unlikely(wr->num_sge > qp->s_max_sge))
2003 return -EINVAL;
2004
2005 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
2006 if (ret < 0)
2007 return ret;
2008 cplen = ret;
2009
2010 /*
2011 * Local operations include fast register and local invalidate.
2012 * Fast register needs to be processed immediately because the
2013 * registered lkey may be used by following work requests and the
2014 * lkey needs to be valid at the time those requests are posted.
2015 * Local invalidate can be processed immediately if fencing is
2016 * not required and no previous local invalidate ops are pending.
2017 * Signaled local operations that have been processed immediately
2018 * need to have requests with "completion only" flags set posted
2019 * to the send queue in order to generate completions.
2020 */
2021 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
2022 switch (wr->opcode) {
2023 case IB_WR_REG_MR:
2024 ret = rvt_fast_reg_mr(qp,
2025 reg_wr(wr)->mr,
2026 reg_wr(wr)->key,
2027 reg_wr(wr)->access);
2028 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2029 return ret;
2030 break;
2031 case IB_WR_LOCAL_INV:
2032 if ((wr->send_flags & IB_SEND_FENCE) ||
2033 atomic_read(&qp->local_ops_pending)) {
2034 local_ops_delayed = 1;
2035 } else {
2036 ret = rvt_invalidate_rkey(
2037 qp, wr->ex.invalidate_rkey);
2038 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2039 return ret;
2040 }
2041 break;
2042 default:
2043 return -EINVAL;
2044 }
2045 }
2046
2047 reserved_op = rdi->post_parms[wr->opcode].flags &
2048 RVT_OPERATION_USE_RESERVE;
2049 /* check for avail */
2050 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2051 if (ret)
2052 return ret;
2053 next = qp->s_head + 1;
2054 if (next >= qp->s_size)
2055 next = 0;
2056
2057 rkt = &rdi->lkey_table;
2058 pd = ibpd_to_rvtpd(qp->ibqp.pd);
2059 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2060
2061 /* cplen has length from above */
2062 memcpy(&wqe->wr, wr, cplen);
2063
2064 wqe->length = 0;
2065 j = 0;
2066 if (wr->num_sge) {
2067 struct rvt_sge *last_sge = NULL;
2068
2069 acc = wr->opcode >= IB_WR_RDMA_READ ?
2070 IB_ACCESS_LOCAL_WRITE : 0;
2071 for (i = 0; i < wr->num_sge; i++) {
2072 u32 length = wr->sg_list[i].length;
2073
2074 if (length == 0)
2075 continue;
2076 ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
2077 &wr->sg_list[i], acc);
2078 if (unlikely(ret < 0))
2079 goto bail_inval_free;
2080 wqe->length += length;
2081 if (ret)
2082 last_sge = &wqe->sg_list[j];
2083 j += ret;
2084 }
2085 wqe->wr.num_sge = j;
2086 }
2087
2088 /*
2089 * Calculate and set SWQE PSN values prior to handing it off
2090 * to the driver's check routine. This gives the driver the
2091 * opportunity to adjust PSN values based on internal checks.
2092 */
2093 log_pmtu = qp->log_pmtu;
2094 if (qp->allowed_ops == IB_OPCODE_UD) {
2095 struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
2096
2097 log_pmtu = ah->log_pmtu;
2098 rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
2099 }
2100
2101 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2102 if (local_ops_delayed)
2103 atomic_inc(&qp->local_ops_pending);
2104 else
2105 wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2106 wqe->ssn = 0;
2107 wqe->psn = 0;
2108 wqe->lpsn = 0;
2109 } else {
2110 wqe->ssn = qp->s_ssn++;
2111 wqe->psn = qp->s_next_psn;
2112 wqe->lpsn = wqe->psn +
2113 (wqe->length ?
2114 ((wqe->length - 1) >> log_pmtu) :
2115 0);
2116 }
2117
2118 /* general part of wqe valid - allow for driver checks */
2119 if (rdi->driver_f.setup_wqe) {
2120 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2121 if (ret < 0)
2122 goto bail_inval_free_ref;
2123 }
2124
2125 if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2126 qp->s_next_psn = wqe->lpsn + 1;
2127
2128 if (unlikely(reserved_op)) {
2129 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2130 rvt_qp_wqe_reserve(qp, wqe);
2131 } else {
2132 wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2133 qp->s_avail--;
2134 }
2135 trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2136 smp_wmb(); /* see request builders */
2137 qp->s_head = next;
2138
2139 return 0;
2140
2141 bail_inval_free_ref:
2142 if (qp->allowed_ops == IB_OPCODE_UD)
2143 rdma_destroy_ah_attr(wqe->ud_wr.attr);
2144 bail_inval_free:
2145 /* release mr holds */
2146 while (j) {
2147 struct rvt_sge *sge = &wqe->sg_list[--j];
2148
2149 rvt_put_mr(sge->mr);
2150 }
2151 return ret;
2152 }
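/*
 * Example of the PSN span computed above (illustrative numbers): a 10000
 * byte non-local WQE on a QP with log_pmtu = 12 (4096 byte MTU) needs
 * three packets, so lpsn = psn + ((10000 - 1) >> 12) = psn + 2 and the
 * next WQE will start at qp->s_next_psn = lpsn + 1.
 */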
2153
2154 /**
2155 * rvt_post_send - post a send on a QP
2156 * @ibqp: the QP to post the send on
2157 * @wr: the list of work requests to post
2158 * @bad_wr: the first bad WR is put here
2159 *
2160 * This may be called from interrupt context.
2161 *
2162 * Return: 0 on success else errno
2163 */
2164 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2165 const struct ib_send_wr **bad_wr)
2166 {
2167 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2168 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2169 unsigned long flags = 0;
2170 bool call_send;
2171 unsigned nreq = 0;
2172 int err = 0;
2173
2174 spin_lock_irqsave(&qp->s_hlock, flags);
2175
2176 /*
2177 * Ensure QP state is such that we can send. If not bail out early,
2178 * there is no need to do this every time we post a send.
2179 */
2180 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2181 spin_unlock_irqrestore(&qp->s_hlock, flags);
2182 return -EINVAL;
2183 }
2184
2185 /*
2186 * If the send queue is empty and we only have a single WR, then just go
2187 * ahead and kick the send engine into gear. Otherwise we will always
2188 * just schedule the send to happen later.
2189 */
2190 call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2191
2192 for (; wr; wr = wr->next) {
2193 err = rvt_post_one_wr(qp, wr, &call_send);
2194 if (unlikely(err)) {
2195 *bad_wr = wr;
2196 goto bail;
2197 }
2198 nreq++;
2199 }
2200 bail:
2201 spin_unlock_irqrestore(&qp->s_hlock, flags);
2202 if (nreq) {
2203 /*
2204 * Only call do_send if there is exactly one packet, and the
2205 * driver said it was ok.
2206 */
2207 if (nreq == 1 && call_send)
2208 rdi->driver_f.do_send(qp);
2209 else
2210 rdi->driver_f.schedule_send_no_lock(qp);
2211 }
2212 return err;
2213 }
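/*
 * Illustrative only: a minimal consumer-side sketch of a signaled SEND
 * that ends up here via ib_post_send().  With an otherwise empty send
 * queue and a single WR, the call_send fast path above hands the WQE
 * straight to driver_f.do_send(); longer chains are scheduled instead.
 * Buffer address, length and lkey are hypothetical placeholders.
 *
 *	struct ib_sge sge = {
 *		.addr = buf_dma_addr, .length = buf_len, .lkey = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = (uintptr_t)my_ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */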
2214
2215 /**
2216 * rvt_post_srq_recv - post a receive on a shared receive queue
2217 * @ibsrq: the SRQ to post the receive on
2218 * @wr: the list of work requests to post
2219 * @bad_wr: the first WR to cause a problem is put here
2220 *
2221 * This may be called from interrupt context.
2222 *
2223 * Return: 0 on success else errno
2224 */
2225 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2226 const struct ib_recv_wr **bad_wr)
2227 {
2228 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
2229 struct rvt_krwq *wq;
2230 unsigned long flags;
2231
2232 for (; wr; wr = wr->next) {
2233 struct rvt_rwqe *wqe;
2234 u32 next;
2235 int i;
2236
2237 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2238 *bad_wr = wr;
2239 return -EINVAL;
2240 }
2241
2242 spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
2243 wq = srq->rq.kwq;
2244 next = wq->head + 1;
2245 if (next >= srq->rq.size)
2246 next = 0;
2247 if (next == READ_ONCE(wq->tail)) {
2248 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2249 *bad_wr = wr;
2250 return -ENOMEM;
2251 }
2252
2253 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2254 wqe->wr_id = wr->wr_id;
2255 wqe->num_sge = wr->num_sge;
2256 for (i = 0; i < wr->num_sge; i++) {
2257 wqe->sg_list[i].addr = wr->sg_list[i].addr;
2258 wqe->sg_list[i].length = wr->sg_list[i].length;
2259 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
2260 }
2261 /* Make sure queue entry is written before the head index. */
2262 smp_store_release(&wq->head, next);
2263 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2264 }
2265 return 0;
2266 }
2267
2268 /*
2269 * rvt used the internal kernel struct as part of its ABI; for now, make sure
2270 * the kernel struct does not change layout. FIXME: rvt should never cast the
2271 * user struct to a kernel struct.
2272 */
2273 static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
2274 {
2275 BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
2276 offsetof(struct rvt_wqe_sge, addr));
2277 BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
2278 offsetof(struct rvt_wqe_sge, length));
2279 BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
2280 offsetof(struct rvt_wqe_sge, lkey));
2281 return (struct ib_sge *)sge;
2282 }
2283
2284 /*
2285 * Validate an RWQE and fill in the SGE state.
2286 * Return 1 if OK.
2287 */
2288 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2289 {
2290 int i, j, ret;
2291 struct ib_wc wc;
2292 struct rvt_lkey_table *rkt;
2293 struct rvt_pd *pd;
2294 struct rvt_sge_state *ss;
2295 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2296
2297 rkt = &rdi->lkey_table;
2298 pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2299 ss = &qp->r_sge;
2300 ss->sg_list = qp->r_sg_list;
2301 qp->r_len = 0;
2302 for (i = j = 0; i < wqe->num_sge; i++) {
2303 if (wqe->sg_list[i].length == 0)
2304 continue;
2305 /* Check LKEY */
2306 ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
2307 NULL, rvt_cast_sge(&wqe->sg_list[i]),
2308 IB_ACCESS_LOCAL_WRITE);
2309 if (unlikely(ret <= 0))
2310 goto bad_lkey;
2311 qp->r_len += wqe->sg_list[i].length;
2312 j++;
2313 }
2314 ss->num_sge = j;
2315 ss->total_len = qp->r_len;
2316 return 1;
2317
2318 bad_lkey:
2319 while (j) {
2320 struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2321
2322 rvt_put_mr(sge->mr);
2323 }
2324 ss->num_sge = 0;
2325 memset(&wc, 0, sizeof(wc));
2326 wc.wr_id = wqe->wr_id;
2327 wc.status = IB_WC_LOC_PROT_ERR;
2328 wc.opcode = IB_WC_RECV;
2329 wc.qp = &qp->ibqp;
2330 /* Signal solicited completion event. */
2331 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2332 return 0;
2333 }
2334
2335 /**
2336 * get_count - count the number of request work queue entries
2337 * in the circular buffer
2338 * @rq: the request queue
2339 * @tail: tail index of the circular buffer
2340 * @head: head index of the circular buffer
2341 *
2342 * Return: total number of entries in the circular buffer
2343 */
2344 static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
2345 {
2346 u32 count;
2347
2348 count = head;
2349
2350 if (count >= rq->size)
2351 count = 0;
2352 if (count < tail)
2353 count += rq->size - tail;
2354 else
2355 count -= tail;
2356
2357 return count;
2358 }
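/*
 * Worked example (illustrative numbers): for a ring of size 32 with
 * head = 5 and tail = 28, head < tail so count = 5 + (32 - 28) = 9
 * entries are outstanding; with head = 20 and tail = 4 the simple
 * difference applies and count = 20 - 4 = 16.
 */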
2359
2360 /**
2361 * get_rvt_head - get the head index of the circular buffer
2362 * @rq: the request queue
2363 * @ip: the mmap info pointer (non-NULL for a user-mapped queue)
2364 *
2365 * Return: head index value
2366 */
2367 static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
2368 {
2369 u32 head;
2370
2371 if (ip)
2372 head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
2373 else
2374 head = rq->kwq->head;
2375
2376 return head;
2377 }
2378
2379 /**
2380 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2381 * @qp: the QP
2382 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2383 *
2384 * Return -1 if there is a local error, 0 if no RWQE is available,
2385 * otherwise return 1.
2386 *
2387 * Can be called from interrupt level.
2388 */
2389 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2390 {
2391 unsigned long flags;
2392 struct rvt_rq *rq;
2393 struct rvt_krwq *kwq = NULL;
2394 struct rvt_rwq *wq;
2395 struct rvt_srq *srq;
2396 struct rvt_rwqe *wqe;
2397 void (*handler)(struct ib_event *, void *);
2398 u32 tail;
2399 u32 head;
2400 int ret;
2401 void *ip = NULL;
2402
2403 if (qp->ibqp.srq) {
2404 srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2405 handler = srq->ibsrq.event_handler;
2406 rq = &srq->rq;
2407 ip = srq->ip;
2408 } else {
2409 srq = NULL;
2410 handler = NULL;
2411 rq = &qp->r_rq;
2412 ip = qp->ip;
2413 }
2414
2415 spin_lock_irqsave(&rq->kwq->c_lock, flags);
2416 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2417 ret = 0;
2418 goto unlock;
2419 }
2420 kwq = rq->kwq;
2421 if (ip) {
2422 wq = rq->wq;
2423 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
2424 } else {
2425 tail = kwq->tail;
2426 }
2427
2428 /* Validate tail before using it since it is user writable. */
2429 if (tail >= rq->size)
2430 tail = 0;
2431
2432 if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
2433 head = get_rvt_head(rq, ip);
2434 kwq->count = get_count(rq, tail, head);
2435 }
2436 if (unlikely(kwq->count == 0)) {
2437 ret = 0;
2438 goto unlock;
2439 }
2440 /* Make sure entry is read after the count is read. */
2441 smp_rmb();
2442 wqe = rvt_get_rwqe_ptr(rq, tail);
2443 /*
2444 * Even though we update the tail index in memory, the verbs
2445 * consumer is not supposed to post more entries until a
2446 * completion is generated.
2447 */
2448 if (++tail >= rq->size)
2449 tail = 0;
2450 if (ip)
2451 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
2452 else
2453 kwq->tail = tail;
2454 if (!wr_id_only && !init_sge(qp, wqe)) {
2455 ret = -1;
2456 goto unlock;
2457 }
2458 qp->r_wr_id = wqe->wr_id;
2459
2460 kwq->count--;
2461 ret = 1;
2462 set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2463 if (handler) {
2464 /*
2465 * Validate head pointer value and compute
2466 * the number of remaining WQEs.
2467 */
2468 if (kwq->count < srq->limit) {
2469 kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
2470 if (kwq->count < srq->limit) {
2471 struct ib_event ev;
2472
2473 srq->limit = 0;
2474 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2475 ev.device = qp->ibqp.device;
2476 ev.element.srq = qp->ibqp.srq;
2477 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2478 handler(&ev, srq->ibsrq.srq_context);
2479 goto bail;
2480 }
2481 }
2482 }
2483 unlock:
2484 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2485 bail:
2486 return ret;
2487 }
2488 EXPORT_SYMBOL(rvt_get_rwqe);
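/*
 * Callers (see rvt_ruc_loopback() below for an in-tree example) typically
 * handle the tri-state return as sketched here:
 *
 *	ret = rvt_get_rwqe(qp, false);
 *	if (ret < 0)
 *		goto op_err;	(local error, move the QP toward error state)
 *	if (!ret)
 *		goto rnr_nak;	(no RWQE posted yet, back-pressure the sender)
 *	(ret == 1: qp->r_wr_id and qp->r_sge now describe the receive buffer)
 */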
2489
2490 /**
2491 * qp_comm_est - handle trap with QP established
2492 * @qp: the QP
2493 */
2494 void rvt_comm_est(struct rvt_qp *qp)
2495 {
2496 qp->r_flags |= RVT_R_COMM_EST;
2497 if (qp->ibqp.event_handler) {
2498 struct ib_event ev;
2499
2500 ev.device = qp->ibqp.device;
2501 ev.element.qp = &qp->ibqp;
2502 ev.event = IB_EVENT_COMM_EST;
2503 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2504 }
2505 }
2506 EXPORT_SYMBOL(rvt_comm_est);
2507
2508 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2509 {
2510 unsigned long flags;
2511 int lastwqe;
2512
2513 spin_lock_irqsave(&qp->s_lock, flags);
2514 lastwqe = rvt_error_qp(qp, err);
2515 spin_unlock_irqrestore(&qp->s_lock, flags);
2516
2517 if (lastwqe) {
2518 struct ib_event ev;
2519
2520 ev.device = qp->ibqp.device;
2521 ev.element.qp = &qp->ibqp;
2522 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
2523 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2524 }
2525 }
2526 EXPORT_SYMBOL(rvt_rc_error);
2527
2528 /*
2529 * rvt_rnr_tbl_to_usec - convert an index into ib_rvt_rnr_table to usec
2530 * @index - the index
2531 * return usec from an index into ib_rvt_rnr_table
2532 */
2533 unsigned long rvt_rnr_tbl_to_usec(u32 index)
2534 {
2535 return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
2536 }
2537 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
2538
2539 static inline unsigned long rvt_aeth_to_usec(u32 aeth)
2540 {
2541 return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
2542 IB_AETH_CREDIT_MASK];
2543 }
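/*
 * Example (assuming the usual ib_hdrs.h values IB_AETH_CREDIT_SHIFT = 24
 * and IB_AETH_CREDIT_MASK = 0x1f): an RNR NAK whose AETH carries timer
 * code 0x0c in bits 28:24 yields index 12 and therefore 640 usec (0.64 ms)
 * from ib_rvt_rnr_table[] at the top of this file.
 */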
2544
2545 /*
2546 * rvt_add_retry_timer_ext - add/start a retry timer
2547 * @qp - the QP
2548 * @shift - timeout shift to wait for multiple packets
2549 * add a retry timer on the QP
2550 */
2551 void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
2552 {
2553 struct ib_qp *ibqp = &qp->ibqp;
2554 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2555
2556 lockdep_assert_held(&qp->s_lock);
2557 qp->s_flags |= RVT_S_TIMER;
2558 /* 4.096 usec. * (1 << qp->timeout) */
2559 qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2560 (qp->timeout_jiffies << shift);
2561 add_timer(&qp->s_timer);
2562 }
2563 EXPORT_SYMBOL(rvt_add_retry_timer_ext);
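/*
 * Example of the expiry math above (illustrative numbers): with
 * qp->timeout = 14, timeout_jiffies corresponds to 4.096 usec * 2^14,
 * roughly 67 ms; a caller passing shift = 1 doubles that to wait for a
 * second packet's worth of response time, on top of rdi->busy_jiffies.
 */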
2564
2565 /**
2566 * rvt_add_rnr_timer - add/start an rnr timer
2567 * @qp - the QP
2568 * @aeth - aeth of RNR timeout, simulated aeth for loopback
2569 * add an rnr timer on the QP
2570 */
2571 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2572 {
2573 u32 to;
2574
2575 lockdep_assert_held(&qp->s_lock);
2576 qp->s_flags |= RVT_S_WAIT_RNR;
2577 to = rvt_aeth_to_usec(aeth);
2578 trace_rvt_rnrnak_add(qp, to);
2579 hrtimer_start(&qp->s_rnr_timer,
2580 ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2581 }
2582 EXPORT_SYMBOL(rvt_add_rnr_timer);
2583
2584 /**
2585 * rvt_stop_rc_timers - stop all timers
2586 * @qp - the QP
2587 * stop any pending timers
2588 */
2589 void rvt_stop_rc_timers(struct rvt_qp *qp)
2590 {
2591 lockdep_assert_held(&qp->s_lock);
2592 /* Remove QP from all timers */
2593 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2594 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2595 del_timer(&qp->s_timer);
2596 hrtimer_try_to_cancel(&qp->s_rnr_timer);
2597 }
2598 }
2599 EXPORT_SYMBOL(rvt_stop_rc_timers);
2600
2601 /**
2602 * rvt_stop_rnr_timer - stop an rnr timer
2603 * @qp - the QP
2604 *
2605 * stop an rnr timer if one is pending.
2607 */
2608 static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2609 {
2610 lockdep_assert_held(&qp->s_lock);
2611 /* Remove QP from rnr timer */
2612 if (qp->s_flags & RVT_S_WAIT_RNR) {
2613 qp->s_flags &= ~RVT_S_WAIT_RNR;
2614 trace_rvt_rnrnak_stop(qp, 0);
2615 }
2616 }
2617
2618 /**
2619 * rvt_del_timers_sync - wait for any timeout routines to exit
2620 * @qp - the QP
2621 */
2622 void rvt_del_timers_sync(struct rvt_qp *qp)
2623 {
2624 del_timer_sync(&qp->s_timer);
2625 hrtimer_cancel(&qp->s_rnr_timer);
2626 }
2627 EXPORT_SYMBOL(rvt_del_timers_sync);
2628
2629 /**
2630 * rvt_rc_timeout - called from s_timer for missing responses
2631 */
2632 static void rvt_rc_timeout(struct timer_list *t)
2633 {
2634 struct rvt_qp *qp = from_timer(qp, t, s_timer);
2635 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2636 unsigned long flags;
2637
2638 spin_lock_irqsave(&qp->r_lock, flags);
2639 spin_lock(&qp->s_lock);
2640 if (qp->s_flags & RVT_S_TIMER) {
2641 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2642
2643 qp->s_flags &= ~RVT_S_TIMER;
2644 rvp->n_rc_timeouts++;
2645 del_timer(&qp->s_timer);
2646 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2647 if (rdi->driver_f.notify_restart_rc)
2648 rdi->driver_f.notify_restart_rc(qp,
2649 qp->s_last_psn + 1,
2650 1);
2651 rdi->driver_f.schedule_send(qp);
2652 }
2653 spin_unlock(&qp->s_lock);
2654 spin_unlock_irqrestore(&qp->r_lock, flags);
2655 }
2656
2657 /*
2658 * This is called from s_timer for RNR timeouts.
2659 */
2660 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2661 {
2662 struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2663 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2664 unsigned long flags;
2665
2666 spin_lock_irqsave(&qp->s_lock, flags);
2667 rvt_stop_rnr_timer(qp);
2668 trace_rvt_rnrnak_timeout(qp, 0);
2669 rdi->driver_f.schedule_send(qp);
2670 spin_unlock_irqrestore(&qp->s_lock, flags);
2671 return HRTIMER_NORESTART;
2672 }
2673 EXPORT_SYMBOL(rvt_rc_rnr_retry);
2674
2675 /**
2676 * rvt_qp_iter_init - initialize for QP iteration
2677 * @rdi: rvt devinfo
2678 * @v: u64 value
2679 *
2680 * This returns an iterator suitable for iterating QPs
2681 * in the system.
2682 *
2683 * The @cb is a user defined callback and @v is a 64
2684 * bit value passed to and relevant for processing in the
2685 * @cb. An example use case would be to alter QP processing
2686 * based on criteria not part of the rvt_qp.
2687 *
2688 * Use cases that require memory allocation to succeed
2689 * must preallocate appropriately.
2690 *
2691 * Return: a pointer to an rvt_qp_iter or NULL
2692 */
2693 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2694 u64 v,
2695 void (*cb)(struct rvt_qp *qp, u64 v))
2696 {
2697 struct rvt_qp_iter *i;
2698
2699 i = kzalloc(sizeof(*i), GFP_KERNEL);
2700 if (!i)
2701 return NULL;
2702
2703 i->rdi = rdi;
2704 /* number of special QPs (SMI/GSI) for device */
2705 i->specials = rdi->ibdev.phys_port_cnt * 2;
2706 i->v = v;
2707 i->cb = cb;
2708
2709 return i;
2710 }
2711 EXPORT_SYMBOL(rvt_qp_iter_init);
2712
2713 /**
2714 * rvt_qp_iter_next - return the next QP in iter
2715 * @iter - the iterator
2716 *
2717 * Fine grained QP iterator suitable for use
2718 * with debugfs seq_file mechanisms.
2719 *
2720 * Updates iter->qp with the current QP when the return
2721 * value is 0.
2722 *
2723 * Return: 0 - iter->qp is valid 1 - no more QPs
2724 */
2725 int rvt_qp_iter_next(struct rvt_qp_iter *iter)
2726 __must_hold(RCU)
2727 {
2728 int n = iter->n;
2729 int ret = 1;
2730 struct rvt_qp *pqp = iter->qp;
2731 struct rvt_qp *qp;
2732 struct rvt_dev_info *rdi = iter->rdi;
2733
2734 /*
2735 * The approach is to consider the special qps
2736 * as additional table entries before the
2737 * real hash table. Since the qp code sets
2738 * the qp->next hash link to NULL, this works just fine.
2739 *
2740 * iter->specials is 2 * # ports
2741 *
2742 * n = 0..iter->specials are the special qp indices
2743 *
2744 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2745 * the potential hash bucket entries
2746 *
2747 */
2748 for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
2749 if (pqp) {
2750 qp = rcu_dereference(pqp->next);
2751 } else {
2752 if (n < iter->specials) {
2753 struct rvt_ibport *rvp;
2754 int pidx;
2755
2756 pidx = n % rdi->ibdev.phys_port_cnt;
2757 rvp = rdi->ports[pidx];
2758 qp = rcu_dereference(rvp->qp[n & 1]);
2759 } else {
2760 qp = rcu_dereference(
2761 rdi->qp_dev->qp_table[
2762 (n - iter->specials)]);
2763 }
2764 }
2765 pqp = qp;
2766 if (qp) {
2767 iter->qp = qp;
2768 iter->n = n;
2769 return 0;
2770 }
2771 }
2772 return ret;
2773 }
2774 EXPORT_SYMBOL(rvt_qp_iter_next);
2775
2776 /**
2777 * rvt_qp_iter - iterate all QPs
2778 * @rdi - rvt devinfo
2779 * @v - a 64 bit value
2780 * @cb - a callback
2781 *
2782 * This provides a way for iterating all QPs.
2783 *
2784 * The @cb is a user defined callback and @v is a 64
2785 * bit value passed to and relevant for processing in the
2786 * cb. An example use case would be to alter QP processing
2787 * based on criteria not part of the rvt_qp.
2788 *
2789 * The code has an internal iterator to simplify
2790 * non seq_file use cases.
2791 */
2792 void rvt_qp_iter(struct rvt_dev_info *rdi,
2793 u64 v,
2794 void (*cb)(struct rvt_qp *qp, u64 v))
2795 {
2796 int ret;
2797 struct rvt_qp_iter i = {
2798 .rdi = rdi,
2799 .specials = rdi->ibdev.phys_port_cnt * 2,
2800 .v = v,
2801 .cb = cb
2802 };
2803
2804 rcu_read_lock();
2805 do {
2806 ret = rvt_qp_iter_next(&i);
2807 if (!ret) {
2808 rvt_get_qp(i.qp);
2809 rcu_read_unlock();
2810 i.cb(i.qp, i.v);
2811 rcu_read_lock();
2812 rvt_put_qp(i.qp);
2813 }
2814 } while (!ret);
2815 rcu_read_unlock();
2816 }
2817 EXPORT_SYMBOL(rvt_qp_iter);
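/*
 * Illustrative only: a hypothetical callback counting RC QPs with
 * rvt_qp_iter().  The @v argument is just a 64 bit cookie, so a pointer
 * has to be round-tripped through a cast.
 *
 *	static void count_rc_qps(struct rvt_qp *qp, u64 v)
 *	{
 *		atomic_t *count = (atomic_t *)(uintptr_t)v;
 *
 *		if (qp->ibqp.qp_type == IB_QPT_RC)
 *			atomic_inc(count);
 *	}
 *	...
 *	atomic_t count = ATOMIC_INIT(0);
 *	rvt_qp_iter(rdi, (u64)(uintptr_t)&count, count_rc_qps);
 */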
2818
2819 /*
2820 * This should be called with s_lock held.
2821 */
2822 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2823 enum ib_wc_status status)
2824 {
2825 u32 old_last, last;
2826 struct rvt_dev_info *rdi;
2827
2828 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2829 return;
2830 rdi = ib_to_rvt(qp->ibqp.device);
2831
2832 old_last = qp->s_last;
2833 trace_rvt_qp_send_completion(qp, wqe, old_last);
2834 last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2835 status);
2836 if (qp->s_acked == old_last)
2837 qp->s_acked = last;
2838 if (qp->s_cur == old_last)
2839 qp->s_cur = last;
2840 if (qp->s_tail == old_last)
2841 qp->s_tail = last;
2842 if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2843 qp->s_draining = 0;
2844 }
2845 EXPORT_SYMBOL(rvt_send_complete);
2846
2847 /**
2848 * rvt_copy_sge - copy data to SGE memory
2849 * @qp: associated QP
2850 * @ss: the SGE state
2851 * @data: the data to copy
2852 * @length: the length of the data
2853 * @release: boolean to release MR
2854 * @copy_last: do a separate copy of the last 8 bytes
2855 */
2856 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2857 void *data, u32 length,
2858 bool release, bool copy_last)
2859 {
2860 struct rvt_sge *sge = &ss->sge;
2861 int i;
2862 bool in_last = false;
2863 bool cacheless_copy = false;
2864 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2865 struct rvt_wss *wss = rdi->wss;
2866 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2867
2868 if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
2869 cacheless_copy = length >= PAGE_SIZE;
2870 } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
2871 if (length >= PAGE_SIZE) {
2872 /*
2873 * NOTE: this *assumes*:
2874 * o The first vaddr is the dest.
2875 * o If multiple pages, then vaddr is sequential.
2876 */
2877 wss_insert(wss, sge->vaddr);
2878 if (length >= (2 * PAGE_SIZE))
2879 wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2880
2881 cacheless_copy = wss_exceeds_threshold(wss);
2882 } else {
2883 wss_advance_clean_counter(wss);
2884 }
2885 }
2886
2887 if (copy_last) {
2888 if (length > 8) {
2889 length -= 8;
2890 } else {
2891 copy_last = false;
2892 in_last = true;
2893 }
2894 }
2895
2896 again:
2897 while (length) {
2898 u32 len = rvt_get_sge_length(sge, length);
2899
2900 WARN_ON_ONCE(len == 0);
2901 if (unlikely(in_last)) {
2902 /* enforce byte transfer ordering */
2903 for (i = 0; i < len; i++)
2904 ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
2905 } else if (cacheless_copy) {
2906 cacheless_memcpy(sge->vaddr, data, len);
2907 } else {
2908 memcpy(sge->vaddr, data, len);
2909 }
2910 rvt_update_sge(ss, len, release);
2911 data += len;
2912 length -= len;
2913 }
2914
2915 if (copy_last) {
2916 copy_last = false;
2917 in_last = true;
2918 length = 8;
2919 goto again;
2920 }
2921 }
2922 EXPORT_SYMBOL(rvt_copy_sge);
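/*
 * Example of the copy_last handling above (illustrative numbers): a 20
 * byte copy with copy_last set transfers bytes 0..11 through the normal
 * (possibly cacheless) path first, then copies the final 8 bytes one byte
 * at a time so the tail of the buffer is observably written last, which
 * matters for consumers that poll the end of an RDMA WRITE payload.
 */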
2923
2924 static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
2925 struct rvt_qp *sqp)
2926 {
2927 rvp->n_pkt_drops++;
2928 /*
2929 * For RC, the requester would time out and retry, so
2930 * shortcut the timeouts and just signal too many retries.
2931 */
2932 return sqp->ibqp.qp_type == IB_QPT_RC ?
2933 IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
2934 }
2935
2936 /**
2937 * rvt_ruc_loopback - handle UC and RC loopback requests
2938 * @sqp: the sending QP
2939 *
2940 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
2941 * Note that although we are single threaded due to the send engine, we still
2942 * have to protect against post_send(). We don't have to worry about
2943 * receive interrupts since this is a connected protocol and all packets
2944 * will pass through here.
2945 */
2946 void rvt_ruc_loopback(struct rvt_qp *sqp)
2947 {
2948 struct rvt_ibport *rvp = NULL;
2949 struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2950 struct rvt_qp *qp;
2951 struct rvt_swqe *wqe;
2952 struct rvt_sge *sge;
2953 unsigned long flags;
2954 struct ib_wc wc;
2955 u64 sdata;
2956 atomic64_t *maddr;
2957 enum ib_wc_status send_status;
2958 bool release;
2959 int ret;
2960 bool copy_last = false;
2961 int local_ops = 0;
2962
2963 rcu_read_lock();
2964 rvp = rdi->ports[sqp->port_num - 1];
2965
2966 /*
2967 * Note that we check the responder QP state after
2968 * checking the requester's state.
2969 */
2970
2971 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
2972 sqp->remote_qpn);
2973
2974 spin_lock_irqsave(&sqp->s_lock, flags);
2975
2976 /* Return if we are already busy processing a work request. */
2977 if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
2978 !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2979 goto unlock;
2980
2981 sqp->s_flags |= RVT_S_BUSY;
2982
2983 again:
2984 if (sqp->s_last == READ_ONCE(sqp->s_head))
2985 goto clr_busy;
2986 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
2987
2988 /* Return if it is not OK to start a new work request. */
2989 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
2990 if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
2991 goto clr_busy;
2992 /* We are in the error state, flush the work request. */
2993 send_status = IB_WC_WR_FLUSH_ERR;
2994 goto flush_send;
2995 }
2996
2997 /*
2998 * We can rely on the entry not changing without the s_lock
2999 * being held until we update s_last.
3000 * We increment s_cur to indicate s_last is in progress.
3001 */
3002 if (sqp->s_last == sqp->s_cur) {
3003 if (++sqp->s_cur >= sqp->s_size)
3004 sqp->s_cur = 0;
3005 }
3006 spin_unlock_irqrestore(&sqp->s_lock, flags);
3007
3008 if (!qp) {
3009 send_status = loopback_qp_drop(rvp, sqp);
3010 goto serr_no_r_lock;
3011 }
3012 spin_lock_irqsave(&qp->r_lock, flags);
3013 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
3014 qp->ibqp.qp_type != sqp->ibqp.qp_type) {
3015 send_status = loopback_qp_drop(rvp, sqp);
3016 goto serr;
3017 }
3018
3019 memset(&wc, 0, sizeof(wc));
3020 send_status = IB_WC_SUCCESS;
3021
3022 release = true;
3023 sqp->s_sge.sge = wqe->sg_list[0];
3024 sqp->s_sge.sg_list = wqe->sg_list + 1;
3025 sqp->s_sge.num_sge = wqe->wr.num_sge;
3026 sqp->s_len = wqe->length;
3027 switch (wqe->wr.opcode) {
3028 case IB_WR_REG_MR:
3029 goto send_comp;
3030
3031 case IB_WR_LOCAL_INV:
3032 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
3033 if (rvt_invalidate_rkey(sqp,
3034 wqe->wr.ex.invalidate_rkey))
3035 send_status = IB_WC_LOC_PROT_ERR;
3036 local_ops = 1;
3037 }
3038 goto send_comp;
3039
3040 case IB_WR_SEND_WITH_INV:
3041 case IB_WR_SEND_WITH_IMM:
3042 case IB_WR_SEND:
3043 ret = rvt_get_rwqe(qp, false);
3044 if (ret < 0)
3045 goto op_err;
3046 if (!ret)
3047 goto rnr_nak;
3048 if (wqe->length > qp->r_len)
3049 goto inv_err;
3050 switch (wqe->wr.opcode) {
3051 case IB_WR_SEND_WITH_INV:
3052 if (!rvt_invalidate_rkey(qp,
3053 wqe->wr.ex.invalidate_rkey)) {
3054 wc.wc_flags = IB_WC_WITH_INVALIDATE;
3055 wc.ex.invalidate_rkey =
3056 wqe->wr.ex.invalidate_rkey;
3057 }
3058 break;
3059 case IB_WR_SEND_WITH_IMM:
3060 wc.wc_flags = IB_WC_WITH_IMM;
3061 wc.ex.imm_data = wqe->wr.ex.imm_data;
3062 break;
3063 default:
3064 break;
3065 }
3066 break;
3067
3068 case IB_WR_RDMA_WRITE_WITH_IMM:
3069 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3070 goto inv_err;
3071 wc.wc_flags = IB_WC_WITH_IMM;
3072 wc.ex.imm_data = wqe->wr.ex.imm_data;
3073 ret = rvt_get_rwqe(qp, true);
3074 if (ret < 0)
3075 goto op_err;
3076 if (!ret)
3077 goto rnr_nak;
3078 /* skip copy_last set and qp_access_flags recheck */
3079 goto do_write;
3080 case IB_WR_RDMA_WRITE:
3081 copy_last = rvt_is_user_qp(qp);
3082 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3083 goto inv_err;
3084 do_write:
3085 if (wqe->length == 0)
3086 break;
3087 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
3088 wqe->rdma_wr.remote_addr,
3089 wqe->rdma_wr.rkey,
3090 IB_ACCESS_REMOTE_WRITE)))
3091 goto acc_err;
3092 qp->r_sge.sg_list = NULL;
3093 qp->r_sge.num_sge = 1;
3094 qp->r_sge.total_len = wqe->length;
3095 break;
3096
3097 case IB_WR_RDMA_READ:
3098 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3099 goto inv_err;
3100 if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
3101 wqe->rdma_wr.remote_addr,
3102 wqe->rdma_wr.rkey,
3103 IB_ACCESS_REMOTE_READ)))
3104 goto acc_err;
3105 release = false;
3106 sqp->s_sge.sg_list = NULL;
3107 sqp->s_sge.num_sge = 1;
3108 qp->r_sge.sge = wqe->sg_list[0];
3109 qp->r_sge.sg_list = wqe->sg_list + 1;
3110 qp->r_sge.num_sge = wqe->wr.num_sge;
3111 qp->r_sge.total_len = wqe->length;
3112 break;
3113
3114 case IB_WR_ATOMIC_CMP_AND_SWP:
3115 case IB_WR_ATOMIC_FETCH_AND_ADD:
3116 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
3117 goto inv_err;
3118 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3119 wqe->atomic_wr.remote_addr,
3120 wqe->atomic_wr.rkey,
3121 IB_ACCESS_REMOTE_ATOMIC)))
3122 goto acc_err;
3123 /* Perform atomic OP and save result. */
3124 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3125 sdata = wqe->atomic_wr.compare_add;
3126 *(u64 *)sqp->s_sge.sge.vaddr =
3127 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
3128 (u64)atomic64_add_return(sdata, maddr) - sdata :
3129 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3130 sdata, wqe->atomic_wr.swap);
3131 rvt_put_mr(qp->r_sge.sge.mr);
3132 qp->r_sge.num_sge = 0;
3133 goto send_comp;
3134
3135 default:
3136 send_status = IB_WC_LOC_QP_OP_ERR;
3137 goto serr;
3138 }
3139
3140 sge = &sqp->s_sge.sge;
3141 while (sqp->s_len) {
3142 u32 len = rvt_get_sge_length(sge, sqp->s_len);
3143
3144 WARN_ON_ONCE(len == 0);
3145 rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
3146 len, release, copy_last);
3147 rvt_update_sge(&sqp->s_sge, len, !release);
3148 sqp->s_len -= len;
3149 }
3150 if (release)
3151 rvt_put_ss(&qp->r_sge);
3152
3153 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3154 goto send_comp;
3155
3156 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
3157 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
3158 else
3159 wc.opcode = IB_WC_RECV;
3160 wc.wr_id = qp->r_wr_id;
3161 wc.status = IB_WC_SUCCESS;
3162 wc.byte_len = wqe->length;
3163 wc.qp = &qp->ibqp;
3164 wc.src_qp = qp->remote_qpn;
3165 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3166 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3167 wc.port_num = 1;
3168 /* Signal completion event if the solicited bit is set. */
3169 rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
3170
3171 send_comp:
3172 spin_unlock_irqrestore(&qp->r_lock, flags);
3173 spin_lock_irqsave(&sqp->s_lock, flags);
3174 rvp->n_loop_pkts++;
3175 flush_send:
3176 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
3177 rvt_send_complete(sqp, wqe, send_status);
3178 if (local_ops) {
3179 atomic_dec(&sqp->local_ops_pending);
3180 local_ops = 0;
3181 }
3182 goto again;
3183
3184 rnr_nak:
3185 /* Handle RNR NAK */
3186 if (qp->ibqp.qp_type == IB_QPT_UC)
3187 goto send_comp;
3188 rvp->n_rnr_naks++;
3189 /*
3190 * Note: we don't need the s_lock held since the BUSY flag
3191 * makes this single threaded.
3192 */
3193 if (sqp->s_rnr_retry == 0) {
3194 send_status = IB_WC_RNR_RETRY_EXC_ERR;
3195 goto serr;
3196 }
3197 if (sqp->s_rnr_retry_cnt < 7)
3198 sqp->s_rnr_retry--;
3199 spin_unlock_irqrestore(&qp->r_lock, flags);
3200 spin_lock_irqsave(&sqp->s_lock, flags);
3201 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
3202 goto clr_busy;
3203 rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3204 IB_AETH_CREDIT_SHIFT);
3205 goto clr_busy;
3206
3207 op_err:
3208 send_status = IB_WC_REM_OP_ERR;
3209 wc.status = IB_WC_LOC_QP_OP_ERR;
3210 goto err;
3211
3212 inv_err:
3213 send_status =
3214 sqp->ibqp.qp_type == IB_QPT_RC ?
3215 IB_WC_REM_INV_REQ_ERR :
3216 IB_WC_SUCCESS;
3217 wc.status = IB_WC_LOC_QP_OP_ERR;
3218 goto err;
3219
3220 acc_err:
3221 send_status = IB_WC_REM_ACCESS_ERR;
3222 wc.status = IB_WC_LOC_PROT_ERR;
3223 err:
3224 /* responder goes to error state */
3225 rvt_rc_error(qp, wc.status);
3226
3227 serr:
3228 spin_unlock_irqrestore(&qp->r_lock, flags);
3229 serr_no_r_lock:
3230 spin_lock_irqsave(&sqp->s_lock, flags);
3231 rvt_send_complete(sqp, wqe, send_status);
3232 if (sqp->ibqp.qp_type == IB_QPT_RC) {
3233 int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
3234
3235 sqp->s_flags &= ~RVT_S_BUSY;
3236 spin_unlock_irqrestore(&sqp->s_lock, flags);
3237 if (lastwqe) {
3238 struct ib_event ev;
3239
3240 ev.device = sqp->ibqp.device;
3241 ev.element.qp = &sqp->ibqp;
3242 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
3243 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
3244 }
3245 goto done;
3246 }
3247 clr_busy:
3248 sqp->s_flags &= ~RVT_S_BUSY;
3249 unlock:
3250 spin_unlock_irqrestore(&sqp->s_lock, flags);
3251 done:
3252 rcu_read_unlock();
3253 }
3254 EXPORT_SYMBOL(rvt_ruc_loopback);
3255