1 /*
2 * Copyright(c) 2016 - 2020 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48 #include <linux/hash.h>
49 #include <linux/bitops.h>
50 #include <linux/lockdep.h>
51 #include <linux/vmalloc.h>
52 #include <linux/slab.h>
53 #include <rdma/ib_verbs.h>
54 #include <rdma/ib_hdrs.h>
55 #include <rdma/opa_addr.h>
56 #include <rdma/uverbs_ioctl.h>
57 #include "qp.h"
58 #include "vt.h"
59 #include "trace.h"
60
61 #define RVT_RWQ_COUNT_THRESHOLD 16
62
63 static void rvt_rc_timeout(struct timer_list *t);
64 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
65 enum ib_qp_type type);
66
67 /*
68 * Convert the AETH RNR timeout code into the number of microseconds.
69 */
70 static const u32 ib_rvt_rnr_table[32] = {
71 655360, /* 00: 655.36 */
72 10, /* 01: .01 */
73 20, /* 02 .02 */
74 30, /* 03: .03 */
75 40, /* 04: .04 */
76 60, /* 05: .06 */
77 80, /* 06: .08 */
78 120, /* 07: .12 */
79 160, /* 08: .16 */
80 240, /* 09: .24 */
81 320, /* 0A: .32 */
82 480, /* 0B: .48 */
83 640, /* 0C: .64 */
84 960, /* 0D: .96 */
85 1280, /* 0E: 1.28 */
86 1920, /* 0F: 1.92 */
87 2560, /* 10: 2.56 */
88 3840, /* 11: 3.84 */
89 5120, /* 12: 5.12 */
90 7680, /* 13: 7.68 */
91 10240, /* 14: 10.24 */
92 15360, /* 15: 15.36 */
93 20480, /* 16: 20.48 */
94 30720, /* 17: 30.72 */
95 40960, /* 18: 40.96 */
96 61440, /* 19: 61.44 */
97 81920, /* 1A: 81.92 */
98 122880, /* 1B: 122.88 */
99 163840, /* 1C: 163.84 */
100 245760, /* 1D: 245.76 */
101 327680, /* 1E: 327.68 */
102 491520 /* 1F: 491.52 */
103 };
104
105 /*
106 * Note that it is OK to post send work requests in the SQE and ERR
107 * states; rvt_do_send() will process them and generate error
108 * completions as per IB 1.2 C10-96.
109 */
110 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
111 [IB_QPS_RESET] = 0,
112 [IB_QPS_INIT] = RVT_POST_RECV_OK,
113 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
114 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
115 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
116 RVT_PROCESS_NEXT_SEND_OK,
117 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
118 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
119 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
120 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
121 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
122 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
123 };
124 EXPORT_SYMBOL(ib_rvt_state_ops);
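
/*
 * Usage sketch: callers index this table by the current QP state and test
 * the capability bits, e.g. rvt_post_recv() below checks
 * (ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) before queuing a WR.
 */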
125
126 /* platform specific: return the last level cache (llc) size, in KiB */
127 static int rvt_wss_llc_size(void)
128 {
129 /* assume that the boot CPU value is universal for all CPUs */
130 return boot_cpu_data.x86_cache_size;
131 }
132
133 /* platform specific: cacheless copy */
134 static void cacheless_memcpy(void *dst, void *src, size_t n)
135 {
136 /*
137 * Use the only available X64 cacheless copy. Add a __user cast
138 * to quiet sparse. The src argument is already in the kernel so
139 * there are no security issues. The extra fault recovery machinery
140 * is not invoked.
141 */
142 __copy_user_nocache(dst, (void __user *)src, n, 0);
143 }
144
145 void rvt_wss_exit(struct rvt_dev_info *rdi)
146 {
147 struct rvt_wss *wss = rdi->wss;
148
149 if (!wss)
150 return;
151
152 /* coded to handle partially initialized and repeat callers */
153 kfree(wss->entries);
154 wss->entries = NULL;
155 kfree(rdi->wss);
156 rdi->wss = NULL;
157 }
158
159 /**
160 * rvt_wss_init - Init wss data structures
161 *
162 * Return: 0 on success
163 */
164 int rvt_wss_init(struct rvt_dev_info *rdi)
165 {
166 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
167 unsigned int wss_threshold = rdi->dparms.wss_threshold;
168 unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
169 long llc_size;
170 long llc_bits;
171 long table_size;
172 long table_bits;
173 struct rvt_wss *wss;
174 int node = rdi->dparms.node;
175
176 if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
177 rdi->wss = NULL;
178 return 0;
179 }
180
181 rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
182 if (!rdi->wss)
183 return -ENOMEM;
184 wss = rdi->wss;
185
186 /* check for a valid percent range - default to 80 if none or invalid */
187 if (wss_threshold < 1 || wss_threshold > 100)
188 wss_threshold = 80;
189
190 /* reject a wildly large period */
191 if (wss_clean_period > 1000000)
192 wss_clean_period = 256;
193
194 /* reject a zero period */
195 if (wss_clean_period == 0)
196 wss_clean_period = 1;
197
198 /*
199 * Calculate the table size - the next power of 2 larger than the
200 * LLC size. LLC size is in KiB.
201 */
202 llc_size = rvt_wss_llc_size() * 1024;
203 table_size = roundup_pow_of_two(llc_size);
204
205 /* one bit per page in rounded up table */
206 llc_bits = llc_size / PAGE_SIZE;
207 table_bits = table_size / PAGE_SIZE;
208 wss->pages_mask = table_bits - 1;
209 wss->num_entries = table_bits / BITS_PER_LONG;
210
211 wss->threshold = (llc_bits * wss_threshold) / 100;
212 if (wss->threshold == 0)
213 wss->threshold = 1;
214
215 wss->clean_period = wss_clean_period;
216 atomic_set(&wss->clean_counter, wss_clean_period);
217
218 wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
219 GFP_KERNEL, node);
220 if (!wss->entries) {
221 rvt_wss_exit(rdi);
222 return -ENOMEM;
223 }
224
225 return 0;
226 }
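
/*
 * Sizing example (assuming a 32 MiB LLC, 4 KiB pages, and 64-bit longs):
 * llc_bits = 8192, table_size rounds up to 32 MiB so table_bits = 8192,
 * pages_mask = 0x1fff, num_entries = 128, and the default 80% threshold
 * works out to 6553 distinct pages.
 */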
227
228 /*
229 * Advance the clean counter. When the clean period has expired,
230 * clean an entry.
231 *
232 * This is implemented in atomics to avoid locking. Because multiple
233 * variables are involved, it can be racy which can lead to slightly
234 * inaccurate information. Since this is only a heuristic, this is
235 * OK. Any inaccuracies will clean themselves out as the counter
236 * advances. That said, it is unlikely the entry clean operation will
237 * race - the next possible racer will not start until the next clean
238 * period.
239 *
240 * The clean counter is implemented as a decrement to zero. When zero
241 * is reached an entry is cleaned.
242 */
243 static void wss_advance_clean_counter(struct rvt_wss *wss)
244 {
245 int entry;
246 int weight;
247 unsigned long bits;
248
249 /* become the cleaner if we decrement the counter to zero */
250 if (atomic_dec_and_test(&wss->clean_counter)) {
251 /*
252 * Set, not add, the clean period. This avoids an issue
253 * where the counter could decrement below the clean period.
254 * Doing a set can result in lost decrements, slowing the
255 * clean advance. Since this is a heuristic, this possible
256 * slowdown is OK.
257 *
258 * An alternative is to loop, advancing the counter by a
259 * clean period until the result is > 0. However, this could
260 * lead to several threads keeping another in the clean loop.
261 * This could be mitigated by limiting the number of times
262 * we stay in the loop.
263 */
264 atomic_set(&wss->clean_counter, wss->clean_period);
265
266 /*
267 * Uniquely grab the entry to clean and move to next.
268 * The current entry is always the lower bits of
269 * wss.clean_entry. The table size, wss.num_entries,
270 * is always a power-of-2.
271 */
272 entry = (atomic_inc_return(&wss->clean_entry) - 1)
273 & (wss->num_entries - 1);
274
275 /* clear the entry and count the bits */
276 bits = xchg(&wss->entries[entry], 0);
277 weight = hweight64((u64)bits);
278 /* only adjust the contended total count if needed */
279 if (weight)
280 atomic_sub(weight, &wss->total_count);
281 }
282 }
283
284 /*
285 * Insert the given address into the working set array.
286 */
287 static void wss_insert(struct rvt_wss *wss, void *address)
288 {
289 u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
290 u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
291 u32 nr = page & (BITS_PER_LONG - 1);
292
293 if (!test_and_set_bit(nr, &wss->entries[entry]))
294 atomic_inc(&wss->total_count);
295
296 wss_advance_clean_counter(wss);
297 }
298
299 /*
300 * Is the working set larger than the threshold?
301 */
302 static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
303 {
304 return atomic_read(&wss->total_count) >= wss->threshold;
305 }
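
/*
 * Together, wss_insert() and wss_exceeds_threshold() drive the adaptive SGE
 * copy mode (RVT_SGE_COPY_ADAPTIVE): the SGE copy path records destination
 * pages and, once the estimated working set exceeds the LLC-based threshold,
 * it can fall back to cacheless_memcpy() to avoid polluting the cache.
 */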
306
307 static void get_map_page(struct rvt_qpn_table *qpt,
308 struct rvt_qpn_map *map)
309 {
310 unsigned long page = get_zeroed_page(GFP_KERNEL);
311
312 /*
313 * Free the page if someone raced with us installing it.
314 */
315
316 spin_lock(&qpt->lock);
317 if (map->page)
318 free_page(page);
319 else
320 map->page = (void *)page;
321 spin_unlock(&qpt->lock);
322 }
323
324 /**
325 * init_qpn_table - initialize the QP number table for a device
326 * @qpt: the QPN table
327 */
328 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
329 {
330 u32 offset, i;
331 struct rvt_qpn_map *map;
332 int ret = 0;
333
334 if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
335 return -EINVAL;
336
337 spin_lock_init(&qpt->lock);
338
339 qpt->last = rdi->dparms.qpn_start;
340 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
341
342 /*
343 * Drivers may want some QPs beyond what we need for verbs; let them use
344 * our qpn table. No need for two. Let's go ahead and mark the bitmaps
345 * for those. The reserved range must be *after* the range which verbs
346 * will pick from.
347 */
348
349 /* Figure out number of bit maps needed before reserved range */
350 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
351
352 /* This should always be zero */
353 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
354
355 /* Starting with the first reserved bit map */
356 map = &qpt->map[qpt->nmaps];
357
358 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
359 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
360 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
361 if (!map->page) {
362 get_map_page(qpt, map);
363 if (!map->page) {
364 ret = -ENOMEM;
365 break;
366 }
367 }
368 set_bit(offset, map->page);
369 offset++;
370 if (offset == RVT_BITS_PER_PAGE) {
371 /* next page */
372 qpt->nmaps++;
373 map++;
374 offset = 0;
375 }
376 }
377 return ret;
378 }
379
380 /**
381 * free_qpn_table - free the QP number table for a device
382 * @qpt: the QPN table
383 */
384 static void free_qpn_table(struct rvt_qpn_table *qpt)
385 {
386 int i;
387
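/* free_page() ignores a zero address, so never-allocated map pages are harmless here. */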
388 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
389 free_page((unsigned long)qpt->map[i].page);
390 }
391
392 /**
393 * rvt_driver_qp_init - Init driver qp resources
394 * @rdi: rvt dev structure
395 *
396 * Return: 0 on success
397 */
398 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
399 {
400 int i;
401 int ret = -ENOMEM;
402
403 if (!rdi->dparms.qp_table_size)
404 return -EINVAL;
405
406 /*
407 * If driver is not doing any QP allocation then make sure it is
408 * providing the necessary QP functions.
409 */
410 if (!rdi->driver_f.free_all_qps ||
411 !rdi->driver_f.qp_priv_alloc ||
412 !rdi->driver_f.qp_priv_free ||
413 !rdi->driver_f.notify_qp_reset ||
414 !rdi->driver_f.notify_restart_rc)
415 return -EINVAL;
416
417 /* allocate parent object */
418 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
419 rdi->dparms.node);
420 if (!rdi->qp_dev)
421 return -ENOMEM;
422
423 /* allocate hash table */
424 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
425 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
426 rdi->qp_dev->qp_table =
427 kmalloc_array_node(rdi->qp_dev->qp_table_size,
428 sizeof(*rdi->qp_dev->qp_table),
429 GFP_KERNEL, rdi->dparms.node);
430 if (!rdi->qp_dev->qp_table)
431 goto no_qp_table;
432
433 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
434 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
435
436 spin_lock_init(&rdi->qp_dev->qpt_lock);
437
438 /* initialize qpn map */
439 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
440 goto fail_table;
441
442 spin_lock_init(&rdi->n_qps_lock);
443
444 return 0;
445
446 fail_table:
447 kfree(rdi->qp_dev->qp_table);
448 free_qpn_table(&rdi->qp_dev->qpn_table);
449
450 no_qp_table:
451 kfree(rdi->qp_dev);
452
453 return ret;
454 }
455
456 /**
457 * rvt_free_qp_cb - callback function to reset a qp
458 * @qp: the qp to reset
459 * @v: a 64-bit value
460 *
461 * This function resets the qp and removes it from the
462 * qp hash table.
463 */
464 static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
465 {
466 unsigned int *qp_inuse = (unsigned int *)v;
467 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
468
469 /* Reset the qp and remove it from the qp hash list */
470 rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
471
472 /* Increment the qp_inuse count */
473 (*qp_inuse)++;
474 }
475
476 /**
477 * rvt_free_all_qps - check for QPs still in use
478 * @rdi: rvt device info structure
479 *
480 * There should not be any QPs still in use.
481 * Free memory for table.
482 * Return the number of QPs still in use.
483 */
484 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
485 {
486 unsigned int qp_inuse = 0;
487
488 qp_inuse += rvt_mcast_tree_empty(rdi);
489
490 rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
491
492 return qp_inuse;
493 }
494
495 /**
496 * rvt_qp_exit - clean up qps on device exit
497 * @rdi: rvt dev structure
498 *
499 * Check for qp leaks and free resources.
500 */
501 void rvt_qp_exit(struct rvt_dev_info *rdi)
502 {
503 u32 qps_inuse = rvt_free_all_qps(rdi);
504
505 if (qps_inuse)
506 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
507 qps_inuse);
508 if (!rdi->qp_dev)
509 return;
510
511 kfree(rdi->qp_dev->qp_table);
512 free_qpn_table(&rdi->qp_dev->qpn_table);
513 kfree(rdi->qp_dev);
514 }
515
516 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
517 struct rvt_qpn_map *map, unsigned off)
518 {
519 return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
520 }
521
522 /**
523 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
524 * IB_QPT_SMI/IB_QPT_GSI
525 * @rdi: rvt device info structure
526 * @qpt: queue pair number table pointer
527 * @port_num: IB port number, 1 based, comes from core
528 * @exclude_prefix: prefix of special queue pair number being allocated
529 *
530 * Return: The queue pair number on success, or a negative errno on failure
531 */
532 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
533 enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
534 {
535 u32 i, offset, max_scan, qpn;
536 struct rvt_qpn_map *map;
537 u32 ret;
538 u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
539 RVT_AIP_QPN_MAX : RVT_QPN_MAX;
540
541 if (rdi->driver_f.alloc_qpn)
542 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
543
544 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
545 unsigned n;
546
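/*
 * QP0 (SMI) and QP1 (GSI) are fixed per port: qpt->flags keeps two bits
 * per port, bit 2 * (port_num - 1) for QP0 and the next bit for QP1, so a
 * repeat allocation of the same special QP fails with -EINVAL.
 */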
547 ret = type == IB_QPT_GSI;
548 n = 1 << (ret + 2 * (port_num - 1));
549 spin_lock(&qpt->lock);
550 if (qpt->flags & n)
551 ret = -EINVAL;
552 else
553 qpt->flags |= n;
554 spin_unlock(&qpt->lock);
555 goto bail;
556 }
557
558 qpn = qpt->last + qpt->incr;
559 if (qpn >= max_qpn)
560 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
561 /* offset carries bit 0 */
562 offset = qpn & RVT_BITS_PER_PAGE_MASK;
563 map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
564 max_scan = qpt->nmaps - !offset;
565 for (i = 0;;) {
566 if (unlikely(!map->page)) {
567 get_map_page(qpt, map);
568 if (unlikely(!map->page))
569 break;
570 }
571 do {
572 if (!test_and_set_bit(offset, map->page)) {
573 qpt->last = qpn;
574 ret = qpn;
575 goto bail;
576 }
577 offset += qpt->incr;
578 /*
579 * This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
580 * That is OK. It gets re-assigned below
581 */
582 qpn = mk_qpn(qpt, map, offset);
583 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
584 /*
585 * In order to keep the number of pages allocated to a
586 * minimum, we scan all the existing pages before increasing
587 * the size of the bitmap table.
588 */
589 if (++i > max_scan) {
590 if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
591 break;
592 map = &qpt->map[qpt->nmaps++];
593 /* start at incr with current bit 0 */
594 offset = qpt->incr | (offset & 1);
595 } else if (map < &qpt->map[qpt->nmaps]) {
596 ++map;
597 /* start at incr with current bit 0 */
598 offset = qpt->incr | (offset & 1);
599 } else {
600 map = &qpt->map[0];
601 /* wrap to first map page, invert bit 0 */
602 offset = qpt->incr | ((offset & 1) ^ 1);
603 }
604 /* there can be no set bits in low-order QoS bits */
605 WARN_ON(rdi->dparms.qos_shift > 1 &&
606 offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
607 qpn = mk_qpn(qpt, map, offset);
608 }
609
610 ret = -ENOMEM;
611
612 bail:
613 return ret;
614 }
615
616 /**
617 * rvt_clear_mr_refs - Drop held mr refs
618 * @qp: rvt qp data structure
619 * @clr_sends: Whether to also clear the send side
620 */
621 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
622 {
623 unsigned n;
624 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
625
626 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
627 rvt_put_ss(&qp->s_rdma_read_sge);
628
629 rvt_put_ss(&qp->r_sge);
630
631 if (clr_sends) {
632 while (qp->s_last != qp->s_head) {
633 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
634
635 rvt_put_qp_swqe(qp, wqe);
636 if (++qp->s_last >= qp->s_size)
637 qp->s_last = 0;
638 smp_wmb(); /* see qp_set_savail */
639 }
640 if (qp->s_rdma_mr) {
641 rvt_put_mr(qp->s_rdma_mr);
642 qp->s_rdma_mr = NULL;
643 }
644 }
645
646 for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
647 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
648
649 if (e->rdma_sge.mr) {
650 rvt_put_mr(e->rdma_sge.mr);
651 e->rdma_sge.mr = NULL;
652 }
653 }
654 }
655
656 /**
657 * rvt_swqe_has_lkey - return true if lkey is used by swqe
658 * @wqe - the send wqe
659 * @lkey - the lkey
660 *
661 * Test the swqe for using lkey
662 */
663 static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
664 {
665 int i;
666
667 for (i = 0; i < wqe->wr.num_sge; i++) {
668 struct rvt_sge *sge = &wqe->sg_list[i];
669
670 if (rvt_mr_has_lkey(sge->mr, lkey))
671 return true;
672 }
673 return false;
674 }
675
676 /**
677 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
678 * @qp - the rvt_qp
679 * @lkey - the lkey
680 */
681 static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
682 {
683 u32 s_last = qp->s_last;
684
685 while (s_last != qp->s_head) {
686 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
687
688 if (rvt_swqe_has_lkey(wqe, lkey))
689 return true;
690
691 if (++s_last >= qp->s_size)
692 s_last = 0;
693 }
694 if (qp->s_rdma_mr)
695 if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
696 return true;
697 return false;
698 }
699
700 /**
701 * rvt_qp_acks_has_lkey - return true if acks have lkey
702 * @qp - the qp
703 * @lkey - the lkey
704 */
705 static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
706 {
707 int i;
708 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
709
710 for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
711 struct rvt_ack_entry *e = &qp->s_ack_queue[i];
712
713 if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
714 return true;
715 }
716 return false;
717 }
718
719 /*
720 * rvt_qp_mr_clean - clean up remote ops for lkey
721 * @qp - the qp
722 * @lkey - the lkey that is being de-registered
723 *
724 * This routine checks if the lkey is being used by
725 * the qp.
726 *
727 * If so, the qp is put into an error state to eliminate
728 * any references from the qp.
729 */
730 void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
731 {
732 bool lastwqe = false;
733
734 if (qp->ibqp.qp_type == IB_QPT_SMI ||
735 qp->ibqp.qp_type == IB_QPT_GSI)
736 /* avoid special QPs */
737 return;
738 spin_lock_irq(&qp->r_lock);
739 spin_lock(&qp->s_hlock);
740 spin_lock(&qp->s_lock);
741
742 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
743 goto check_lwqe;
744
745 if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
746 rvt_qp_sends_has_lkey(qp, lkey) ||
747 rvt_qp_acks_has_lkey(qp, lkey))
748 lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
749 check_lwqe:
750 spin_unlock(&qp->s_lock);
751 spin_unlock(&qp->s_hlock);
752 spin_unlock_irq(&qp->r_lock);
753 if (lastwqe) {
754 struct ib_event ev;
755
756 ev.device = qp->ibqp.device;
757 ev.element.qp = &qp->ibqp;
758 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
759 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
760 }
761 }
762
763 /**
764 * rvt_remove_qp - remove qp from table
765 * @rdi: rvt dev struct
766 * @qp: qp to remove
767 *
768 * Remove the QP from the table so it can't be found asynchronously by
769 * the receive routine.
770 */
771 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
772 {
773 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
774 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
775 unsigned long flags;
776 int removed = 1;
777
778 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
779
780 if (rcu_dereference_protected(rvp->qp[0],
781 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
782 RCU_INIT_POINTER(rvp->qp[0], NULL);
783 } else if (rcu_dereference_protected(rvp->qp[1],
784 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
785 RCU_INIT_POINTER(rvp->qp[1], NULL);
786 } else {
787 struct rvt_qp *q;
788 struct rvt_qp __rcu **qpp;
789
790 removed = 0;
791 qpp = &rdi->qp_dev->qp_table[n];
792 for (; (q = rcu_dereference_protected(*qpp,
793 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
794 qpp = &q->next) {
795 if (q == qp) {
796 RCU_INIT_POINTER(*qpp,
797 rcu_dereference_protected(qp->next,
798 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
799 removed = 1;
800 trace_rvt_qpremove(qp, n);
801 break;
802 }
803 }
804 }
805
806 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
807 if (removed) {
808 synchronize_rcu();
809 rvt_put_qp(qp);
810 }
811 }
812
813 /**
814 * rvt_alloc_rq - allocate memory for user or kernel buffer
815 * @rq: receive queue data structure
816 * @size: number of request queue entries
817 * @node: The NUMA node
818 * @udata: true if user data is available, false otherwise
819 *
820 * Return: 0 on success, or -ENOMEM if memory allocation fails.
821 * This function is used by both shared receive
822 * queues and non-shared receive queues to allocate
823 * memory.
824 */
825 int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
826 struct ib_udata *udata)
827 {
828 if (udata) {
829 rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
830 if (!rq->wq)
831 goto bail;
832 /* need kwq with no buffers */
833 rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
834 if (!rq->kwq)
835 goto bail;
836 rq->kwq->curr_wq = rq->wq->wq;
837 } else {
838 /* need kwq with buffers */
839 rq->kwq =
840 vzalloc_node(sizeof(struct rvt_krwq) + size, node);
841 if (!rq->kwq)
842 goto bail;
843 rq->kwq->curr_wq = rq->kwq->wq;
844 }
845
846 spin_lock_init(&rq->kwq->p_lock);
847 spin_lock_init(&rq->kwq->c_lock);
848 return 0;
849 bail:
850 rvt_free_rq(rq);
851 return -ENOMEM;
852 }
853
854 /**
855 * rvt_init_qp - initialize the QP state to the reset state
856 * @qp: the QP to init or reinit
857 * @type: the QP type
858 *
859 * This function is called from both rvt_create_qp() and
860 * rvt_reset_qp(). The difference is that the reset path
861 * takes the necessary locks to protect against concurrent
862 * access.
863 */
864 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
865 enum ib_qp_type type)
866 {
867 qp->remote_qpn = 0;
868 qp->qkey = 0;
869 qp->qp_access_flags = 0;
870 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
871 qp->s_hdrwords = 0;
872 qp->s_wqe = NULL;
873 qp->s_draining = 0;
874 qp->s_next_psn = 0;
875 qp->s_last_psn = 0;
876 qp->s_sending_psn = 0;
877 qp->s_sending_hpsn = 0;
878 qp->s_psn = 0;
879 qp->r_psn = 0;
880 qp->r_msn = 0;
881 if (type == IB_QPT_RC) {
882 qp->s_state = IB_OPCODE_RC_SEND_LAST;
883 qp->r_state = IB_OPCODE_RC_SEND_LAST;
884 } else {
885 qp->s_state = IB_OPCODE_UC_SEND_LAST;
886 qp->r_state = IB_OPCODE_UC_SEND_LAST;
887 }
888 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
889 qp->r_nak_state = 0;
890 qp->r_aflags = 0;
891 qp->r_flags = 0;
892 qp->s_head = 0;
893 qp->s_tail = 0;
894 qp->s_cur = 0;
895 qp->s_acked = 0;
896 qp->s_last = 0;
897 qp->s_ssn = 1;
898 qp->s_lsn = 0;
899 qp->s_mig_state = IB_MIG_MIGRATED;
900 qp->r_head_ack_queue = 0;
901 qp->s_tail_ack_queue = 0;
902 qp->s_acked_ack_queue = 0;
903 qp->s_num_rd_atomic = 0;
904 qp->r_sge.num_sge = 0;
905 atomic_set(&qp->s_reserved_used, 0);
906 }
907
908 /**
909 * _rvt_reset_qp - initialize the QP state to the reset state
910 * @qp: the QP to reset
911 * @type: the QP type
912 *
913 * r_lock, s_hlock, and s_lock are required to be held by the caller
914 */
915 static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
916 enum ib_qp_type type)
917 __must_hold(&qp->s_lock)
918 __must_hold(&qp->s_hlock)
919 __must_hold(&qp->r_lock)
920 {
921 lockdep_assert_held(&qp->r_lock);
922 lockdep_assert_held(&qp->s_hlock);
923 lockdep_assert_held(&qp->s_lock);
924 if (qp->state != IB_QPS_RESET) {
925 qp->state = IB_QPS_RESET;
926
927 /* Let drivers flush their waitlist */
928 rdi->driver_f.flush_qp_waiters(qp);
929 rvt_stop_rc_timers(qp);
930 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
931 spin_unlock(&qp->s_lock);
932 spin_unlock(&qp->s_hlock);
933 spin_unlock_irq(&qp->r_lock);
934
935 /* Stop the send queue and the retry timer */
936 rdi->driver_f.stop_send_queue(qp);
937 rvt_del_timers_sync(qp);
938 /* Wait for things to stop */
939 rdi->driver_f.quiesce_qp(qp);
940
941 /* take qp out the hash and wait for it to be unused */
942 rvt_remove_qp(rdi, qp);
943
944 /* grab the lock b/c it was locked at call time */
945 spin_lock_irq(&qp->r_lock);
946 spin_lock(&qp->s_hlock);
947 spin_lock(&qp->s_lock);
948
949 rvt_clear_mr_refs(qp, 1);
950 /*
951 * Let the driver do any tear down or re-init it needs to for
952 * a qp that has been reset
953 */
954 rdi->driver_f.notify_qp_reset(qp);
955 }
956 rvt_init_qp(rdi, qp, type);
957 lockdep_assert_held(&qp->r_lock);
958 lockdep_assert_held(&qp->s_hlock);
959 lockdep_assert_held(&qp->s_lock);
960 }
961
962 /**
963 * rvt_reset_qp - initialize the QP state to the reset state
964 * @rdi: the device info
965 * @qp: the QP to reset
966 * @type: the QP type
967 *
968 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
969 * before calling _rvt_reset_qp().
970 */
971 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
972 enum ib_qp_type type)
973 {
974 spin_lock_irq(&qp->r_lock);
975 spin_lock(&qp->s_hlock);
976 spin_lock(&qp->s_lock);
977 _rvt_reset_qp(rdi, qp, type);
978 spin_unlock(&qp->s_lock);
979 spin_unlock(&qp->s_hlock);
980 spin_unlock_irq(&qp->r_lock);
981 }
982
983 /**
 * rvt_free_qpn - Free a qpn from the bit map
984 * @qpt: QP table
985 * @qpn: queue pair number to free
986 */
987 static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
988 {
989 struct rvt_qpn_map *map;
990
991 if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
992 qpn &= RVT_AIP_QP_SUFFIX;
993
994 map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
995 if (map->page)
996 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
997 }
998
999 /**
1000 * get_allowed_ops - Given a QP type return the appropriate allowed OP
1001 * @type: valid, supported, QP type
1002 */
1003 static u8 get_allowed_ops(enum ib_qp_type type)
1004 {
1005 return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
1006 IB_OPCODE_UC : IB_OPCODE_UD;
1007 }
1008
1009 /**
1010 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
1011 * @qp: Valid QP with allowed_ops set
1012 *
1013 * The rvt_swqe data structure being used is a union, so this is
1014 * only valid for UD QPs.
1015 */
1016 static void free_ud_wq_attr(struct rvt_qp *qp)
1017 {
1018 struct rvt_swqe *wqe;
1019 int i;
1020
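/* The allowed_ops test in the loop condition makes this a no-op for non-UD QPs. */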
1021 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
1022 wqe = rvt_get_swqe_ptr(qp, i);
1023 kfree(wqe->ud_wr.attr);
1024 wqe->ud_wr.attr = NULL;
1025 }
1026 }
1027
1028 /**
1029 * alloc_ud_wq_attr - Allocate the AH attribute cache for UD QPs
1030 * @qp: Valid QP with allowed_ops set
1031 * @node: Numa node for allocation
1032 *
1033 * The rvt_swqe data structure being used is a union, so this is
1034 * only valid for UD QPs.
1035 */
1036 static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
1037 {
1038 struct rvt_swqe *wqe;
1039 int i;
1040
1041 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
1042 wqe = rvt_get_swqe_ptr(qp, i);
1043 wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
1044 GFP_KERNEL, node);
1045 if (!wqe->ud_wr.attr) {
1046 free_ud_wq_attr(qp);
1047 return -ENOMEM;
1048 }
1049 }
1050
1051 return 0;
1052 }
1053
1054 /**
1055 * rvt_create_qp - create a queue pair for a device
1056 * @ibpd: the protection domain whose device we create the queue pair for
1057 * @init_attr: the attributes of the queue pair
1058 * @udata: user data for libibverbs.so
1059 *
1060 * Queue pair creation is mostly an rvt issue. However, drivers have their own
1061 * unique idea of what queue pair numbers mean. For instance there is a reserved
1062 * range for PSM.
1063 *
1064 * Return: the queue pair on success, otherwise returns an errno.
1065 *
1066 * Called by the ib_create_qp() core verbs function.
1067 */
1068 struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
1069 struct ib_qp_init_attr *init_attr,
1070 struct ib_udata *udata)
1071 {
1072 struct rvt_qp *qp;
1073 int err;
1074 struct rvt_swqe *swq = NULL;
1075 size_t sz;
1076 size_t sg_list_sz;
1077 struct ib_qp *ret = ERR_PTR(-ENOMEM);
1078 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
1079 void *priv = NULL;
1080 size_t sqsize;
1081 u8 exclude_prefix = 0;
1082
1083 if (!rdi)
1084 return ERR_PTR(-EINVAL);
1085
1086 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
1087 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
1088 (init_attr->create_flags &&
1089 init_attr->create_flags != IB_QP_CREATE_NETDEV_USE))
1090 return ERR_PTR(-EINVAL);
1091
1092 /* Check receive queue parameters if no SRQ is specified. */
1093 if (!init_attr->srq) {
1094 if (init_attr->cap.max_recv_sge >
1095 rdi->dparms.props.max_recv_sge ||
1096 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
1097 return ERR_PTR(-EINVAL);
1098
1099 if (init_attr->cap.max_send_sge +
1100 init_attr->cap.max_send_wr +
1101 init_attr->cap.max_recv_sge +
1102 init_attr->cap.max_recv_wr == 0)
1103 return ERR_PTR(-EINVAL);
1104 }
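
/*
 * The send queue gets one extra slot (the usual circular-ring convention so
 * full and empty stay distinguishable) plus reserved_operations slots for
 * driver-internal WQEs; rvt_query_qp() reverses this when reporting
 * max_send_wr.
 */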
1105 sqsize =
1106 init_attr->cap.max_send_wr + 1 +
1107 rdi->dparms.reserved_operations;
1108 switch (init_attr->qp_type) {
1109 case IB_QPT_SMI:
1110 case IB_QPT_GSI:
1111 if (init_attr->port_num == 0 ||
1112 init_attr->port_num > ibpd->device->phys_port_cnt)
1113 return ERR_PTR(-EINVAL);
1114 fallthrough;
1115 case IB_QPT_UC:
1116 case IB_QPT_RC:
1117 case IB_QPT_UD:
1118 sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
1119 swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
1120 if (!swq)
1121 return ERR_PTR(-ENOMEM);
1122
1123 sz = sizeof(*qp);
1124 sg_list_sz = 0;
1125 if (init_attr->srq) {
1126 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
1127
1128 if (srq->rq.max_sge > 1)
1129 sg_list_sz = sizeof(*qp->r_sg_list) *
1130 (srq->rq.max_sge - 1);
1131 } else if (init_attr->cap.max_recv_sge > 1)
1132 sg_list_sz = sizeof(*qp->r_sg_list) *
1133 (init_attr->cap.max_recv_sge - 1);
1134 qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
1135 rdi->dparms.node);
1136 if (!qp)
1137 goto bail_swq;
1138 qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
1139
1140 RCU_INIT_POINTER(qp->next, NULL);
1141 if (init_attr->qp_type == IB_QPT_RC) {
1142 qp->s_ack_queue =
1143 kcalloc_node(rvt_max_atomic(rdi),
1144 sizeof(*qp->s_ack_queue),
1145 GFP_KERNEL,
1146 rdi->dparms.node);
1147 if (!qp->s_ack_queue)
1148 goto bail_qp;
1149 }
1150 /* initialize timers needed for rc qp */
1151 timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
1152 hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
1153 HRTIMER_MODE_REL);
1154 qp->s_rnr_timer.function = rvt_rc_rnr_retry;
1155
1156 /*
1157 * Driver needs to set up its private QP structure and do any
1158 * initialization that is needed.
1159 */
1160 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
1161 if (IS_ERR(priv)) {
1162 ret = priv;
1163 goto bail_qp;
1164 }
1165 qp->priv = priv;
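/*
 * Default retransmit timeout: the IBTA local ACK timeout is
 * 4.096 usec * 2^timeout, converted here to jiffies.
 */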
1166 qp->timeout_jiffies =
1167 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1168 1000UL);
1169 if (init_attr->srq) {
1170 sz = 0;
1171 } else {
1172 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1173 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1174 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1175 sizeof(struct rvt_rwqe);
1176 err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
1177 rdi->dparms.node, udata);
1178 if (err) {
1179 ret = ERR_PTR(err);
1180 goto bail_driver_priv;
1181 }
1182 }
1183
1184 /*
1185 * ib_create_qp() will initialize qp->ibqp
1186 * except for qp->ibqp.qp_num.
1187 */
1188 spin_lock_init(&qp->r_lock);
1189 spin_lock_init(&qp->s_hlock);
1190 spin_lock_init(&qp->s_lock);
1191 atomic_set(&qp->refcount, 0);
1192 atomic_set(&qp->local_ops_pending, 0);
1193 init_waitqueue_head(&qp->wait);
1194 INIT_LIST_HEAD(&qp->rspwait);
1195 qp->state = IB_QPS_RESET;
1196 qp->s_wq = swq;
1197 qp->s_size = sqsize;
1198 qp->s_avail = init_attr->cap.max_send_wr;
1199 qp->s_max_sge = init_attr->cap.max_send_sge;
1200 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1201 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
1202 err = alloc_ud_wq_attr(qp, rdi->dparms.node);
1203 if (err) {
1204 ret = (ERR_PTR(err));
1205 goto bail_rq_rvt;
1206 }
1207
1208 if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
1209 exclude_prefix = RVT_AIP_QP_PREFIX;
1210
1211 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
1212 init_attr->qp_type,
1213 init_attr->port_num,
1214 exclude_prefix);
1215 if (err < 0) {
1216 ret = ERR_PTR(err);
1217 goto bail_rq_wq;
1218 }
1219 qp->ibqp.qp_num = err;
1220 if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
1221 qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
1222 qp->port_num = init_attr->port_num;
1223 rvt_init_qp(rdi, qp, init_attr->qp_type);
1224 if (rdi->driver_f.qp_priv_init) {
1225 err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
1226 if (err) {
1227 ret = ERR_PTR(err);
1228 goto bail_rq_wq;
1229 }
1230 }
1231 break;
1232
1233 default:
1234 /* Don't support raw QPs */
1235 return ERR_PTR(-EOPNOTSUPP);
1236 }
1237
1238 init_attr->cap.max_inline_data = 0;
1239
1240 /*
1241 * Return the address of the RWQ as the offset to mmap.
1242 * See rvt_mmap() for details.
1243 */
1244 if (udata && udata->outlen >= sizeof(__u64)) {
1245 if (!qp->r_rq.wq) {
1246 __u64 offset = 0;
1247
1248 err = ib_copy_to_udata(udata, &offset,
1249 sizeof(offset));
1250 if (err) {
1251 ret = ERR_PTR(err);
1252 goto bail_qpn;
1253 }
1254 } else {
1255 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
1256
1257 qp->ip = rvt_create_mmap_info(rdi, s, udata,
1258 qp->r_rq.wq);
1259 if (IS_ERR(qp->ip)) {
1260 ret = ERR_CAST(qp->ip);
1261 goto bail_qpn;
1262 }
1263
1264 err = ib_copy_to_udata(udata, &qp->ip->offset,
1265 sizeof(qp->ip->offset));
1266 if (err) {
1267 ret = ERR_PTR(err);
1268 goto bail_ip;
1269 }
1270 }
1271 qp->pid = current->pid;
1272 }
1273
1274 spin_lock(&rdi->n_qps_lock);
1275 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
1276 spin_unlock(&rdi->n_qps_lock);
1277 ret = ERR_PTR(-ENOMEM);
1278 goto bail_ip;
1279 }
1280
1281 rdi->n_qps_allocated++;
1282 /*
1283 * Maintain a busy_jiffies variable that will be added to the timeout
1284 * period in mod_retry_timer and add_retry_timer. This busy jiffies
1285 * is scaled by the number of rc qps created for the device to reduce
1286 * the number of timeouts occurring when there is a large number of
1287 * qps. busy_jiffies is incremented every rc qp scaling interval.
1288 * The scaling interval is selected based on extensive performance
1289 * evaluation of targeted workloads.
1290 */
1291 if (init_attr->qp_type == IB_QPT_RC) {
1292 rdi->n_rc_qps++;
1293 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1294 }
1295 spin_unlock(&rdi->n_qps_lock);
1296
1297 if (qp->ip) {
1298 spin_lock_irq(&rdi->pending_lock);
1299 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
1300 spin_unlock_irq(&rdi->pending_lock);
1301 }
1302
1303 ret = &qp->ibqp;
1304
1305 return ret;
1306
1307 bail_ip:
1308 if (qp->ip)
1309 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1310
1311 bail_qpn:
1312 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1313
1314 bail_rq_wq:
1315 free_ud_wq_attr(qp);
1316
1317 bail_rq_rvt:
1318 rvt_free_rq(&qp->r_rq);
1319
1320 bail_driver_priv:
1321 rdi->driver_f.qp_priv_free(rdi, qp);
1322
1323 bail_qp:
1324 kfree(qp->s_ack_queue);
1325 kfree(qp);
1326
1327 bail_swq:
1328 vfree(swq);
1329
1330 return ret;
1331 }
1332
1333 /**
1334 * rvt_error_qp - put a QP into the error state
1335 * @qp: the QP to put into the error state
1336 * @err: the receive completion error to signal if a RWQE is active
1337 *
1338 * Flushes both send and receive work queues.
1339 *
1340 * Return: true if last WQE event should be generated.
1341 * The QP r_lock and s_lock should be held and interrupts disabled.
1342 * If we are already in error state, just return.
1343 */
1344 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
1345 {
1346 struct ib_wc wc;
1347 int ret = 0;
1348 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1349
1350 lockdep_assert_held(&qp->r_lock);
1351 lockdep_assert_held(&qp->s_lock);
1352 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
1353 goto bail;
1354
1355 qp->state = IB_QPS_ERR;
1356
1357 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1358 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
1359 del_timer(&qp->s_timer);
1360 }
1361
1362 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
1363 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
1364
1365 rdi->driver_f.notify_error_qp(qp);
1366
1367 /* Schedule the sending tasklet to drain the send work queue. */
1368 if (READ_ONCE(qp->s_last) != qp->s_head)
1369 rdi->driver_f.schedule_send(qp);
1370
1371 rvt_clear_mr_refs(qp, 0);
1372
1373 memset(&wc, 0, sizeof(wc));
1374 wc.qp = &qp->ibqp;
1375 wc.opcode = IB_WC_RECV;
1376
1377 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
1378 wc.wr_id = qp->r_wr_id;
1379 wc.status = err;
1380 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1381 }
1382 wc.status = IB_WC_WR_FLUSH_ERR;
1383
1384 if (qp->r_rq.kwq) {
1385 u32 head;
1386 u32 tail;
1387 struct rvt_rwq *wq = NULL;
1388 struct rvt_krwq *kwq = NULL;
1389
1390 spin_lock(&qp->r_rq.kwq->c_lock);
1391 /* qp->ip used to validate if there is a user buffer mmaped */
1392 if (qp->ip) {
1393 wq = qp->r_rq.wq;
1394 head = RDMA_READ_UAPI_ATOMIC(wq->head);
1395 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
1396 } else {
1397 kwq = qp->r_rq.kwq;
1398 head = kwq->head;
1399 tail = kwq->tail;
1400 }
1401 /* sanity check pointers before trusting them */
1402 if (head >= qp->r_rq.size)
1403 head = 0;
1404 if (tail >= qp->r_rq.size)
1405 tail = 0;
1406 while (tail != head) {
1407 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1408 if (++tail >= qp->r_rq.size)
1409 tail = 0;
1410 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1411 }
1412 if (qp->ip)
1413 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
1414 else
1415 kwq->tail = tail;
1416 spin_unlock(&qp->r_rq.kwq->c_lock);
1417 } else if (qp->ibqp.event_handler) {
1418 ret = 1;
1419 }
1420
1421 bail:
1422 return ret;
1423 }
1424 EXPORT_SYMBOL(rvt_error_qp);
1425
1426 /*
1427 * Put the QP into the hash table.
1428 * The hash table holds a reference to the QP.
1429 */
1430 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1431 {
1432 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1433 unsigned long flags;
1434
1435 rvt_get_qp(qp);
1436 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1437
1438 if (qp->ibqp.qp_num <= 1) {
1439 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1440 } else {
1441 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1442
1443 qp->next = rdi->qp_dev->qp_table[n];
1444 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1445 trace_rvt_qpinsert(qp, n);
1446 }
1447
1448 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1449 }
1450
1451 /**
1452 * rvt_modify_qp - modify the attributes of a queue pair
1453 * @ibqp: the queue pair whose attributes we're modifying
1454 * @attr: the new attributes
1455 * @attr_mask: the mask of attributes to modify
1456 * @udata: user data for libibverbs.so
1457 *
1458 * Return: 0 on success, otherwise returns an errno.
1459 */
1460 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1461 int attr_mask, struct ib_udata *udata)
1462 {
1463 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1464 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1465 enum ib_qp_state cur_state, new_state;
1466 struct ib_event ev;
1467 int lastwqe = 0;
1468 int mig = 0;
1469 int pmtu = 0; /* for gcc warning only */
1470 int opa_ah;
1471
1472 spin_lock_irq(&qp->r_lock);
1473 spin_lock(&qp->s_hlock);
1474 spin_lock(&qp->s_lock);
1475
1476 cur_state = attr_mask & IB_QP_CUR_STATE ?
1477 attr->cur_qp_state : qp->state;
1478 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1479 opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
1480
1481 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1482 attr_mask))
1483 goto inval;
1484
1485 if (rdi->driver_f.check_modify_qp &&
1486 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1487 goto inval;
1488
1489 if (attr_mask & IB_QP_AV) {
1490 if (opa_ah) {
1491 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1492 opa_get_mcast_base(OPA_MCAST_NR))
1493 goto inval;
1494 } else {
1495 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1496 be16_to_cpu(IB_MULTICAST_LID_BASE))
1497 goto inval;
1498 }
1499
1500 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1501 goto inval;
1502 }
1503
1504 if (attr_mask & IB_QP_ALT_PATH) {
1505 if (opa_ah) {
1506 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1507 opa_get_mcast_base(OPA_MCAST_NR))
1508 goto inval;
1509 } else {
1510 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1511 be16_to_cpu(IB_MULTICAST_LID_BASE))
1512 goto inval;
1513 }
1514
1515 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1516 goto inval;
1517 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1518 goto inval;
1519 }
1520
1521 if (attr_mask & IB_QP_PKEY_INDEX)
1522 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1523 goto inval;
1524
1525 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1526 if (attr->min_rnr_timer > 31)
1527 goto inval;
1528
1529 if (attr_mask & IB_QP_PORT)
1530 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1531 qp->ibqp.qp_type == IB_QPT_GSI ||
1532 attr->port_num == 0 ||
1533 attr->port_num > ibqp->device->phys_port_cnt)
1534 goto inval;
1535
1536 if (attr_mask & IB_QP_DEST_QPN)
1537 if (attr->dest_qp_num > RVT_QPN_MASK)
1538 goto inval;
1539
1540 if (attr_mask & IB_QP_RETRY_CNT)
1541 if (attr->retry_cnt > 7)
1542 goto inval;
1543
1544 if (attr_mask & IB_QP_RNR_RETRY)
1545 if (attr->rnr_retry > 7)
1546 goto inval;
1547
1548 /*
1549 * Don't allow invalid path_mtu values. OK to set greater
1550 * than the active mtu (or even the max_cap, if we have tuned
1551 * that to a small mtu). We'll set qp->path_mtu
1552 * to the lesser of requested attribute mtu and active,
1553 * for packetizing messages.
1554 * Note that the QP port has to be set in INIT and MTU in RTR.
1555 */
1556 if (attr_mask & IB_QP_PATH_MTU) {
1557 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1558 if (pmtu < 0)
1559 goto inval;
1560 }
1561
1562 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1563 if (attr->path_mig_state == IB_MIG_REARM) {
1564 if (qp->s_mig_state == IB_MIG_ARMED)
1565 goto inval;
1566 if (new_state != IB_QPS_RTS)
1567 goto inval;
1568 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1569 if (qp->s_mig_state == IB_MIG_REARM)
1570 goto inval;
1571 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1572 goto inval;
1573 if (qp->s_mig_state == IB_MIG_ARMED)
1574 mig = 1;
1575 } else {
1576 goto inval;
1577 }
1578 }
1579
1580 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1581 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1582 goto inval;
1583
1584 switch (new_state) {
1585 case IB_QPS_RESET:
1586 if (qp->state != IB_QPS_RESET)
1587 _rvt_reset_qp(rdi, qp, ibqp->qp_type);
1588 break;
1589
1590 case IB_QPS_RTR:
1591 /* Allow event to re-trigger if QP set to RTR more than once */
1592 qp->r_flags &= ~RVT_R_COMM_EST;
1593 qp->state = new_state;
1594 break;
1595
1596 case IB_QPS_SQD:
1597 qp->s_draining = qp->s_last != qp->s_cur;
1598 qp->state = new_state;
1599 break;
1600
1601 case IB_QPS_SQE:
1602 if (qp->ibqp.qp_type == IB_QPT_RC)
1603 goto inval;
1604 qp->state = new_state;
1605 break;
1606
1607 case IB_QPS_ERR:
1608 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1609 break;
1610
1611 default:
1612 qp->state = new_state;
1613 break;
1614 }
1615
1616 if (attr_mask & IB_QP_PKEY_INDEX)
1617 qp->s_pkey_index = attr->pkey_index;
1618
1619 if (attr_mask & IB_QP_PORT)
1620 qp->port_num = attr->port_num;
1621
1622 if (attr_mask & IB_QP_DEST_QPN)
1623 qp->remote_qpn = attr->dest_qp_num;
1624
1625 if (attr_mask & IB_QP_SQ_PSN) {
1626 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1627 qp->s_psn = qp->s_next_psn;
1628 qp->s_sending_psn = qp->s_next_psn;
1629 qp->s_last_psn = qp->s_next_psn - 1;
1630 qp->s_sending_hpsn = qp->s_last_psn;
1631 }
1632
1633 if (attr_mask & IB_QP_RQ_PSN)
1634 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1635
1636 if (attr_mask & IB_QP_ACCESS_FLAGS)
1637 qp->qp_access_flags = attr->qp_access_flags;
1638
1639 if (attr_mask & IB_QP_AV) {
1640 rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
1641 qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1642 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1643 }
1644
1645 if (attr_mask & IB_QP_ALT_PATH) {
1646 rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1647 qp->s_alt_pkey_index = attr->alt_pkey_index;
1648 }
1649
1650 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1651 qp->s_mig_state = attr->path_mig_state;
1652 if (mig) {
1653 qp->remote_ah_attr = qp->alt_ah_attr;
1654 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1655 qp->s_pkey_index = qp->s_alt_pkey_index;
1656 }
1657 }
1658
1659 if (attr_mask & IB_QP_PATH_MTU) {
1660 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1661 qp->log_pmtu = ilog2(qp->pmtu);
1662 }
1663
1664 if (attr_mask & IB_QP_RETRY_CNT) {
1665 qp->s_retry_cnt = attr->retry_cnt;
1666 qp->s_retry = attr->retry_cnt;
1667 }
1668
1669 if (attr_mask & IB_QP_RNR_RETRY) {
1670 qp->s_rnr_retry_cnt = attr->rnr_retry;
1671 qp->s_rnr_retry = attr->rnr_retry;
1672 }
1673
1674 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1675 qp->r_min_rnr_timer = attr->min_rnr_timer;
1676
1677 if (attr_mask & IB_QP_TIMEOUT) {
1678 qp->timeout = attr->timeout;
1679 qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1680 }
1681
1682 if (attr_mask & IB_QP_QKEY)
1683 qp->qkey = attr->qkey;
1684
1685 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1686 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1687
1688 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1689 qp->s_max_rd_atomic = attr->max_rd_atomic;
1690
1691 if (rdi->driver_f.modify_qp)
1692 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1693
1694 spin_unlock(&qp->s_lock);
1695 spin_unlock(&qp->s_hlock);
1696 spin_unlock_irq(&qp->r_lock);
1697
1698 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1699 rvt_insert_qp(rdi, qp);
1700
1701 if (lastwqe) {
1702 ev.device = qp->ibqp.device;
1703 ev.element.qp = &qp->ibqp;
1704 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1705 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1706 }
1707 if (mig) {
1708 ev.device = qp->ibqp.device;
1709 ev.element.qp = &qp->ibqp;
1710 ev.event = IB_EVENT_PATH_MIG;
1711 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1712 }
1713 return 0;
1714
1715 inval:
1716 spin_unlock(&qp->s_lock);
1717 spin_unlock(&qp->s_hlock);
1718 spin_unlock_irq(&qp->r_lock);
1719 return -EINVAL;
1720 }
1721
1722 /**
1723 * rvt_destroy_qp - destroy a queue pair
1724 * @ibqp: the queue pair to destroy
1725 *
1726 * Note that this can be called while the QP is actively sending or
1727 * receiving!
1728 *
1729 * Return: 0 on success.
1730 */
1731 int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1732 {
1733 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1734 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1735
1736 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1737
1738 wait_event(qp->wait, !atomic_read(&qp->refcount));
1739 /* qpn is now available for use again */
1740 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1741
1742 spin_lock(&rdi->n_qps_lock);
1743 rdi->n_qps_allocated--;
1744 if (qp->ibqp.qp_type == IB_QPT_RC) {
1745 rdi->n_rc_qps--;
1746 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1747 }
1748 spin_unlock(&rdi->n_qps_lock);
1749
1750 if (qp->ip)
1751 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1752 kvfree(qp->r_rq.kwq);
1753 rdi->driver_f.qp_priv_free(rdi, qp);
1754 kfree(qp->s_ack_queue);
1755 rdma_destroy_ah_attr(&qp->remote_ah_attr);
1756 rdma_destroy_ah_attr(&qp->alt_ah_attr);
1757 free_ud_wq_attr(qp);
1758 vfree(qp->s_wq);
1759 kfree(qp);
1760 return 0;
1761 }
1762
1763 /**
1764 * rvt_query_qp - query an ibqp
1765 * @ibqp: IB qp to query
1766 * @attr: attr struct to fill in
1767 * @attr_mask: attr mask ignored
1768 * @init_attr: struct to fill in
1769 *
1770 * Return: always 0
1771 */
1772 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1773 int attr_mask, struct ib_qp_init_attr *init_attr)
1774 {
1775 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1776 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1777
1778 attr->qp_state = qp->state;
1779 attr->cur_qp_state = attr->qp_state;
1780 attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1781 attr->path_mig_state = qp->s_mig_state;
1782 attr->qkey = qp->qkey;
1783 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1784 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1785 attr->dest_qp_num = qp->remote_qpn;
1786 attr->qp_access_flags = qp->qp_access_flags;
1787 attr->cap.max_send_wr = qp->s_size - 1 -
1788 rdi->dparms.reserved_operations;
1789 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1790 attr->cap.max_send_sge = qp->s_max_sge;
1791 attr->cap.max_recv_sge = qp->r_rq.max_sge;
1792 attr->cap.max_inline_data = 0;
1793 attr->ah_attr = qp->remote_ah_attr;
1794 attr->alt_ah_attr = qp->alt_ah_attr;
1795 attr->pkey_index = qp->s_pkey_index;
1796 attr->alt_pkey_index = qp->s_alt_pkey_index;
1797 attr->en_sqd_async_notify = 0;
1798 attr->sq_draining = qp->s_draining;
1799 attr->max_rd_atomic = qp->s_max_rd_atomic;
1800 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1801 attr->min_rnr_timer = qp->r_min_rnr_timer;
1802 attr->port_num = qp->port_num;
1803 attr->timeout = qp->timeout;
1804 attr->retry_cnt = qp->s_retry_cnt;
1805 attr->rnr_retry = qp->s_rnr_retry_cnt;
1806 attr->alt_port_num =
1807 rdma_ah_get_port_num(&qp->alt_ah_attr);
1808 attr->alt_timeout = qp->alt_timeout;
1809
1810 init_attr->event_handler = qp->ibqp.event_handler;
1811 init_attr->qp_context = qp->ibqp.qp_context;
1812 init_attr->send_cq = qp->ibqp.send_cq;
1813 init_attr->recv_cq = qp->ibqp.recv_cq;
1814 init_attr->srq = qp->ibqp.srq;
1815 init_attr->cap = attr->cap;
1816 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1817 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1818 else
1819 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1820 init_attr->qp_type = qp->ibqp.qp_type;
1821 init_attr->port_num = qp->port_num;
1822 return 0;
1823 }
1824
1825 /**
1826 * rvt_post_receive - post a receive on a QP
1827 * @ibqp: the QP to post the receive on
1828 * @wr: the WR to post
1829 * @bad_wr: the first bad WR is put here
1830 *
1831 * This may be called from interrupt context.
1832 *
1833 * Return: 0 on success otherwise errno
1834 */
1835 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1836 const struct ib_recv_wr **bad_wr)
1837 {
1838 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1839 struct rvt_krwq *wq = qp->r_rq.kwq;
1840 unsigned long flags;
1841 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1842 !qp->ibqp.srq;
1843
1844 /* Check that state is OK to post receive. */
1845 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1846 *bad_wr = wr;
1847 return -EINVAL;
1848 }
1849
1850 for (; wr; wr = wr->next) {
1851 struct rvt_rwqe *wqe;
1852 u32 next;
1853 int i;
1854
1855 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1856 *bad_wr = wr;
1857 return -EINVAL;
1858 }
1859
1860 spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
1861 next = wq->head + 1;
1862 if (next >= qp->r_rq.size)
1863 next = 0;
1864 if (next == READ_ONCE(wq->tail)) {
1865 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1866 *bad_wr = wr;
1867 return -ENOMEM;
1868 }
1869 if (unlikely(qp_err_flush)) {
1870 struct ib_wc wc;
1871
1872 memset(&wc, 0, sizeof(wc));
1873 wc.qp = &qp->ibqp;
1874 wc.opcode = IB_WC_RECV;
1875 wc.wr_id = wr->wr_id;
1876 wc.status = IB_WC_WR_FLUSH_ERR;
1877 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1878 } else {
1879 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1880 wqe->wr_id = wr->wr_id;
1881 wqe->num_sge = wr->num_sge;
1882 for (i = 0; i < wr->num_sge; i++) {
1883 wqe->sg_list[i].addr = wr->sg_list[i].addr;
1884 wqe->sg_list[i].length = wr->sg_list[i].length;
1885 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
1886 }
1887 /*
1888 * Make sure queue entry is written
1889 * before the head index.
1890 */
1891 smp_store_release(&wq->head, next);
1892 }
1893 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1894 }
1895 return 0;
1896 }
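
/*
 * Example (illustrative only; dma_addr, len and cookie are placeholders,
 * not part of rdmavt): a kernel ULP typically posts a receive buffer
 * through the core verbs API, which resolves to rvt_post_recv() above
 * for rdmavt-backed devices:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = cookie,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	const struct ib_recv_wr *bad_wr;
 *
 *	err = ib_post_recv(qp, &wr, &bad_wr);
 */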
1897
1898 /**
1899 * rvt_qp_valid_operation - validate post send wr request
1900  * @qp: the qp
1901  * @post_parms: the post send table for the driver
1902  * @wr: the work request
1903  *
1904  * The routine validates the operation based on the
1905  * validation table and returns the length of the operation,
1906  * which can extend beyond struct ib_send_wr. Operation-
1907  * dependent flags key atomic operation validation.
1908 *
1909 * There is an exception for UD qps that validates the pd and
1910 * overrides the length to include the additional UD specific
1911 * length.
1912 *
1913 * Returns a negative error or the length of the work request
1914 * for building the swqe.
1915 */
1916 static inline int rvt_qp_valid_operation(
1917 struct rvt_qp *qp,
1918 const struct rvt_operation_params *post_parms,
1919 const struct ib_send_wr *wr)
1920 {
1921 int len;
1922
1923 if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1924 return -EINVAL;
1925 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1926 return -EINVAL;
1927 if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1928 ibpd_to_rvtpd(qp->ibqp.pd)->user)
1929 return -EINVAL;
1930 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1931 (wr->num_sge == 0 ||
1932 wr->sg_list[0].length < sizeof(u64) ||
1933 wr->sg_list[0].addr & (sizeof(u64) - 1)))
1934 return -EINVAL;
1935 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1936 !qp->s_max_rd_atomic)
1937 return -EINVAL;
1938 len = post_parms[wr->opcode].length;
1939 /* UD specific */
1940 if (qp->ibqp.qp_type != IB_QPT_UC &&
1941 qp->ibqp.qp_type != IB_QPT_RC) {
1942 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1943 return -EINVAL;
1944 len = sizeof(struct ib_ud_wr);
1945 }
1946 return len;
1947 }
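
/*
 * Illustrative sketch of a driver post_parms entry (hypothetical values,
 * not taken from any particular driver): RDMA WRITE support on RC and UC
 * QPs might be described as
 *
 *	[IB_WR_RDMA_WRITE] = {
 *		.length      = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 *
 * in which case rvt_qp_valid_operation() returns sizeof(struct ib_rdma_wr)
 * for such a WR, or -EINVAL for unsupported opcodes or QP types.
 */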
1948
1949 /**
1950 * rvt_qp_is_avail - determine queue capacity
1951 * @qp: the qp
1952 * @rdi: the rdmavt device
1953  * @reserved_op: true if this is a reserved operation
1954 *
1955 * This assumes the s_hlock is held but the s_last
1956 * qp variable is uncontrolled.
1957 *
1958  * For non-reserved operations, qp->s_avail
1959  * may be changed.
1960  *
1961  * Return: 0 on success or -ENOMEM.
1962 */
1963 static inline int rvt_qp_is_avail(
1964 struct rvt_qp *qp,
1965 struct rvt_dev_info *rdi,
1966 bool reserved_op)
1967 {
1968 u32 slast;
1969 u32 avail;
1970 u32 reserved_used;
1971
1972 /* see rvt_qp_wqe_unreserve() */
1973 smp_mb__before_atomic();
1974 if (unlikely(reserved_op)) {
1975 /* see rvt_qp_wqe_unreserve() */
1976 reserved_used = atomic_read(&qp->s_reserved_used);
1977 if (reserved_used >= rdi->dparms.reserved_operations)
1978 return -ENOMEM;
1979 return 0;
1980 }
1981 /* non-reserved operations */
1982 if (likely(qp->s_avail))
1983 return 0;
1984 /* See rvt_qp_complete_swqe() */
1985 slast = smp_load_acquire(&qp->s_last);
1986 if (qp->s_head >= slast)
1987 avail = qp->s_size - (qp->s_head - slast);
1988 else
1989 avail = slast - qp->s_head;
1990
1991 reserved_used = atomic_read(&qp->s_reserved_used);
1992 avail = avail - 1 -
1993 (rdi->dparms.reserved_operations - reserved_used);
1994 	/* ensure we don't assign a negative s_avail */
1995 if ((s32)avail <= 0)
1996 return -ENOMEM;
1997 qp->s_avail = avail;
1998 if (WARN_ON(qp->s_avail >
1999 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
2000 rvt_pr_err(rdi,
2001 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
2002 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
2003 qp->s_head, qp->s_tail, qp->s_cur,
2004 qp->s_acked, qp->s_last);
2005 return 0;
2006 }
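
/*
 * Worked example (illustrative numbers only): with s_size = 16,
 * s_head = 3, s_last = 10, reserved_operations = 2 and one reserved
 * slot in use, avail = 10 - 3 = 7, and the non-reserved capacity is
 * 7 - 1 - (2 - 1) = 5 entries.
 */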
2007
2008 /**
2009 * rvt_post_one_wr - post one RC, UC, or UD send work request
2010 * @qp: the QP to post on
2011 * @wr: the work request to send
2012 */
2013 static int rvt_post_one_wr(struct rvt_qp *qp,
2014 const struct ib_send_wr *wr,
2015 bool *call_send)
2016 {
2017 struct rvt_swqe *wqe;
2018 u32 next;
2019 int i;
2020 int j;
2021 int acc;
2022 struct rvt_lkey_table *rkt;
2023 struct rvt_pd *pd;
2024 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2025 u8 log_pmtu;
2026 int ret;
2027 size_t cplen;
2028 bool reserved_op;
2029 int local_ops_delayed = 0;
2030
2031 BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
2032
2033 /* IB spec says that num_sge == 0 is OK. */
2034 if (unlikely(wr->num_sge > qp->s_max_sge))
2035 return -EINVAL;
2036
2037 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
2038 if (ret < 0)
2039 return ret;
2040 cplen = ret;
2041
2042 /*
2043 * Local operations include fast register and local invalidate.
2044 * Fast register needs to be processed immediately because the
2045 * registered lkey may be used by following work requests and the
2046 * lkey needs to be valid at the time those requests are posted.
2047 * Local invalidate can be processed immediately if fencing is
2048 * not required and no previous local invalidate ops are pending.
2049 * Signaled local operations that have been processed immediately
2050 * need to have requests with "completion only" flags set posted
2051 * to the send queue in order to generate completions.
2052 */
2053 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
2054 switch (wr->opcode) {
2055 case IB_WR_REG_MR:
2056 ret = rvt_fast_reg_mr(qp,
2057 reg_wr(wr)->mr,
2058 reg_wr(wr)->key,
2059 reg_wr(wr)->access);
2060 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2061 return ret;
2062 break;
2063 case IB_WR_LOCAL_INV:
2064 if ((wr->send_flags & IB_SEND_FENCE) ||
2065 atomic_read(&qp->local_ops_pending)) {
2066 local_ops_delayed = 1;
2067 } else {
2068 ret = rvt_invalidate_rkey(
2069 qp, wr->ex.invalidate_rkey);
2070 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2071 return ret;
2072 }
2073 break;
2074 default:
2075 return -EINVAL;
2076 }
2077 }
2078
2079 reserved_op = rdi->post_parms[wr->opcode].flags &
2080 RVT_OPERATION_USE_RESERVE;
2081 /* check for avail */
2082 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2083 if (ret)
2084 return ret;
2085 next = qp->s_head + 1;
2086 if (next >= qp->s_size)
2087 next = 0;
2088
2089 rkt = &rdi->lkey_table;
2090 pd = ibpd_to_rvtpd(qp->ibqp.pd);
2091 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2092
2093 /* cplen has length from above */
2094 memcpy(&wqe->wr, wr, cplen);
2095
2096 wqe->length = 0;
2097 j = 0;
2098 if (wr->num_sge) {
2099 struct rvt_sge *last_sge = NULL;
2100
2101 acc = wr->opcode >= IB_WR_RDMA_READ ?
2102 IB_ACCESS_LOCAL_WRITE : 0;
2103 for (i = 0; i < wr->num_sge; i++) {
2104 u32 length = wr->sg_list[i].length;
2105
2106 if (length == 0)
2107 continue;
2108 ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
2109 &wr->sg_list[i], acc);
2110 if (unlikely(ret < 0))
2111 goto bail_inval_free;
2112 wqe->length += length;
2113 if (ret)
2114 last_sge = &wqe->sg_list[j];
2115 j += ret;
2116 }
2117 wqe->wr.num_sge = j;
2118 }
2119
2120 /*
2121 * Calculate and set SWQE PSN values prior to handing it off
2122 	 * to the driver's check routine. This gives the driver the
2123 * opportunity to adjust PSN values based on internal checks.
2124 */
2125 log_pmtu = qp->log_pmtu;
2126 if (qp->allowed_ops == IB_OPCODE_UD) {
2127 struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
2128
2129 log_pmtu = ah->log_pmtu;
2130 rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
2131 }
2132
2133 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2134 if (local_ops_delayed)
2135 atomic_inc(&qp->local_ops_pending);
2136 else
2137 wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2138 wqe->ssn = 0;
2139 wqe->psn = 0;
2140 wqe->lpsn = 0;
2141 } else {
2142 wqe->ssn = qp->s_ssn++;
2143 wqe->psn = qp->s_next_psn;
2144 wqe->lpsn = wqe->psn +
2145 (wqe->length ?
2146 ((wqe->length - 1) >> log_pmtu) :
2147 0);
2148 }
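	/*
	 * Example (illustrative): a 9000 byte send on a QP with
	 * log_pmtu = 12 (4096 byte MTU) gets lpsn = psn +
	 * ((9000 - 1) >> 12) = psn + 2, i.e. it spans three packets.
	 */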
2149
2150 /* general part of wqe valid - allow for driver checks */
2151 if (rdi->driver_f.setup_wqe) {
2152 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2153 if (ret < 0)
2154 goto bail_inval_free_ref;
2155 }
2156
2157 if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2158 qp->s_next_psn = wqe->lpsn + 1;
2159
2160 if (unlikely(reserved_op)) {
2161 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2162 rvt_qp_wqe_reserve(qp, wqe);
2163 } else {
2164 wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2165 qp->s_avail--;
2166 }
2167 trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2168 smp_wmb(); /* see request builders */
2169 qp->s_head = next;
2170
2171 return 0;
2172
2173 bail_inval_free_ref:
2174 if (qp->allowed_ops == IB_OPCODE_UD)
2175 rdma_destroy_ah_attr(wqe->ud_wr.attr);
2176 bail_inval_free:
2177 /* release mr holds */
2178 while (j) {
2179 struct rvt_sge *sge = &wqe->sg_list[--j];
2180
2181 rvt_put_mr(sge->mr);
2182 }
2183 return ret;
2184 }
2185
2186 /**
2187 * rvt_post_send - post a send on a QP
2188 * @ibqp: the QP to post the send on
2189 * @wr: the list of work requests to post
2190 * @bad_wr: the first bad WR is put here
2191 *
2192 * This may be called from interrupt context.
2193 *
2194 * Return: 0 on success else errno
2195 */
2196 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2197 const struct ib_send_wr **bad_wr)
2198 {
2199 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2200 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2201 unsigned long flags = 0;
2202 bool call_send;
2203 unsigned nreq = 0;
2204 int err = 0;
2205
2206 spin_lock_irqsave(&qp->s_hlock, flags);
2207
2208 /*
2209 	 * Ensure QP state is such that we can send. If not, bail out early;
2210 * there is no need to do this every time we post a send.
2211 */
2212 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2213 spin_unlock_irqrestore(&qp->s_hlock, flags);
2214 return -EINVAL;
2215 }
2216
2217 /*
2218 	 * If the send queue is empty and we only have a single WR, then just go
2219 * ahead and kick the send engine into gear. Otherwise we will always
2220 * just schedule the send to happen later.
2221 */
2222 call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2223
2224 for (; wr; wr = wr->next) {
2225 err = rvt_post_one_wr(qp, wr, &call_send);
2226 if (unlikely(err)) {
2227 *bad_wr = wr;
2228 goto bail;
2229 }
2230 nreq++;
2231 }
2232 bail:
2233 spin_unlock_irqrestore(&qp->s_hlock, flags);
2234 if (nreq) {
2235 /*
2236 * Only call do_send if there is exactly one packet, and the
2237 * driver said it was ok.
2238 */
2239 if (nreq == 1 && call_send)
2240 rdi->driver_f.do_send(qp);
2241 else
2242 rdi->driver_f.schedule_send_no_lock(qp);
2243 }
2244 return err;
2245 }
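
/*
 * Example (illustrative only; dma_addr, len and mr are placeholders):
 * a ULP posting a single signaled SEND through the core verbs API ends
 * up in rvt_post_send() above for rdmavt-backed devices:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	err = ib_post_send(qp, &wr, &bad_wr);
 *
 * With an empty send queue and a single WR, and if the driver agrees,
 * the send is handed to driver_f.do_send() directly; otherwise it is
 * scheduled via driver_f.schedule_send_no_lock().
 */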
2246
2247 /**
2248 * rvt_post_srq_receive - post a receive on a shared receive queue
2249 * @ibsrq: the SRQ to post the receive on
2250 * @wr: the list of work requests to post
2251 * @bad_wr: A pointer to the first WR to cause a problem is put here
2252 *
2253 * This may be called from interrupt context.
2254 *
2255 * Return: 0 on success else errno
2256 */
2257 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2258 const struct ib_recv_wr **bad_wr)
2259 {
2260 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
2261 struct rvt_krwq *wq;
2262 unsigned long flags;
2263
2264 for (; wr; wr = wr->next) {
2265 struct rvt_rwqe *wqe;
2266 u32 next;
2267 int i;
2268
2269 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2270 *bad_wr = wr;
2271 return -EINVAL;
2272 }
2273
2274 spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
2275 wq = srq->rq.kwq;
2276 next = wq->head + 1;
2277 if (next >= srq->rq.size)
2278 next = 0;
2279 if (next == READ_ONCE(wq->tail)) {
2280 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2281 *bad_wr = wr;
2282 return -ENOMEM;
2283 }
2284
2285 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2286 wqe->wr_id = wr->wr_id;
2287 wqe->num_sge = wr->num_sge;
2288 for (i = 0; i < wr->num_sge; i++) {
2289 wqe->sg_list[i].addr = wr->sg_list[i].addr;
2290 wqe->sg_list[i].length = wr->sg_list[i].length;
2291 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
2292 }
2293 /* Make sure queue entry is written before the head index. */
2294 smp_store_release(&wq->head, next);
2295 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2296 }
2297 return 0;
2298 }
2299
2300 /*
2301  * rvt uses the internal kernel struct as part of its ABI; for now, make sure
2302 * the kernel struct does not change layout. FIXME: rvt should never cast the
2303 * user struct to a kernel struct.
2304 */
2305 static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
2306 {
2307 BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
2308 offsetof(struct rvt_wqe_sge, addr));
2309 BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
2310 offsetof(struct rvt_wqe_sge, length));
2311 BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
2312 offsetof(struct rvt_wqe_sge, lkey));
2313 return (struct ib_sge *)sge;
2314 }
2315
2316 /*
2317 * Validate a RWQE and fill in the SGE state.
2318 * Return 1 if OK.
2319 */
2320 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2321 {
2322 int i, j, ret;
2323 struct ib_wc wc;
2324 struct rvt_lkey_table *rkt;
2325 struct rvt_pd *pd;
2326 struct rvt_sge_state *ss;
2327 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2328
2329 rkt = &rdi->lkey_table;
2330 pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2331 ss = &qp->r_sge;
2332 ss->sg_list = qp->r_sg_list;
2333 qp->r_len = 0;
2334 for (i = j = 0; i < wqe->num_sge; i++) {
2335 if (wqe->sg_list[i].length == 0)
2336 continue;
2337 /* Check LKEY */
2338 ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
2339 NULL, rvt_cast_sge(&wqe->sg_list[i]),
2340 IB_ACCESS_LOCAL_WRITE);
2341 if (unlikely(ret <= 0))
2342 goto bad_lkey;
2343 qp->r_len += wqe->sg_list[i].length;
2344 j++;
2345 }
2346 ss->num_sge = j;
2347 ss->total_len = qp->r_len;
2348 return 1;
2349
2350 bad_lkey:
2351 while (j) {
2352 struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2353
2354 rvt_put_mr(sge->mr);
2355 }
2356 ss->num_sge = 0;
2357 memset(&wc, 0, sizeof(wc));
2358 wc.wr_id = wqe->wr_id;
2359 wc.status = IB_WC_LOC_PROT_ERR;
2360 wc.opcode = IB_WC_RECV;
2361 wc.qp = &qp->ibqp;
2362 /* Signal solicited completion event. */
2363 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2364 return 0;
2365 }
2366
2367 /**
2368  * get_rvt_head - get the head index of the circular buffer
2369  * @rq: the receive queue
2370  * @ip: the mmap info pointer; non-NULL for user-mapped queues
2371  *
2372  * Return: head index value
2373 */
2374 static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
2375 {
2376 u32 head;
2377
2378 if (ip)
2379 head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
2380 else
2381 head = rq->kwq->head;
2382
2383 return head;
2384 }
2385
2386 /**
2387 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2388 * @qp: the QP
2389 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2390 *
2391 * Return -1 if there is a local error, 0 if no RWQE is available,
2392 * otherwise return 1.
2393 *
2394 * Can be called from interrupt level.
2395 */
2396 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2397 {
2398 unsigned long flags;
2399 struct rvt_rq *rq;
2400 struct rvt_krwq *kwq = NULL;
2401 struct rvt_rwq *wq;
2402 struct rvt_srq *srq;
2403 struct rvt_rwqe *wqe;
2404 void (*handler)(struct ib_event *, void *);
2405 u32 tail;
2406 u32 head;
2407 int ret;
2408 void *ip = NULL;
2409
2410 if (qp->ibqp.srq) {
2411 srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2412 handler = srq->ibsrq.event_handler;
2413 rq = &srq->rq;
2414 ip = srq->ip;
2415 } else {
2416 srq = NULL;
2417 handler = NULL;
2418 rq = &qp->r_rq;
2419 ip = qp->ip;
2420 }
2421
2422 spin_lock_irqsave(&rq->kwq->c_lock, flags);
2423 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2424 ret = 0;
2425 goto unlock;
2426 }
2427 kwq = rq->kwq;
2428 if (ip) {
2429 wq = rq->wq;
2430 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
2431 } else {
2432 tail = kwq->tail;
2433 }
2434
2435 /* Validate tail before using it since it is user writable. */
2436 if (tail >= rq->size)
2437 tail = 0;
2438
2439 if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
2440 head = get_rvt_head(rq, ip);
2441 kwq->count = rvt_get_rq_count(rq, head, tail);
2442 }
2443 if (unlikely(kwq->count == 0)) {
2444 ret = 0;
2445 goto unlock;
2446 }
2447 /* Make sure entry is read after the count is read. */
2448 smp_rmb();
2449 wqe = rvt_get_rwqe_ptr(rq, tail);
2450 /*
2451 * Even though we update the tail index in memory, the verbs
2452 * consumer is not supposed to post more entries until a
2453 * completion is generated.
2454 */
2455 if (++tail >= rq->size)
2456 tail = 0;
2457 if (ip)
2458 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
2459 else
2460 kwq->tail = tail;
2461 if (!wr_id_only && !init_sge(qp, wqe)) {
2462 ret = -1;
2463 goto unlock;
2464 }
2465 qp->r_wr_id = wqe->wr_id;
2466
2467 kwq->count--;
2468 ret = 1;
2469 set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2470 if (handler) {
2471 /*
2472 * Validate head pointer value and compute
2473 * the number of remaining WQEs.
2474 */
2475 if (kwq->count < srq->limit) {
2476 kwq->count =
2477 rvt_get_rq_count(rq,
2478 get_rvt_head(rq, ip), tail);
2479 if (kwq->count < srq->limit) {
2480 struct ib_event ev;
2481
2482 srq->limit = 0;
2483 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2484 ev.device = qp->ibqp.device;
2485 ev.element.srq = qp->ibqp.srq;
2486 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2487 handler(&ev, srq->ibsrq.srq_context);
2488 goto bail;
2489 }
2490 }
2491 }
2492 unlock:
2493 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2494 bail:
2495 return ret;
2496 }
2497 EXPORT_SYMBOL(rvt_get_rwqe);
2498
2499 /**
2500  * rvt_comm_est - handle trap with QP established
2501 * @qp: the QP
2502 */
2503 void rvt_comm_est(struct rvt_qp *qp)
2504 {
2505 qp->r_flags |= RVT_R_COMM_EST;
2506 if (qp->ibqp.event_handler) {
2507 struct ib_event ev;
2508
2509 ev.device = qp->ibqp.device;
2510 ev.element.qp = &qp->ibqp;
2511 ev.event = IB_EVENT_COMM_EST;
2512 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2513 }
2514 }
2515 EXPORT_SYMBOL(rvt_comm_est);
2516
2517 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
2518 {
2519 unsigned long flags;
2520 int lastwqe;
2521
2522 spin_lock_irqsave(&qp->s_lock, flags);
2523 lastwqe = rvt_error_qp(qp, err);
2524 spin_unlock_irqrestore(&qp->s_lock, flags);
2525
2526 if (lastwqe) {
2527 struct ib_event ev;
2528
2529 ev.device = qp->ibqp.device;
2530 ev.element.qp = &qp->ibqp;
2531 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
2532 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
2533 }
2534 }
2535 EXPORT_SYMBOL(rvt_rc_error);
2536
2537 /*
2538  * rvt_rnr_tbl_to_usec - convert an RNR timeout index to microseconds
2539  * @index: the index into ib_rvt_rnr_table
2540  * Return: usec from an index into ib_rvt_rnr_table
2541 */
2542 unsigned long rvt_rnr_tbl_to_usec(u32 index)
2543 {
2544 return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
2545 }
2546 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
2547
2548 static inline unsigned long rvt_aeth_to_usec(u32 aeth)
2549 {
2550 return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
2551 IB_AETH_CREDIT_MASK];
2552 }
2553
2554 /*
2555 * rvt_add_retry_timer_ext - add/start a retry timer
2556  * @qp: the QP
2557  * @shift: timeout shift to wait for multiple packets
2558  * Add a retry timer on the QP.
2559 */
2560 void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
2561 {
2562 struct ib_qp *ibqp = &qp->ibqp;
2563 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2564
2565 lockdep_assert_held(&qp->s_lock);
2566 qp->s_flags |= RVT_S_TIMER;
2567 /* 4.096 usec. * (1 << qp->timeout) */
2568 qp->s_timer.expires = jiffies + rdi->busy_jiffies +
2569 (qp->timeout_jiffies << shift);
2570 add_timer(&qp->s_timer);
2571 }
2572 EXPORT_SYMBOL(rvt_add_retry_timer_ext);
2573
2574 /**
2575 * rvt_add_rnr_timer - add/start an rnr timer on the QP
2576 * @qp: the QP
2577 * @aeth: aeth of RNR timeout, simulated aeth for loopback
2578 */
2579 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
2580 {
2581 u32 to;
2582
2583 lockdep_assert_held(&qp->s_lock);
2584 qp->s_flags |= RVT_S_WAIT_RNR;
2585 to = rvt_aeth_to_usec(aeth);
2586 trace_rvt_rnrnak_add(qp, to);
2587 hrtimer_start(&qp->s_rnr_timer,
2588 ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
2589 }
2590 EXPORT_SYMBOL(rvt_add_rnr_timer);
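
/*
 * Example (taken from the loopback path below): when the responder has
 * no RWQE available, rvt_ruc_loopback() synthesizes an AETH value as
 *
 *	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer << IB_AETH_CREDIT_SHIFT);
 *
 * so that the credit field of the simulated AETH carries the responder's
 * minimum RNR timer code, which rvt_aeth_to_usec() then decodes.
 */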
2591
2592 /**
2593 * rvt_stop_rc_timers - stop all timers
2594 * @qp: the QP
2595 * stop any pending timers
2596 */
2597 void rvt_stop_rc_timers(struct rvt_qp *qp)
2598 {
2599 lockdep_assert_held(&qp->s_lock);
2600 /* Remove QP from all timers */
2601 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
2602 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
2603 del_timer(&qp->s_timer);
2604 hrtimer_try_to_cancel(&qp->s_rnr_timer);
2605 }
2606 }
2607 EXPORT_SYMBOL(rvt_stop_rc_timers);
2608
2609 /**
2610 * rvt_stop_rnr_timer - stop an rnr timer
2611  * @qp: the QP
2612  *
2613  * Stop an rnr timer, if one had been pending,
2614  * by clearing the RVT_S_WAIT_RNR flag.
2615 */
2616 static void rvt_stop_rnr_timer(struct rvt_qp *qp)
2617 {
2618 lockdep_assert_held(&qp->s_lock);
2619 /* Remove QP from rnr timer */
2620 if (qp->s_flags & RVT_S_WAIT_RNR) {
2621 qp->s_flags &= ~RVT_S_WAIT_RNR;
2622 trace_rvt_rnrnak_stop(qp, 0);
2623 }
2624 }
2625
2626 /**
2627 * rvt_del_timers_sync - wait for any timeout routines to exit
2628 * @qp: the QP
2629 */
2630 void rvt_del_timers_sync(struct rvt_qp *qp)
2631 {
2632 del_timer_sync(&qp->s_timer);
2633 hrtimer_cancel(&qp->s_rnr_timer);
2634 }
2635 EXPORT_SYMBOL(rvt_del_timers_sync);
2636
2637 /*
2638 * This is called from s_timer for missing responses.
2639 */
2640 static void rvt_rc_timeout(struct timer_list *t)
2641 {
2642 struct rvt_qp *qp = from_timer(qp, t, s_timer);
2643 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2644 unsigned long flags;
2645
2646 spin_lock_irqsave(&qp->r_lock, flags);
2647 spin_lock(&qp->s_lock);
2648 if (qp->s_flags & RVT_S_TIMER) {
2649 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2650
2651 qp->s_flags &= ~RVT_S_TIMER;
2652 rvp->n_rc_timeouts++;
2653 del_timer(&qp->s_timer);
2654 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2655 if (rdi->driver_f.notify_restart_rc)
2656 rdi->driver_f.notify_restart_rc(qp,
2657 qp->s_last_psn + 1,
2658 1);
2659 rdi->driver_f.schedule_send(qp);
2660 }
2661 spin_unlock(&qp->s_lock);
2662 spin_unlock_irqrestore(&qp->r_lock, flags);
2663 }
2664
2665 /*
2666 * This is called from s_timer for RNR timeouts.
2667 */
2668 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2669 {
2670 struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2671 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2672 unsigned long flags;
2673
2674 spin_lock_irqsave(&qp->s_lock, flags);
2675 rvt_stop_rnr_timer(qp);
2676 trace_rvt_rnrnak_timeout(qp, 0);
2677 rdi->driver_f.schedule_send(qp);
2678 spin_unlock_irqrestore(&qp->s_lock, flags);
2679 return HRTIMER_NORESTART;
2680 }
2681 EXPORT_SYMBOL(rvt_rc_rnr_retry);
2682
2683 /**
2684  * rvt_qp_iter_init - initialize a QP iterator
2685 * @rdi: rvt devinfo
2686 * @v: u64 value
2687 * @cb: user-defined callback
2688 *
2689 * This returns an iterator suitable for iterating QPs
2690 * in the system.
2691 *
2692 * The @cb is a user-defined callback and @v is a 64-bit
2693 * value passed to and relevant for processing in the
2694 * @cb. An example use case would be to alter QP processing
2695 * based on criteria not part of the rvt_qp.
2696 *
2697 * Use cases that require memory allocation to succeed
2698 * must preallocate appropriately.
2699 *
2700 * Return: a pointer to an rvt_qp_iter or NULL
2701 */
2702 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
2703 u64 v,
2704 void (*cb)(struct rvt_qp *qp, u64 v))
2705 {
2706 struct rvt_qp_iter *i;
2707
2708 i = kzalloc(sizeof(*i), GFP_KERNEL);
2709 if (!i)
2710 return NULL;
2711
2712 i->rdi = rdi;
2713 /* number of special QPs (SMI/GSI) for device */
2714 i->specials = rdi->ibdev.phys_port_cnt * 2;
2715 i->v = v;
2716 i->cb = cb;
2717
2718 return i;
2719 }
2720 EXPORT_SYMBOL(rvt_qp_iter_init);
2721
2722 /**
2723 * rvt_qp_iter_next - return the next QP in iter
2724 * @iter: the iterator
2725 *
2726 * Fine grained QP iterator suitable for use
2727 * with debugfs seq_file mechanisms.
2728 *
2729 * Updates iter->qp with the current QP when the return
2730 * value is 0.
2731 *
2732  * Return: 0 - iter->qp is valid, 1 - no more QPs
2733 */
2734 int rvt_qp_iter_next(struct rvt_qp_iter *iter)
2735 __must_hold(RCU)
2736 {
2737 int n = iter->n;
2738 int ret = 1;
2739 struct rvt_qp *pqp = iter->qp;
2740 struct rvt_qp *qp;
2741 struct rvt_dev_info *rdi = iter->rdi;
2742
2743 /*
2744 * The approach is to consider the special qps
2745 * as additional table entries before the
2746 * real hash table. Since the qp code sets
2747 * the qp->next hash link to NULL, this works just fine.
2748 *
2749 * iter->specials is 2 * # ports
2750 *
2751 * n = 0..iter->specials is the special qp indices
2752 *
2753 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
2754 * the potential hash bucket entries
2755 *
2756 */
2757 for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
2758 if (pqp) {
2759 qp = rcu_dereference(pqp->next);
2760 } else {
2761 if (n < iter->specials) {
2762 struct rvt_ibport *rvp;
2763 int pidx;
2764
2765 pidx = n % rdi->ibdev.phys_port_cnt;
2766 rvp = rdi->ports[pidx];
2767 qp = rcu_dereference(rvp->qp[n & 1]);
2768 } else {
2769 qp = rcu_dereference(
2770 rdi->qp_dev->qp_table[
2771 (n - iter->specials)]);
2772 }
2773 }
2774 pqp = qp;
2775 if (qp) {
2776 iter->qp = qp;
2777 iter->n = n;
2778 return 0;
2779 }
2780 }
2781 return ret;
2782 }
2783 EXPORT_SYMBOL(rvt_qp_iter_next);
2784
2785 /**
2786 * rvt_qp_iter - iterate all QPs
2787 * @rdi: rvt devinfo
2788 * @v: a 64-bit value
2789 * @cb: a callback
2790 *
2791 * This provides a way for iterating all QPs.
2792 *
2793 * The @cb is a user-defined callback and @v is a 64-bit
2794 * value passed to and relevant for processing in the
2795 * cb. An example use case would be to alter QP processing
2796 * based on criteria not part of the rvt_qp.
2797 *
2798 * The code has an internal iterator to simplify
2799 * non seq_file use cases.
2800 */
2801 void rvt_qp_iter(struct rvt_dev_info *rdi,
2802 u64 v,
2803 void (*cb)(struct rvt_qp *qp, u64 v))
2804 {
2805 int ret;
2806 struct rvt_qp_iter i = {
2807 .rdi = rdi,
2808 .specials = rdi->ibdev.phys_port_cnt * 2,
2809 .v = v,
2810 .cb = cb
2811 };
2812
2813 rcu_read_lock();
2814 do {
2815 ret = rvt_qp_iter_next(&i);
2816 if (!ret) {
2817 rvt_get_qp(i.qp);
2818 rcu_read_unlock();
2819 i.cb(i.qp, i.v);
2820 rcu_read_lock();
2821 rvt_put_qp(i.qp);
2822 }
2823 } while (!ret);
2824 rcu_read_unlock();
2825 }
2826 EXPORT_SYMBOL(rvt_qp_iter);
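
/*
 * Usage sketch (the callback below is illustrative and not part of
 * rdmavt): count the RC QPs on a device by passing a counter through
 * the opaque v argument.
 *
 *	static void count_rc_qps(struct rvt_qp *qp, u64 v)
 *	{
 *		atomic_t *count = (atomic_t *)(uintptr_t)v;
 *
 *		if (qp->ibqp.qp_type == IB_QPT_RC)
 *			atomic_inc(count);
 *	}
 *
 *	atomic_t count = ATOMIC_INIT(0);
 *
 *	rvt_qp_iter(rdi, (u64)(uintptr_t)&count, count_rc_qps);
 */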
2827
2828 /*
2829 * This should be called with s_lock held.
2830 */
2831 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
2832 enum ib_wc_status status)
2833 {
2834 u32 old_last, last;
2835 struct rvt_dev_info *rdi;
2836
2837 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2838 return;
2839 rdi = ib_to_rvt(qp->ibqp.device);
2840
2841 old_last = qp->s_last;
2842 trace_rvt_qp_send_completion(qp, wqe, old_last);
2843 last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
2844 status);
2845 if (qp->s_acked == old_last)
2846 qp->s_acked = last;
2847 if (qp->s_cur == old_last)
2848 qp->s_cur = last;
2849 if (qp->s_tail == old_last)
2850 qp->s_tail = last;
2851 if (qp->state == IB_QPS_SQD && last == qp->s_cur)
2852 qp->s_draining = 0;
2853 }
2854 EXPORT_SYMBOL(rvt_send_complete);
2855
2856 /**
2857 * rvt_copy_sge - copy data to SGE memory
2858 * @qp: associated QP
2859 * @ss: the SGE state
2860 * @data: the data to copy
2861 * @length: the length of the data
2862 * @release: boolean to release MR
2863 * @copy_last: do a separate copy of the last 8 bytes
2864 */
2865 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
2866 void *data, u32 length,
2867 bool release, bool copy_last)
2868 {
2869 struct rvt_sge *sge = &ss->sge;
2870 int i;
2871 bool in_last = false;
2872 bool cacheless_copy = false;
2873 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2874 struct rvt_wss *wss = rdi->wss;
2875 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
2876
2877 if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
2878 cacheless_copy = length >= PAGE_SIZE;
2879 } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
2880 if (length >= PAGE_SIZE) {
2881 /*
2882 * NOTE: this *assumes*:
2883 * o The first vaddr is the dest.
2884 * o If multiple pages, then vaddr is sequential.
2885 */
2886 wss_insert(wss, sge->vaddr);
2887 if (length >= (2 * PAGE_SIZE))
2888 wss_insert(wss, (sge->vaddr + PAGE_SIZE));
2889
2890 cacheless_copy = wss_exceeds_threshold(wss);
2891 } else {
2892 wss_advance_clean_counter(wss);
2893 }
2894 }
2895
2896 if (copy_last) {
2897 if (length > 8) {
2898 length -= 8;
2899 } else {
2900 copy_last = false;
2901 in_last = true;
2902 }
2903 }
2904
2905 again:
2906 while (length) {
2907 u32 len = rvt_get_sge_length(sge, length);
2908
2909 WARN_ON_ONCE(len == 0);
2910 if (unlikely(in_last)) {
2911 /* enforce byte transfer ordering */
2912 for (i = 0; i < len; i++)
2913 ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
2914 } else if (cacheless_copy) {
2915 cacheless_memcpy(sge->vaddr, data, len);
2916 } else {
2917 memcpy(sge->vaddr, data, len);
2918 }
2919 rvt_update_sge(ss, len, release);
2920 data += len;
2921 length -= len;
2922 }
2923
2924 if (copy_last) {
2925 copy_last = false;
2926 in_last = true;
2927 length = 8;
2928 goto again;
2929 }
2930 }
2931 EXPORT_SYMBOL(rvt_copy_sge);
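
/*
 * Note on copy_last (set for RDMA WRITEs to user QPs in
 * rvt_ruc_loopback() below): the bulk of the payload is copied first,
 * then the final 8 bytes are written byte by byte via the in_last path
 * above. This keeps the tail of the destination buffer from being
 * updated before the preceding data, which matters for consumers that
 * poll the last bytes of an RDMA WRITE target.
 */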
2932
2933 static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
2934 struct rvt_qp *sqp)
2935 {
2936 rvp->n_pkt_drops++;
2937 /*
2938 	 * For RC, the requester would time out and retry so
2939 * shortcut the timeouts and just signal too many retries.
2940 */
2941 return sqp->ibqp.qp_type == IB_QPT_RC ?
2942 IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
2943 }
2944
2945 /**
2946  * rvt_ruc_loopback - handle UC and RC loopback requests
2947  * @sqp: the sending QP
2948  *
2949  * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
2950 * Note that although we are single threaded due to the send engine, we still
2951 * have to protect against post_send(). We don't have to worry about
2952 * receive interrupts since this is a connected protocol and all packets
2953 * will pass through here.
2954 */
2955 void rvt_ruc_loopback(struct rvt_qp *sqp)
2956 {
2957 struct rvt_ibport *rvp = NULL;
2958 struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
2959 struct rvt_qp *qp;
2960 struct rvt_swqe *wqe;
2961 struct rvt_sge *sge;
2962 unsigned long flags;
2963 struct ib_wc wc;
2964 u64 sdata;
2965 atomic64_t *maddr;
2966 enum ib_wc_status send_status;
2967 bool release;
2968 int ret;
2969 bool copy_last = false;
2970 int local_ops = 0;
2971
2972 rcu_read_lock();
2973 rvp = rdi->ports[sqp->port_num - 1];
2974
2975 /*
2976 * Note that we check the responder QP state after
2977 * checking the requester's state.
2978 */
2979
2980 qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
2981 sqp->remote_qpn);
2982
2983 spin_lock_irqsave(&sqp->s_lock, flags);
2984
2985 /* Return if we are already busy processing a work request. */
2986 if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
2987 !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
2988 goto unlock;
2989
2990 sqp->s_flags |= RVT_S_BUSY;
2991
2992 again:
2993 if (sqp->s_last == READ_ONCE(sqp->s_head))
2994 goto clr_busy;
2995 wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
2996
2997 /* Return if it is not OK to start a new work request. */
2998 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
2999 if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
3000 goto clr_busy;
3001 /* We are in the error state, flush the work request. */
3002 send_status = IB_WC_WR_FLUSH_ERR;
3003 goto flush_send;
3004 }
3005
3006 /*
3007 * We can rely on the entry not changing without the s_lock
3008 * being held until we update s_last.
3009 * We increment s_cur to indicate s_last is in progress.
3010 */
3011 if (sqp->s_last == sqp->s_cur) {
3012 if (++sqp->s_cur >= sqp->s_size)
3013 sqp->s_cur = 0;
3014 }
3015 spin_unlock_irqrestore(&sqp->s_lock, flags);
3016
3017 if (!qp) {
3018 send_status = loopback_qp_drop(rvp, sqp);
3019 goto serr_no_r_lock;
3020 }
3021 spin_lock_irqsave(&qp->r_lock, flags);
3022 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
3023 qp->ibqp.qp_type != sqp->ibqp.qp_type) {
3024 send_status = loopback_qp_drop(rvp, sqp);
3025 goto serr;
3026 }
3027
3028 memset(&wc, 0, sizeof(wc));
3029 send_status = IB_WC_SUCCESS;
3030
3031 release = true;
3032 sqp->s_sge.sge = wqe->sg_list[0];
3033 sqp->s_sge.sg_list = wqe->sg_list + 1;
3034 sqp->s_sge.num_sge = wqe->wr.num_sge;
3035 sqp->s_len = wqe->length;
3036 switch (wqe->wr.opcode) {
3037 case IB_WR_REG_MR:
3038 goto send_comp;
3039
3040 case IB_WR_LOCAL_INV:
3041 if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
3042 if (rvt_invalidate_rkey(sqp,
3043 wqe->wr.ex.invalidate_rkey))
3044 send_status = IB_WC_LOC_PROT_ERR;
3045 local_ops = 1;
3046 }
3047 goto send_comp;
3048
3049 case IB_WR_SEND_WITH_INV:
3050 case IB_WR_SEND_WITH_IMM:
3051 case IB_WR_SEND:
3052 ret = rvt_get_rwqe(qp, false);
3053 if (ret < 0)
3054 goto op_err;
3055 if (!ret)
3056 goto rnr_nak;
3057 if (wqe->length > qp->r_len)
3058 goto inv_err;
3059 switch (wqe->wr.opcode) {
3060 case IB_WR_SEND_WITH_INV:
3061 if (!rvt_invalidate_rkey(qp,
3062 wqe->wr.ex.invalidate_rkey)) {
3063 wc.wc_flags = IB_WC_WITH_INVALIDATE;
3064 wc.ex.invalidate_rkey =
3065 wqe->wr.ex.invalidate_rkey;
3066 }
3067 break;
3068 case IB_WR_SEND_WITH_IMM:
3069 wc.wc_flags = IB_WC_WITH_IMM;
3070 wc.ex.imm_data = wqe->wr.ex.imm_data;
3071 break;
3072 default:
3073 break;
3074 }
3075 break;
3076
3077 case IB_WR_RDMA_WRITE_WITH_IMM:
3078 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3079 goto inv_err;
3080 wc.wc_flags = IB_WC_WITH_IMM;
3081 wc.ex.imm_data = wqe->wr.ex.imm_data;
3082 ret = rvt_get_rwqe(qp, true);
3083 if (ret < 0)
3084 goto op_err;
3085 if (!ret)
3086 goto rnr_nak;
3087 /* skip copy_last set and qp_access_flags recheck */
3088 goto do_write;
3089 case IB_WR_RDMA_WRITE:
3090 copy_last = rvt_is_user_qp(qp);
3091 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3092 goto inv_err;
3093 do_write:
3094 if (wqe->length == 0)
3095 break;
3096 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
3097 wqe->rdma_wr.remote_addr,
3098 wqe->rdma_wr.rkey,
3099 IB_ACCESS_REMOTE_WRITE)))
3100 goto acc_err;
3101 qp->r_sge.sg_list = NULL;
3102 qp->r_sge.num_sge = 1;
3103 qp->r_sge.total_len = wqe->length;
3104 break;
3105
3106 case IB_WR_RDMA_READ:
3107 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3108 goto inv_err;
3109 if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
3110 wqe->rdma_wr.remote_addr,
3111 wqe->rdma_wr.rkey,
3112 IB_ACCESS_REMOTE_READ)))
3113 goto acc_err;
3114 release = false;
3115 sqp->s_sge.sg_list = NULL;
3116 sqp->s_sge.num_sge = 1;
3117 qp->r_sge.sge = wqe->sg_list[0];
3118 qp->r_sge.sg_list = wqe->sg_list + 1;
3119 qp->r_sge.num_sge = wqe->wr.num_sge;
3120 qp->r_sge.total_len = wqe->length;
3121 break;
3122
3123 case IB_WR_ATOMIC_CMP_AND_SWP:
3124 case IB_WR_ATOMIC_FETCH_AND_ADD:
3125 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
3126 goto inv_err;
3127 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3128 wqe->atomic_wr.remote_addr,
3129 wqe->atomic_wr.rkey,
3130 IB_ACCESS_REMOTE_ATOMIC)))
3131 goto acc_err;
3132 /* Perform atomic OP and save result. */
3133 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3134 sdata = wqe->atomic_wr.compare_add;
3135 *(u64 *)sqp->s_sge.sge.vaddr =
3136 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
3137 (u64)atomic64_add_return(sdata, maddr) - sdata :
3138 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3139 sdata, wqe->atomic_wr.swap);
3140 rvt_put_mr(qp->r_sge.sge.mr);
3141 qp->r_sge.num_sge = 0;
3142 goto send_comp;
3143
3144 default:
3145 send_status = IB_WC_LOC_QP_OP_ERR;
3146 goto serr;
3147 }
3148
3149 sge = &sqp->s_sge.sge;
3150 while (sqp->s_len) {
3151 u32 len = rvt_get_sge_length(sge, sqp->s_len);
3152
3153 WARN_ON_ONCE(len == 0);
3154 rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
3155 len, release, copy_last);
3156 rvt_update_sge(&sqp->s_sge, len, !release);
3157 sqp->s_len -= len;
3158 }
3159 if (release)
3160 rvt_put_ss(&qp->r_sge);
3161
3162 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
3163 goto send_comp;
3164
3165 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
3166 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
3167 else
3168 wc.opcode = IB_WC_RECV;
3169 wc.wr_id = qp->r_wr_id;
3170 wc.status = IB_WC_SUCCESS;
3171 wc.byte_len = wqe->length;
3172 wc.qp = &qp->ibqp;
3173 wc.src_qp = qp->remote_qpn;
3174 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3175 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3176 wc.port_num = 1;
3177 /* Signal completion event if the solicited bit is set. */
3178 rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);
3179
3180 send_comp:
3181 spin_unlock_irqrestore(&qp->r_lock, flags);
3182 spin_lock_irqsave(&sqp->s_lock, flags);
3183 rvp->n_loop_pkts++;
3184 flush_send:
3185 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
3186 rvt_send_complete(sqp, wqe, send_status);
3187 if (local_ops) {
3188 atomic_dec(&sqp->local_ops_pending);
3189 local_ops = 0;
3190 }
3191 goto again;
3192
3193 rnr_nak:
3194 /* Handle RNR NAK */
3195 if (qp->ibqp.qp_type == IB_QPT_UC)
3196 goto send_comp;
3197 rvp->n_rnr_naks++;
3198 /*
3199 * Note: we don't need the s_lock held since the BUSY flag
3200 * makes this single threaded.
3201 */
3202 if (sqp->s_rnr_retry == 0) {
3203 send_status = IB_WC_RNR_RETRY_EXC_ERR;
3204 goto serr;
3205 }
3206 if (sqp->s_rnr_retry_cnt < 7)
3207 sqp->s_rnr_retry--;
3208 spin_unlock_irqrestore(&qp->r_lock, flags);
3209 spin_lock_irqsave(&sqp->s_lock, flags);
3210 if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
3211 goto clr_busy;
3212 rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
3213 IB_AETH_CREDIT_SHIFT);
3214 goto clr_busy;
3215
3216 op_err:
3217 send_status = IB_WC_REM_OP_ERR;
3218 wc.status = IB_WC_LOC_QP_OP_ERR;
3219 goto err;
3220
3221 inv_err:
3222 send_status =
3223 sqp->ibqp.qp_type == IB_QPT_RC ?
3224 IB_WC_REM_INV_REQ_ERR :
3225 IB_WC_SUCCESS;
3226 wc.status = IB_WC_LOC_QP_OP_ERR;
3227 goto err;
3228
3229 acc_err:
3230 send_status = IB_WC_REM_ACCESS_ERR;
3231 wc.status = IB_WC_LOC_PROT_ERR;
3232 err:
3233 /* responder goes to error state */
3234 rvt_rc_error(qp, wc.status);
3235
3236 serr:
3237 spin_unlock_irqrestore(&qp->r_lock, flags);
3238 serr_no_r_lock:
3239 spin_lock_irqsave(&sqp->s_lock, flags);
3240 rvt_send_complete(sqp, wqe, send_status);
3241 if (sqp->ibqp.qp_type == IB_QPT_RC) {
3242 int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
3243
3244 sqp->s_flags &= ~RVT_S_BUSY;
3245 spin_unlock_irqrestore(&sqp->s_lock, flags);
3246 if (lastwqe) {
3247 struct ib_event ev;
3248
3249 ev.device = sqp->ibqp.device;
3250 ev.element.qp = &sqp->ibqp;
3251 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
3252 sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
3253 }
3254 goto done;
3255 }
3256 clr_busy:
3257 sqp->s_flags &= ~RVT_S_BUSY;
3258 unlock:
3259 spin_unlock_irqrestore(&sqp->s_lock, flags);
3260 done:
3261 rcu_read_unlock();
3262 }
3263 EXPORT_SYMBOL(rvt_ruc_loopback);
3264