/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)

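/*
 * Deliver an asynchronous hardware event to the QP it belongs to. The
 * lookup and reference increment are done under qp_table->lock so the
 * QP cannot disappear while its event callback runs; the final
 * reference drop completes hr_qp->free for hns_roce_qp_free().
 */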
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_event);

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
				     int align, unsigned long *base)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
					   base) ?
		       -ENOMEM :
		       0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
EXPORT_SYMBOL_GPL(to_hns_roce_state);

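/*
 * Register a GSI QP in the QP radix tree only; unlike regular QPs (see
 * hns_roce_qp_alloc()), no QPC/IRRL/TRRL table entries are allocated
 * here because the GSI QP context is kept by the engine itself.
 */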
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
				 struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
		goto err_put_irrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_irrl:

	return ret;
}

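/*
 * Allocate the per-QP context resources: QPC, IRRL and (when the
 * device supports it) TRRL entries in the HEM tables, then make the QP
 * visible to the event path by inserting it into the QP radix tree.
 */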
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
			     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!qpn)
		return -EINVAL;

	hr_qp->qpn = qpn;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "QPC table get failed\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "IRRL table get failed\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "TRRL table get failed\n");
			goto err_put_irrl;
		}
	}

	spin_lock_irq(&qp_table->lock);
	ret = radix_tree_insert(&hr_dev->qp_table_tree,
				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
	spin_unlock_irq(&qp_table->lock);
	if (ret) {
		dev_err(dev, "QPC radix_tree_insert failed\n");
		goto err_put_trrl;
	}

	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&hr_dev->qp_table_tree,
			  hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(hns_roce_qp_remove);

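/*
 * Drop the initial reference taken in hns_roce_qp_alloc()/
 * hns_roce_gsi_qp_alloc(), wait for any outstanding event handlers to
 * finish, and release the HEM table entries for non-GSI QPs.
 */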
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
		if (hr_dev->caps.trrl_entry_sz)
			hns_roce_table_put(hr_dev, &qp_table->trrl_table,
					   hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
		hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_qp_free);

void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
			       int cnt)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (base_qpn < SQP_NUM)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);

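/*
 * Validate the requested receive queue attributes and derive the RQ
 * WQE count, max SGEs per WQE and WQE size (rounded up to powers of
 * two). The adjusted values are written back into @cap.
 */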
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
				struct ib_qp_cap *cap, int is_user, int has_srq,
				struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 max_cnt;

	/* Check that the requested RQ capabilities are within device limits */
	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
			cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	/* If an SRQ is attached, the RQ is unused; zero its parameters */
	if (has_srq) {
		if (cap->max_recv_wr) {
			dev_dbg(dev, "srq no need config max_recv_wr\n");
			return -EINVAL;
		}

		hr_qp->rq.wqe_cnt = hr_qp->rq.max_gs = 0;
	} else {
		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
			dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
			return -EINVAL;
		}

		if (hr_dev->caps.min_wqes)
			max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
		else
			max_cnt = cap->max_recv_wr;

		hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

		if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
			dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
			return -EINVAL;
		}

		max_cnt = max(1U, cap->max_recv_sge);
		hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
		if (hr_dev->caps.max_rq_sg <= 2)
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz);
		else
			hr_qp->rq.wqe_shift =
					ilog2(hr_dev->caps.max_rq_desc_sz
					      * hr_qp->rq.max_gs);
	}

	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

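/*
 * Derive the SQ/SGE/RQ layout of a userspace QP buffer from the sizes
 * passed in the create command, and record the offset of each region
 * inside the umem that userspace registers for this QP.
 */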
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
				     struct ib_qp_cap *cap,
				     struct hns_roce_qp *hr_qp,
				     struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);
	u32 page_size;
	u32 max_cnt;

	/* Sanity check SQ size before proceeding */
	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
	    ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		dev_err(hr_dev->dev, "check SQ size error!\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		dev_err(hr_dev->dev, "SQ sge error! max_send_sge=%d\n",
			cap->max_send_sge);
		return -EINVAL;
	}

	hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2)
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
	hr_qp->sge.sge_shift = 4;

	/* Get buf size; SQ and RQ are each aligned to the page size */
	if (hr_dev->caps.max_sq_sg <= 2) {
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), PAGE_SIZE) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);

		hr_qp->sq.offset = 0;
		hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), PAGE_SIZE);
	} else {
		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
		hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
					     hr_qp->rq.wqe_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
					     hr_qp->sge.sge_shift), page_size) +
				   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
					     hr_qp->sq.wqe_shift), page_size);

		hr_qp->sq.offset = 0;
		if (hr_qp->sge.sge_cnt) {
			hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
						(hr_qp->sq.wqe_cnt <<
						 hr_qp->sq.wqe_shift),
						page_size);
			hr_qp->rq.offset = hr_qp->sge.offset +
					   HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
						hr_qp->sge.sge_shift),
						page_size);
		} else {
			hr_qp->rq.offset = HNS_ROCE_ALOGN_UP(
						(hr_qp->sq.wqe_cnt <<
						 hr_qp->sq.wqe_shift),
						page_size);
		}
	}

	return 0;
}

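/*
 * Size the send queue of a kernel QP: pick the WQE count and SGE
 * layout from the requested capabilities, then lay out SQ, extended
 * SGE space and RQ back to back in a single buffer, each region
 * aligned to the MTT page size.
 */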
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
				       struct ib_qp_cap *cap,
				       struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	u32 page_size;
	u32 max_cnt;
	int size;

	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		dev_err(dev, "SQ WR or sge or inline data error!\n");
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq_max_wqes_per_wr = 1;
	hr_qp->sq_spare_wqes = 0;

	if (hr_dev->caps.min_wqes)
		max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
	else
		max_cnt = cap->max_send_wr;

	hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
	if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
		dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
		return -EINVAL;
	}

	/* Get data_seg numbers */
	max_cnt = max(1U, cap->max_send_sge);
	if (hr_dev->caps.max_sq_sg <= 2)
		hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
	else
		hr_qp->sq.max_gs = max_cnt;

	if (hr_qp->sq.max_gs > 2) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							(hr_qp->sq.max_gs - 2));
		hr_qp->sge.sge_shift = 4;
	}

	/* UD (GSI) SQ WQEs place all of their SGEs in the extended SGE space */
	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
							hr_qp->sq.max_gs);
		hr_qp->sge.sge_shift = 4;
	}

	/* Get buf size; SQ, extended SGE and RQ are each aligned to page_size */
	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
	hr_qp->sq.offset = 0;
	size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
				 page_size);

	if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
		hr_qp->sge.offset = size;
		size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
					  hr_qp->sge.sge_shift, page_size);
	}

	hr_qp->rq.offset = size;
	size += HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift),
				  page_size);
	hr_qp->buff_size = size;

	/* Report the WR and SGE counts supported on the send side */
	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
		return 0;

	return 1;
}

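/*
 * Common QP creation path shared by RC and GSI QPs. It sizes the work
 * queues, sets up the WQE buffer and its MTT (user memory for
 * userspace QPs, a kernel buffer otherwise), maps or allocates record
 * doorbells where supported, reserves a QPN unless a special QPN
 * (sqpn) was supplied, and finally installs the QP context.
 */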
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata, unsigned long sqpn,
				     struct hns_roce_qp *hr_qp)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_qp ucmd;
	struct hns_roce_ib_create_qp_resp resp = {};
	unsigned long qpn = 0;
	int ret = 0;
	u32 page_shift;
	u32 npages;
	int i;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
	else
		hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);

	ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
				   !!init_attr->srq, hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_set_rq_size failed\n");
		goto err_out;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
		/* allocate recv inline buf */
		hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
					     sizeof(struct hns_roce_rinl_wqe),
					     GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list) {
			ret = -ENOMEM;
			goto err_out;
		}

		hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;

		/* First, allocate one contiguous buffer of SGE space for all WQEs */
		hr_qp->rq_inl_buf.wqe_list[0].sg_list =
					kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
					       init_attr->cap.max_recv_sge *
					       sizeof(struct hns_roce_rinl_sge),
					       GFP_KERNEL);
		if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
			ret = -ENOMEM;
			goto err_wqe_list;
		}

		for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
			/* Then point each WQE's sg_list into that buffer */
			hr_qp->rq_inl_buf.wqe_list[i].sg_list =
				&hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
				init_attr->cap.max_recv_sge];
	}

	if (ib_pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "ib_copy_from_udata error for create qp\n");
			ret = -EFAULT;
			goto err_rq_sge_list;
		}

		ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
						&ucmd);
		if (ret) {
			dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
			goto err_rq_sge_list;
		}

		hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
					  ucmd.buf_addr, hr_qp->buff_size, 0,
					  0);
		if (IS_ERR(hr_qp->umem)) {
			dev_err(dev, "ib_umem_get error for create qp\n");
			ret = PTR_ERR(hr_qp->umem);
			goto err_rq_sge_list;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		if (hr_dev->caps.mtt_buf_pg_sz) {
			npages = (ib_umem_page_count(hr_qp->umem) +
				  (1 << hr_dev->caps.mtt_buf_pg_sz) - 1) /
				 (1 << hr_dev->caps.mtt_buf_pg_sz);
			page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
			ret = hns_roce_mtt_init(hr_dev, npages,
						page_shift,
						&hr_qp->mtt);
		} else {
			ret = hns_roce_mtt_init(hr_dev,
						ib_umem_page_count(hr_qp->umem),
						hr_qp->umem->page_shift,
						&hr_qp->mtt);
		}
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for create qp\n");
			goto err_buf;
		}

		ret = hns_roce_ib_umem_write_mtt(hr_dev, &hr_qp->mtt,
						 hr_qp->umem);
		if (ret) {
			dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
			goto err_mtt;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr)) {
			ret = hns_roce_db_map_user(
					to_hr_ucontext(ib_pd->uobject->context),
					ucmd.sdb_addr, &hr_qp->sdb);
			if (ret) {
				dev_err(dev, "sq record doorbell map failed!\n");
				goto err_mtt;
			}

			/* indicate kernel supports sq record db */
			resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
			hr_qp->sdb_en = 1;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_db_map_user(
					to_hr_ucontext(ib_pd->uobject->context),
					ucmd.db_addr, &hr_qp->rdb);
			if (ret) {
				dev_err(dev, "rq record doorbell map failed!\n");
				goto err_sq_dbmap;
			}
		}
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			dev_err(dev, "init_attr->create_flags error!\n");
			ret = -EINVAL;
			goto err_rq_sge_list;
		}

		/* Set SQ size */
		ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
						  hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
			goto err_rq_sge_list;
		}

		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    hns_roce_qp_has_rq(init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				dev_err(dev, "rq record doorbell alloc failed!\n");
				goto err_rq_sge_list;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->rdb_en = 1;
		}

		/* Allocate QP buf */
		page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
		if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
				       (1 << page_shift) * 2,
				       &hr_qp->hr_buf, page_shift)) {
			dev_err(dev, "hns_roce_buf_alloc error!\n");
			ret = -ENOMEM;
			goto err_db;
		}

		hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
		/* Write MTT */
		ret = hns_roce_mtt_init(hr_dev, hr_qp->hr_buf.npages,
					hr_qp->hr_buf.page_shift, &hr_qp->mtt);
		if (ret) {
			dev_err(dev, "hns_roce_mtt_init error for kernel create qp\n");
			goto err_buf;
		}

		ret = hns_roce_buf_write_mtt(hr_dev, &hr_qp->mtt,
					     &hr_qp->hr_buf);
		if (ret) {
			dev_err(dev, "hns_roce_buf_write_mtt error for kernel create qp\n");
			goto err_mtt;
		}

		hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
					       GFP_KERNEL);
		if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
			ret = -ENOMEM;
			goto err_wrid;
		}
	}

	if (sqpn) {
		qpn = sqpn;
	} else {
		/* Get QPN */
		ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
		if (ret) {
			dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
			goto err_wrid;
		}
	}

	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		/*
		 * In the v1 engine, the GSI QP context lives in the RoCE
		 * engine's registers.
		 */
		ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	} else {
		ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
		if (ret) {
			dev_err(dev, "hns_roce_qp_alloc failed!\n");
			goto err_qpn;
		}
	}

	if (sqpn)
		hr_qp->doorbell_qpn = 1;
	else
		hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);

	if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {

		/* indicate kernel supports rq record db */
		resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_qp;

		hr_qp->rdb_en = 1;
	}
	hr_qp->event = hns_roce_ib_qp_event;

	return 0;

err_qp:
	if (init_attr->qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		hns_roce_qp_remove(hr_dev, hr_qp);
	else
		hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
	if (!sqpn)
		hns_roce_release_range_qp(hr_dev, qpn, 1);

err_wrid:
	if (ib_pd->uobject) {
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_rq(init_attr))
			hns_roce_db_unmap_user(
					to_hr_ucontext(ib_pd->uobject->context),
					&hr_qp->rdb);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
	}

err_sq_dbmap:
	if (ib_pd->uobject)
		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		    (udata->inlen >= sizeof(ucmd)) &&
		    (udata->outlen >= sizeof(resp)) &&
		    hns_roce_qp_has_sq(init_attr))
			hns_roce_db_unmap_user(
					to_hr_ucontext(ib_pd->uobject->context),
					&hr_qp->sdb);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

err_buf:
	if (ib_pd->uobject)
		ib_umem_release(hr_qp->umem);
	else
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);

err_db:
	if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) &&
	    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
		hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_rq_sge_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);

err_wqe_list:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		kfree(hr_qp->rq_inl_buf.wqe_list);

err_out:
	return ret;
}

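/*
 * ib_create_qp() entry point. Only RC and GSI QPs are supported; GSI
 * QPs may not be created from userspace and use a fixed QPN instead of
 * one allocated from the QPN bitmap.
 */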
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_sqp *hr_sqp;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
						hr_qp);
		if (ret) {
			dev_err(dev, "Create RC QP failed\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		hr_qp->ibqp.qp_num = hr_qp->qpn;

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject) {
			dev_err(dev, "not support usr space GSI\n");
			return ERR_PTR(-EINVAL);
		}

		hr_sqp = kzalloc(sizeof(*hr_sqp), GFP_KERNEL);
		if (!hr_sqp)
			return ERR_PTR(-ENOMEM);

		hr_qp = &hr_sqp->hr_qp;
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		/* In hw v1 the GSI QPN is derived from the physical port */
		if (hr_dev->caps.max_sq_sg <= 2)
			hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
					     hr_dev->iboe.phy_port[hr_qp->port];
		else
			hr_qp->ibqp.qp_num = 1;

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp->ibqp.qp_num, hr_qp);
		if (ret) {
			dev_err(dev, "Create GSI QP failed!\n");
			kfree(hr_sqp);
			return ERR_PTR(ret);
		}

		break;
	}
	default: {
		dev_err(dev, "not support QP type %d\n", init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}
	}

	return &hr_qp->ibqp;
}
EXPORT_SYMBOL_GPL(hns_roce_create_qp);

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}
EXPORT_SYMBOL_GPL(to_hr_qp_type);

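/*
 * Validate a modify-QP request (state transition, port, pkey index,
 * path MTU and RDMA read limits) before handing it to the hardware
 * specific modify_qp hook.
 */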
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct device *dev = hr_dev->dev;
	int ret = -EINVAL;
	int p;
	enum ib_mtu active_mtu;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ?
		    attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->sdb_en == 1) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
			hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			dev_warn(dev, "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_ETHERNET)) {
		dev_err(dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		dev_err(dev, "attr port_num invalid.attr->port_num=%d\n",
			attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			dev_err(dev, "attr pkey_index invalid.attr->pkey_index=%d\n",
				attr->pkey_index);
			goto out;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

		if ((hr_dev->caps.max_mtu == IB_MTU_4096 &&
		    attr->path_mtu > IB_MTU_4096) ||
		    (hr_dev->caps.max_mtu == IB_MTU_2048 &&
		    attr->path_mtu > IB_MTU_2048) ||
		    attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > active_mtu) {
			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
				attr->path_mtu);
			goto out;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		dev_err(dev, "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->caps.min_wqes) {
			ret = -EPERM;
			dev_err(dev, "cur_state=%d new_state=%d\n", cur_state,
				new_state);
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

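/*
 * Lock the send and receive CQs of a QP together. When the two CQs
 * differ they are always taken in ascending CQN order so that
 * concurrent callers cannot deadlock against each other.
 */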
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_lock_cqs);

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);

static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_recv_wqe);

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
EXPORT_SYMBOL_GPL(get_send_wqe);

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
					(n << hr_qp->sge.sge_shift));
}
EXPORT_SYMBOL_GPL(get_send_extend_sge);

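/*
 * Check whether posting @nreq more work requests would overflow the
 * work queue. The fast path compares head and tail without locking;
 * only on apparent overflow is the owning CQ's lock taken to re-read a
 * stable head value.
 */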
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->max_post))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->max_post;
}
EXPORT_SYMBOL_GPL(hns_roce_wq_overflow);

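/*
 * Initialize the QP table: the lookup radix tree and the QPN bitmap,
 * with the lowest QPNs reserved for the special (SQP/GSI) QPs.
 */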
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int ret;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);

	/* Each port reserves two SQPs; with six ports that is 12 in total */
	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, SQP_NUM,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}