1 /*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39 #include <linux/interrupt.h>
40 #include <linux/types.h>
41 #include <linux/pci.h>
42 #include <linux/netdevice.h>
43 #include <linux/if_ether.h>
44
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/ib_addr.h>
49 #include <rdma/ib_mad.h>
50 #include <rdma/ib_cache.h>
51
52 #include "bnxt_ulp.h"
53
54 #include "roce_hsi.h"
55 #include "qplib_res.h"
56 #include "qplib_sp.h"
57 #include "qplib_fp.h"
58 #include "qplib_rcfw.h"
59
60 #include "bnxt_re.h"
61 #include "ib_verbs.h"
62 #include <rdma/bnxt_re-abi.h>
63
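/* Translate IB verbs access flags into the qplib flags used by the firmware. */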
64 static int __from_ib_access_flags(int iflags)
65 {
66 int qflags = 0;
67
68 if (iflags & IB_ACCESS_LOCAL_WRITE)
69 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
70 if (iflags & IB_ACCESS_REMOTE_READ)
71 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
72 if (iflags & IB_ACCESS_REMOTE_WRITE)
73 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
74 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
75 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
76 if (iflags & IB_ACCESS_MW_BIND)
77 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
78 if (iflags & IB_ZERO_BASED)
79 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
80 if (iflags & IB_ACCESS_ON_DEMAND)
81 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
82 return qflags;
83 };
84
85 static enum ib_access_flags __to_ib_access_flags(int qflags)
86 {
87 enum ib_access_flags iflags = 0;
88
89 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
90 iflags |= IB_ACCESS_LOCAL_WRITE;
91 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
92 iflags |= IB_ACCESS_REMOTE_WRITE;
93 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
94 iflags |= IB_ACCESS_REMOTE_READ;
95 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
96 iflags |= IB_ACCESS_REMOTE_ATOMIC;
97 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
98 iflags |= IB_ACCESS_MW_BIND;
99 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
100 iflags |= IB_ZERO_BASED;
101 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
102 iflags |= IB_ACCESS_ON_DEMAND;
103 return iflags;
104 };
105
106 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
107 struct bnxt_qplib_sge *sg_list, int num)
108 {
109 int i, total = 0;
110
111 for (i = 0; i < num; i++) {
112 sg_list[i].addr = ib_sg_list[i].addr;
113 sg_list[i].lkey = ib_sg_list[i].lkey;
114 sg_list[i].size = ib_sg_list[i].length;
115 total += sg_list[i].size;
116 }
117 return total;
118 }
119
120 /* Device */
121 struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
122 {
123 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
124 struct net_device *netdev = NULL;
125
126 rcu_read_lock();
127 if (rdev)
128 netdev = rdev->netdev;
129 if (netdev)
130 dev_hold(netdev);
131
132 rcu_read_unlock();
133 return netdev;
134 }
135
136 int bnxt_re_query_device(struct ib_device *ibdev,
137 struct ib_device_attr *ib_attr,
138 struct ib_udata *udata)
139 {
140 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
141 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
142
143 memset(ib_attr, 0, sizeof(*ib_attr));
144 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
145 min(sizeof(dev_attr->fw_ver),
146 sizeof(ib_attr->fw_ver)));
147 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
148 (u8 *)&ib_attr->sys_image_guid);
149 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
150 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
151
152 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
153 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
154 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
155 ib_attr->max_qp = dev_attr->max_qp;
156 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
157 ib_attr->device_cap_flags =
158 IB_DEVICE_CURR_QP_STATE_MOD
159 | IB_DEVICE_RC_RNR_NAK_GEN
160 | IB_DEVICE_SHUTDOWN_PORT
161 | IB_DEVICE_SYS_IMAGE_GUID
162 | IB_DEVICE_LOCAL_DMA_LKEY
163 | IB_DEVICE_RESIZE_MAX_WR
164 | IB_DEVICE_PORT_ACTIVE_EVENT
165 | IB_DEVICE_N_NOTIFY_CQ
166 | IB_DEVICE_MEM_WINDOW
167 | IB_DEVICE_MEM_WINDOW_TYPE_2B
168 | IB_DEVICE_MEM_MGT_EXTENSIONS;
169 ib_attr->max_send_sge = dev_attr->max_qp_sges;
170 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
171 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
172 ib_attr->max_cq = dev_attr->max_cq;
173 ib_attr->max_cqe = dev_attr->max_cq_wqes;
174 ib_attr->max_mr = dev_attr->max_mr;
175 ib_attr->max_pd = dev_attr->max_pd;
176 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
177 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
178 ib_attr->atomic_cap = IB_ATOMIC_NONE;
179 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
180
181 ib_attr->max_ee_rd_atom = 0;
182 ib_attr->max_res_rd_atom = 0;
183 ib_attr->max_ee_init_rd_atom = 0;
184 ib_attr->max_ee = 0;
185 ib_attr->max_rdd = 0;
186 ib_attr->max_mw = dev_attr->max_mw;
187 ib_attr->max_raw_ipv6_qp = 0;
188 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
189 ib_attr->max_mcast_grp = 0;
190 ib_attr->max_mcast_qp_attach = 0;
191 ib_attr->max_total_mcast_qp_attach = 0;
192 ib_attr->max_ah = dev_attr->max_ah;
193
194 ib_attr->max_fmr = 0;
195 ib_attr->max_map_per_fmr = 0;
196
197 ib_attr->max_srq = dev_attr->max_srq;
198 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
199 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
200
201 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
202
203 ib_attr->max_pkeys = 1;
204 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
205 return 0;
206 }
207
208 int bnxt_re_modify_device(struct ib_device *ibdev,
209 int device_modify_mask,
210 struct ib_device_modify *device_modify)
211 {
212 switch (device_modify_mask) {
213 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
214 /* Modifying the GUID requires modifying the GID table */
215 /* GUID should be treated as read-only */
216 break;
217 case IB_DEVICE_MODIFY_NODE_DESC:
218 /* Node Desc should be made as READ-ONLY */
219 break;
220 default:
221 break;
222 }
223 return 0;
224 }
225
226 /* Port */
227 int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
228 struct ib_port_attr *port_attr)
229 {
230 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
231 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
232
233 memset(port_attr, 0, sizeof(*port_attr));
234
235 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
236 port_attr->state = IB_PORT_ACTIVE;
237 port_attr->phys_state = 5;
238 } else {
239 port_attr->state = IB_PORT_DOWN;
240 port_attr->phys_state = 3;
241 }
242 port_attr->max_mtu = IB_MTU_4096;
243 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
244 port_attr->gid_tbl_len = dev_attr->max_sgid;
245 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
246 IB_PORT_DEVICE_MGMT_SUP |
247 IB_PORT_VENDOR_CLASS_SUP;
248 port_attr->ip_gids = true;
249
250 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
251 port_attr->bad_pkey_cntr = 0;
252 port_attr->qkey_viol_cntr = 0;
253 port_attr->pkey_tbl_len = dev_attr->max_pkey;
254 port_attr->lid = 0;
255 port_attr->sm_lid = 0;
256 port_attr->lmc = 0;
257 port_attr->max_vl_num = 4;
258 port_attr->sm_sl = 0;
259 port_attr->subnet_timeout = 0;
260 port_attr->init_type_reply = 0;
261 port_attr->active_speed = rdev->active_speed;
262 port_attr->active_width = rdev->active_width;
263
264 return 0;
265 }
266
267 int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
268 struct ib_port_immutable *immutable)
269 {
270 struct ib_port_attr port_attr;
271
272 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
273 return -EINVAL;
274
275 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
276 immutable->gid_tbl_len = port_attr.gid_tbl_len;
277 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
278 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
279 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
280 return 0;
281 }
282
283 void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
284 {
285 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
286
287 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
288 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
289 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
290 }
291
292 int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
293 u16 index, u16 *pkey)
294 {
295 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
296
297 /* Ignore port_num */
298
299 memset(pkey, 0, sizeof(*pkey));
300 return bnxt_qplib_get_pkey(&rdev->qplib_res,
301 &rdev->qplib_res.pkey_tbl, index, pkey);
302 }
303
304 int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
305 int index, union ib_gid *gid)
306 {
307 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
308 int rc = 0;
309
310 /* Ignore port_num */
311 memset(gid, 0, sizeof(*gid));
312 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
313 &rdev->qplib_res.sgid_tbl, index,
314 (struct bnxt_qplib_gid *)gid);
315 return rc;
316 }
317
318 int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
319 {
320 int rc = 0;
321 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
322 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
323 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
324 struct bnxt_qplib_gid *gid_to_del;
325
326 /* Delete the entry from the hardware */
327 ctx = *context;
328 if (!ctx)
329 return -EINVAL;
330
331 if (sgid_tbl && sgid_tbl->active) {
332 if (ctx->idx >= sgid_tbl->max)
333 return -EINVAL;
334 gid_to_del = &sgid_tbl->tbl[ctx->idx];
335 /* DEL_GID is called in WQ context (netdevice_event_work_handler)
336 * or via the ib_unregister_device path. In the former case QP1
337 * may not be destroyed yet, in which case just return as FW
338 * needs that entry to be present and will fail its deletion.
339 * We could get invoked again after QP1 is destroyed OR get an
340 * ADD_GID call with a different GID value for the same index
341 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
342 */
343 if (ctx->idx == 0 &&
344 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
345 ctx->refcnt == 1 && rdev->qp1_sqp) {
346 dev_dbg(rdev_to_dev(rdev),
347 "Trying to delete GID0 while QP1 is alive\n");
348 return -EFAULT;
349 }
350 ctx->refcnt--;
351 if (!ctx->refcnt) {
352 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
353 if (rc) {
354 dev_err(rdev_to_dev(rdev),
355 "Failed to remove GID: %#x", rc);
356 } else {
357 ctx_tbl = sgid_tbl->ctx;
358 ctx_tbl[ctx->idx] = NULL;
359 kfree(ctx);
360 }
361 }
362 } else {
363 return -EINVAL;
364 }
365 return rc;
366 }
367
368 int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
369 {
370 int rc;
371 u32 tbl_idx = 0;
372 u16 vlan_id = 0xFFFF;
373 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
374 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
375 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
376
377 if ((attr->ndev) && is_vlan_dev(attr->ndev))
378 vlan_id = vlan_dev_vlan_id(attr->ndev);
379
380 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
381 rdev->qplib_res.netdev->dev_addr,
382 vlan_id, true, &tbl_idx);
383 if (rc == -EALREADY) {
384 ctx_tbl = sgid_tbl->ctx;
385 ctx_tbl[tbl_idx]->refcnt++;
386 *context = ctx_tbl[tbl_idx];
387 return 0;
388 }
389
390 if (rc < 0) {
391 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
392 return rc;
393 }
394
395 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
396 if (!ctx)
397 return -ENOMEM;
398 ctx_tbl = sgid_tbl->ctx;
399 ctx->idx = tbl_idx;
400 ctx->refcnt = 1;
401 ctx_tbl[tbl_idx] = ctx;
402 *context = ctx;
403
404 return rc;
405 }
406
407 enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
408 u8 port_num)
409 {
410 return IB_LINK_LAYER_ETHERNET;
411 }
412
413 #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
414
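/* Pre-build the type-1 memory-window bind WQE used for fencing on this PD. */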
415 static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
416 {
417 struct bnxt_re_fence_data *fence = &pd->fence;
418 struct ib_mr *ib_mr = &fence->mr->ib_mr;
419 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
420
421 memset(wqe, 0, sizeof(*wqe));
422 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
423 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
424 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
425 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
426 wqe->bind.zero_based = false;
427 wqe->bind.parent_l_key = ib_mr->lkey;
428 wqe->bind.va = (u64)(unsigned long)fence->va;
429 wqe->bind.length = fence->size;
430 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
431 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
432
433 /* Save the initial rkey in fence structure for now;
434 * wqe->bind.r_key will be set at (re)bind time.
435 */
436 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
437 }
438
439 static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
440 {
441 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
442 qplib_qp);
443 struct ib_pd *ib_pd = qp->ib_qp.pd;
444 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
445 struct bnxt_re_fence_data *fence = &pd->fence;
446 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
447 struct bnxt_qplib_swqe wqe;
448 int rc;
449
450 memcpy(&wqe, fence_wqe, sizeof(wqe));
451 wqe.bind.r_key = fence->bind_rkey;
452 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
453
454 dev_dbg(rdev_to_dev(qp->rdev),
455 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
456 wqe.bind.r_key, qp->qplib_qp.id, pd);
457 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
458 if (rc) {
459 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
460 return rc;
461 }
462 bnxt_qplib_post_send_db(&qp->qplib_qp);
463
464 return rc;
465 }
466
467 static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
468 {
469 struct bnxt_re_fence_data *fence = &pd->fence;
470 struct bnxt_re_dev *rdev = pd->rdev;
471 struct device *dev = &rdev->en_dev->pdev->dev;
472 struct bnxt_re_mr *mr = fence->mr;
473
474 if (fence->mw) {
475 bnxt_re_dealloc_mw(fence->mw);
476 fence->mw = NULL;
477 }
478 if (mr) {
479 if (mr->ib_mr.rkey)
480 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
481 true);
482 if (mr->ib_mr.lkey)
483 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
484 kfree(mr);
485 fence->mr = NULL;
486 }
487 if (fence->dma_addr) {
488 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
489 DMA_BIDIRECTIONAL);
490 fence->dma_addr = 0;
491 }
492 }
493
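/* DMA-map the fence buffer and set up the fence MR/MW for a kernel PD. */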
494 static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
495 {
496 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
497 struct bnxt_re_fence_data *fence = &pd->fence;
498 struct bnxt_re_dev *rdev = pd->rdev;
499 struct device *dev = &rdev->en_dev->pdev->dev;
500 struct bnxt_re_mr *mr = NULL;
501 dma_addr_t dma_addr = 0;
502 struct ib_mw *mw;
503 u64 pbl_tbl;
504 int rc;
505
506 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
507 DMA_BIDIRECTIONAL);
508 rc = dma_mapping_error(dev, dma_addr);
509 if (rc) {
510 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
511 rc = -EIO;
512 fence->dma_addr = 0;
513 goto fail;
514 }
515 fence->dma_addr = dma_addr;
516
517 /* Allocate a MR */
518 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
519 if (!mr) {
520 rc = -ENOMEM;
521 goto fail;
522 }
523 fence->mr = mr;
524 mr->rdev = rdev;
525 mr->qplib_mr.pd = &pd->qplib_pd;
526 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
527 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
528 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
529 if (rc) {
530 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
531 goto fail;
532 }
533
534 /* Register MR */
535 mr->ib_mr.lkey = mr->qplib_mr.lkey;
536 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
537 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
538 pbl_tbl = dma_addr;
539 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
540 BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
541 if (rc) {
542 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
543 goto fail;
544 }
545 mr->ib_mr.rkey = mr->qplib_mr.rkey;
546
547 /* Create a fence MW only for kernel consumers */
548 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
549 if (IS_ERR(mw)) {
550 dev_err(rdev_to_dev(rdev),
551 "Failed to create fence-MW for PD: %p\n", pd);
552 rc = PTR_ERR(mw);
553 goto fail;
554 }
555 fence->mw = mw;
556
557 bnxt_re_create_fence_wqe(pd);
558 return 0;
559
560 fail:
561 bnxt_re_destroy_fence_mr(pd);
562 return rc;
563 }
564
565 /* Protection Domains */
566 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
567 {
568 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
569 struct bnxt_re_dev *rdev = pd->rdev;
570 int rc;
571
572 bnxt_re_destroy_fence_mr(pd);
573
574 if (pd->qplib_pd.id) {
575 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
576 &rdev->qplib_res.pd_tbl,
577 &pd->qplib_pd);
578 if (rc)
579 dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
580 }
581
582 kfree(pd);
583 return 0;
584 }
585
586 struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
587 struct ib_ucontext *ucontext,
588 struct ib_udata *udata)
589 {
590 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
591 struct bnxt_re_ucontext *ucntx = container_of(ucontext,
592 struct bnxt_re_ucontext,
593 ib_uctx);
594 struct bnxt_re_pd *pd;
595 int rc;
596
597 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
598 if (!pd)
599 return ERR_PTR(-ENOMEM);
600
601 pd->rdev = rdev;
602 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
603 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
604 rc = -ENOMEM;
605 goto fail;
606 }
607
608 if (udata) {
609 struct bnxt_re_pd_resp resp;
610
611 if (!ucntx->dpi.dbr) {
612 /* Allocate DPI in alloc_pd so that ibv_devinfo and
613 * similar applications do not fail when DPIs
614 * are depleted.
615 */
616 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
617 &ucntx->dpi, ucntx)) {
618 rc = -ENOMEM;
619 goto dbfail;
620 }
621 }
622
623 resp.pdid = pd->qplib_pd.id;
624 /* Still allow mapping this DBR to the new user PD. */
625 resp.dpi = ucntx->dpi.dpi;
626 resp.dbr = (u64)ucntx->dpi.umdbr;
627
628 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
629 if (rc) {
630 dev_err(rdev_to_dev(rdev),
631 "Failed to copy user response\n");
632 goto dbfail;
633 }
634 }
635
636 if (!udata)
637 if (bnxt_re_create_fence_mr(pd))
638 dev_warn(rdev_to_dev(rdev),
639 "Failed to create Fence-MR\n");
640 return &pd->ib_pd;
641 dbfail:
642 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
643 &pd->qplib_pd);
644 fail:
645 kfree(pd);
646 return ERR_PTR(rc);
647 }
648
649 /* Address Handles */
650 int bnxt_re_destroy_ah(struct ib_ah *ib_ah)
651 {
652 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
653 struct bnxt_re_dev *rdev = ah->rdev;
654 int rc;
655
656 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah);
657 if (rc) {
658 dev_err(rdev_to_dev(rdev), "Failed to destroy HW AH");
659 return rc;
660 }
661 kfree(ah);
662 return 0;
663 }
664
665 struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
666 struct rdma_ah_attr *ah_attr,
667 struct ib_udata *udata)
668 {
669 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
670 struct bnxt_re_dev *rdev = pd->rdev;
671 struct bnxt_re_ah *ah;
672 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
673 int rc;
674 u8 nw_type;
675
676 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
677 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
678 return ERR_PTR(-EINVAL);
679 }
680 ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
681 if (!ah)
682 return ERR_PTR(-ENOMEM);
683
684 ah->rdev = rdev;
685 ah->qplib_ah.pd = &pd->qplib_pd;
686
687 /* Supply the configuration for the HW */
688 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
689 sizeof(union ib_gid));
690 /*
691 * If RoCE V2 is enabled, stack will have two entries for
692 * each GID entry. Avoid this duplicate entry in HW by dividing
693 * the GID index by 2 for RoCE V2
694 */
695 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
696 ah->qplib_ah.host_sgid_index = grh->sgid_index;
697 ah->qplib_ah.traffic_class = grh->traffic_class;
698 ah->qplib_ah.flow_label = grh->flow_label;
699 ah->qplib_ah.hop_limit = grh->hop_limit;
700 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
701 if (ib_pd->uobject &&
702 !rdma_is_multicast_addr((struct in6_addr *)
703 grh->dgid.raw) &&
704 !rdma_link_local_addr((struct in6_addr *)
705 grh->dgid.raw)) {
706 const struct ib_gid_attr *sgid_attr;
707
708 sgid_attr = grh->sgid_attr;
709 /* Get network header type for this GID */
710 nw_type = rdma_gid_attr_network_type(sgid_attr);
711 switch (nw_type) {
712 case RDMA_NETWORK_IPV4:
713 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
714 break;
715 case RDMA_NETWORK_IPV6:
716 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
717 break;
718 default:
719 ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
720 break;
721 }
722 }
723
724 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
725 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
726 if (rc) {
727 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
728 goto fail;
729 }
730
731 /* Write AVID to shared page. */
732 if (ib_pd->uobject) {
733 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
734 struct bnxt_re_ucontext *uctx;
735 unsigned long flag;
736 u32 *wrptr;
737
738 uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
739 spin_lock_irqsave(&uctx->sh_lock, flag);
740 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
741 *wrptr = ah->qplib_ah.id;
742 wmb(); /* make sure cache is updated. */
743 spin_unlock_irqrestore(&uctx->sh_lock, flag);
744 }
745
746 return &ah->ib_ah;
747
748 fail:
749 kfree(ah);
750 return ERR_PTR(rc);
751 }
752
753 int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
754 {
755 return 0;
756 }
757
758 int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
759 {
760 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
761
762 ah_attr->type = ib_ah->type;
763 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
764 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
765 rdma_ah_set_grh(ah_attr, NULL, 0,
766 ah->qplib_ah.host_sgid_index,
767 0, ah->qplib_ah.traffic_class);
768 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
769 rdma_ah_set_port_num(ah_attr, 1);
770 rdma_ah_set_static_rate(ah_attr, 0);
771 return 0;
772 }
773
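/* Take both CQ locks of a QP; if send and receive share a CQ, lock it once. */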
774 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
775 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
776 {
777 unsigned long flags;
778
779 spin_lock_irqsave(&qp->scq->cq_lock, flags);
780 if (qp->rcq != qp->scq)
781 spin_lock(&qp->rcq->cq_lock);
782 else
783 __acquire(&qp->rcq->cq_lock);
784
785 return flags;
786 }
787
788 void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
789 unsigned long flags)
790 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
791 {
792 if (qp->rcq != qp->scq)
793 spin_unlock(&qp->rcq->cq_lock);
794 else
795 __release(&qp->rcq->cq_lock);
796 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
797 }
798
799 /* Queue Pairs */
800 int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
801 {
802 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
803 struct bnxt_re_dev *rdev = qp->rdev;
804 int rc;
805 unsigned int flags;
806
807 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
808 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
809 if (rc) {
810 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
811 return rc;
812 }
813
814 flags = bnxt_re_lock_cqs(qp);
815 bnxt_qplib_clean_qp(&qp->qplib_qp);
816 bnxt_re_unlock_cqs(qp, flags);
817 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
818
819 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
820 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
821 &rdev->sqp_ah->qplib_ah);
822 if (rc) {
823 dev_err(rdev_to_dev(rdev),
824 "Failed to destroy HW AH for shadow QP");
825 return rc;
826 }
827
828 bnxt_qplib_clean_qp(&qp->qplib_qp);
829 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
830 &rdev->qp1_sqp->qplib_qp);
831 if (rc) {
832 dev_err(rdev_to_dev(rdev),
833 "Failed to destroy Shadow QP");
834 return rc;
835 }
836 bnxt_qplib_free_qp_res(&rdev->qplib_res,
837 &rdev->qp1_sqp->qplib_qp);
838 mutex_lock(&rdev->qp_lock);
839 list_del(&rdev->qp1_sqp->list);
840 atomic_dec(&rdev->qp_count);
841 mutex_unlock(&rdev->qp_lock);
842
843 kfree(rdev->sqp_ah);
844 kfree(rdev->qp1_sqp);
845 rdev->qp1_sqp = NULL;
846 rdev->sqp_ah = NULL;
847 }
848
849 if (!IS_ERR_OR_NULL(qp->rumem))
850 ib_umem_release(qp->rumem);
851 if (!IS_ERR_OR_NULL(qp->sumem))
852 ib_umem_release(qp->sumem);
853
854 mutex_lock(&rdev->qp_lock);
855 list_del(&qp->list);
856 atomic_dec(&rdev->qp_count);
857 mutex_unlock(&rdev->qp_lock);
858 kfree(qp);
859 return 0;
860 }
861
862 static u8 __from_ib_qp_type(enum ib_qp_type type)
863 {
864 switch (type) {
865 case IB_QPT_GSI:
866 return CMDQ_CREATE_QP1_TYPE_GSI;
867 case IB_QPT_RC:
868 return CMDQ_CREATE_QP_TYPE_RC;
869 case IB_QPT_UD:
870 return CMDQ_CREATE_QP_TYPE_UD;
871 default:
872 return IB_QPT_MAX;
873 }
874 }
875
876 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
877 struct bnxt_re_qp *qp, struct ib_udata *udata)
878 {
879 struct bnxt_re_qp_req ureq;
880 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
881 struct ib_umem *umem;
882 int bytes = 0;
883 struct ib_ucontext *context = pd->ib_pd.uobject->context;
884 struct bnxt_re_ucontext *cntx = container_of(context,
885 struct bnxt_re_ucontext,
886 ib_uctx);
887 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
888 return -EFAULT;
889
890 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
891 /* Consider mapping PSN search memory only for RC QPs. */
892 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC)
893 bytes += (qplib_qp->sq.max_wqe * sizeof(struct sq_psn_search));
894 bytes = PAGE_ALIGN(bytes);
895 umem = ib_umem_get(context, ureq.qpsva, bytes,
896 IB_ACCESS_LOCAL_WRITE, 1);
897 if (IS_ERR(umem))
898 return PTR_ERR(umem);
899
900 qp->sumem = umem;
901 qplib_qp->sq.sglist = umem->sg_head.sgl;
902 qplib_qp->sq.nmap = umem->nmap;
903 qplib_qp->qp_handle = ureq.qp_handle;
904
905 if (!qp->qplib_qp.srq) {
906 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
907 bytes = PAGE_ALIGN(bytes);
908 umem = ib_umem_get(context, ureq.qprva, bytes,
909 IB_ACCESS_LOCAL_WRITE, 1);
910 if (IS_ERR(umem))
911 goto rqfail;
912 qp->rumem = umem;
913 qplib_qp->rq.sglist = umem->sg_head.sgl;
914 qplib_qp->rq.nmap = umem->nmap;
915 }
916
917 qplib_qp->dpi = &cntx->dpi;
918 return 0;
919 rqfail:
920 ib_umem_release(qp->sumem);
921 qp->sumem = NULL;
922 qplib_qp->sq.sglist = NULL;
923 qplib_qp->sq.nmap = 0;
924
925 return PTR_ERR(umem);
926 }
927
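/* Create the address handle used by the shadow QP that relays QP1 traffic. */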
928 static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
929 (struct bnxt_re_pd *pd,
930 struct bnxt_qplib_res *qp1_res,
931 struct bnxt_qplib_qp *qp1_qp)
932 {
933 struct bnxt_re_dev *rdev = pd->rdev;
934 struct bnxt_re_ah *ah;
935 union ib_gid sgid;
936 int rc;
937
938 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
939 if (!ah)
940 return NULL;
941
942 ah->rdev = rdev;
943 ah->qplib_ah.pd = &pd->qplib_pd;
944
945 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
946 if (rc)
947 goto fail;
948
949 /* supply the dgid data same as sgid */
950 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
951 sizeof(union ib_gid));
952 ah->qplib_ah.sgid_index = 0;
953
954 ah->qplib_ah.traffic_class = 0;
955 ah->qplib_ah.flow_label = 0;
956 ah->qplib_ah.hop_limit = 1;
957 ah->qplib_ah.sl = 0;
958 /* Have DMAC same as SMAC */
959 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
960
961 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah);
962 if (rc) {
963 dev_err(rdev_to_dev(rdev),
964 "Failed to allocate HW AH for Shadow QP");
965 goto fail;
966 }
967
968 return ah;
969
970 fail:
971 kfree(ah);
972 return NULL;
973 }
974
975 static struct bnxt_re_qp *bnxt_re_create_shadow_qp
976 (struct bnxt_re_pd *pd,
977 struct bnxt_qplib_res *qp1_res,
978 struct bnxt_qplib_qp *qp1_qp)
979 {
980 struct bnxt_re_dev *rdev = pd->rdev;
981 struct bnxt_re_qp *qp;
982 int rc;
983
984 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
985 if (!qp)
986 return NULL;
987
988 qp->rdev = rdev;
989
990 /* Initialize the shadow QP structure from the QP1 values */
991 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
992
993 qp->qplib_qp.pd = &pd->qplib_pd;
994 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
995 qp->qplib_qp.type = IB_QPT_UD;
996
997 qp->qplib_qp.max_inline_data = 0;
998 qp->qplib_qp.sig_type = true;
999
1000 /* Shadow QP SQ depth should be same as QP1 RQ depth */
1001 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1002 qp->qplib_qp.sq.max_sge = 2;
1003 /* Q full delta can be 1 since it is internal QP */
1004 qp->qplib_qp.sq.q_full_delta = 1;
1005
1006 qp->qplib_qp.scq = qp1_qp->scq;
1007 qp->qplib_qp.rcq = qp1_qp->rcq;
1008
1009 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1010 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1011 /* Q full delta can be 1 since it is internal QP */
1012 qp->qplib_qp.rq.q_full_delta = 1;
1013
1014 qp->qplib_qp.mtu = qp1_qp->mtu;
1015
1016 qp->qplib_qp.sq_hdr_buf_size = 0;
1017 qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
1018 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1019
1020 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1021 if (rc)
1022 goto fail;
1023
1024 rdev->sqp_id = qp->qplib_qp.id;
1025
1026 spin_lock_init(&qp->sq_lock);
1027 INIT_LIST_HEAD(&qp->list);
1028 mutex_lock(&rdev->qp_lock);
1029 list_add_tail(&qp->list, &rdev->qp_list);
1030 atomic_inc(&rdev->qp_count);
1031 mutex_unlock(&rdev->qp_lock);
1032 return qp;
1033 fail:
1034 kfree(qp);
1035 return NULL;
1036 }
1037
1038 struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
1039 struct ib_qp_init_attr *qp_init_attr,
1040 struct ib_udata *udata)
1041 {
1042 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1043 struct bnxt_re_dev *rdev = pd->rdev;
1044 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1045 struct bnxt_re_qp *qp;
1046 struct bnxt_re_cq *cq;
1047 struct bnxt_re_srq *srq;
1048 int rc, entries;
1049
1050 if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
1051 (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
1052 (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
1053 (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
1054 (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
1055 return ERR_PTR(-EINVAL);
1056
1057 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1058 if (!qp)
1059 return ERR_PTR(-ENOMEM);
1060
1061 qp->rdev = rdev;
1062 ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
1063 qp->qplib_qp.pd = &pd->qplib_pd;
1064 qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
1065 qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);
1066 if (qp->qplib_qp.type == IB_QPT_MAX) {
1067 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
1068 qp->qplib_qp.type);
1069 rc = -EINVAL;
1070 goto fail;
1071 }
1072 qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
1073 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
1074 IB_SIGNAL_ALL_WR) ? true : false);
1075
1076 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
1077 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1078 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1079
1080 if (qp_init_attr->send_cq) {
1081 cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
1082 ib_cq);
1083 if (!cq) {
1084 dev_err(rdev_to_dev(rdev), "Send CQ not found");
1085 rc = -EINVAL;
1086 goto fail;
1087 }
1088 qp->qplib_qp.scq = &cq->qplib_cq;
1089 qp->scq = cq;
1090 }
1091
1092 if (qp_init_attr->recv_cq) {
1093 cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
1094 ib_cq);
1095 if (!cq) {
1096 dev_err(rdev_to_dev(rdev), "Receive CQ not found");
1097 rc = -EINVAL;
1098 goto fail;
1099 }
1100 qp->qplib_qp.rcq = &cq->qplib_cq;
1101 qp->rcq = cq;
1102 }
1103
1104 if (qp_init_attr->srq) {
1105 srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
1106 ib_srq);
1107 if (!srq) {
1108 dev_err(rdev_to_dev(rdev), "SRQ not found");
1109 rc = -EINVAL;
1110 goto fail;
1111 }
1112 qp->qplib_qp.srq = &srq->qplib_srq;
1113 qp->qplib_qp.rq.max_wqe = 0;
1114 } else {
1115 /* Allocate 1 more than what's provided so posting max doesn't
1116 * mean empty
1117 */
1118 entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
1119 qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
1120 dev_attr->max_qp_wqes + 1);
1121
1122 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1123 qp_init_attr->cap.max_recv_wr;
1124
1125 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
1126 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1127 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1128 }
1129
1130 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1131
1132 if (qp_init_attr->qp_type == IB_QPT_GSI) {
1133 /* Allocate 1 more than what's provided */
1134 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
1135 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1136 dev_attr->max_qp_wqes + 1);
1137 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1138 qp_init_attr->cap.max_send_wr;
1139 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1140 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
1141 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
1142 qp->qplib_qp.sq.max_sge++;
1143 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
1144 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
1145
1146 qp->qplib_qp.rq_hdr_buf_size =
1147 BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
1148
1149 qp->qplib_qp.sq_hdr_buf_size =
1150 BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
1151 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1152 rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
1153 if (rc) {
1154 dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
1155 goto fail;
1156 }
1157 /* Create a shadow QP to handle the QP1 traffic */
1158 rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
1159 &qp->qplib_qp);
1160 if (!rdev->qp1_sqp) {
1161 rc = -EINVAL;
1162 dev_err(rdev_to_dev(rdev),
1163 "Failed to create Shadow QP for QP1");
1164 goto qp_destroy;
1165 }
1166 rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1167 &qp->qplib_qp);
1168 if (!rdev->sqp_ah) {
1169 bnxt_qplib_destroy_qp(&rdev->qplib_res,
1170 &rdev->qp1_sqp->qplib_qp);
1171 rc = -EINVAL;
1172 dev_err(rdev_to_dev(rdev),
1173 "Failed to create AH entry for ShadowQP");
1174 goto qp_destroy;
1175 }
1176
1177 } else {
1178 /* Allocate 128 + 1 more than what's provided */
1179 entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
1180 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1181 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1182 dev_attr->max_qp_wqes +
1183 BNXT_QPLIB_RESERVED_QP_WRS + 1);
1184 qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
1185
1186 /*
1187 * Reserve one slot for the phantom WQE. The application can
1188 * post one extra entry in this case, but allowing that avoids an
1189 * unexpected queue-full condition.
1190 */
1191
1192 qp->qplib_qp.sq.q_full_delta -= 1;
1193
1194 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
1195 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
1196 if (udata) {
1197 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
1198 if (rc)
1199 goto fail;
1200 } else {
1201 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1202 }
1203
1204 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
1205 if (rc) {
1206 dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
1207 goto free_umem;
1208 }
1209 }
1210
1211 qp->ib_qp.qp_num = qp->qplib_qp.id;
1212 spin_lock_init(&qp->sq_lock);
1213 spin_lock_init(&qp->rq_lock);
1214
1215 if (udata) {
1216 struct bnxt_re_qp_resp resp;
1217
1218 resp.qpid = qp->ib_qp.qp_num;
1219 resp.rsvd = 0;
1220 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1221 if (rc) {
1222 dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
1223 goto qp_destroy;
1224 }
1225 }
1226 INIT_LIST_HEAD(&qp->list);
1227 mutex_lock(&rdev->qp_lock);
1228 list_add_tail(&qp->list, &rdev->qp_list);
1229 atomic_inc(&rdev->qp_count);
1230 mutex_unlock(&rdev->qp_lock);
1231
1232 return &qp->ib_qp;
1233 qp_destroy:
1234 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1235 free_umem:
1236 if (udata) {
1237 if (qp->rumem)
1238 ib_umem_release(qp->rumem);
1239 if (qp->sumem)
1240 ib_umem_release(qp->sumem);
1241 }
1242 fail:
1243 kfree(qp);
1244 return ERR_PTR(rc);
1245 }
1246
1247 static u8 __from_ib_qp_state(enum ib_qp_state state)
1248 {
1249 switch (state) {
1250 case IB_QPS_RESET:
1251 return CMDQ_MODIFY_QP_NEW_STATE_RESET;
1252 case IB_QPS_INIT:
1253 return CMDQ_MODIFY_QP_NEW_STATE_INIT;
1254 case IB_QPS_RTR:
1255 return CMDQ_MODIFY_QP_NEW_STATE_RTR;
1256 case IB_QPS_RTS:
1257 return CMDQ_MODIFY_QP_NEW_STATE_RTS;
1258 case IB_QPS_SQD:
1259 return CMDQ_MODIFY_QP_NEW_STATE_SQD;
1260 case IB_QPS_SQE:
1261 return CMDQ_MODIFY_QP_NEW_STATE_SQE;
1262 case IB_QPS_ERR:
1263 default:
1264 return CMDQ_MODIFY_QP_NEW_STATE_ERR;
1265 }
1266 }
1267
1268 static enum ib_qp_state __to_ib_qp_state(u8 state)
1269 {
1270 switch (state) {
1271 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1272 return IB_QPS_RESET;
1273 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1274 return IB_QPS_INIT;
1275 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1276 return IB_QPS_RTR;
1277 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1278 return IB_QPS_RTS;
1279 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1280 return IB_QPS_SQD;
1281 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1282 return IB_QPS_SQE;
1283 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1284 default:
1285 return IB_QPS_ERR;
1286 }
1287 }
1288
1289 static u32 __from_ib_mtu(enum ib_mtu mtu)
1290 {
1291 switch (mtu) {
1292 case IB_MTU_256:
1293 return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
1294 case IB_MTU_512:
1295 return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
1296 case IB_MTU_1024:
1297 return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
1298 case IB_MTU_2048:
1299 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1300 case IB_MTU_4096:
1301 return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
1302 default:
1303 return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1304 }
1305 }
1306
1307 static enum ib_mtu __to_ib_mtu(u32 mtu)
1308 {
1309 switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
1310 case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
1311 return IB_MTU_256;
1312 case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
1313 return IB_MTU_512;
1314 case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
1315 return IB_MTU_1024;
1316 case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
1317 return IB_MTU_2048;
1318 case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
1319 return IB_MTU_4096;
1320 default:
1321 return IB_MTU_2048;
1322 }
1323 }
1324
1325 /* Shared Receive Queues */
1326 int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
1327 {
1328 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1329 ib_srq);
1330 struct bnxt_re_dev *rdev = srq->rdev;
1331 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1332 struct bnxt_qplib_nq *nq = NULL;
1333 int rc;
1334
1335 if (qplib_srq->cq)
1336 nq = qplib_srq->cq->nq;
1337 rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1338 if (rc) {
1339 dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
1340 return rc;
1341 }
1342
1343 if (srq->umem)
1344 ib_umem_release(srq->umem);
1345 kfree(srq);
1346 atomic_dec(&rdev->srq_count);
1347 if (nq)
1348 nq->budget--;
1349 return 0;
1350 }
1351
1352 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1353 struct bnxt_re_pd *pd,
1354 struct bnxt_re_srq *srq,
1355 struct ib_udata *udata)
1356 {
1357 struct bnxt_re_srq_req ureq;
1358 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1359 struct ib_umem *umem;
1360 int bytes = 0;
1361 struct ib_ucontext *context = pd->ib_pd.uobject->context;
1362 struct bnxt_re_ucontext *cntx = container_of(context,
1363 struct bnxt_re_ucontext,
1364 ib_uctx);
1365 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1366 return -EFAULT;
1367
1368 bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1369 bytes = PAGE_ALIGN(bytes);
1370 umem = ib_umem_get(context, ureq.srqva, bytes,
1371 IB_ACCESS_LOCAL_WRITE, 1);
1372 if (IS_ERR(umem))
1373 return PTR_ERR(umem);
1374
1375 srq->umem = umem;
1376 qplib_srq->nmap = umem->nmap;
1377 qplib_srq->sglist = umem->sg_head.sgl;
1378 qplib_srq->srq_handle = ureq.srq_handle;
1379 qplib_srq->dpi = &cntx->dpi;
1380
1381 return 0;
1382 }
1383
1384 struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
1385 struct ib_srq_init_attr *srq_init_attr,
1386 struct ib_udata *udata)
1387 {
1388 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
1389 struct bnxt_re_dev *rdev = pd->rdev;
1390 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1391 struct bnxt_re_srq *srq;
1392 struct bnxt_qplib_nq *nq = NULL;
1393 int rc, entries;
1394
1395 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1396 dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
1397 rc = -EINVAL;
1398 goto exit;
1399 }
1400
1401 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1402 rc = -EOPNOTSUPP;
1403 goto exit;
1404 }
1405
1406 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1407 if (!srq) {
1408 rc = -ENOMEM;
1409 goto exit;
1410 }
1411 srq->rdev = rdev;
1412 srq->qplib_srq.pd = &pd->qplib_pd;
1413 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1414 /* Allocate 1 more than what's provided so posting max doesn't
1415 * mean empty
1416 */
1417 entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
1418 if (entries > dev_attr->max_srq_wqes + 1)
1419 entries = dev_attr->max_srq_wqes + 1;
1420
1421 srq->qplib_srq.max_wqe = entries;
1422 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1423 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1424 srq->srq_limit = srq_init_attr->attr.srq_limit;
1425 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
1426 nq = &rdev->nq[0];
1427
1428 if (udata) {
1429 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1430 if (rc)
1431 goto fail;
1432 }
1433
1434 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1435 if (rc) {
1436 dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
1437 goto fail;
1438 }
1439
1440 if (udata) {
1441 struct bnxt_re_srq_resp resp;
1442
1443 resp.srqid = srq->qplib_srq.id;
1444 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
1445 if (rc) {
1446 dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
1447 bnxt_qplib_destroy_srq(&rdev->qplib_res,
1448 &srq->qplib_srq);
1449 goto exit;
1450 }
1451 }
1452 if (nq)
1453 nq->budget++;
1454 atomic_inc(&rdev->srq_count);
1455
1456 return &srq->ib_srq;
1457
1458 fail:
1459 if (srq->umem)
1460 ib_umem_release(srq->umem);
1461 kfree(srq);
1462 exit:
1463 return ERR_PTR(rc);
1464 }
1465
1466 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1467 enum ib_srq_attr_mask srq_attr_mask,
1468 struct ib_udata *udata)
1469 {
1470 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1471 ib_srq);
1472 struct bnxt_re_dev *rdev = srq->rdev;
1473 int rc;
1474
1475 switch (srq_attr_mask) {
1476 case IB_SRQ_MAX_WR:
1477 /* SRQ resize is not supported */
1478 break;
1479 case IB_SRQ_LIMIT:
1480 /* Change the SRQ threshold */
1481 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1482 return -EINVAL;
1483
1484 srq->qplib_srq.threshold = srq_attr->srq_limit;
1485 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1486 if (rc) {
1487 dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
1488 return rc;
1489 }
1490 /* On success, update the shadow */
1491 srq->srq_limit = srq_attr->srq_limit;
1492 /* No need to Build and send response back to udata */
1493 break;
1494 default:
1495 dev_err(rdev_to_dev(rdev),
1496 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1497 return -EINVAL;
1498 }
1499 return 0;
1500 }
1501
1502 int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1503 {
1504 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1505 ib_srq);
1506 struct bnxt_re_srq tsrq;
1507 struct bnxt_re_dev *rdev = srq->rdev;
1508 int rc;
1509
1510 /* Get live SRQ attr */
1511 tsrq.qplib_srq.id = srq->qplib_srq.id;
1512 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1513 if (rc) {
1514 dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
1515 return rc;
1516 }
1517 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1518 srq_attr->max_sge = srq->qplib_srq.max_sge;
1519 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1520
1521 return 0;
1522 }
1523
1524 int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1525 const struct ib_recv_wr **bad_wr)
1526 {
1527 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1528 ib_srq);
1529 struct bnxt_qplib_swqe wqe;
1530 unsigned long flags;
1531 int rc = 0;
1532
1533 spin_lock_irqsave(&srq->lock, flags);
1534 while (wr) {
1535 /* Transcribe each ib_recv_wr to qplib_swqe */
1536 wqe.num_sge = wr->num_sge;
1537 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1538 wqe.wr_id = wr->wr_id;
1539 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1540
1541 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1542 if (rc) {
1543 *bad_wr = wr;
1544 break;
1545 }
1546 wr = wr->next;
1547 }
1548 spin_unlock_irqrestore(&srq->lock, flags);
1549
1550 return rc;
1551 }
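
/* Mirror the relevant QP1 attribute changes (state, pkey index, qkey, SQ PSN) onto the shadow QP. */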
1552 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1553 struct bnxt_re_qp *qp1_qp,
1554 int qp_attr_mask)
1555 {
1556 struct bnxt_re_qp *qp = rdev->qp1_sqp;
1557 int rc = 0;
1558
1559 if (qp_attr_mask & IB_QP_STATE) {
1560 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1561 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1562 }
1563 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1564 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1565 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1566 }
1567
1568 if (qp_attr_mask & IB_QP_QKEY) {
1569 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1570 /* Using a Random QKEY */
1571 qp->qplib_qp.qkey = 0x81818181;
1572 }
1573 if (qp_attr_mask & IB_QP_SQ_PSN) {
1574 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1575 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1576 }
1577
1578 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1579 if (rc)
1580 dev_err(rdev_to_dev(rdev),
1581 "Failed to modify Shadow QP for QP1");
1582 return rc;
1583 }
1584
1585 int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1586 int qp_attr_mask, struct ib_udata *udata)
1587 {
1588 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1589 struct bnxt_re_dev *rdev = qp->rdev;
1590 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1591 enum ib_qp_state curr_qp_state, new_qp_state;
1592 int rc, entries;
1593 unsigned int flags;
1594 u8 nw_type;
1595
1596 qp->qplib_qp.modify_flags = 0;
1597 if (qp_attr_mask & IB_QP_STATE) {
1598 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1599 new_qp_state = qp_attr->qp_state;
1600 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1601 ib_qp->qp_type, qp_attr_mask,
1602 IB_LINK_LAYER_ETHERNET)) {
1603 dev_err(rdev_to_dev(rdev),
1604 "Invalid attribute mask: %#x specified ",
1605 qp_attr_mask);
1606 dev_err(rdev_to_dev(rdev),
1607 "for qpn: %#x type: %#x",
1608 ib_qp->qp_num, ib_qp->qp_type);
1609 dev_err(rdev_to_dev(rdev),
1610 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1611 curr_qp_state, new_qp_state);
1612 return -EINVAL;
1613 }
1614 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1615 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1616
1617 if (!qp->sumem &&
1618 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1619 dev_dbg(rdev_to_dev(rdev),
1620 "Move QP = %p to flush list\n",
1621 qp);
1622 flags = bnxt_re_lock_cqs(qp);
1623 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1624 bnxt_re_unlock_cqs(qp, flags);
1625 }
1626 if (!qp->sumem &&
1627 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1628 dev_dbg(rdev_to_dev(rdev),
1629 "Move QP = %p out of flush list\n",
1630 qp);
1631 flags = bnxt_re_lock_cqs(qp);
1632 bnxt_qplib_clean_qp(&qp->qplib_qp);
1633 bnxt_re_unlock_cqs(qp, flags);
1634 }
1635 }
1636 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1637 qp->qplib_qp.modify_flags |=
1638 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1639 qp->qplib_qp.en_sqd_async_notify = true;
1640 }
1641 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1642 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1643 qp->qplib_qp.access =
1644 __from_ib_access_flags(qp_attr->qp_access_flags);
1645 /* LOCAL_WRITE access must be set to allow RC receive */
1646 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1647 }
1648 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1649 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1650 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1651 }
1652 if (qp_attr_mask & IB_QP_QKEY) {
1653 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1654 qp->qplib_qp.qkey = qp_attr->qkey;
1655 }
1656 if (qp_attr_mask & IB_QP_AV) {
1657 const struct ib_global_route *grh =
1658 rdma_ah_read_grh(&qp_attr->ah_attr);
1659 const struct ib_gid_attr *sgid_attr;
1660
1661 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1662 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1663 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1664 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1665 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1666 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1667 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1668 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1669 sizeof(qp->qplib_qp.ah.dgid.data));
1670 qp->qplib_qp.ah.flow_label = grh->flow_label;
1671 /* If RoCE V2 is enabled, stack will have two entries for
1672 * each GID entry. Avoid this duplicate entry in HW by dividing
1673 * the GID index by 2 for RoCE V2
1674 */
1675 qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
1676 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1677 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1678 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1679 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1680 ether_addr_copy(qp->qplib_qp.ah.dmac,
1681 qp_attr->ah_attr.roce.dmac);
1682
1683 sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
1684 memcpy(qp->qplib_qp.smac, sgid_attr->ndev->dev_addr,
1685 ETH_ALEN);
1686 nw_type = rdma_gid_attr_network_type(sgid_attr);
1687 switch (nw_type) {
1688 case RDMA_NETWORK_IPV4:
1689 qp->qplib_qp.nw_type =
1690 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1691 break;
1692 case RDMA_NETWORK_IPV6:
1693 qp->qplib_qp.nw_type =
1694 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1695 break;
1696 default:
1697 qp->qplib_qp.nw_type =
1698 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1699 break;
1700 }
1701 }
1702
1703 if (qp_attr_mask & IB_QP_PATH_MTU) {
1704 qp->qplib_qp.modify_flags |=
1705 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1706 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1707 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1708 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1709 qp->qplib_qp.modify_flags |=
1710 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1711 qp->qplib_qp.path_mtu =
1712 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1713 qp->qplib_qp.mtu =
1714 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1715 }
1716
1717 if (qp_attr_mask & IB_QP_TIMEOUT) {
1718 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1719 qp->qplib_qp.timeout = qp_attr->timeout;
1720 }
1721 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1722 qp->qplib_qp.modify_flags |=
1723 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1724 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1725 }
1726 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1727 qp->qplib_qp.modify_flags |=
1728 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1729 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1730 }
1731 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1732 qp->qplib_qp.modify_flags |=
1733 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1734 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1735 }
1736 if (qp_attr_mask & IB_QP_RQ_PSN) {
1737 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1738 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1739 }
1740 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1741 qp->qplib_qp.modify_flags |=
1742 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1743 /* Cap the max_rd_atomic to device max */
1744 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1745 dev_attr->max_qp_rd_atom);
1746 }
1747 if (qp_attr_mask & IB_QP_SQ_PSN) {
1748 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1749 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1750 }
1751 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1752 if (qp_attr->max_dest_rd_atomic >
1753 dev_attr->max_qp_init_rd_atom) {
1754 dev_err(rdev_to_dev(rdev),
1755 "max_dest_rd_atomic requested%d is > dev_max%d",
1756 qp_attr->max_dest_rd_atomic,
1757 dev_attr->max_qp_init_rd_atom);
1758 return -EINVAL;
1759 }
1760
1761 qp->qplib_qp.modify_flags |=
1762 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1763 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1764 }
1765 if (qp_attr_mask & IB_QP_CAP) {
1766 qp->qplib_qp.modify_flags |=
1767 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
1768 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
1769 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
1770 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
1771 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
1772 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
1773 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
1774 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
1775 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
1776 (qp_attr->cap.max_inline_data >=
1777 dev_attr->max_inline_data)) {
1778 dev_err(rdev_to_dev(rdev),
1779 				"Modify QP failed - max exceeded");
1780 return -EINVAL;
1781 }
1782 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
1783 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
1784 dev_attr->max_qp_wqes + 1);
1785 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
1786 qp_attr->cap.max_send_wr;
1787 /*
1788 		 * Reserve one slot for the phantom WQE. Some applications can
1789 		 * post one extra entry in this case; allow it to avoid an
1790 		 * unexpected queue-full condition.
1791 */
1792 qp->qplib_qp.sq.q_full_delta -= 1;
1793 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
1794 if (qp->qplib_qp.rq.max_wqe) {
1795 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
1796 qp->qplib_qp.rq.max_wqe =
1797 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1798 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
1799 qp_attr->cap.max_recv_wr;
1800 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
1801 } else {
1802 /* SRQ was used prior, just ignore the RQ caps */
1803 }
1804 }
1805 if (qp_attr_mask & IB_QP_DEST_QPN) {
1806 qp->qplib_qp.modify_flags |=
1807 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
1808 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
1809 }
1810 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1811 if (rc) {
1812 dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
1813 return rc;
1814 }
1815 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
1816 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
1817 return rc;
1818 }
1819
1820 int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1821 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
1822 {
1823 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1824 struct bnxt_re_dev *rdev = qp->rdev;
1825 struct bnxt_qplib_qp *qplib_qp;
1826 int rc;
1827
1828 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1829 if (!qplib_qp)
1830 return -ENOMEM;
1831
1832 qplib_qp->id = qp->qplib_qp.id;
1833 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1834
1835 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1836 if (rc) {
1837 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1838 goto out;
1839 }
1840 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1841 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1842 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1843 qp_attr->pkey_index = qplib_qp->pkey_index;
1844 qp_attr->qkey = qplib_qp->qkey;
1845 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1846 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1847 qplib_qp->ah.host_sgid_index,
1848 qplib_qp->ah.hop_limit,
1849 qplib_qp->ah.traffic_class);
1850 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1851 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1852 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1853 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1854 qp_attr->timeout = qplib_qp->timeout;
1855 qp_attr->retry_cnt = qplib_qp->retry_cnt;
1856 qp_attr->rnr_retry = qplib_qp->rnr_retry;
1857 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1858 qp_attr->rq_psn = qplib_qp->rq.psn;
1859 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1860 qp_attr->sq_psn = qplib_qp->sq.psn;
1861 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1862 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1863 IB_SIGNAL_REQ_WR;
1864 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1865
1866 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1867 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
1868 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
1869 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
1870 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1871 qp_init_attr->cap = qp_attr->cap;
1872
1873 out:
1874 kfree(qplib_qp);
1875 return rc;
1876 }
1877
1878 /* Routine for sending QP1 packets for RoCE V1 and V2
1879 */
1880 static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
1881 const struct ib_send_wr *wr,
1882 struct bnxt_qplib_swqe *wqe,
1883 int payload_size)
1884 {
1885 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
1886 ib_ah);
1887 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
1888 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
1889 struct bnxt_qplib_sge sge;
1890 u8 nw_type;
1891 u16 ether_type;
1892 union ib_gid dgid;
1893 bool is_eth = false;
1894 bool is_vlan = false;
1895 bool is_grh = false;
1896 bool is_udp = false;
1897 u8 ip_version = 0;
1898 u16 vlan_id = 0xFFFF;
1899 void *buf;
1900 int i, rc = 0;
1901
1902 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
1903
1904 if (is_vlan_dev(sgid_attr->ndev))
1905 vlan_id = vlan_dev_vlan_id(sgid_attr->ndev);
1906 /* Get network header type for this GID */
1907 nw_type = rdma_gid_attr_network_type(sgid_attr);
1908 switch (nw_type) {
1909 case RDMA_NETWORK_IPV4:
1910 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
1911 break;
1912 case RDMA_NETWORK_IPV6:
1913 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
1914 break;
1915 default:
1916 nw_type = BNXT_RE_ROCE_V1_PACKET;
1917 break;
1918 }
1919 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
1920 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
1921 if (is_udp) {
1922 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
1923 ip_version = 4;
1924 ether_type = ETH_P_IP;
1925 } else {
1926 ip_version = 6;
1927 ether_type = ETH_P_IPV6;
1928 }
1929 is_grh = false;
1930 } else {
1931 ether_type = ETH_P_IBOE;
1932 is_grh = true;
1933 }
1934
1935 is_eth = true;
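	/* vlan_id is still the 0xFFFF default unless the ndev was a VLAN
	 * device above; only VIDs in the valid 1..4095 range are treated
	 * as a VLAN here.
	 */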
1936 is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
1937
1938 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
1939 ip_version, is_udp, 0, &qp->qp1_hdr);
1940
1941 /* ETH */
1942 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
1943 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
1944
1945 	/* For VLAN, check the SGID for VLAN existence */
1946
1947 if (!is_vlan) {
1948 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
1949 } else {
1950 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
1951 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
1952 }
1953
1954 if (is_grh || (ip_version == 6)) {
1955 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
1956 sizeof(sgid_attr->gid));
1957 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
1958 sizeof(sgid_attr->gid));
1959 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
1960 }
1961
1962 if (ip_version == 4) {
1963 qp->qp1_hdr.ip4.tos = 0;
1964 qp->qp1_hdr.ip4.id = 0;
1965 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
1966 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
1967
1968 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
1969 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
1970 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
1971 }
1972
1973 if (is_udp) {
1974 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
1975 qp->qp1_hdr.udp.sport = htons(0x8CD1);
1976 qp->qp1_hdr.udp.csum = 0;
1977 }
1978
1979 /* BTH */
1980 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
1981 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1982 qp->qp1_hdr.immediate_present = 1;
1983 } else {
1984 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1985 }
1986 if (wr->send_flags & IB_SEND_SOLICITED)
1987 qp->qp1_hdr.bth.solicited_event = 1;
1988 /* pad_count */
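	/* (4 - payload_size) & 3 gives the bytes needed to round the
	 * payload up to a 4-byte multiple, e.g. payload_size 5 -> 3 pad
	 * bytes, payload_size 8 -> 0.
	 */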
1989 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
1990
1991 /* P_key for QP1 is for all members */
1992 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
1993 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
1994 qp->qp1_hdr.bth.ack_req = 0;
1995 qp->send_psn++;
1996 qp->send_psn &= BTH_PSN_MASK;
1997 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
1998 /* DETH */
1999 	/* Use the privileged Q_Key for QP1 */
2000 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2001 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2002
2003 /* Pack the QP1 to the transmit buffer */
2004 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2005 if (buf) {
2006 ib_ud_header_pack(&qp->qp1_hdr, buf);
2007 for (i = wqe->num_sge; i; i--) {
2008 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2009 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2010 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2011 }
2012
2013 /*
2014 		 * Max header buf size for IPV6 RoCE V2 is 86,
2015 		 * which is the same as the QP1 SQ header buffer.
2016 		 * Header buf size for IPV4 RoCE V2 can be 66:
2017 		 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2018 		 * Subtract 20 bytes from the QP1 SQ header buf size.
2019 */
2020 if (is_udp && ip_version == 4)
2021 sge.size -= 20;
2022 /*
2023 * Max Header buf size for RoCE V1 is 78.
2024 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2025 * Subtract 8 bytes from QP1 SQ header buf size
2026 */
2027 if (!is_udp)
2028 sge.size -= 8;
2029
2030 		/* Subtract 4 bytes for non-VLAN packets */
2031 if (!is_vlan)
2032 sge.size -= 4;
2033
2034 wqe->sg_list[0].addr = sge.addr;
2035 wqe->sg_list[0].lkey = sge.lkey;
2036 wqe->sg_list[0].size = sge.size;
2037 wqe->num_sge++;
2038
2039 } else {
2040 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
2041 rc = -ENOMEM;
2042 }
2043 return rc;
2044 }
2045
2046 /* The MAD layer only provides a recv SGE the size of
2047  * ib_grh + MAD datagram: no Ethernet headers, Ethertype, BTH, DETH,
2048  * nor RoCE iCRC. The Cu+ solution must provide a buffer for the entire
2049  * receive packet (334 bytes) with no VLAN and then copy the GRH
2050  * and the MAD datagram out to the provided SGE.
2051 */
2052 static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2053 const struct ib_recv_wr *wr,
2054 struct bnxt_qplib_swqe *wqe,
2055 int payload_size)
2056 {
2057 struct bnxt_qplib_sge ref, sge;
2058 u32 rq_prod_index;
2059 struct bnxt_re_sqp_entries *sqp_entry;
2060
2061 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2062
2063 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2064 return -ENOMEM;
2065
2066 /* Create 1 SGE to receive the entire
2067 * ethernet packet
2068 */
2069 /* Save the reference from ULP */
2070 ref.addr = wqe->sg_list[0].addr;
2071 ref.lkey = wqe->sg_list[0].lkey;
2072 ref.size = wqe->sg_list[0].size;
2073
2074 sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];
2075
2076 /* SGE 1 */
2077 wqe->sg_list[0].addr = sge.addr;
2078 wqe->sg_list[0].lkey = sge.lkey;
2079 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2080 sge.size -= wqe->sg_list[0].size;
2081
2082 sqp_entry->sge.addr = ref.addr;
2083 sqp_entry->sge.lkey = ref.lkey;
2084 sqp_entry->sge.size = ref.size;
2085 /* Store the wrid for reporting completion */
2086 sqp_entry->wrid = wqe->wr_id;
2087 	/* Change the wqe->wr_id to the table index */
2088 wqe->wr_id = rq_prod_index;
2089 return 0;
2090 }
2091
2092 static int is_ud_qp(struct bnxt_re_qp *qp)
2093 {
2094 return qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD;
2095 }
2096
2097 static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2098 const struct ib_send_wr *wr,
2099 struct bnxt_qplib_swqe *wqe)
2100 {
2101 struct bnxt_re_ah *ah = NULL;
2102
2103 if (is_ud_qp(qp)) {
2104 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2105 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2106 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2107 wqe->send.avid = ah->qplib_ah.id;
2108 }
2109 switch (wr->opcode) {
2110 case IB_WR_SEND:
2111 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2112 break;
2113 case IB_WR_SEND_WITH_IMM:
2114 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2115 wqe->send.imm_data = wr->ex.imm_data;
2116 break;
2117 case IB_WR_SEND_WITH_INV:
2118 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2119 wqe->send.inv_key = wr->ex.invalidate_rkey;
2120 break;
2121 default:
2122 return -EINVAL;
2123 }
2124 if (wr->send_flags & IB_SEND_SIGNALED)
2125 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2126 if (wr->send_flags & IB_SEND_FENCE)
2127 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2128 if (wr->send_flags & IB_SEND_SOLICITED)
2129 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2130 if (wr->send_flags & IB_SEND_INLINE)
2131 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2132
2133 return 0;
2134 }
2135
2136 static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2137 struct bnxt_qplib_swqe *wqe)
2138 {
2139 switch (wr->opcode) {
2140 case IB_WR_RDMA_WRITE:
2141 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2142 break;
2143 case IB_WR_RDMA_WRITE_WITH_IMM:
2144 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2145 wqe->rdma.imm_data = wr->ex.imm_data;
2146 break;
2147 case IB_WR_RDMA_READ:
2148 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2149 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2150 break;
2151 default:
2152 return -EINVAL;
2153 }
2154 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2155 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2156 if (wr->send_flags & IB_SEND_SIGNALED)
2157 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2158 if (wr->send_flags & IB_SEND_FENCE)
2159 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2160 if (wr->send_flags & IB_SEND_SOLICITED)
2161 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2162 if (wr->send_flags & IB_SEND_INLINE)
2163 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2164
2165 return 0;
2166 }
2167
2168 static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2169 struct bnxt_qplib_swqe *wqe)
2170 {
2171 switch (wr->opcode) {
2172 case IB_WR_ATOMIC_CMP_AND_SWP:
2173 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2174 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2175 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2176 break;
2177 case IB_WR_ATOMIC_FETCH_AND_ADD:
2178 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2179 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2180 break;
2181 default:
2182 return -EINVAL;
2183 }
2184 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2185 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2186 if (wr->send_flags & IB_SEND_SIGNALED)
2187 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2188 if (wr->send_flags & IB_SEND_FENCE)
2189 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2190 if (wr->send_flags & IB_SEND_SOLICITED)
2191 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2192 return 0;
2193 }
2194
2195 static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2196 struct bnxt_qplib_swqe *wqe)
2197 {
2198 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2199 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2200
2201 /* Need unconditional fence for local invalidate
2202 * opcode to work as expected.
2203 */
2204 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2205
2206 if (wr->send_flags & IB_SEND_SIGNALED)
2207 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2208 if (wr->send_flags & IB_SEND_SOLICITED)
2209 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2210
2211 return 0;
2212 }
2213
2214 static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2215 struct bnxt_qplib_swqe *wqe)
2216 {
2217 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2218 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2219 int access = wr->access;
2220
2221 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2222 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2223 wqe->frmr.page_list = mr->pages;
2224 wqe->frmr.page_list_len = mr->npages;
2225 wqe->frmr.levels = qplib_frpl->hwq.level + 1;
2226 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2227
2228 /* Need unconditional fence for reg_mr
2229 * opcode to function as expected.
2230 */
2231
2232 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2233
2234 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2235 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2236
2237 if (access & IB_ACCESS_LOCAL_WRITE)
2238 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2239 if (access & IB_ACCESS_REMOTE_READ)
2240 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2241 if (access & IB_ACCESS_REMOTE_WRITE)
2242 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2243 if (access & IB_ACCESS_REMOTE_ATOMIC)
2244 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2245 if (access & IB_ACCESS_MW_BIND)
2246 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2247
2248 wqe->frmr.l_key = wr->key;
2249 wqe->frmr.length = wr->mr->length;
2250 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2251 wqe->frmr.va = wr->mr->iova;
2252 return 0;
2253 }
2254
2255 static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2256 const struct ib_send_wr *wr,
2257 struct bnxt_qplib_swqe *wqe)
2258 {
2259 /* Copy the inline data to the data field */
2260 u8 *in_data;
2261 u32 i, sge_len;
2262 void *sge_addr;
2263
2264 in_data = wqe->inline_data;
2265 for (i = 0; i < wr->num_sge; i++) {
2266 sge_addr = (void *)(unsigned long)
2267 wr->sg_list[i].addr;
2268 sge_len = wr->sg_list[i].length;
2269
2270 if ((sge_len + wqe->inline_len) >
2271 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2272 dev_err(rdev_to_dev(rdev),
2273 "Inline data size requested > supported value");
2274 return -EINVAL;
2275 }
2276 sge_len = wr->sg_list[i].length;
2277
2278 memcpy(in_data, sge_addr, sge_len);
2279 in_data += wr->sg_list[i].length;
2280 wqe->inline_len += wr->sg_list[i].length;
2281 }
2282 return wqe->inline_len;
2283 }
2284
2285 static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2286 const struct ib_send_wr *wr,
2287 struct bnxt_qplib_swqe *wqe)
2288 {
2289 int payload_sz = 0;
2290
2291 if (wr->send_flags & IB_SEND_INLINE)
2292 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2293 else
2294 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2295 wqe->num_sge);
2296
2297 return payload_sz;
2298 }
2299
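/* Work around a HW stall seen on UD-type QPs (UD, GSI, raw Ethertype):
 * once BNXT_RE_UD_QP_HW_STALL WQEs have been posted, nudge the QP back
 * to RTS and reset the counter. The erratum itself is not documented in
 * this file; this note only describes what the code below does.
 */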
2300 static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2301 {
2302 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2303 qp->ib_qp.qp_type == IB_QPT_GSI ||
2304 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2305 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2306 int qp_attr_mask;
2307 struct ib_qp_attr qp_attr;
2308
2309 qp_attr_mask = IB_QP_STATE;
2310 qp_attr.qp_state = IB_QPS_RTS;
2311 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2312 qp->qplib_qp.wqe_cnt = 0;
2313 }
2314 }
2315
2316 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2317 struct bnxt_re_qp *qp,
2318 const struct ib_send_wr *wr)
2319 {
2320 struct bnxt_qplib_swqe wqe;
2321 int rc = 0, payload_sz = 0;
2322 unsigned long flags;
2323
2324 spin_lock_irqsave(&qp->sq_lock, flags);
2325 memset(&wqe, 0, sizeof(wqe));
2326 while (wr) {
2327 		/* Housekeeping */
2328 memset(&wqe, 0, sizeof(wqe));
2329
2330 /* Common */
2331 wqe.num_sge = wr->num_sge;
2332 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2333 dev_err(rdev_to_dev(rdev),
2334 "Limit exceeded for Send SGEs");
2335 rc = -EINVAL;
2336 goto bad;
2337 }
2338
2339 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2340 if (payload_sz < 0) {
2341 rc = -EINVAL;
2342 goto bad;
2343 }
2344 wqe.wr_id = wr->wr_id;
2345
2346 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2347
2348 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2349 if (!rc)
2350 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2351 bad:
2352 if (rc) {
2353 dev_err(rdev_to_dev(rdev),
2354 "Post send failed opcode = %#x rc = %d",
2355 wr->opcode, rc);
2356 break;
2357 }
2358 wr = wr->next;
2359 }
2360 bnxt_qplib_post_send_db(&qp->qplib_qp);
2361 bnxt_ud_qp_hw_stall_workaround(qp);
2362 spin_unlock_irqrestore(&qp->sq_lock, flags);
2363 return rc;
2364 }
2365
2366 int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2367 const struct ib_send_wr **bad_wr)
2368 {
2369 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2370 struct bnxt_qplib_swqe wqe;
2371 int rc = 0, payload_sz = 0;
2372 unsigned long flags;
2373
2374 spin_lock_irqsave(&qp->sq_lock, flags);
2375 while (wr) {
2376 		/* Housekeeping */
2377 memset(&wqe, 0, sizeof(wqe));
2378
2379 /* Common */
2380 wqe.num_sge = wr->num_sge;
2381 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2382 dev_err(rdev_to_dev(qp->rdev),
2383 "Limit exceeded for Send SGEs");
2384 rc = -EINVAL;
2385 goto bad;
2386 }
2387
2388 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2389 if (payload_sz < 0) {
2390 rc = -EINVAL;
2391 goto bad;
2392 }
2393 wqe.wr_id = wr->wr_id;
2394
2395 switch (wr->opcode) {
2396 case IB_WR_SEND:
2397 case IB_WR_SEND_WITH_IMM:
2398 if (ib_qp->qp_type == IB_QPT_GSI) {
2399 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2400 payload_sz);
2401 if (rc)
2402 goto bad;
2403 wqe.rawqp1.lflags |=
2404 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2405 }
2406 switch (wr->send_flags) {
2407 case IB_SEND_IP_CSUM:
2408 wqe.rawqp1.lflags |=
2409 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2410 break;
2411 default:
2412 break;
2413 }
2414 /* fall through */
2415 case IB_WR_SEND_WITH_INV:
2416 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2417 break;
2418 case IB_WR_RDMA_WRITE:
2419 case IB_WR_RDMA_WRITE_WITH_IMM:
2420 case IB_WR_RDMA_READ:
2421 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2422 break;
2423 case IB_WR_ATOMIC_CMP_AND_SWP:
2424 case IB_WR_ATOMIC_FETCH_AND_ADD:
2425 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2426 break;
2427 case IB_WR_RDMA_READ_WITH_INV:
2428 dev_err(rdev_to_dev(qp->rdev),
2429 "RDMA Read with Invalidate is not supported");
2430 rc = -EINVAL;
2431 goto bad;
2432 case IB_WR_LOCAL_INV:
2433 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2434 break;
2435 case IB_WR_REG_MR:
2436 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2437 break;
2438 default:
2439 /* Unsupported WRs */
2440 dev_err(rdev_to_dev(qp->rdev),
2441 "WR (%#x) is not supported", wr->opcode);
2442 rc = -EINVAL;
2443 goto bad;
2444 }
2445 if (!rc)
2446 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2447 bad:
2448 if (rc) {
2449 dev_err(rdev_to_dev(qp->rdev),
2450 "post_send failed op:%#x qps = %#x rc = %d\n",
2451 wr->opcode, qp->qplib_qp.state, rc);
2452 *bad_wr = wr;
2453 break;
2454 }
2455 wr = wr->next;
2456 }
2457 bnxt_qplib_post_send_db(&qp->qplib_qp);
2458 bnxt_ud_qp_hw_stall_workaround(qp);
2459 spin_unlock_irqrestore(&qp->sq_lock, flags);
2460
2461 return rc;
2462 }
2463
2464 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2465 struct bnxt_re_qp *qp,
2466 const struct ib_recv_wr *wr)
2467 {
2468 struct bnxt_qplib_swqe wqe;
2469 int rc = 0;
2470
2471 memset(&wqe, 0, sizeof(wqe));
2472 while (wr) {
2473 		/* Housekeeping */
2474 memset(&wqe, 0, sizeof(wqe));
2475
2476 /* Common */
2477 wqe.num_sge = wr->num_sge;
2478 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2479 dev_err(rdev_to_dev(rdev),
2480 "Limit exceeded for Receive SGEs");
2481 rc = -EINVAL;
2482 break;
2483 }
2484 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2485 wqe.wr_id = wr->wr_id;
2486 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2487
2488 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2489 if (rc)
2490 break;
2491
2492 wr = wr->next;
2493 }
2494 if (!rc)
2495 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2496 return rc;
2497 }
2498
2499 int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2500 const struct ib_recv_wr **bad_wr)
2501 {
2502 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2503 struct bnxt_qplib_swqe wqe;
2504 int rc = 0, payload_sz = 0;
2505 unsigned long flags;
2506 u32 count = 0;
2507
2508 spin_lock_irqsave(&qp->rq_lock, flags);
2509 while (wr) {
2510 		/* Housekeeping */
2511 memset(&wqe, 0, sizeof(wqe));
2512
2513 /* Common */
2514 wqe.num_sge = wr->num_sge;
2515 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2516 dev_err(rdev_to_dev(qp->rdev),
2517 "Limit exceeded for Receive SGEs");
2518 rc = -EINVAL;
2519 *bad_wr = wr;
2520 break;
2521 }
2522
2523 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2524 wr->num_sge);
2525 wqe.wr_id = wr->wr_id;
2526 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2527
2528 if (ib_qp->qp_type == IB_QPT_GSI)
2529 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2530 payload_sz);
2531 if (!rc)
2532 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2533 if (rc) {
2534 *bad_wr = wr;
2535 break;
2536 }
2537
2538 		/* Ring the DB if the number of RQEs posted reaches a threshold */
2539 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2540 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2541 count = 0;
2542 }
2543
2544 wr = wr->next;
2545 }
2546
2547 if (count)
2548 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2549
2550 spin_unlock_irqrestore(&qp->rq_lock, flags);
2551
2552 return rc;
2553 }
2554
2555 /* Completion Queues */
2556 int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
2557 {
2558 int rc;
2559 struct bnxt_re_cq *cq;
2560 struct bnxt_qplib_nq *nq;
2561 struct bnxt_re_dev *rdev;
2562
2563 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2564 rdev = cq->rdev;
2565 nq = cq->qplib_cq.nq;
2566
2567 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2568 if (rc) {
2569 dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
2570 return rc;
2571 }
2572 if (!IS_ERR_OR_NULL(cq->umem))
2573 ib_umem_release(cq->umem);
2574
2575 atomic_dec(&rdev->cq_count);
2576 nq->budget--;
2577 kfree(cq->cql);
2578 kfree(cq);
2579
2580 return 0;
2581 }
2582
2583 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
2584 const struct ib_cq_init_attr *attr,
2585 struct ib_ucontext *context,
2586 struct ib_udata *udata)
2587 {
2588 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
2589 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2590 struct bnxt_re_cq *cq = NULL;
2591 int rc, entries;
2592 int cqe = attr->cqe;
2593 struct bnxt_qplib_nq *nq = NULL;
2594 unsigned int nq_alloc_cnt;
2595
2596 /* Validate CQ fields */
2597 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2598 dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
2599 return ERR_PTR(-EINVAL);
2600 }
2601 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
2602 if (!cq)
2603 return ERR_PTR(-ENOMEM);
2604
2605 cq->rdev = rdev;
2606 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2607
2608 entries = roundup_pow_of_two(cqe + 1);
2609 if (entries > dev_attr->max_cq_wqes + 1)
2610 entries = dev_attr->max_cq_wqes + 1;
2611
2612 if (context) {
2613 struct bnxt_re_cq_req req;
2614 struct bnxt_re_ucontext *uctx = container_of
2615 (context,
2616 struct bnxt_re_ucontext,
2617 ib_uctx);
2618 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2619 rc = -EFAULT;
2620 goto fail;
2621 }
2622
2623 cq->umem = ib_umem_get(context, req.cq_va,
2624 entries * sizeof(struct cq_base),
2625 IB_ACCESS_LOCAL_WRITE, 1);
2626 if (IS_ERR(cq->umem)) {
2627 rc = PTR_ERR(cq->umem);
2628 goto fail;
2629 }
2630 cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
2631 cq->qplib_cq.nmap = cq->umem->nmap;
2632 cq->qplib_cq.dpi = &uctx->dpi;
2633 } else {
2634 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2635 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2636 GFP_KERNEL);
2637 if (!cq->cql) {
2638 rc = -ENOMEM;
2639 goto fail;
2640 }
2641
2642 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2643 cq->qplib_cq.sghead = NULL;
2644 cq->qplib_cq.nmap = 0;
2645 }
2646 /*
2647 	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
2648 	 * used for getting the NQ index.
2649 */
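	/* For example, with num_msix = 4 new CQs cycle across nq[0..2],
	 * spreading completion notifications over the available NQs.
	 */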
2650 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2651 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2652 cq->qplib_cq.max_wqe = entries;
2653 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2654 cq->qplib_cq.nq = nq;
2655
2656 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2657 if (rc) {
2658 dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
2659 goto fail;
2660 }
2661
2662 cq->ib_cq.cqe = entries;
2663 cq->cq_period = cq->qplib_cq.period;
2664 nq->budget++;
2665
2666 atomic_inc(&rdev->cq_count);
2667
2668 if (context) {
2669 struct bnxt_re_cq_resp resp;
2670
2671 resp.cqid = cq->qplib_cq.id;
2672 resp.tail = cq->qplib_cq.hwq.cons;
2673 resp.phase = cq->qplib_cq.period;
2674 resp.rsvd = 0;
2675 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2676 if (rc) {
2677 dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
2678 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2679 goto c2fail;
2680 }
2681 }
2682
2683 return &cq->ib_cq;
2684
2685 c2fail:
2686 if (context)
2687 ib_umem_release(cq->umem);
2688 fail:
2689 kfree(cq->cql);
2690 kfree(cq);
2691 return ERR_PTR(rc);
2692 }
2693
2694 static u8 __req_to_ib_wc_status(u8 qstatus)
2695 {
2696 switch (qstatus) {
2697 case CQ_REQ_STATUS_OK:
2698 return IB_WC_SUCCESS;
2699 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2700 return IB_WC_BAD_RESP_ERR;
2701 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2702 return IB_WC_LOC_LEN_ERR;
2703 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2704 return IB_WC_LOC_QP_OP_ERR;
2705 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2706 return IB_WC_LOC_PROT_ERR;
2707 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2708 return IB_WC_GENERAL_ERR;
2709 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2710 return IB_WC_REM_INV_REQ_ERR;
2711 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2712 return IB_WC_REM_ACCESS_ERR;
2713 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2714 return IB_WC_REM_OP_ERR;
2715 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2716 return IB_WC_RNR_RETRY_EXC_ERR;
2717 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2718 return IB_WC_RETRY_EXC_ERR;
2719 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2720 return IB_WC_WR_FLUSH_ERR;
2721 default:
2722 return IB_WC_GENERAL_ERR;
2723 }
2724 return 0;
2725 }
2726
2727 static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2728 {
2729 switch (qstatus) {
2730 case CQ_RES_RAWETH_QP1_STATUS_OK:
2731 return IB_WC_SUCCESS;
2732 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2733 return IB_WC_LOC_ACCESS_ERR;
2734 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2735 return IB_WC_LOC_LEN_ERR;
2736 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2737 return IB_WC_LOC_PROT_ERR;
2738 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2739 return IB_WC_LOC_QP_OP_ERR;
2740 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2741 return IB_WC_GENERAL_ERR;
2742 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2743 return IB_WC_WR_FLUSH_ERR;
2744 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2745 return IB_WC_WR_FLUSH_ERR;
2746 default:
2747 return IB_WC_GENERAL_ERR;
2748 }
2749 }
2750
2751 static u8 __rc_to_ib_wc_status(u8 qstatus)
2752 {
2753 switch (qstatus) {
2754 case CQ_RES_RC_STATUS_OK:
2755 return IB_WC_SUCCESS;
2756 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2757 return IB_WC_LOC_ACCESS_ERR;
2758 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2759 return IB_WC_LOC_LEN_ERR;
2760 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2761 return IB_WC_LOC_PROT_ERR;
2762 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2763 return IB_WC_LOC_QP_OP_ERR;
2764 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2765 return IB_WC_GENERAL_ERR;
2766 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2767 return IB_WC_REM_INV_REQ_ERR;
2768 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2769 return IB_WC_WR_FLUSH_ERR;
2770 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2771 return IB_WC_WR_FLUSH_ERR;
2772 default:
2773 return IB_WC_GENERAL_ERR;
2774 }
2775 }
2776
2777 static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
2778 {
2779 switch (cqe->type) {
2780 case BNXT_QPLIB_SWQE_TYPE_SEND:
2781 wc->opcode = IB_WC_SEND;
2782 break;
2783 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
2784 wc->opcode = IB_WC_SEND;
2785 wc->wc_flags |= IB_WC_WITH_IMM;
2786 break;
2787 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
2788 wc->opcode = IB_WC_SEND;
2789 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2790 break;
2791 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
2792 wc->opcode = IB_WC_RDMA_WRITE;
2793 break;
2794 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
2795 wc->opcode = IB_WC_RDMA_WRITE;
2796 wc->wc_flags |= IB_WC_WITH_IMM;
2797 break;
2798 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2799 wc->opcode = IB_WC_RDMA_READ;
2800 break;
2801 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2802 wc->opcode = IB_WC_COMP_SWAP;
2803 break;
2804 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2805 wc->opcode = IB_WC_FETCH_ADD;
2806 break;
2807 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2808 wc->opcode = IB_WC_LOCAL_INV;
2809 break;
2810 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
2811 wc->opcode = IB_WC_REG_MR;
2812 break;
2813 default:
2814 wc->opcode = IB_WC_SEND;
2815 break;
2816 }
2817
2818 wc->status = __req_to_ib_wc_status(cqe->status);
2819 }
2820
2821 static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2822 u16 raweth_qp1_flags2)
2823 {
2824 bool is_ipv6 = false, is_ipv4 = false;
2825
2826 	/* raweth_qp1_flags bits 9-6 indicate the itype */
2827 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2828 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2829 return -1;
2830
2831 if (raweth_qp1_flags2 &
2832 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2833 raweth_qp1_flags2 &
2834 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
2835 		/* raweth_qp1_flags2 bit 8 indicates the ip_type: 0 - v4, 1 - v6 */
2836 (raweth_qp1_flags2 &
2837 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
2838 (is_ipv6 = true) : (is_ipv4 = true);
2839 return ((is_ipv6) ?
2840 BNXT_RE_ROCEV2_IPV6_PACKET :
2841 BNXT_RE_ROCEV2_IPV4_PACKET);
2842 } else {
2843 return BNXT_RE_ROCE_V1_PACKET;
2844 }
2845 }
2846
2847 static int bnxt_re_to_ib_nw_type(int nw_type)
2848 {
2849 u8 nw_hdr_type = 0xFF;
2850
2851 switch (nw_type) {
2852 case BNXT_RE_ROCE_V1_PACKET:
2853 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2854 break;
2855 case BNXT_RE_ROCEV2_IPV4_PACKET:
2856 nw_hdr_type = RDMA_NETWORK_IPV4;
2857 break;
2858 case BNXT_RE_ROCEV2_IPV6_PACKET:
2859 nw_hdr_type = RDMA_NETWORK_IPV6;
2860 break;
2861 }
2862 return nw_hdr_type;
2863 }
2864
2865 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2866 void *rq_hdr_buf)
2867 {
2868 u8 *tmp_buf = NULL;
2869 struct ethhdr *eth_hdr;
2870 u16 eth_type;
2871 bool rc = false;
2872
2873 tmp_buf = (u8 *)rq_hdr_buf;
2874 /*
2875 	 * If the dest MAC is not the same as the I/F MAC, this could be
2876 	 * a loopback or multicast address; check whether it is a
2877 	 * loopback packet.
2878 */
2879 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
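		/* Step over what appears to be the 4-byte internal header that
		 * precedes the Ethernet header on loopback frames (see the
		 * comment in bnxt_re_process_raw_qp_pkt_rx) before reading the
		 * Ethertype.
		 */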
2880 tmp_buf += 4;
2881 /* Check the ether type */
2882 eth_hdr = (struct ethhdr *)tmp_buf;
2883 eth_type = ntohs(eth_hdr->h_proto);
2884 switch (eth_type) {
2885 case ETH_P_IBOE:
2886 rc = true;
2887 break;
2888 case ETH_P_IP:
2889 case ETH_P_IPV6: {
2890 u32 len;
2891 struct udphdr *udp_hdr;
2892
2893 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2894 sizeof(struct ipv6hdr));
2895 tmp_buf += sizeof(struct ethhdr) + len;
2896 udp_hdr = (struct udphdr *)tmp_buf;
2897 if (ntohs(udp_hdr->dest) ==
2898 ROCE_V2_UDP_DPORT)
2899 rc = true;
2900 break;
2901 }
2902 default:
2903 break;
2904 }
2905 }
2906
2907 return rc;
2908 }
2909
2910 static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2911 struct bnxt_qplib_cqe *cqe)
2912 {
2913 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2914 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2915 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2916 struct ib_send_wr *swr;
2917 struct ib_ud_wr udwr;
2918 struct ib_recv_wr rwr;
2919 int pkt_type = 0;
2920 u32 tbl_idx;
2921 void *rq_hdr_buf;
2922 dma_addr_t rq_hdr_buf_map;
2923 dma_addr_t shrq_hdr_buf_map;
2924 u32 offset = 0;
2925 u32 skip_bytes = 0;
2926 struct ib_sge s_sge[2];
2927 struct ib_sge r_sge[2];
2928 int rc;
2929
2930 memset(&udwr, 0, sizeof(udwr));
2931 memset(&rwr, 0, sizeof(rwr));
2932 memset(&s_sge, 0, sizeof(s_sge));
2933 memset(&r_sge, 0, sizeof(r_sge));
2934
2935 swr = &udwr.wr;
2936 tbl_idx = cqe->wr_id;
2937
2938 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2939 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2940 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2941 tbl_idx);
2942
2943 /* Shadow QP header buffer */
2944 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2945 tbl_idx);
2946 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2947
2948 /* Store this cqe */
2949 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2950 sqp_entry->qp1_qp = qp1_qp;
2951
2952 /* Find packet type from the cqe */
2953
2954 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2955 cqe->raweth_qp1_flags2);
2956 if (pkt_type < 0) {
2957 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2958 return -EINVAL;
2959 }
2960
2961 /* Adjust the offset for the user buffer and post in the rq */
2962
2963 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2964 offset = 20;
2965
2966 /*
2967 * QP1 loopback packet has 4 bytes of internal header before
2968 * ether header. Skip these four bytes.
2969 */
2970 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2971 skip_bytes = 4;
2972
2973 	/* First send SGE. Skip the ether header */
2974 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2975 + skip_bytes;
2976 s_sge[0].lkey = 0xFFFFFFFF;
2977 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2978 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2979
2980 /* Second Send SGE */
2981 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2982 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2983 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2984 s_sge[1].addr += 8;
2985 s_sge[1].lkey = 0xFFFFFFFF;
2986 s_sge[1].length = 256;
2987
2988 /* First recv SGE */
2989
2990 r_sge[0].addr = shrq_hdr_buf_map;
2991 r_sge[0].lkey = 0xFFFFFFFF;
2992 r_sge[0].length = 40;
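	/* 40 bytes matches the ib_grh that the MAD layer expects ahead of
	 * the MAD datagram (see the comment above
	 * bnxt_re_build_qp1_shadow_qp_recv).
	 */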
2993
2994 r_sge[1].addr = sqp_entry->sge.addr + offset;
2995 r_sge[1].lkey = sqp_entry->sge.lkey;
2996 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2997
2998 /* Create receive work request */
2999 rwr.num_sge = 2;
3000 rwr.sg_list = r_sge;
3001 rwr.wr_id = tbl_idx;
3002 rwr.next = NULL;
3003
3004 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
3005 if (rc) {
3006 dev_err(rdev_to_dev(rdev),
3007 "Failed to post Rx buffers to shadow QP");
3008 return -ENOMEM;
3009 }
3010
3011 swr->num_sge = 2;
3012 swr->sg_list = s_sge;
3013 swr->wr_id = tbl_idx;
3014 swr->opcode = IB_WR_SEND;
3015 swr->next = NULL;
3016
3017 udwr.ah = &rdev->sqp_ah->ib_ah;
3018 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
3019 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
3020
3021 /* post data received in the send queue */
3022 rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
3023
3024 return 0;
3025 }
3026
3027 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3028 struct bnxt_qplib_cqe *cqe)
3029 {
3030 wc->opcode = IB_WC_RECV;
3031 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3032 wc->wc_flags |= IB_WC_GRH;
3033 }
3034
3035 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3036 u16 *vid, u8 *sl)
3037 {
3038 bool ret = false;
3039 u32 metadata;
3040 u16 tpid;
3041
3042 metadata = orig_cqe->raweth_qp1_metadata;
3043 if (orig_cqe->raweth_qp1_flags2 &
3044 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3045 tpid = ((metadata &
3046 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3047 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3048 if (tpid == ETH_P_8021Q) {
3049 *vid = metadata &
3050 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3051 *sl = (metadata &
3052 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3053 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3054 ret = true;
3055 }
3056 }
3057
3058 return ret;
3059 }
3060
3061 static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3062 struct bnxt_qplib_cqe *cqe)
3063 {
3064 wc->opcode = IB_WC_RECV;
3065 wc->status = __rc_to_ib_wc_status(cqe->status);
3066
3067 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3068 wc->wc_flags |= IB_WC_WITH_IMM;
3069 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3070 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3071 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3072 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3073 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3074 }
3075
3076 static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3077 struct ib_wc *wc,
3078 struct bnxt_qplib_cqe *cqe)
3079 {
3080 struct bnxt_re_dev *rdev = qp->rdev;
3081 struct bnxt_re_qp *qp1_qp = NULL;
3082 struct bnxt_qplib_cqe *orig_cqe = NULL;
3083 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3084 int nw_type;
3085 u32 tbl_idx;
3086 u16 vlan_id;
3087 u8 sl;
3088
3089 tbl_idx = cqe->wr_id;
3090
3091 sqp_entry = &rdev->sqp_tbl[tbl_idx];
3092 qp1_qp = sqp_entry->qp1_qp;
3093 orig_cqe = &sqp_entry->cqe;
3094
3095 wc->wr_id = sqp_entry->wrid;
3096 wc->byte_len = orig_cqe->length;
3097 wc->qp = &qp1_qp->ib_qp;
3098
3099 wc->ex.imm_data = orig_cqe->immdata;
3100 wc->src_qp = orig_cqe->src_qp;
3101 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3102 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3103 wc->vlan_id = vlan_id;
3104 wc->sl = sl;
3105 wc->wc_flags |= IB_WC_WITH_VLAN;
3106 }
3107 wc->port_num = 1;
3108 wc->vendor_err = orig_cqe->status;
3109
3110 wc->opcode = IB_WC_RECV;
3111 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3112 wc->wc_flags |= IB_WC_GRH;
3113
3114 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3115 orig_cqe->raweth_qp1_flags2);
3116 if (nw_type >= 0) {
3117 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3118 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3119 }
3120 }
3121
3122 static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
3123 struct bnxt_qplib_cqe *cqe)
3124 {
3125 wc->opcode = IB_WC_RECV;
3126 wc->status = __rc_to_ib_wc_status(cqe->status);
3127
3128 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3129 wc->wc_flags |= IB_WC_WITH_IMM;
3130 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3131 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3132 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3133 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3134 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3135 }
3136
3137 static int send_phantom_wqe(struct bnxt_re_qp *qp)
3138 {
3139 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3140 unsigned long flags;
3141 int rc = 0;
3142
3143 spin_lock_irqsave(&qp->sq_lock, flags);
3144
3145 rc = bnxt_re_bind_fence_mw(lib_qp);
3146 if (!rc) {
3147 lib_qp->sq.phantom_wqe_cnt++;
3148 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3149 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3150 lib_qp->id, lib_qp->sq.hwq.prod,
3151 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3152 lib_qp->sq.phantom_wqe_cnt);
3153 }
3154
3155 spin_unlock_irqrestore(&qp->sq_lock, flags);
3156 return rc;
3157 }
3158
3159 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3160 {
3161 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3162 struct bnxt_re_qp *qp;
3163 struct bnxt_qplib_cqe *cqe;
3164 int i, ncqe, budget;
3165 struct bnxt_qplib_q *sq;
3166 struct bnxt_qplib_qp *lib_qp;
3167 u32 tbl_idx;
3168 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3169 unsigned long flags;
3170
3171 spin_lock_irqsave(&cq->cq_lock, flags);
3172 budget = min_t(u32, num_entries, cq->max_cql);
3173 num_entries = budget;
3174 if (!cq->cql) {
3175 dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
3176 goto exit;
3177 }
3178 cqe = &cq->cql[0];
3179 while (budget) {
3180 lib_qp = NULL;
3181 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3182 if (lib_qp) {
3183 sq = &lib_qp->sq;
3184 if (sq->send_phantom) {
3185 qp = container_of(lib_qp,
3186 struct bnxt_re_qp, qplib_qp);
3187 if (send_phantom_wqe(qp) == -ENOMEM)
3188 dev_err(rdev_to_dev(cq->rdev),
3189 "Phantom failed! Scheduled to send again\n");
3190 else
3191 sq->send_phantom = false;
3192 }
3193 }
3194 if (ncqe < budget)
3195 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3196 cqe + ncqe,
3197 budget - ncqe);
3198
3199 if (!ncqe)
3200 break;
3201
3202 for (i = 0; i < ncqe; i++, cqe++) {
3203 			/* Transcribe each qplib_cqe back to ib_wc */
3204 memset(wc, 0, sizeof(*wc));
3205
3206 wc->wr_id = cqe->wr_id;
3207 wc->byte_len = cqe->length;
3208 qp = container_of
3209 ((struct bnxt_qplib_qp *)
3210 (unsigned long)(cqe->qp_handle),
3211 struct bnxt_re_qp, qplib_qp);
3212 if (!qp) {
3213 dev_err(rdev_to_dev(cq->rdev),
3214 "POLL CQ : bad QP handle");
3215 continue;
3216 }
3217 wc->qp = &qp->ib_qp;
3218 wc->ex.imm_data = cqe->immdata;
3219 wc->src_qp = cqe->src_qp;
3220 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3221 wc->port_num = 1;
3222 wc->vendor_err = cqe->status;
3223
3224 switch (cqe->opcode) {
3225 case CQ_BASE_CQE_TYPE_REQ:
3226 if (qp->qplib_qp.id ==
3227 qp->rdev->qp1_sqp->qplib_qp.id) {
3228 /* Handle this completion with
3229 * the stored completion
3230 */
3231 memset(wc, 0, sizeof(*wc));
3232 continue;
3233 }
3234 bnxt_re_process_req_wc(wc, cqe);
3235 break;
3236 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3237 if (!cqe->status) {
3238 int rc = 0;
3239
3240 rc = bnxt_re_process_raw_qp_pkt_rx
3241 (qp, cqe);
3242 if (!rc) {
3243 memset(wc, 0, sizeof(*wc));
3244 continue;
3245 }
3246 cqe->status = -1;
3247 }
3248 /* Errors need not be looped back.
3249 * But change the wr_id to the one
3250 * stored in the table
3251 */
3252 tbl_idx = cqe->wr_id;
3253 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3254 wc->wr_id = sqp_entry->wrid;
3255 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3256 break;
3257 case CQ_BASE_CQE_TYPE_RES_RC:
3258 bnxt_re_process_res_rc_wc(wc, cqe);
3259 break;
3260 case CQ_BASE_CQE_TYPE_RES_UD:
3261 if (qp->qplib_qp.id ==
3262 qp->rdev->qp1_sqp->qplib_qp.id) {
3263 /* Handle this completion with
3264 * the stored completion
3265 */
3266 if (cqe->status) {
3267 continue;
3268 } else {
3269 bnxt_re_process_res_shadow_qp_wc
3270 (qp, wc, cqe);
3271 break;
3272 }
3273 }
3274 bnxt_re_process_res_ud_wc(wc, cqe);
3275 break;
3276 default:
3277 dev_err(rdev_to_dev(cq->rdev),
3278 "POLL CQ : type 0x%x not handled",
3279 cqe->opcode);
3280 continue;
3281 }
3282 wc++;
3283 budget--;
3284 }
3285 }
3286 exit:
3287 spin_unlock_irqrestore(&cq->cq_lock, flags);
3288 return num_entries - budget;
3289 }
3290
3291 int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3292 enum ib_cq_notify_flags ib_cqn_flags)
3293 {
3294 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3295 int type = 0, rc = 0;
3296 unsigned long flags;
3297
3298 spin_lock_irqsave(&cq->cq_lock, flags);
3299 /* Trigger on the very next completion */
3300 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3301 type = DBR_DBR_TYPE_CQ_ARMALL;
3302 /* Trigger on the next solicited completion */
3303 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3304 type = DBR_DBR_TYPE_CQ_ARMSE;
3305
3306 /* Poll to see if there are missed events */
3307 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3308 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3309 rc = 1;
3310 goto exit;
3311 }
3312 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3313
3314 exit:
3315 spin_unlock_irqrestore(&cq->cq_lock, flags);
3316 return rc;
3317 }
3318
3319 /* Memory Regions */
3320 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3321 {
3322 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3323 struct bnxt_re_dev *rdev = pd->rdev;
3324 struct bnxt_re_mr *mr;
3325 u64 pbl = 0;
3326 int rc;
3327
3328 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3329 if (!mr)
3330 return ERR_PTR(-ENOMEM);
3331
3332 mr->rdev = rdev;
3333 mr->qplib_mr.pd = &pd->qplib_pd;
3334 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3335 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3336
3337 /* Allocate and register 0 as the address */
3338 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3339 if (rc)
3340 goto fail;
3341
3342 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3343 	mr->qplib_mr.total_size = -1; /* Infinite length */
3344 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3345 PAGE_SIZE);
3346 if (rc)
3347 goto fail_mr;
3348
3349 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3350 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3351 IB_ACCESS_REMOTE_ATOMIC))
3352 mr->ib_mr.rkey = mr->ib_mr.lkey;
3353 atomic_inc(&rdev->mr_count);
3354
3355 return &mr->ib_mr;
3356
3357 fail_mr:
3358 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3359 fail:
3360 kfree(mr);
3361 return ERR_PTR(rc);
3362 }
3363
3364 int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3365 {
3366 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3367 struct bnxt_re_dev *rdev = mr->rdev;
3368 int rc;
3369
3370 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3371 if (rc)
3372 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3373
3374 if (mr->pages) {
3375 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3376 &mr->qplib_frpl);
3377 kfree(mr->pages);
3378 mr->npages = 0;
3379 mr->pages = NULL;
3380 }
3381 if (!IS_ERR_OR_NULL(mr->ib_umem))
3382 ib_umem_release(mr->ib_umem);
3383
3384 kfree(mr);
3385 atomic_dec(&rdev->mr_count);
3386 return rc;
3387 }
3388
3389 static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3390 {
3391 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3392
3393 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3394 return -ENOMEM;
3395
3396 mr->pages[mr->npages++] = addr;
3397 return 0;
3398 }
3399
3400 int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3401 unsigned int *sg_offset)
3402 {
3403 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3404
3405 mr->npages = 0;
3406 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3407 }
3408
3409 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3410 u32 max_num_sg)
3411 {
3412 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3413 struct bnxt_re_dev *rdev = pd->rdev;
3414 struct bnxt_re_mr *mr = NULL;
3415 int rc;
3416
3417 if (type != IB_MR_TYPE_MEM_REG) {
3418 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3419 return ERR_PTR(-EINVAL);
3420 }
3421 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3422 return ERR_PTR(-EINVAL);
3423
3424 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3425 if (!mr)
3426 return ERR_PTR(-ENOMEM);
3427
3428 mr->rdev = rdev;
3429 mr->qplib_mr.pd = &pd->qplib_pd;
3430 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3431 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3432
3433 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3434 if (rc)
3435 goto bail;
3436
3437 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3438 mr->ib_mr.rkey = mr->ib_mr.lkey;
3439
3440 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3441 if (!mr->pages) {
3442 rc = -ENOMEM;
3443 goto fail;
3444 }
3445 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3446 &mr->qplib_frpl, max_num_sg);
3447 if (rc) {
3448 dev_err(rdev_to_dev(rdev),
3449 "Failed to allocate HW FR page list");
3450 goto fail_mr;
3451 }
3452
3453 atomic_inc(&rdev->mr_count);
3454 return &mr->ib_mr;
3455
3456 fail_mr:
3457 kfree(mr->pages);
3458 fail:
3459 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3460 bail:
3461 kfree(mr);
3462 return ERR_PTR(rc);
3463 }
3464
3465 struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3466 struct ib_udata *udata)
3467 {
3468 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3469 struct bnxt_re_dev *rdev = pd->rdev;
3470 struct bnxt_re_mw *mw;
3471 int rc;
3472
3473 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3474 if (!mw)
3475 return ERR_PTR(-ENOMEM);
3476 mw->rdev = rdev;
3477 mw->qplib_mw.pd = &pd->qplib_pd;
3478
3479 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3480 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3481 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3482 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3483 if (rc) {
3484 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3485 goto fail;
3486 }
3487 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3488
3489 atomic_inc(&rdev->mw_count);
3490 return &mw->ib_mw;
3491
3492 fail:
3493 kfree(mw);
3494 return ERR_PTR(rc);
3495 }
3496
3497 int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3498 {
3499 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3500 struct bnxt_re_dev *rdev = mw->rdev;
3501 int rc;
3502
3503 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3504 if (rc) {
3505 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3506 return rc;
3507 }
3508
3509 kfree(mw);
3510 atomic_dec(&rdev->mw_count);
3511 return rc;
3512 }
3513
3514 static int bnxt_re_page_size_ok(int page_shift)
3515 {
3516 switch (page_shift) {
3517 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3518 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3519 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3520 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3521 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3522 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3523 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3524 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3525 return 1;
3526 default:
3527 return 0;
3528 }
3529 }
3530
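/* Walk the umem scatterlist and copy DMA addresses into the PBL table at
 * page_shift granularity: the first entry is rounded down to the page
 * boundary, later entries are taken only when already aligned. Returns
 * the number of PBL entries written.
 */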
3531 static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3532 int page_shift)
3533 {
3534 u64 *pbl_tbl = pbl_tbl_orig;
3535 u64 paddr;
3536 u64 page_mask = (1ULL << page_shift) - 1;
3537 int i, pages;
3538 struct scatterlist *sg;
3539 int entry;
3540
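/* Scan in system-page (PAGE_SHIFT) steps: round the very first address
 * down to the HW page boundary, then record only those addresses that
 * are already aligned to the HW page size.
 */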
3541 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
3542 pages = sg_dma_len(sg) >> PAGE_SHIFT;
3543 for (i = 0; i < pages; i++) {
3544 paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
3545 if (pbl_tbl == pbl_tbl_orig)
3546 *pbl_tbl++ = paddr & ~page_mask;
3547 else if ((paddr & page_mask) == 0)
3548 *pbl_tbl++ = paddr;
3549 }
3550 }
3551 return pbl_tbl - pbl_tbl_orig;
3552 }
3553
3554 /* uverbs */
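/* Register a user memory region: pin the pages with ib_umem_get(), build
 * the PBL from the umem scatterlist and register the MR with the HW.
 */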
3555 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3556 u64 virt_addr, int mr_access_flags,
3557 struct ib_udata *udata)
3558 {
3559 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3560 struct bnxt_re_dev *rdev = pd->rdev;
3561 struct bnxt_re_mr *mr;
3562 struct ib_umem *umem;
3563 u64 *pbl_tbl = NULL;
3564 int umem_pgs, page_shift, rc;
3565
3566 if (length > BNXT_RE_MAX_MR_SIZE) {
3567 dev_err(rdev_to_dev(rdev), "MR Size: %llu > Max supported: %llu\n",
3568 length, (u64)BNXT_RE_MAX_MR_SIZE);
3569 return ERR_PTR(-ENOMEM);
3570 }
3571
3572 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3573 if (!mr)
3574 return ERR_PTR(-ENOMEM);
3575
3576 mr->rdev = rdev;
3577 mr->qplib_mr.pd = &pd->qplib_pd;
3578 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3579 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3580
3581 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3582 if (rc) {
3583 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3584 goto free_mr;
3585 }
3586 /* The fixed portion of the rkey is the same as the lkey */
3587 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3588
3589 umem = ib_umem_get(ib_pd->uobject->context, start, length,
3590 mr_access_flags, 0);
3591 if (IS_ERR(umem)) {
3592 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3593 rc = -EFAULT;
3594 goto free_mrw;
3595 }
3596 mr->ib_umem = umem;
3597
3598 mr->qplib_mr.va = virt_addr;
3599 umem_pgs = ib_umem_page_count(umem);
3600 if (!umem_pgs) {
3601 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3602 rc = -EINVAL;
3603 goto free_umem;
3604 }
3605 mr->qplib_mr.total_size = length;
3606
3607 pbl_tbl = kcalloc(umem_pgs, sizeof(u64), GFP_KERNEL);
3608 if (!pbl_tbl) {
3609 rc = -ENOMEM;
3610 goto free_umem;
3611 }
3612
3613 page_shift = umem->page_shift;
3614
3615 if (!bnxt_re_page_size_ok(page_shift)) {
3616 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3617 rc = -EFAULT;
3618 goto fail;
3619 }
3620
3621 if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
3622 dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
3623 length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3624 rc = -EINVAL;
3625 goto fail;
3626 }
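/* For hugetlb-backed umems larger than 2MB, register using a 2MB HW
 * page size instead of the umem's default page size.
 */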
3627 if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
3628 page_shift = BNXT_RE_PAGE_SHIFT_2M;
3629 dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
3630 1 << page_shift);
3631 }
3632
3633 /* Map umem buf ptrs to the PBL */
3634 umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3635 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3636 umem_pgs, false, 1 << page_shift);
3637 if (rc) {
3638 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3639 goto fail;
3640 }
3641
3642 kfree(pbl_tbl);
3643
3644 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3645 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3646 atomic_inc(&rdev->mr_count);
3647
3648 return &mr->ib_mr;
3649 fail:
3650 kfree(pbl_tbl);
3651 free_umem:
3652 ib_umem_release(umem);
3653 free_mrw:
3654 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3655 free_mr:
3656 kfree(mr);
3657 return ERR_PTR(rc);
3658 }
3659
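/* Allocate a per-process user context: check the uverbs ABI version,
 * set up the shared page and report device limits back to user space.
 */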
3660 struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
3661 struct ib_udata *udata)
3662 {
3663 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3664 struct bnxt_re_uctx_resp resp;
3665 struct bnxt_re_ucontext *uctx;
3666 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3667 int rc;
3668
3669 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d",
3670 ibdev->uverbs_abi_ver);
3671
3672 if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3673 dev_dbg(rdev_to_dev(rdev), "ABI version %d is different from the device %d",
3674 ibdev->uverbs_abi_ver, BNXT_RE_ABI_VERSION);
3675 return ERR_PTR(-EPERM);
3676 }
3677
3678 uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
3679 if (!uctx)
3680 return ERR_PTR(-ENOMEM);
3681
3682 uctx->rdev = rdev;
3683
3684 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3685 if (!uctx->shpg) {
3686 rc = -ENOMEM;
3687 goto fail;
3688 }
3689 spin_lock_init(&uctx->sh_lock);
3690
3691 resp.dev_id = rdev->en_dev->pdev->devfn; /* Temporary; use idr_alloc() instead */
3692 resp.max_qp = rdev->qplib_ctx.qpc_count;
3693 resp.pg_size = PAGE_SIZE;
3694 resp.cqe_sz = sizeof(struct cq_base);
3695 resp.max_cqd = dev_attr->max_cq_wqes;
3696 resp.rsvd = 0;
3697
3698 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
3699 if (rc) {
3700 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3701 rc = -EFAULT;
3702 goto cfail;
3703 }
3704
3705 return &uctx->ib_uctx;
3706 cfail:
3707 free_page((unsigned long)uctx->shpg);
3708 uctx->shpg = NULL;
3709 fail:
3710 kfree(uctx);
3711 return ERR_PTR(rc);
3712 }
3713
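/* Destroy a user context: release the shared page and any DPI that was
 * allocated on its behalf.
 */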
3714 int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3715 {
3716 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3717 struct bnxt_re_ucontext,
3718 ib_uctx);
3719
3720 struct bnxt_re_dev *rdev = uctx->rdev;
3721 int rc = 0;
3722
3723 if (uctx->shpg)
3724 free_page((unsigned long)uctx->shpg);
3725
3726 if (uctx->dpi.dbr) {
3727 /* Free the DPI that was allocated when the application created
3728 * its first PD, and clear it in the context.
3729 */
3730 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3731 &rdev->qplib_res.dpi_tbl,
3732 &uctx->dpi);
3733 if (rc)
3734 dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
3735 /* Don't fail, continue*/
3736 uctx->dpi.dbr = NULL;
3737 }
3738
3739 kfree(uctx);
3740 return 0;
3741 }
3742
3743 /* Helper function to mmap the virtual memory from user app */
3744 int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3745 {
3746 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3747 struct bnxt_re_ucontext,
3748 ib_uctx);
3749 struct bnxt_re_dev *rdev = uctx->rdev;
3750 u64 pfn;
3751
3752 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3753 return -EINVAL;
3754
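/* A non-zero pgoff maps the doorbell (DPI) page as uncached I/O memory;
 * pgoff 0 maps the kernel's shared page.
 */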
3755 if (vma->vm_pgoff) {
3756 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3757 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3758 PAGE_SIZE, vma->vm_page_prot)) {
3759 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3760 return -EAGAIN;
3761 }
3762 } else {
3763 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3764 if (remap_pfn_range(vma, vma->vm_start,
3765 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3766 dev_err(rdev_to_dev(rdev),
3767 "Failed to map shared page");
3768 return -EAGAIN;
3769 }
3770 }
3771
3772 return 0;
3773 }
3774