/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

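/* Reset the phantom-WQE tracking state on the SQ so that any in-flight
 * phantom completion search is abandoned before the QP is flushed.
 */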
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"FP: Adding to SQ Flush list = %p\n", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"FP: Adding to RQ Flush list = %p\n", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

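/* The SQ and RQ flush lists are protected by their CQs' flush_lock.
 * Take both locks in a fixed order (SCQ first); when the QP uses a
 * single CQ for both directions only the SCQ lock is actually taken,
 * and the __acquire()/__release() markers keep sparse's lock balance
 * consistent.
 */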
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
				       unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

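/* Work item queued on the NQ's cqn_wq: re-deliver a CQ notification to
 * the CQN handler, but only while the CQ is still armed.
 */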
static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s: Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->max_wqe * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->max_wqe * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->max_wqe) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->max_wqe * qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create sq_hdr_buf\n");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->max_wqe) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->max_wqe *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"Failed to create rq_hdr_buf\n");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

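/* Scan the NQ ring and invalidate any CQ notification entries that still
 * refer to the given CQ, bumping cq->cnq_events for each one so that
 * __wait_for_all_nqes() can tell when the CQ has quiesced.
 */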
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			if ((unsigned long)cq == q_handle) {
				nqcne->cq_handle_low = 0;
				nqcne->cq_handle_high = 0;
				cq->cnq_events++;
			}
			break;
		}
		default:
			break;
		}
		raw_cons++;
	}
	spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
	u32 retry_cnt = 100;

	while (retry_cnt--) {
		if (cnq_events == cq->cnq_events)
			return;
		usleep_range(50, 100);
		clean_nq(cq->nq, cq);
	}
}

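/* Tasklet handler: consume up to nq->budget notification entries,
 * dispatching CQ and SRQ events to the registered handlers, then ring
 * the NQ doorbell to advance the consumer index and re-arm the queue.
 */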
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct bnxt_qplib_cq *cq;
	int budget = nq->budget;
	u32 sw_cons, raw_cons;
	struct nq_base *nqe;
	uintptr_t q_handle;
	u16 type;

	spin_lock_bh(&hwq->lock);
	/* Service the NQ until empty */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			if (!cq)
				break;
			bnxt_qplib_armen_db(&cq->dbinfo,
					    DBC_DBC_TYPE_CQ_ARMENA);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (nq->cqn_handler(nq, cq))
				dev_warn(&nq->pdev->dev,
					 "cqn - type 0x%x not handled\n", type);
			cq->cnq_events++;
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct bnxt_qplib_srq *srq;
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			srq = (struct bnxt_qplib_srq *)q_handle;
			bnxt_qplib_armen_db(&srq->dbinfo,
					    DBC_DBC_TYPE_SRQ_ARMENA);
			if (nq->srqn_handler(nq,
					     (struct bnxt_qplib_srq *)q_handle,
					     nqsrqe->event))
				dev_warn(&nq->pdev->dev,
					 "SRQ event 0x%x not handled\n",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "nqe with type = 0x%x not handled\n", type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
	}
	spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self polling notification queue.
 * @nq      -     notification queue pointer
 *
 * This function polls a given notification queue to consume all pending
 * entries. It is useful for synchronizing notification entries while
 * resources are going away.
 */

void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
	int budget = nq->budget;

	nq->budget = nq->hwq.max_elements;
	bnxt_qplib_service_nq(&nq->nq_tasklet);
	nq->budget = budget;
}

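/* NQ MSI-X handler: prefetch the next entry to warm the cache and defer
 * the actual processing to the tasklet.
 */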
static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->nq_tasklet);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	if (!nq->requested)
		return;

	nq->requested = false;
	/* Mask h/w interrupt */
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->msix_vec);
	irq_set_affinity_hint(nq->msix_vec, NULL);
	free_irq(nq->msix_vec, nq);
	kfree(nq->name);
	nq->name = NULL;

	if (kill)
		tasklet_kill(&nq->nq_tasklet);
	tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->nq_db.reg.bar_reg) {
		iounmap(nq->nq_db.reg.bar_reg);
		nq->nq_db.reg.bar_reg = NULL;
	}

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->msix_vec = 0;
}

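/* (Re)register the NQ interrupt: set up or re-enable the tasklet,
 * request the MSI-X vector, hint its affinity to the CPU matching the
 * NQ index and re-arm the queue via its doorbell.
 */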
int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	struct bnxt_qplib_res *res = nq->res;
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
	else
		tasklet_enable(&nq->nq_tasklet);

	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
			     nq_indx, pci_name(res->pdev));
	if (!nq->name)
		return -ENOMEM;
	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc) {
		kfree(nq->name);
		nq->name = NULL;
		tasklet_disable(&nq->nq_tasklet);
		return rc;
	}

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "set affinity failed; vector: %d nq_idx: %d\n",
			 nq->msix_vec, nq_indx);
	}
	nq->requested = true;
	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

	return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
	resource_size_t reg_base;
	struct bnxt_qplib_nq_db *nq_db;
	struct pci_dev *pdev;

	pdev = nq->pdev;
	nq_db = &nq->nq_db;

	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
	if (!nq_db->reg.bar_base) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	reg_base = nq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	nq_db->reg.len = 8;
	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
	if (!nq_db->reg.bar_reg) {
		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
			nq_db->reg.bar_id);
		return -ENOMEM;
	}

	nq_db->dbinfo.db = nq_db->reg.bar_reg;
	nq_db->dbinfo.hwq = &nq->hwq;
	nq_db->dbinfo.xid = nq->ring_id;

	return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 cqn_handler_t cqn_handler,
			 srqn_handler_t srqn_handler)
{
	int rc;

	nq->pdev = pdev;
	nq->cqn_handler = cqn_handler;
	nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
	if (rc)
		goto fail;

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"Failed to request irq for nq-idx %d\n", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};

	nq->pdev = res->pdev;
	nq->res = res;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.res = res;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.depth = nq->hwq.max_elements;
	hwq_attr.stride = sizeof(struct nq_base);
	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
		return -ENOMEM;
	}
	nq->budget = 8;
	return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	kfree(srq->swq);
	if (rc)
		return;
	bnxt_qplib_free_hwq(res, &srq->hwq);
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_srq req = {};
	struct bnxt_qplib_pbl *pbl;
	u16 pg_sz_lvl;
	int rc, idx;

	hwq_attr.res = res;
	hwq_attr.sginfo = &srq->sg_info;
	hwq_attr.depth = srq->max_wqe;
	hwq_attr.stride = srq->wqe_size;
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
	if (rc)
		return rc;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_SRQ,
				 sizeof(req));

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
		      CMDQ_CREATE_SRQ_LVL_SFT;
	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbinfo.hwq = &srq->hwq;
	srq->dbinfo.xid = srq->id;
	srq->dbinfo.db = srq->dpi->dbr;
	srq->dbinfo.max_slot = 1;
	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	if (srq->threshold)
		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &srq->hwq);
	kfree(srq->swq);

	return rc;
}

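/* Arm the SRQ immediately if it already holds more unconsumed WQEs than
 * the requested threshold; otherwise defer arming until enough receives
 * are posted (see bnxt_qplib_post_srq_recv()).
 */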
int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	} else {
		/* Deferred arming */
		srq->arm_req = true;
	}

	return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_srq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_srq_resp_sb *sb;
	struct cmdq_query_srq req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_SRQ,
				 sizeof(req));

	/* Configure the request */
	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.srq_cid = cpu_to_le32(srq->id);
	sb = sbuf.sb;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	srq->threshold = le16_to_cpu(sb->srq_limit);
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);

	return rc;
}

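/* Post one receive WQE to the SRQ: pop a slot off the software free
 * list, build the hardware WQE, ring the producer doorbell and, if a
 * deferred arm request is pending and the fill level now exceeds the
 * threshold, arm the SRQ as well.
 */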
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev,
			"FP: SRQ (0x%x) is full!\n", srq->id);
		spin_unlock(&srq_hwq->lock);
		return -EINVAL;
	}
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe = bnxt_qplib_get_qe(srq_hwq, sw_prod, NULL);
	memset(srqe, 0, srq->wqe_size);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* Retain srq_hwq->cons for this logic;
	 * the lock is strictly only needed to read srq_hwq->cons.
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
	}

	return 0;
}

/* QP */

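/* Allocate the software queue that shadows a hardware queue and link
 * its entries into a circular free list.
 */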
static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
	int indx;

	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
	if (!que->swq)
		return -ENOMEM;

	que->swq_start = 0;
	que->swq_last = que->max_wqe - 1;
	for (indx = 0; indx < que->max_wqe; indx++)
		que->swq[indx].next_idx = indx + 1;
	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
	que->swq_last = 0;

	return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_qp1_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp1 req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP1,
				 sizeof(req));
	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (rq->max_wqe) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;
		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		req.rq_fwo_rq_sge =
			cpu_to_le16((rq->max_sge &
				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto rq_rwq;
	}
	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
	req.qp_flags = cpu_to_le32(qp_flags);
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

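/* The PSN search entries live in the pad area past the SQ proper;
 * record the pad page table, stride and first-entry offset so
 * bnxt_qplib_pull_psn_buff() can index them per WQE.
 */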
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_q *sq;
	u64 fpsne, psn_pg;
	u16 indx_pad = 0;

	sq = &qp->sq;
	hwq = &sq->hwq;
	/* First psn entry */
	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
		indx_pad = (fpsne & ~PAGE_MASK) / size;
	hwq->pad_pgofft = indx_pad;
	hwq->pad_pg = (u64 *)psn_pg;
	hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_create_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct cmdq_create_qp req = {};
	int rc, req_size, psn_sz = 0;
	struct bnxt_qplib_hwq *xrrq;
	struct bnxt_qplib_pbl *pbl;
	u32 qp_flags = 0;
	u8 pg_sz_lvl;
	u32 tbl_indx;
	u16 nsge;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_QP,
				 sizeof(req));

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
	}

	hwq_attr.res = res;
	hwq_attr.sginfo = &sq->sg_info;
	hwq_attr.stride = sizeof(struct sq_sge);
	hwq_attr.depth = bnxt_qplib_get_depth(sq);
	hwq_attr.aux_stride = psn_sz;
	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
	if (rc)
		return rc;

	rc = bnxt_qplib_alloc_init_swq(sq);
	if (rc)
		goto fail_sq;

	if (psn_sz)
		bnxt_qplib_init_psn_ptr(qp, psn_sz);

	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
	req.sq_pg_size_sq_lvl = pg_sz_lvl;
	req.sq_fwo_sq_sge =
		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.scq_cid = cpu_to_le32(qp->scq->id);

	/* RQ */
	if (!qp->srq) {
		hwq_attr.res = res;
		hwq_attr.sginfo = &rq->sg_info;
		hwq_attr.stride = sizeof(struct sq_sge);
		hwq_attr.depth = bnxt_qplib_get_depth(rq);
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_QUEUE;
		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
		if (rc)
			goto sq_swq;
		rc = bnxt_qplib_alloc_init_swq(rq);
		if (rc)
			goto fail_rq;

		req.rq_size = cpu_to_le32(rq->max_wqe);
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
		req.rq_pg_size_rq_lvl = pg_sz_lvl;
		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			6 : rq->max_sge;
		req.rq_fwo_rq_sge =
			cpu_to_le16(((nsge &
				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	} else {
		/* SRQ */
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
		req.srq_cid = cpu_to_le32(qp->srq->id);
	}
	req.rcq_cid = cpu_to_le32(qp->rcq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

	req.qp_flags = cpu_to_le32(qp_flags);

	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		sginfo.pgshft = PAGE_SHIFT;

		hwq_attr.res = res;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
		hwq_attr.aux_stride = 0;
		hwq_attr.aux_depth = 0;
		hwq_attr.type = HWQ_TYPE_CTX;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto rq_swq;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		sginfo.pgsize = req_size;
		hwq_attr.depth = xrrq->max_elements;
		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	qp->cctx = res->cctx;
	sq->dbinfo.hwq = &sq->hwq;
	sq->dbinfo.xid = qp->id;
	sq->dbinfo.db = qp->dpi->dbr;
	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
	if (rq->max_wqe) {
		rq->dbinfo.hwq = &rq->hwq;
		rq->dbinfo.xid = qp->id;
		rq->dbinfo.db = qp->dpi->dbr;
		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
	}
	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

	return 0;
fail:
	bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
	bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
	kfree(rq->swq);
fail_rq:
	bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
	kfree(sq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res, &sq->hwq);
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_qp req = {};
	u32 temp32[4];
	u32 bmask;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_QP,
				 sizeof(req));

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu_pingpong_push_enable |= qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_query_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct creq_query_qp_resp_sb *sb;
	struct cmdq_query_qp req = {};
	u32 temp32[4];
	int i, rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found??\n");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe = bnxt_qplib_get_qe(cq_hwq, i, NULL);
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_qp_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_qp req = {};
	u32 tbl_indx;
	int rc;

	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_QP,
				 sizeof(req));

	req.qp_cid = cpu_to_le32(qp->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc) {
		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = sq->swq_start;
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = rq->swq_start;
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

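/* Record the start/next PSN and opcode of a posted WQE in its PSN
 * search entry so the firmware can locate the WQE on retransmission;
 * the extended layout used on gen-P5 chips also carries the starting
 * slot index.
 */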
static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
				       struct bnxt_qplib_swqe *wqe,
				       struct bnxt_qplib_swq *swq)
{
	struct sq_psn_search_ext *psns_ext;
	struct sq_psn_search *psns;
	u32 flg_npsn;
	u32 op_spsn;

	if (!swq->psn_search)
		return;
	psns = swq->psn_search;
	psns_ext = swq->psn_ext;

	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
		    SQ_PSN_SEARCH_START_PSN_MASK);
	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
		     SQ_PSN_SEARCH_OPCODE_MASK);
	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
		     SQ_PSN_SEARCH_NEXT_PSN_MASK);

	if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
	} else {
		psns->opcode_start_psn = cpu_to_le32(op_spsn);
		psns->flags_next_psn = cpu_to_le32(flg_npsn);
	}
}

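/* Copy inline data from the SGE list straight into consecutive 16-byte
 * SQ slots; returns the total byte count or -ENOMEM if it would exceed
 * the QP's inline limit.
 */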
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *wqe,
				 u16 *idx)
{
	struct bnxt_qplib_hwq *hwq;
	int len, t_len, offt;
	bool pull_dst = true;
	void *il_dst = NULL;
	void *il_src = NULL;
	int t_cplen, cplen;
	int indx;

	hwq = &qp->sq.hwq;
	t_len = 0;
	for (indx = 0; indx < wqe->num_sge; indx++) {
		len = wqe->sg_list[indx].size;
		il_src = (void *)wqe->sg_list[indx].addr;
		t_len += len;
		if (t_len > qp->max_inline_data)
			return -ENOMEM;
		while (len) {
			if (pull_dst) {
				pull_dst = false;
				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
				(*idx)++;
				t_cplen = 0;
				offt = 0;
			}
			cplen = min_t(int, len, sizeof(struct sq_sge));
			cplen = min_t(int, cplen,
					(sizeof(struct sq_sge) - offt));
			memcpy(il_dst, il_src, cplen);
			t_cplen += cplen;
			il_src += cplen;
			il_dst += cplen;
			offt += cplen;
			len -= cplen;
			if (t_cplen == sizeof(struct sq_sge))
				pull_dst = true;
		}
	}

	return t_len;
}

1669  static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1670  			       struct bnxt_qplib_sge *ssge,
1671  			       u16 nsge, u16 *idx)
1672  {
1673  	struct sq_sge *dsge;
1674  	int indx, len = 0;
1675  
1676  	for (indx = 0; indx < nsge; indx++, (*idx)++) {
1677  		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1678  		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1679  		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1680  		dsge->size = cpu_to_le32(ssge[indx].size);
1681  		len += ssge[indx].size;
1682  	}
1683  
1684  	return len;
1685  }
1686  
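/* Work out how many 16B slots a WQE occupies: two slots of header
 * (sizeof(struct sq_send_hdr)) plus one per SGE, or the aligned inline
 * length for inline WQEs. *wqe_sz reports the real size in slots; in
 * static WQE mode the queue is consumed in fixed 128B (8 slot) strides
 * regardless of the actual WQE size. *qdf is the queue-full delta
 * translated to slot units for this WQE.
 */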
1687  static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1688  				     struct bnxt_qplib_swqe *wqe,
1689  				     u16 *wqe_sz, u16 *qdf, u8 mode)
1690  {
1691  	u32 ilsize, bytes;
1692  	u16 nsge;
1693  	u16 slot;
1694  
1695  	nsge = wqe->num_sge;
1696  	/* Though named sq_send_hdr, the RQ WQE header has the same size. */
1697  	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1698  	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1699  		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1700  		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1701  		bytes += sizeof(struct sq_send_hdr);
1702  	}
1703  
1704  	*qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
1705  	slot = bytes >> 4;
1706  	*wqe_sz = slot;
1707  	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1708  		slot = 8;
1709  	return slot;
1710  }
1711  
1712  static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_q *sq,
1713  				     struct bnxt_qplib_swq *swq)
1714  {
1715  	struct bnxt_qplib_hwq *hwq;
1716  	u32 pg_num, pg_indx;
1717  	void *buff;
1718  	u32 tail;
1719  
1720  	hwq = &sq->hwq;
1721  	if (!hwq->pad_pg)
1722  		return;
1723  	tail = swq->slot_idx / sq->dbinfo.max_slot;
1724  	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1725  	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1726  	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1727  	swq->psn_ext = buff;
1728  	swq->psn_search = buff;
1729  }
1730  
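/* Ring the SQ producer doorbell. A typical caller (e.g. the bnxt_re verbs
 * layer) posts a batch of WQEs and rings once at the end, roughly:
 *
 *	while (wr) {
 *		rc = bnxt_qplib_post_send(qp, &wqe);
 *		...
 *	}
 *	bnxt_qplib_post_send_db(qp);
 *
 * (sketch of the expected usage, not a verbatim copy of the caller)
 */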
1731  void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1732  {
1733  	struct bnxt_qplib_q *sq = &qp->sq;
1734  
1735  	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1736  }
1737  
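/* Post one send WQE: validate the QP state, check for queue-full against
 * slots + q_full_delta, build the two 16B header slots, then append either
 * inline data or SGEs and fill the opcode-specific fields. PSN accounting
 * advances by one packet per MTU of payload for RC, and by one for UD.
 * If the QP is already in error, the WQE is only accounted for and a
 * worker is scheduled so poll_cq can flush it back to the consumer.
 */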
1738  int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1739  			 struct bnxt_qplib_swqe *wqe)
1740  {
1741  	struct bnxt_qplib_nq_work *nq_work = NULL;
1742  	int i, rc = 0, data_len = 0, pkt_num = 0;
1743  	struct bnxt_qplib_q *sq = &qp->sq;
1744  	struct bnxt_qplib_hwq *hwq;
1745  	struct bnxt_qplib_swq *swq;
1746  	bool sch_handler = false;
1747  	u16 wqe_sz, qdf = 0;
1748  	void *base_hdr;
1749  	void *ext_hdr;
1750  	__le32 temp32;
1751  	u32 wqe_idx;
1752  	u32 slots;
1753  	u16 idx;
1754  
1755  	hwq = &sq->hwq;
1756  	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1757  	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1758  		dev_err(&hwq->pdev->dev,
1759  			"FP: QP (0x%x) is in the 0x%x state\n",
1760  			qp->id, qp->state);
1761  		rc = -EINVAL;
1762  		goto done;
1763  	}
1764  
1765  	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1766  	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1767  		dev_err(&hwq->pdev->dev,
1768  			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1769  			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1770  		rc = -ENOMEM;
1771  		goto done;
1772  	}
1773  
1774  	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1775  	bnxt_qplib_pull_psn_buff(sq, swq);
1776  
1777  	idx = 0;
1778  	swq->slot_idx = hwq->prod;
1779  	swq->slots = slots;
1780  	swq->wr_id = wqe->wr_id;
1781  	swq->type = wqe->type;
1782  	swq->flags = wqe->flags;
1783  	swq->start_psn = sq->psn & BTH_PSN_MASK;
1784  	if (qp->sig_type)
1785  		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1786  
1787  	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1788  		sch_handler = true;
1789  		dev_dbg(&hwq->pdev->dev,
1790  			"%s Error QP. Scheduling for poll_cq\n", __func__);
1791  		goto queue_err;
1792  	}
1793  
1794  	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1795  	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1796  	memset(base_hdr, 0, sizeof(struct sq_sge));
1797  	memset(ext_hdr, 0, sizeof(struct sq_sge));
1798  
1799  	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1800  		/* Copy the inline data */
1801  		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1802  	else
1803  		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1804  					       &idx);
1805  	if (data_len < 0)
1806  		goto queue_err;
1807  	/* Specifics */
1808  	switch (wqe->type) {
1809  	case BNXT_QPLIB_SWQE_TYPE_SEND:
1810  		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1811  			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1812  			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1813  			/* Assemble info for Raw Ethertype QPs */
1814  
1815  			sqe->wqe_type = wqe->type;
1816  			sqe->flags = wqe->flags;
1817  			sqe->wqe_size = wqe_sz;
1818  			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1819  			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1820  			sqe->length = cpu_to_le32(data_len);
1821  			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1822  				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1823  				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1824  
1825  			break;
1826  		}
1827  		fallthrough;
1828  	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1829  	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1830  	{
1831  		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1832  		struct sq_send_hdr *sqe = base_hdr;
1833  
1834  		sqe->wqe_type = wqe->type;
1835  		sqe->flags = wqe->flags;
1836  		sqe->wqe_size = wqe_sz;
1837  		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1838  		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1839  		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1840  			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1841  			sqe->length = cpu_to_le32(data_len);
1842  			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1843  			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1844  						      SQ_SEND_DST_QP_MASK);
1845  			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1846  						    SQ_SEND_AVID_MASK);
1847  		} else {
1848  			sqe->length = cpu_to_le32(data_len);
1849  			if (qp->mtu)
1850  				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1851  			if (!pkt_num)
1852  				pkt_num = 1;
1853  			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1854  		}
1855  		break;
1856  	}
1857  	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1858  	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1859  	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1860  	{
1861  		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1862  		struct sq_rdma_hdr *sqe = base_hdr;
1863  
1864  		sqe->wqe_type = wqe->type;
1865  		sqe->flags = wqe->flags;
1866  		sqe->wqe_size = wqe_sz;
1867  		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1868  		sqe->length = cpu_to_le32((u32)data_len);
1869  		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1870  		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1871  		if (qp->mtu)
1872  			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1873  		if (!pkt_num)
1874  			pkt_num = 1;
1875  		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1876  		break;
1877  	}
1878  	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1879  	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1880  	{
1881  		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1882  		struct sq_atomic_hdr *sqe = base_hdr;
1883  
1884  		sqe->wqe_type = wqe->type;
1885  		sqe->flags = wqe->flags;
1886  		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1887  		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1888  		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1889  		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1890  		if (qp->mtu)
1891  			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1892  		if (!pkt_num)
1893  			pkt_num = 1;
1894  		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1895  		break;
1896  	}
1897  	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1898  	{
1899  		struct sq_localinvalidate *sqe = base_hdr;
1900  
1901  		sqe->wqe_type = wqe->type;
1902  		sqe->flags = wqe->flags;
1903  		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1904  
1905  		break;
1906  	}
1907  	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1908  	{
1909  		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1910  		struct sq_fr_pmr_hdr *sqe = base_hdr;
1911  
1912  		sqe->wqe_type = wqe->type;
1913  		sqe->flags = wqe->flags;
1914  		sqe->access_cntl = wqe->frmr.access_cntl |
1915  				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1916  		sqe->zero_based_page_size_log =
1917  			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1918  			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1919  			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1920  		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1921  		temp32 = cpu_to_le32(wqe->frmr.length);
1922  		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1923  		sqe->numlevels_pbl_page_size_log =
1924  			((wqe->frmr.pbl_pg_sz_log <<
1925  					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1926  					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1927  			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1928  					SQ_FR_PMR_NUMLEVELS_MASK);
1929  
1930  		for (i = 0; i < wqe->frmr.page_list_len; i++)
1931  			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1932  						wqe->frmr.page_list[i] |
1933  						PTU_PTE_VALID);
1934  		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1935  		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1936  
1937  		break;
1938  	}
1939  	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1940  	{
1941  		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1942  		struct sq_bind_hdr *sqe = base_hdr;
1943  
1944  		sqe->wqe_type = wqe->type;
1945  		sqe->flags = wqe->flags;
1946  		sqe->access_cntl = wqe->bind.access_cntl;
1947  		sqe->mw_type_zero_based = wqe->bind.mw_type |
1948  			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1949  		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1950  		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1951  		ext_sqe->va = cpu_to_le64(wqe->bind.va);
1952  		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
1953  		break;
1954  	}
1955  	default:
1956  		/* Bad wqe, return error */
1957  		rc = -EINVAL;
1958  		goto done;
1959  	}
1960  	swq->next_psn = sq->psn & BTH_PSN_MASK;
1961  	bnxt_qplib_fill_psn_search(qp, wqe, swq);
1962  queue_err:
1963  	bnxt_qplib_swq_mod_start(sq, wqe_idx);
1964  	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
1965  	qp->wqe_cnt++;
1966  done:
1967  	if (sch_handler) {
1968  		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1969  		if (nq_work) {
1970  			nq_work->cq = qp->scq;
1971  			nq_work->nq = qp->scq->nq;
1972  			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1973  			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1974  		} else {
1975  			dev_err(&hwq->pdev->dev,
1976  				"FP: Failed to allocate SQ nq_work!\n");
1977  			rc = -ENOMEM;
1978  		}
1979  	}
1980  	return rc;
1981  }
1982  
1983  void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1984  {
1985  	struct bnxt_qplib_q *rq = &qp->rq;
1986  
1987  	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
1988  }
1989  
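/* Post one receive WQE. RQ WQEs are fixed-stride (rq->dbinfo.max_slot
 * slots each), so only a queue-full check is needed. A WQE with no SGEs
 * still gets a single zero-length SGE appended, presumably because the
 * HW expects at least one SGE per RQ WQE (inferred from the code, not
 * from documentation).
 */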
1990  int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1991  			 struct bnxt_qplib_swqe *wqe)
1992  {
1993  	struct bnxt_qplib_nq_work *nq_work = NULL;
1994  	struct bnxt_qplib_q *rq = &qp->rq;
1995  	struct rq_wqe_hdr *base_hdr;
1996  	struct rq_ext_hdr *ext_hdr;
1997  	struct bnxt_qplib_hwq *hwq;
1998  	struct bnxt_qplib_swq *swq;
1999  	bool sch_handler = false;
2000  	u16 wqe_sz, idx;
2001  	u32 wqe_idx;
2002  	int rc = 0;
2003  
2004  	hwq = &rq->hwq;
2005  	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2006  		dev_err(&hwq->pdev->dev,
2007  			"FP: QP (0x%x) is in the 0x%x state\n",
2008  			qp->id, qp->state);
2009  		rc = -EINVAL;
2010  		goto done;
2011  	}
2012  
2013  	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2014  		dev_err(&hwq->pdev->dev,
2015  			"FP: QP (0x%x) RQ is full!\n", qp->id);
2016  		rc = -EINVAL;
2017  		goto done;
2018  	}
2019  
2020  	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2021  	swq->wr_id = wqe->wr_id;
2022  	swq->slots = rq->dbinfo.max_slot;
2023  
2024  	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2025  		sch_handler = true;
2026  		dev_dbg(&hwq->pdev->dev,
2027  			"%s: Error QP. Scheduling for poll_cq\n", __func__);
2028  		goto queue_err;
2029  	}
2030  
2031  	idx = 0;
2032  	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2033  	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2034  	memset(base_hdr, 0, sizeof(struct sq_sge));
2035  	memset(ext_hdr, 0, sizeof(struct sq_sge));
2036  	wqe_sz = (sizeof(struct rq_wqe_hdr) +
2037  		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2038  	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2039  	if (!wqe->num_sge) {
2040  		struct sq_sge *sge;
2041  
2042  		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2043  		sge->size = 0;
2044  		wqe_sz++;
2045  	}
2046  	base_hdr->wqe_type = wqe->type;
2047  	base_hdr->flags = wqe->flags;
2048  	base_hdr->wqe_size = wqe_sz;
2049  	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2050  queue_err:
2051  	bnxt_qplib_swq_mod_start(rq, wqe_idx);
2052  	bnxt_qplib_hwq_incr_prod(hwq, swq->slots);
2053  done:
2054  	if (sch_handler) {
2055  		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2056  		if (nq_work) {
2057  			nq_work->cq = qp->rcq;
2058  			nq_work->nq = qp->rcq->nq;
2059  			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2060  			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2061  		} else {
2062  			dev_err(&hwq->pdev->dev,
2063  				"FP: Failed to allocate RQ nq_work!\n");
2064  			rc = -ENOMEM;
2065  		}
2066  	}
2067  
2068  	return rc;
2069  }
2070  
2071  /* CQ */
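/* Create a completion queue: allocate the HW queue, issue CREATE_CQ over
 * the RCFW command channel, then initialize the per-CQ flush lists and
 * locks and arm-enable the doorbell so the CQ can generate notifications.
 */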
2072  int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2073  {
2074  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2075  	struct bnxt_qplib_hwq_attr hwq_attr = {};
2076  	struct creq_create_cq_resp resp = {};
2077  	struct bnxt_qplib_cmdqmsg msg = {};
2078  	struct cmdq_create_cq req = {};
2079  	struct bnxt_qplib_pbl *pbl;
2080  	u32 pg_sz_lvl;
2081  	int rc;
2082  
2083  	if (!cq->dpi) {
2084  		dev_err(&rcfw->pdev->dev,
2085  			"FP: CREATE_CQ failed due to NULL DPI\n");
2086  		return -EINVAL;
2087  	}
2088  
2089  	hwq_attr.res = res;
2090  	hwq_attr.depth = cq->max_wqe;
2091  	hwq_attr.stride = sizeof(struct cq_base);
2092  	hwq_attr.type = HWQ_TYPE_QUEUE;
2093  	hwq_attr.sginfo = &cq->sg_info;
2094  	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2095  	if (rc)
2096  		return rc;
2097  
2098  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2099  				 CMDQ_BASE_OPCODE_CREATE_CQ,
2100  				 sizeof(req));
2101  
2102  	req.dpi = cpu_to_le32(cq->dpi->dpi);
2103  	req.cq_handle = cpu_to_le64(cq->cq_handle);
2104  	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
2105  	pbl = &cq->hwq.pbl[PBL_LVL_0];
2106  	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2107  		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
2108  	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2109  	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2110  	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2111  	req.cq_fco_cnq_id = cpu_to_le32(
2112  			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2113  			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2114  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2115  				sizeof(resp), 0);
2116  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2117  	if (rc)
2118  		goto fail;
2119  
2120  	cq->id = le32_to_cpu(resp.xid);
2121  	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2122  	init_waitqueue_head(&cq->waitq);
2123  	INIT_LIST_HEAD(&cq->sqf_head);
2124  	INIT_LIST_HEAD(&cq->rqf_head);
2125  	spin_lock_init(&cq->compl_lock);
2126  	spin_lock_init(&cq->flush_lock);
2127  
2128  	cq->dbinfo.hwq = &cq->hwq;
2129  	cq->dbinfo.xid = cq->id;
2130  	cq->dbinfo.db = cq->dpi->dbr;
2131  	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2132  
2133  	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2134  
2135  	return 0;
2136  
2137  fail:
2138  	bnxt_qplib_free_hwq(res, &cq->hwq);
2139  	return rc;
2140  }
2141  
2142  void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2143  				   struct bnxt_qplib_cq *cq)
2144  {
2145  	bnxt_qplib_free_hwq(res, &cq->hwq);
2146  	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2147  }
2148  
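/* Resize protocol: allocate the new ring in cq->resize_hwq and issue
 * RESIZE_CQ; the HW signals the switch-over with a CUT_OFF CQE (see
 * bnxt_qplib_cq_process_cutoff(), which clears CQ_FLAGS_RESIZE_IN_PROG
 * and wakes cq->waitq), after which the caller invokes
 * bnxt_qplib_resize_cq_complete() above to adopt the new ring.
 */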
2149  int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2150  			 int new_cqes)
2151  {
2152  	struct bnxt_qplib_hwq_attr hwq_attr = {};
2153  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2154  	struct creq_resize_cq_resp resp = {};
2155  	struct bnxt_qplib_cmdqmsg msg = {};
2156  	struct cmdq_resize_cq req = {};
2157  	struct bnxt_qplib_pbl *pbl;
2158  	u32 pg_sz, lvl, new_sz;
2159  	int rc;
2160  
2161  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2162  				 CMDQ_BASE_OPCODE_RESIZE_CQ,
2163  				 sizeof(req));
2164  	hwq_attr.sginfo = &cq->sg_info;
2165  	hwq_attr.res = res;
2166  	hwq_attr.depth = new_cqes;
2167  	hwq_attr.stride = sizeof(struct cq_base);
2168  	hwq_attr.type = HWQ_TYPE_QUEUE;
2169  	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2170  	if (rc)
2171  		return rc;
2172  
2173  	req.cq_cid = cpu_to_le32(cq->id);
2174  	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2175  	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2176  	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2177  				       CMDQ_RESIZE_CQ_LVL_MASK;
2178  	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2179  		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2180  	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2181  	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2182  
2183  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2184  				sizeof(resp), 0);
2185  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2186  	return rc;
2187  }
2188  
2189  int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2190  {
2191  	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2192  	struct creq_destroy_cq_resp resp = {};
2193  	struct bnxt_qplib_cmdqmsg msg = {};
2194  	struct cmdq_destroy_cq req = {};
2195  	u16 total_cnq_events;
2196  	int rc;
2197  
2198  	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2199  				 CMDQ_BASE_OPCODE_DESTROY_CQ,
2200  				 sizeof(req));
2201  
2202  	req.cq_cid = cpu_to_le32(cq->id);
2203  	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2204  				sizeof(resp), 0);
2205  	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2206  	if (rc)
2207  		return rc;
2208  	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2209  	__wait_for_all_nqes(cq, total_cnq_events);
2210  	bnxt_qplib_free_hwq(res, &cq->hwq);
2211  	return 0;
2212  }
2213  
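/* Fabricate FLUSHED_ERR completions for every outstanding SQ WQE of a
 * QP that has been moved to the error state. Fence WQEs are consumed
 * without emitting a CQE. Returns -EAGAIN when the CQE budget runs out
 * before the queue is drained, so the caller can resume later.
 */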
2214  static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2215  		      struct bnxt_qplib_cqe **pcqe, int *budget)
2216  {
2217  	struct bnxt_qplib_cqe *cqe;
2218  	u32 start, last;
2219  	int rc = 0;
2220  
2221  	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2222  	start = sq->swq_start;
2223  	cqe = *pcqe;
2224  	while (*budget) {
2225  		last = sq->swq_last;
2226  		if (start == last)
2227  			break;
2228  		/* Skip the FENCE WQE completions */
2229  		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2230  			bnxt_qplib_cancel_phantom_processing(qp);
2231  			goto skip_compl;
2232  		}
2233  		memset(cqe, 0, sizeof(*cqe));
2234  		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2235  		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2236  		cqe->qp_handle = (u64)(unsigned long)qp;
2237  		cqe->wr_id = sq->swq[last].wr_id;
2238  		cqe->src_qp = qp->id;
2239  		cqe->type = sq->swq[last].type;
2240  		cqe++;
2241  		(*budget)--;
2242  skip_compl:
2243  		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[last].slots);
2244  		sq->swq_last = sq->swq[last].next_idx;
2245  	}
2246  	*pcqe = cqe;
2247  	if (!(*budget) && sq->swq_last != start)
2248  		/* Out of budget */
2249  		rc = -EAGAIN;
2250  
2251  	return rc;
2252  }
2253  
2254  static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2255  		      struct bnxt_qplib_cqe **pcqe, int *budget)
2256  {
2257  	struct bnxt_qplib_cqe *cqe;
2258  	u32 start, last;
2259  	int opcode = 0;
2260  	int rc = 0;
2261  
2262  	switch (qp->type) {
2263  	case CMDQ_CREATE_QP1_TYPE_GSI:
2264  		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2265  		break;
2266  	case CMDQ_CREATE_QP_TYPE_RC:
2267  		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2268  		break;
2269  	case CMDQ_CREATE_QP_TYPE_UD:
2270  	case CMDQ_CREATE_QP_TYPE_GSI:
2271  		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2272  		break;
2273  	}
2274  
2275  	/* Flush the rest of the RQ */
2276  	start = rq->swq_start;
2277  	cqe = *pcqe;
2278  	while (*budget) {
2279  		last = rq->swq_last;
2280  		if (last == start)
2281  			break;
2282  		memset(cqe, 0, sizeof(*cqe));
2283  		cqe->status =
2284  		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2285  		cqe->opcode = opcode;
2286  		cqe->qp_handle = (unsigned long)qp;
2287  		cqe->wr_id = rq->swq[last].wr_id;
2288  		cqe++;
2289  		(*budget)--;
2290  		bnxt_qplib_hwq_incr_cons(&rq->hwq, rq->swq[last].slots);
2291  		rq->swq_last = rq->swq[last].next_idx;
2292  	}
2293  	*pcqe = cqe;
2294  	if (!*budget && rq->swq_last != start)
2295  		/* Out of budget */
2296  		rc = -EAGAIN;
2297  
2298  	return rc;
2299  }
2300  
2301  void bnxt_qplib_mark_qp_error(void *qp_handle)
2302  {
2303  	struct bnxt_qplib_qp *qp = qp_handle;
2304  
2305  	if (!qp)
2306  		return;
2307  
2308  	/* Must block new posting of SQ and RQ */
2309  	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2310  	bnxt_qplib_cancel_phantom_processing(qp);
2311  }
2312  
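/* Workaround for HW issue 9060 (phantom CQE after a fence WQE): if the
 * psn_search entry of the WQE being completed is still marked (bit 31 of
 * flags_next_psn), the real completion may not have been written yet.
 * The poller then arms the CQ and bails out with -EAGAIN; on subsequent
 * polls it peeks ahead in the CQ ring until the phantom REQ CQE pointing
 * at the fence WQE shows up, and once it does, completions resume one
 * WQE at a time (sq->single) until this CQE is fully processed.
 */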
2313  /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2314   *       CQE is tracked from sw_cq_cons to max_element but valid only if VALID=1
2315   */
2316  static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2317  		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2318  {
2319  	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2320  	struct bnxt_qplib_q *sq = &qp->sq;
2321  	struct cq_req *peek_req_hwcqe;
2322  	struct bnxt_qplib_qp *peek_qp;
2323  	struct bnxt_qplib_q *peek_sq;
2324  	struct bnxt_qplib_swq *swq;
2325  	struct cq_base *peek_hwcqe;
2326  	int i, rc = 0;
2327  
2328  	/* Normal mode */
2329  	/* Check for the psn_search marking before completing */
2330  	swq = &sq->swq[swq_last];
2331  	if (swq->psn_search &&
2332  	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2333  		/* Unmark */
2334  		swq->psn_search->flags_next_psn = cpu_to_le32
2335  			(le32_to_cpu(swq->psn_search->flags_next_psn)
2336  				     & ~0x80000000);
2337  		dev_dbg(&cq->hwq.pdev->dev,
2338  			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2339  			cq_cons, qp->id, swq_last, cqe_sq_cons);
2340  		sq->condition = true;
2341  		sq->send_phantom = true;
2342  
2343  		/* TODO: Only ARM if the previous SQE is ARMALL */
2344  		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2345  		rc = -EAGAIN;
2346  		goto out;
2347  	}
2348  	if (sq->condition) {
2349  		/* Peek at the completions */
2350  		peek_raw_cq_cons = cq->hwq.cons;
2351  		peek_sw_cq_cons = cq_cons;
2352  		i = cq->hwq.max_elements;
2353  		while (i--) {
2354  			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2355  			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2356  						       peek_sw_cq_cons, NULL);
2357  			/* If the next hwcqe is VALID */
2358  			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2359  					  cq->hwq.max_elements)) {
2360  				/*
2361  				 * The valid test of the entry must be done first
2362  				 * before reading any further.
2363  				 */
2364  				dma_rmb();
2365  				/* If the next hwcqe is a REQ */
2366  				if ((peek_hwcqe->cqe_type_toggle &
2367  				    CQ_BASE_CQE_TYPE_MASK) ==
2368  				    CQ_BASE_CQE_TYPE_REQ) {
2369  					peek_req_hwcqe = (struct cq_req *)
2370  							 peek_hwcqe;
2371  					peek_qp = (struct bnxt_qplib_qp *)
2372  						((unsigned long)
2373  						 le64_to_cpu
2374  						 (peek_req_hwcqe->qp_handle));
2375  					peek_sq = &peek_qp->sq;
2376  					peek_sq_cons_idx =
2377  						((le16_to_cpu(
2378  						  peek_req_hwcqe->sq_cons_idx)
2379  						  - 1) % sq->max_wqe);
2380  					/* If the hwcqe's sq's wr_id matches */
2381  					if (peek_sq == sq &&
2382  					    sq->swq[peek_sq_cons_idx].wr_id ==
2383  					    BNXT_QPLIB_FENCE_WRID) {
2384  						/*
2385  						 *  Unbreak only if the phantom
2386  						 *  comes back
2387  						 */
2388  						dev_dbg(&cq->hwq.pdev->dev,
2389  							"FP: Got Phantom CQE\n");
2390  						sq->condition = false;
2391  						sq->single = true;
2392  						rc = 0;
2393  						goto out;
2394  					}
2395  				}
2396  				/* Valid but not the phantom, so keep looping */
2397  			} else {
2398  				/* Not valid yet, just exit and wait */
2399  				rc = -EINVAL;
2400  				goto out;
2401  			}
2402  			peek_sw_cq_cons++;
2403  			peek_raw_cq_cons++;
2404  		}
2405  		dev_err(&cq->hwq.pdev->dev,
2406  			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2407  			cq_cons, qp->id, swq_last, cqe_sq_cons);
2408  		rc = -EINVAL;
2409  	}
2410  out:
2411  	return rc;
2412  }
2413  
2414  static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2415  				     struct cq_req *hwcqe,
2416  				     struct bnxt_qplib_cqe **pcqe, int *budget,
2417  				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2418  {
2419  	struct bnxt_qplib_swq *swq;
2420  	struct bnxt_qplib_cqe *cqe;
2421  	struct bnxt_qplib_qp *qp;
2422  	struct bnxt_qplib_q *sq;
2423  	u32 cqe_sq_cons;
2424  	int rc = 0;
2425  
2426  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2427  				      le64_to_cpu(hwcqe->qp_handle));
2428  	if (!qp) {
2429  		dev_err(&cq->hwq.pdev->dev,
2430  			"FP: Process Req qp is NULL\n");
2431  		return -EINVAL;
2432  	}
2433  	sq = &qp->sq;
2434  
2435  	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
2436  	if (qp->sq.flushed) {
2437  		dev_dbg(&cq->hwq.pdev->dev,
2438  			"%s: QP in Flush QP = %p\n", __func__, qp);
2439  		goto done;
2440  	}
2441  	/* The HW may aggregate several completions into one CQE, so walk
2442  	 * the sq's swq and fabricate CQEs for all previously signaled SWQEs
2443  	 * from the current sq cons up to cqe_sq_cons.
2444  	 */
2445  	cqe = *pcqe;
2446  	while (*budget) {
2447  		if (sq->swq_last == cqe_sq_cons)
2448  			/* Done */
2449  			break;
2450  
2451  		swq = &sq->swq[sq->swq_last];
2452  		memset(cqe, 0, sizeof(*cqe));
2453  		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2454  		cqe->qp_handle = (u64)(unsigned long)qp;
2455  		cqe->src_qp = qp->id;
2456  		cqe->wr_id = swq->wr_id;
2457  		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2458  			goto skip;
2459  		cqe->type = swq->type;
2460  
2461  		/* For the last CQE, check for status.  For errors, regardless
2462  		 * of the request being signaled or not, it must complete with
2463  		 * the hwcqe error status
2464  		 */
2465  		if (swq->next_idx == cqe_sq_cons &&
2466  		    hwcqe->status != CQ_REQ_STATUS_OK) {
2467  			cqe->status = hwcqe->status;
2468  			dev_err(&cq->hwq.pdev->dev,
2469  				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2470  				sq->swq_last, cqe->wr_id, cqe->status);
2471  			cqe++;
2472  			(*budget)--;
2473  			bnxt_qplib_mark_qp_error(qp);
2474  			/* Add qp to flush list of the CQ */
2475  			bnxt_qplib_add_flush_qp(qp);
2476  		} else {
2477  			/* Before we complete, do WA 9060 */
2478  			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2479  				      cqe_sq_cons)) {
2480  				*lib_qp = qp;
2481  				goto out;
2482  			}
2483  			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2484  				cqe->status = CQ_REQ_STATUS_OK;
2485  				cqe++;
2486  				(*budget)--;
2487  			}
2488  		}
2489  skip:
2490  		bnxt_qplib_hwq_incr_cons(&sq->hwq, swq->slots);
2491  		sq->swq_last = swq->next_idx;
2492  		if (sq->single)
2493  			break;
2494  	}
2495  out:
2496  	*pcqe = cqe;
2497  	if (sq->swq_last != cqe_sq_cons) {
2498  		/* Out of budget */
2499  		rc = -EAGAIN;
2500  		goto done;
2501  	}
2502  	/*
2503  	 * Back to normal completion mode only after it has completed all of
2504  	 * the WC for this CQE
2505  	 */
2506  	sq->single = false;
2507  done:
2508  	return rc;
2509  }
2510  
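/* Return a consumed SRQ element to the free chain: the freed tag becomes
 * the new tail of the next_idx list and the HW queue's consumer counter
 * is bumped, which is how the SRQ tracks outstanding elements.
 */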
2511  static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2512  {
2513  	spin_lock(&srq->hwq.lock);
2514  	srq->swq[srq->last_idx].next_idx = (int)tag;
2515  	srq->last_idx = (int)tag;
2516  	srq->swq[srq->last_idx].next_idx = -1;
2517  	srq->hwq.cons++; /* Support for SRQE counter */
2518  	spin_unlock(&srq->hwq.lock);
2519  }
2520  
2521  static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2522  					struct cq_res_rc *hwcqe,
2523  					struct bnxt_qplib_cqe **pcqe,
2524  					int *budget)
2525  {
2526  	struct bnxt_qplib_srq *srq;
2527  	struct bnxt_qplib_cqe *cqe;
2528  	struct bnxt_qplib_qp *qp;
2529  	struct bnxt_qplib_q *rq;
2530  	u32 wr_id_idx;
2531  
2532  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2533  				      le64_to_cpu(hwcqe->qp_handle));
2534  	if (!qp) {
2535  		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2536  		return -EINVAL;
2537  	}
2538  	if (qp->rq.flushed) {
2539  		dev_dbg(&cq->hwq.pdev->dev,
2540  			"%s: QP in Flush QP = %p\n", __func__, qp);
2541  		return 0;
2542  	}
2543  
2544  	cqe = *pcqe;
2545  	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2546  	cqe->length = le32_to_cpu(hwcqe->length);
2547  	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2548  	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2549  	cqe->flags = le16_to_cpu(hwcqe->flags);
2550  	cqe->status = hwcqe->status;
2551  	cqe->qp_handle = (u64)(unsigned long)qp;
2552  
2553  	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2554  				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2555  	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2556  		srq = qp->srq;
2557  		if (!srq)
2558  			return -EINVAL;
2559  		if (wr_id_idx >= srq->hwq.max_elements) {
2560  			dev_err(&cq->hwq.pdev->dev,
2561  				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2562  				wr_id_idx, srq->hwq.max_elements);
2563  			return -EINVAL;
2564  		}
2565  		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2566  		bnxt_qplib_release_srqe(srq, wr_id_idx);
2567  		cqe++;
2568  		(*budget)--;
2569  		*pcqe = cqe;
2570  	} else {
2571  		struct bnxt_qplib_swq *swq;
2572  
2573  		rq = &qp->rq;
2574  		if (wr_id_idx > (rq->max_wqe - 1)) {
2575  			dev_err(&cq->hwq.pdev->dev,
2576  				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2577  				wr_id_idx, rq->max_wqe);
2578  			return -EINVAL;
2579  		}
2580  		if (wr_id_idx != rq->swq_last)
2581  			return -EINVAL;
2582  		swq = &rq->swq[rq->swq_last];
2583  		cqe->wr_id = swq->wr_id;
2584  		cqe++;
2585  		(*budget)--;
2586  		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2587  		rq->swq_last = swq->next_idx;
2588  		*pcqe = cqe;
2589  
2590  		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2591  			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2592  			/* Add qp to flush list of the CQ */
2593  			bnxt_qplib_add_flush_qp(qp);
2594  		}
2595  	}
2596  
2597  	return 0;
2598  }
2599  
2600  static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2601  					struct cq_res_ud *hwcqe,
2602  					struct bnxt_qplib_cqe **pcqe,
2603  					int *budget)
2604  {
2605  	struct bnxt_qplib_srq *srq;
2606  	struct bnxt_qplib_cqe *cqe;
2607  	struct bnxt_qplib_qp *qp;
2608  	struct bnxt_qplib_q *rq;
2609  	u32 wr_id_idx;
2610  
2611  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2612  				      le64_to_cpu(hwcqe->qp_handle));
2613  	if (!qp) {
2614  		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2615  		return -EINVAL;
2616  	}
2617  	if (qp->rq.flushed) {
2618  		dev_dbg(&cq->hwq.pdev->dev,
2619  			"%s: QP in Flush QP = %p\n", __func__, qp);
2620  		return 0;
2621  	}
2622  	cqe = *pcqe;
2623  	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2624  	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2625  	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2626  	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2627  	cqe->flags = le16_to_cpu(hwcqe->flags);
2628  	cqe->status = hwcqe->status;
2629  	cqe->qp_handle = (u64)(unsigned long)qp;
2630  	/* FIXME: Endianness fix needed for smac */
2631  	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2632  	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2633  				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2634  	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2635  				  ((le32_to_cpu(
2636  				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2637  				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2638  
2639  	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2640  		srq = qp->srq;
2641  		if (!srq)
2642  			return -EINVAL;
2643  
2644  		if (wr_id_idx >= srq->hwq.max_elements) {
2645  			dev_err(&cq->hwq.pdev->dev,
2646  				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2647  				wr_id_idx, srq->hwq.max_elements);
2648  			return -EINVAL;
2649  		}
2650  		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2651  		bnxt_qplib_release_srqe(srq, wr_id_idx);
2652  		cqe++;
2653  		(*budget)--;
2654  		*pcqe = cqe;
2655  	} else {
2656  		struct bnxt_qplib_swq *swq;
2657  
2658  		rq = &qp->rq;
2659  		if (wr_id_idx > (rq->max_wqe - 1)) {
2660  			dev_err(&cq->hwq.pdev->dev,
2661  				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2662  				wr_id_idx, rq->max_wqe);
2663  			return -EINVAL;
2664  		}
2665  
2666  		if (rq->swq_last != wr_id_idx)
2667  			return -EINVAL;
2668  		swq = &rq->swq[rq->swq_last];
2669  		cqe->wr_id = swq->wr_id;
2670  		cqe++;
2671  		(*budget)--;
2672  		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2673  		rq->swq_last = swq->next_idx;
2674  		*pcqe = cqe;
2675  
2676  		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2677  			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2678  			/* Add qp to flush list of the CQ */
2679  			bnxt_qplib_add_flush_qp(qp);
2680  		}
2681  	}
2682  
2683  	return 0;
2684  }
2685  
2686  bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2687  {
2688  	struct cq_base *hw_cqe;
2689  	u32 sw_cons, raw_cons;
2690  	bool rc = true;
2691  
2692  	raw_cons = cq->hwq.cons;
2693  	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2694  	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2695  	/* Check for Valid bit. If the CQE is valid, return false */
2696  	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2697  	return rc;
2698  }
2699  
2700  static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2701  						struct cq_res_raweth_qp1 *hwcqe,
2702  						struct bnxt_qplib_cqe **pcqe,
2703  						int *budget)
2704  {
2705  	struct bnxt_qplib_qp *qp;
2706  	struct bnxt_qplib_q *rq;
2707  	struct bnxt_qplib_srq *srq;
2708  	struct bnxt_qplib_cqe *cqe;
2709  	u32 wr_id_idx;
2710  
2711  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2712  				      le64_to_cpu(hwcqe->qp_handle));
2713  	if (!qp) {
2714  		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2715  		return -EINVAL;
2716  	}
2717  	if (qp->rq.flushed) {
2718  		dev_dbg(&cq->hwq.pdev->dev,
2719  			"%s: QP in Flush QP = %p\n", __func__, qp);
2720  		return 0;
2721  	}
2722  	cqe = *pcqe;
2723  	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2724  	cqe->flags = le16_to_cpu(hwcqe->flags);
2725  	cqe->qp_handle = (u64)(unsigned long)qp;
2726  
2727  	wr_id_idx =
2728  		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2729  				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2730  	cqe->src_qp = qp->id;
2731  	if (qp->id == 1 && !cqe->length) {
2732  		/* Add workaround for the length misdetection */
2733  		cqe->length = 296;
2734  	} else {
2735  		cqe->length = le16_to_cpu(hwcqe->length);
2736  	}
2737  	cqe->pkey_index = qp->pkey_index;
2738  	memcpy(cqe->smac, qp->smac, ETH_ALEN);
2739  
2740  	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2741  	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2742  	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2743  
2744  	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2745  		srq = qp->srq;
2746  		if (!srq) {
2747  			dev_err(&cq->hwq.pdev->dev,
2748  				"FP: SRQ used but not defined??\n");
2749  			return -EINVAL;
2750  		}
2751  		if (wr_id_idx >= srq->hwq.max_elements) {
2752  			dev_err(&cq->hwq.pdev->dev,
2753  				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2754  				wr_id_idx, srq->hwq.max_elements);
2755  			return -EINVAL;
2756  		}
2757  		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2758  		bnxt_qplib_release_srqe(srq, wr_id_idx);
2759  		cqe++;
2760  		(*budget)--;
2761  		*pcqe = cqe;
2762  	} else {
2763  		struct bnxt_qplib_swq *swq;
2764  
2765  		rq = &qp->rq;
2766  		if (wr_id_idx > (rq->max_wqe - 1)) {
2767  			dev_err(&cq->hwq.pdev->dev,
2768  				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2769  				wr_id_idx, rq->max_wqe);
2770  			return -EINVAL;
2771  		}
2772  		if (rq->swq_last != wr_id_idx)
2773  			return -EINVAL;
2774  		swq = &rq->swq[rq->swq_last];
2775  		cqe->wr_id = swq->wr_id;
2776  		cqe++;
2777  		(*budget)--;
2778  		bnxt_qplib_hwq_incr_cons(&rq->hwq, swq->slots);
2779  		rq->swq_last = swq->next_idx;
2780  		*pcqe = cqe;
2781  
2782  		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2783  			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2784  			/* Add qp to flush list of the CQ */
2785  			bnxt_qplib_add_flush_qp(qp);
2786  		}
2787  	}
2788  
2789  	return 0;
2790  }
2791  
2792  static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2793  					  struct cq_terminal *hwcqe,
2794  					  struct bnxt_qplib_cqe **pcqe,
2795  					  int *budget)
2796  {
2797  	struct bnxt_qplib_qp *qp;
2798  	struct bnxt_qplib_q *sq, *rq;
2799  	struct bnxt_qplib_cqe *cqe;
2800  	u32 swq_last = 0, cqe_cons;
2801  	int rc = 0;
2802  
2803  	/* Check the Status */
2804  	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2805  		dev_warn(&cq->hwq.pdev->dev,
2806  			 "FP: CQ Process Terminal Error status = 0x%x\n",
2807  			 hwcqe->status);
2808  
2809  	qp = (struct bnxt_qplib_qp *)((unsigned long)
2810  				      le64_to_cpu(hwcqe->qp_handle));
2811  	if (!qp)
2812  		return -EINVAL;
2813  
2814  	/* Must block new posting of SQ and RQ */
2815  	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2816  
2817  	sq = &qp->sq;
2818  	rq = &qp->rq;
2819  
2820  	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2821  	if (cqe_cons == 0xFFFF)
2822  		goto do_rq;
2823  	cqe_cons %= sq->max_wqe;
2824  
2825  	if (qp->sq.flushed) {
2826  		dev_dbg(&cq->hwq.pdev->dev,
2827  			"%s: QP in Flush QP = %p\n", __func__, qp);
2828  		goto sq_done;
2829  	}
2830  
2831  	/* Terminal CQE can also include aggregated successful CQEs prior.
2832  	 * So we must complete all CQEs from the current sq's cons to the
2833  	 * cq_cons with status OK
2834  	 */
2835  	cqe = *pcqe;
2836  	while (*budget) {
2837  		swq_last = sq->swq_last;
2838  		if (swq_last == cqe_cons)
2839  			break;
2840  		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2841  			memset(cqe, 0, sizeof(*cqe));
2842  			cqe->status = CQ_REQ_STATUS_OK;
2843  			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2844  			cqe->qp_handle = (u64)(unsigned long)qp;
2845  			cqe->src_qp = qp->id;
2846  			cqe->wr_id = sq->swq[swq_last].wr_id;
2847  			cqe->type = sq->swq[swq_last].type;
2848  			cqe++;
2849  			(*budget)--;
2850  		}
2851  		bnxt_qplib_hwq_incr_cons(&sq->hwq, sq->swq[swq_last].slots);
2852  		sq->swq_last = sq->swq[swq_last].next_idx;
2853  	}
2854  	*pcqe = cqe;
2855  	if (!(*budget) && swq_last != cqe_cons) {
2856  		/* Out of budget */
2857  		rc = -EAGAIN;
2858  		goto sq_done;
2859  	}
2860  sq_done:
2861  	if (rc)
2862  		return rc;
2863  do_rq:
2864  	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2865  	if (cqe_cons == 0xFFFF) {
2866  		goto done;
2867  	} else if (cqe_cons > rq->max_wqe - 1) {
2868  		dev_err(&cq->hwq.pdev->dev,
2869  			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2870  			cqe_cons, rq->max_wqe);
2871  		rc = -EINVAL;
2872  		goto done;
2873  	}
2874  
2875  	if (qp->rq.flushed) {
2876  		dev_dbg(&cq->hwq.pdev->dev,
2877  			"%s: QP in Flush QP = %p\n", __func__, qp);
2878  		rc = 0;
2879  		goto done;
2880  	}
2881  
2882  	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2883  	 * from the current rq->cons to the rq->prod, regardless of what
2884  	 * rq->cons the terminal CQE indicates.
2885  	 */
2886  
2887  	/* Add qp to flush list of the CQ */
2888  	bnxt_qplib_add_flush_qp(qp);
2889  done:
2890  	return rc;
2891  }
2892  
2893  static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2894  					struct cq_cutoff *hwcqe)
2895  {
2896  	/* Check the Status */
2897  	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2898  		dev_err(&cq->hwq.pdev->dev,
2899  			"FP: CQ Process Cutoff Error status = 0x%x\n",
2900  			hwcqe->status);
2901  		return -EINVAL;
2902  	}
2903  	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2904  	wake_up_interruptible(&cq->waitq);
2905  
2906  	return 0;
2907  }
2908  
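/* Generate flush completions for all QPs parked on this CQ's SQ/RQ flush
 * lists. Returns the number of CQEs written into the caller's array; a
 * typical consumer calls this from its poll path before (or instead of)
 * polling the HW ring when QPs are in the error state (usage inferred
 * from the flush-list design, not from a specific caller).
 */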
2909  int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2910  				  struct bnxt_qplib_cqe *cqe,
2911  				  int num_cqes)
2912  {
2913  	struct bnxt_qplib_qp *qp = NULL;
2914  	u32 budget = num_cqes;
2915  	unsigned long flags;
2916  
2917  	spin_lock_irqsave(&cq->flush_lock, flags);
2918  	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2919  		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2920  		__flush_sq(&qp->sq, qp, &cqe, &budget);
2921  	}
2922  
2923  	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2924  		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2925  		__flush_rq(&qp->rq, qp, &cqe, &budget);
2926  	}
2927  	spin_unlock_irqrestore(&cq->flush_lock, flags);
2928  
2929  	return num_cqes - budget;
2930  }
2931  
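/* Main CQ poll loop: a CQE is consumed only after CQE_CMP_VALID() checks
 * the toggle bit against the ring pass count, with a dma_rmb() before any
 * further reads. CQEs are dispatched by type and the consumer doorbell is
 * rung once with the final consumer index. Sketch of the expected usage:
 *
 *	struct bnxt_qplib_cqe cqes[16];
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int polled = bnxt_qplib_poll_cq(cq, cqes, 16, &lib_qp);
 *
 * (illustrative only; the array size and lib_qp handling are up to the
 * caller)
 */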
2932  int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2933  		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2934  {
2935  	struct cq_base *hw_cqe;
2936  	u32 sw_cons, raw_cons;
2937  	int budget, rc = 0;
2938  	u8 type;
2939  
2940  	raw_cons = cq->hwq.cons;
2941  	budget = num_cqes;
2942  
2943  	while (budget) {
2944  		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2945  		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, sw_cons, NULL);
2946  
2947  		/* Check for Valid bit */
2948  		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2949  			break;
2950  
2951  		/*
2952  		 * The valid test of the entry must be done first before
2953  		 * reading any further.
2954  		 */
2955  		dma_rmb();
2956  		/* From the device's respective CQE format to qplib_wc */
2957  		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2958  		switch (type) {
2959  		case CQ_BASE_CQE_TYPE_REQ:
2960  			rc = bnxt_qplib_cq_process_req(cq,
2961  						       (struct cq_req *)hw_cqe,
2962  						       &cqe, &budget,
2963  						       sw_cons, lib_qp);
2964  			break;
2965  		case CQ_BASE_CQE_TYPE_RES_RC:
2966  			rc = bnxt_qplib_cq_process_res_rc(cq,
2967  							  (struct cq_res_rc *)
2968  							  hw_cqe, &cqe,
2969  							  &budget);
2970  			break;
2971  		case CQ_BASE_CQE_TYPE_RES_UD:
2972  			rc = bnxt_qplib_cq_process_res_ud
2973  					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
2974  					 &budget);
2975  			break;
2976  		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2977  			rc = bnxt_qplib_cq_process_res_raweth_qp1
2978  					(cq, (struct cq_res_raweth_qp1 *)
2979  					 hw_cqe, &cqe, &budget);
2980  			break;
2981  		case CQ_BASE_CQE_TYPE_TERMINAL:
2982  			rc = bnxt_qplib_cq_process_terminal
2983  					(cq, (struct cq_terminal *)hw_cqe,
2984  					 &cqe, &budget);
2985  			break;
2986  		case CQ_BASE_CQE_TYPE_CUT_OFF:
2987  			bnxt_qplib_cq_process_cutoff
2988  					(cq, (struct cq_cutoff *)hw_cqe);
2989  			/* Done processing this CQ */
2990  			goto exit;
2991  		default:
2992  			dev_err(&cq->hwq.pdev->dev,
2993  				"process_cq unknown type 0x%x\n",
2994  				hw_cqe->cqe_type_toggle &
2995  				CQ_BASE_CQE_TYPE_MASK);
2996  			rc = -EINVAL;
2997  			break;
2998  		}
2999  		if (rc < 0) {
3000  			if (rc == -EAGAIN)
3001  				break;
3002  			/* Error while processing the CQE, just skip to the
3003  			 * next one
3004  			 */
3005  			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3006  				dev_err(&cq->hwq.pdev->dev,
3007  					"process_cqe error rc = 0x%x\n", rc);
3008  		}
3009  		raw_cons++;
3010  	}
3011  	if (cq->hwq.cons != raw_cons) {
3012  		cq->hwq.cons = raw_cons;
3013  		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3014  	}
3015  exit:
3016  	return num_cqes - budget;
3017  }
3018  
3019  void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3020  {
3021  	if (arm_type)
3022  		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3023  	/* Using cq->arm_state variable to track whether to issue cq handler */
3024  	atomic_set(&cq->arm_state, 1);
3025  }
3026  
3027  void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3028  {
3029  	flush_workqueue(qp->scq->nq->cqn_wq);
3030  	if (qp->scq != qp->rcq)
3031  		flush_workqueue(qp->rcq->nq->cqn_wq);
3032  }
3033