/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device     = &dev->ib_dev;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

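/*
 * Return the CQE at index n if it has been written by HW and is now owned
 * by software, or NULL if it is still owned by HW.
 */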
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

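/*
 * Fill in an ib_wc for a successfully completed send WQE based on the
 * opcode carried in the CQE.
 */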
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode    = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode    = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode    = IB_WC_RDMA_READ;
		wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode    = IB_WC_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode    = IB_WC_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode    = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode    = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE	   = 2,
};

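/*
 * Fill in an ib_wc for a receive completion, covering SRQ/XRC receive
 * queues and the link-layer specific (IB vs. RoCE) fields.
 */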
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8  roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_core_get_srq(dev->mdev,
						 be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq && atomic_dec_and_test(&msrq->refcount))
				complete(&msrq->free);
		}
	} else {
		wq	  = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (cqe->op_own >> 4) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode   = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
		break;
	}
	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->slid = be16_to_cpu(cqe->slid);
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	wc->slid = 0;
	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	mlx5_ib_warn(dev, "dump error cqe\n");
	mlx5_dump_err_cqe(dev->mdev, cqe);
}

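/*
 * Translate a HW error CQE syndrome into an ib_wc status. The CQE is
 * dumped for unexpected errors, but not for flush or retry-exceeded
 * syndromes, which are common during teardown.
 */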
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
	/* TBD: waiting decision
	*/
	return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
	struct mlx5_wqe_data_seg *dpseg;
	void *addr;

	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
		sizeof(struct mlx5_wqe_raddr_seg) +
		sizeof(struct mlx5_wqe_atomic_seg);
	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
	return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			  uint16_t idx)
{
	void *addr;
	int byte_count;
	int i;

	if (!is_atomic_response(qp, idx))
		return;

	byte_count = be32_to_cpu(cqe64->byte_cnt);
	addr = mlx5_get_atomic_laddr(qp, idx);

	if (byte_count == 4) {
		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
	} else {
		for (i = 0; i < byte_count; i += 8) {
			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
			addr += 8;
		}
	}

	return;
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		handle_atomic(qp, cqe64, idx);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else
	if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else
	if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

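/*
 * Generate software flush-error completions for WQEs still outstanding on
 * the send queue. Used when the device is in internal error state and HW
 * will not produce completions anymore.
 */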
static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	unsigned int idx;
	int np;
	int i;

	wq = &qp->sq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0;  i < cur && np < num_entries; i++) {
		idx = wq->last_poll & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
		wq->last_poll = wq->w_list[idx].next;
	}
	*npolled = np;
}

static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0;  i < cur && np < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return mimicked
	 * flush-error completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}
}

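/*
 * Poll a single CQE from the CQ. Returns 0 if a completion was written to
 * @wc, or -EAGAIN if no software-owned CQE is available.
 */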
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	struct mlx5_sig_err_cqe *sig_err_cqe;
	struct mlx5_core_mkey *mmkey;
	struct mlx5_ib_mr *mr;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = cqe64->op_own >> 4;
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp  = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR:
		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

		read_lock(&dev->mdev->priv.mkey_table.lock);
		mmkey = __mlx5_mr_lookup(dev->mdev,
					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		mr = to_mibmr(mmkey);
		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
		mr->sig->sig_err_exists = true;
		mr->sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, mr->sig->err_item.key,
			     mr->sig->err_item.err_type,
			     mr->sig->err_item.sig_err_offset,
			     mr->sig->err_item.expected,
			     mr->sig->err_item.actual);

		read_unlock(&dev->mdev->priv.mkey_table.lock);
		goto repoll;
	}

	return 0;
}

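/*
 * Drain software-generated completions (queued by mlx5_ib_generate_wc())
 * from cq->wc_list into @wc. If is_fatal_err is set, report them as
 * flush errors.
 */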
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc, bool is_fatal_err)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		if (unlikely(is_fatal_err)) {
			soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
			soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		}
		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* make sure no soft wqe's are waiting */
		if (unlikely(!list_empty(&cq->wc_list)))
			soft_polled = poll_soft_wc(cq, num_entries, wc, true);

		mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
				     wc + soft_polled, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc, false);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

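/*
 * Allocate a fragmented buffer large enough for @nent CQEs of @cqe_size
 * bytes and initialize its fragment layout.
 */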
static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_cq_buf *buf,
			     int nent,
			     int cqe_size)
{
	struct mlx5_frag_buf_ctrl *c = &buf->fbc;
	struct mlx5_frag_buf *frag_buf = &c->frag_buf;
	u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0};
	int err;

	MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size));
	MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0);

	mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff);

	err = mlx5_frag_buf_alloc_node(dev->mdev,
				       nent * cqe_size,
				       frag_buf,
				       dev->mdev->priv.numa_node);
	if (err)
		return err;

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

enum {
	MLX5_CQE_RES_FORMAT_HASH = 0,
	MLX5_CQE_RES_FORMAT_CSUM = 1,
	MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
};

static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
{
	switch (format) {
	case MLX5_IB_CQE_RES_FORMAT_HASH:
		return MLX5_CQE_RES_FORMAT_HASH;
	case MLX5_IB_CQE_RES_FORMAT_CSUM:
		return MLX5_CQE_RES_FORMAT_CSUM;
	case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
			return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct ib_ucontext *context, struct mlx5_ib_cq *cq,
			  int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd = {};
	size_t ucmdlen;
	int page_shift;
	__be64 *pas;
	int npages;
	int ncont;
	void *cqc;
	int err;

	ucmdlen = udata->inlen < sizeof(ucmd) ?
		  (sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if (ucmdlen == sizeof(ucmd) &&
	    (ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD)))
		return -EINVAL;

	if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
				   entries * ucmd.cqe_size,
				   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
				  &cq->db);
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
			   &ncont, NULL);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = to_mucontext(context)->bfregi.sys_pages[0];

	if (ucmd.cqe_comp_en == 1) {
		int mini_cqe_format;

		if (!((*cqe_size == 128 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
		      (*cqe_size == 64  &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
				     *cqe_size);
			goto err_cqb;
		}

		mini_cqe_format =
			mini_cqe_res_format_to_hw(dev,
						  ucmd.cqe_comp_res_format);
		if (mini_cqe_format < 0) {
			err = mini_cqe_format;
			mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
				    ucmd.cqe_comp_res_format, err);
			goto err_cqb;
		}

		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
		if (*cqe_size != 128 ||
		    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev,
				     "CQE padding is not supported for CQE size of %dB!\n",
				     *cqe_size);
			goto err_cqb;
		}

		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
	}

	return 0;

err_cqb:
	kvfree(*cqb);

err_db:
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
	ib_umem_release(cq->buf.umem);
}

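/*
 * Mark every CQE in a kernel CQ buffer as invalid so that HW ownership is
 * detected correctly on the first pass through the CQ.
 */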
static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
			     struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe(cq, i);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db  = cq->db.db;
	cq->mcq.arm_db     = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_frag_buf(cq, &cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
		 cq->buf.fbc.frag_buf.npages;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.fbc.frag_buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq;
	int uninitialized_var(index);
	int uninitialized_var(inlen);
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return ERR_PTR(-EINVAL);

	if (check_cq_create_flags(attr->flags))
		return ERR_PTR(-EOPNOTSUPP);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (context) {
		err = create_cq_user(dev, udata, context, cq, entries,
				     &cqb, &cqe_size, &index, &inlen);
		if (err)
			goto err_create;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			goto err_create;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	if (context)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp  = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}


	kvfree(cqb);
	return &cq->ibcq;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (context)
		destroy_cq_user(cq, context);
	else
		destroy_cq_kernel(dev, cq);

err_create:
	kfree(cq);

	return ERR_PTR(err);
}


int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	struct ib_ucontext *context = NULL;

	if (cq->uobject)
		context = cq->uobject->context;

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (context)
		destroy_cq_user(mcq, context);
	else
		destroy_cq_kernel(dev, mcq);

	kfree(mcq);

	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

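/*
 * Remove all CQEs that belong to the QP or SRQ identified by @rsn from the
 * CQ by compacting the remaining entries. The caller must hold the CQ lock.
 */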
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -EOPNOTSUPP;

	if (cq_period > MLX5_MAX_CQ_PERIOD)
		return -EINVAL;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;
	struct ib_ucontext *context = cq->buf.umem->context;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* check multiplication overflow */
	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
		return -EINVAL;

	umem = ib_umem_get(context, ucmd.buf_addr,
			   (size_t)ucmd.cqe_size * entries,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static void un_resize_user(struct mlx5_ib_cq *cq)
{
	ib_umem_release(cq->resize_umem);
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_frag_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, cq->resize_buf);
	cq->resize_buf = NULL;
}

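/*
 * Copy the CQEs that completed while the resize was in progress from the
 * old buffer into the resize buffer, stopping at the RESIZE_CQ CQE.
 */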
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
					     (i + 1) & cq->resize_buf->nent);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			struct mlx5_frag_buf_ctrl *c;

			c = &cq->resize_buf->fbc;
			npas = c->frag_buf.npages;
			page_shift = c->frag_buf.page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     pas, 0);
	else
		mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf,
					  pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}