// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <crypto/hash.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode);

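/* Re-advance the dma state of the first partially acknowledged send or
 * write wqe past the npsn packets that were already acked, recomputing
 * qp->req.opcode along the way, so a retry resumes at the first
 * unacknowledged packet.
 */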
static inline void retry_first_write_send(struct rxe_qp *qp,
					  struct rxe_send_wqe *wqe, int npsn)
{
	int i;

	for (i = 0; i < npsn; i++) {
		int to_send = (wqe->dma.resid > qp->mtu) ?
				qp->mtu : wqe->dma.resid;

		qp->req.opcode = next_opcode(qp, wqe,
					     wqe->wr.opcode);

		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			wqe->dma.resid -= to_send;
			wqe->dma.sge_offset += to_send;
		} else {
			advance_dma_data(&wqe->dma, to_send);
		}
	}
}

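/* Reset the requester to resend from the oldest unacknowledged wqe:
 * rewind wqe_index and psn to the completer's position and rebuild the
 * dma state of each outstanding wqe (partially for the first one).
 */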
static void req_retry(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	unsigned int wqe_index;
	unsigned int mask;
	int npsn;
	int first = 1;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int cons;
	unsigned int prod;

	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	qp->req.wqe_index = cons;
	qp->req.psn = qp->comp.psn;
	qp->req.opcode = -1;

	for (wqe_index = cons; wqe_index != prod;
			wqe_index = queue_next_index(q, wqe_index)) {
		wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
		mask = wr_opcode_mask(wqe->wr.opcode, qp);

		if (wqe->state == wqe_state_posted)
			break;

		if (wqe->state == wqe_state_done)
			continue;

		wqe->iova = (mask & WR_ATOMIC_MASK) ?
				wqe->wr.wr.atomic.remote_addr :
				(mask & WR_READ_OR_WRITE_MASK) ?
				wqe->wr.wr.rdma.remote_addr :
				0;

		if (!first || (mask & WR_READ_MASK) == 0) {
			wqe->dma.resid = wqe->dma.length;
			wqe->dma.cur_sge = 0;
			wqe->dma.sge_offset = 0;
		}

		if (first) {
			first = 0;

			if (mask & WR_WRITE_OR_SEND_MASK) {
				npsn = (qp->comp.psn - wqe->first_psn) &
					BTH_PSN_MASK;
				retry_first_write_send(qp, wqe, npsn);
			}

			if (mask & WR_READ_MASK) {
				npsn = (wqe->dma.length - wqe->dma.resid) /
					qp->mtu;
				wqe->iova += npsn * qp->mtu;
			}
		}

		wqe->state = wqe_state_posted;
	}
}

void rnr_nak_timer(struct timer_list *t)
{
	struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);

	pr_debug("%s: fired for qp#%d\n", __func__, qp_num(qp));

	/* request a send queue retry */
	qp->req.need_retry = 1;
	qp->req.wait_for_rnr_timer = 0;
	rxe_run_task(&qp->req.task, 1);
}

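/* Return the next send wqe to process or NULL if the send queue is
 * empty or the qp is draining/drained. Also detect when a draining qp
 * has finished draining and raise the IB_EVENT_SQ_DRAINED event.
 */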
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
	struct rxe_send_wqe *wqe;
	struct rxe_queue *q = qp->sq.queue;
	unsigned int index = qp->req.wqe_index;
	unsigned int cons;
	unsigned int prod;

	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_bh(&qp->state_lock);
		do {
			if (qp->req.state != QP_STATE_DRAIN) {
				/* comp just finished */
				spin_unlock_bh(&qp->state_lock);
				break;
			}

			if (wqe && ((index != cons) ||
				    (wqe->state != wqe_state_posted))) {
				/* comp not done yet */
				spin_unlock_bh(&qp->state_lock);
				break;
			}

			qp->req.state = QP_STATE_DRAINED;
			spin_unlock_bh(&qp->state_lock);

			if (qp->ibqp.event_handler) {
				struct ib_event ev;

				ev.device = qp->ibqp.device;
				ev.element.qp = &qp->ibqp;
				ev.event = IB_EVENT_SQ_DRAINED;
				qp->ibqp.event_handler(&ev,
						       qp->ibqp.qp_context);
			}
		} while (0);
	}

	if (index == prod)
		return NULL;

	wqe = queue_addr_from_index(q, index);

	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
		      qp->req.state == QP_STATE_DRAINED) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

	wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
	return wqe;
}

/**
 * rxe_wqe_is_fenced - check if next wqe is fenced
 * @qp: the queue pair
 * @wqe: the next wqe
 *
 * Returns: 1 if wqe needs to wait
 *	    0 if wqe is ready to go
 */
static int rxe_wqe_is_fenced(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	/* Local invalidate fence (LIF) see IBA 10.6.5.1
	 * Requires ALL previous operations on the send queue
	 * are complete. Make mandatory for the rxe driver.
	 */
	if (wqe->wr.opcode == IB_WR_LOCAL_INV)
		return qp->req.wqe_index != queue_get_consumer(qp->sq.queue,
						QUEUE_TYPE_FROM_CLIENT);

	/* Fence see IBA 10.8.3.3
	 * Requires that all previous read and atomic operations
	 * are complete.
	 */
	return (wqe->wr.send_flags & IB_SEND_FENCE) &&
		atomic_read(&qp->req.rd_atomic) != qp->attr.max_rd_atomic;
}

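/* Choose the RC wire opcode for the next packet of a work request,
 * based on the opcode currently in progress and whether the remaining
 * payload fits in a single MTU.
 */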
static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_RC_SEND_FIRST;

	case IB_WR_RDMA_READ:
		return IB_OPCODE_RC_RDMA_READ_REQUEST;

	case IB_WR_ATOMIC_CMP_AND_SWP:
		return IB_OPCODE_RC_COMPARE_SWAP;

	case IB_WR_ATOMIC_FETCH_AND_ADD:
		return IB_OPCODE_RC_FETCH_ADD;

	case IB_WR_SEND_WITH_INV:
		if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
			return fits ? IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_MIDDLE;
		else
			return fits ? IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE :
				IB_OPCODE_RC_SEND_FIRST;
	case IB_WR_REG_MR:
	case IB_WR_LOCAL_INV:
		return opcode;
	}

	return -EINVAL;
}

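/* Same as next_opcode_rc() but for UC queue pairs, which only support
 * sends and RDMA writes.
 */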
static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
{
	switch (opcode) {
	case IB_WR_RDMA_WRITE:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_RDMA_WRITE_FIRST;

	case IB_WR_SEND:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY :
				IB_OPCODE_UC_SEND_FIRST;

	case IB_WR_SEND_WITH_IMM:
		if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
		    qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
			return fits ?
				IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_MIDDLE;
		else
			return fits ?
				IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE :
				IB_OPCODE_UC_SEND_FIRST;
	}

	return -EINVAL;
}

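/* Select the wire opcode for the next packet of a wqe according to the
 * queue pair type. Returns -EINVAL for an unsupported combination.
 */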
static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		       u32 opcode)
{
	int fits = (wqe->dma.resid <= qp->mtu);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return next_opcode_rc(qp, opcode, fits);

	case IB_QPT_UC:
		return next_opcode_uc(qp, opcode, fits);

	case IB_QPT_UD:
	case IB_QPT_GSI:
		switch (opcode) {
		case IB_WR_SEND:
			return IB_OPCODE_UD_SEND_ONLY;

		case IB_WR_SEND_WITH_IMM:
			return IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		}
		break;

	default:
		break;
	}

	return -EINVAL;
}

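/* Reserve one slot of the outstanding read/atomic budget (rd_atomic)
 * for this wqe. Returns -EAGAIN if the initiator depth is exhausted.
 */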
static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int depth;

	if (wqe->has_rd_atomic)
		return 0;

	qp->req.need_rd_atomic = 1;
	depth = atomic_dec_return(&qp->req.rd_atomic);

	if (depth >= 0) {
		qp->req.need_rd_atomic = 0;
		wqe->has_rd_atomic = 1;
		return 0;
	}

	atomic_inc(&qp->req.rd_atomic);
	return -EAGAIN;
}

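/* Connected queue pairs use the negotiated path MTU; datagram queue
 * pairs are limited by the port MTU.
 */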
static inline int get_mtu(struct rxe_qp *qp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
		return qp->mtu;

	return rxe->port.mtu_cap;
}

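/* Allocate an skb and build the request headers (BTH plus any RETH,
 * IMMDT, IETH, ATMETH or DETH header) for the next packet of a wqe.
 */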
static struct sk_buff *init_req_packet(struct rxe_qp *qp,
				       struct rxe_av *av,
				       struct rxe_send_wqe *wqe,
				       int opcode, u32 payload,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	struct rxe_send_wr *ibwr = &wqe->wr;
	int pad = (-payload) & 0x3;
	int paylen;
	int solicited;
	u32 qp_num;
	int ack_req;

	/* length from start of bth to end of icrc */
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
	pkt->paylen = paylen;

	/* init skb */
	skb = rxe_init_packet(rxe, av, paylen, pkt);
	if (unlikely(!skb))
		return NULL;

	/* init bth */
	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
			(pkt->mask & RXE_END_MASK) &&
			((pkt->mask & (RXE_SEND_MASK)) ||
			 (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
			 (RXE_WRITE_MASK | RXE_IMMDT_MASK));

	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
					       qp->attr.dest_qp_num;

	ack_req = ((pkt->mask & RXE_END_MASK) ||
		   (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
	if (ack_req)
		qp->req.noack_pkts = 0;

	bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
		 ack_req, pkt->psn);

	/* init optional headers */
	if (pkt->mask & RXE_RETH_MASK) {
		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
		reth_set_va(pkt, wqe->iova);
		reth_set_len(pkt, wqe->dma.resid);
	}

	if (pkt->mask & RXE_IMMDT_MASK)
		immdt_set_imm(pkt, ibwr->ex.imm_data);

	if (pkt->mask & RXE_IETH_MASK)
		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);

	if (pkt->mask & RXE_ATMETH_MASK) {
		atmeth_set_va(pkt, wqe->iova);
		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
		} else {
			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
		}
		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
	}

	if (pkt->mask & RXE_DETH_MASK) {
		if (qp->ibqp.qp_num == 1)
			deth_set_qkey(pkt, GSI_QKEY);
		else
			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
		deth_set_sqp(pkt, qp->ibqp.qp_num);
	}

	return skb;
}

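/* Finish building the packet: fill in the network headers via
 * rxe_prepare() and copy the payload from inline data or from the sge
 * list, zeroing any pad bytes.
 */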
static int finish_packet(struct rxe_qp *qp, struct rxe_av *av,
			 struct rxe_send_wqe *wqe, struct rxe_pkt_info *pkt,
			 struct sk_buff *skb, u32 payload)
{
	int err;

	err = rxe_prepare(av, pkt, skb);
	if (err)
		return err;

	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
		if (wqe->wr.send_flags & IB_SEND_INLINE) {
			u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];

			memcpy(payload_addr(pkt), tmp, payload);

			wqe->dma.resid -= payload;
			wqe->dma.sge_offset += payload;
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), payload,
					RXE_FROM_MR_OBJ);
			if (err)
				return err;
		}
		if (bth_pad(pkt)) {
			u8 *pad = payload_addr(pkt) + payload;

			memset(pad, 0, bth_pad(pkt));
		}
	}

	return 0;
}

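/* On the last packet of an RC message the wqe becomes pending (it now
 * waits for acknowledgements); for earlier packets it is marked
 * processing.
 */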
static void update_wqe_state(struct rxe_qp *qp,
			     struct rxe_send_wqe *wqe,
			     struct rxe_pkt_info *pkt)
{
	if (pkt->mask & RXE_END_MASK) {
		if (qp_type(qp) == IB_QPT_RC)
			wqe->state = wqe_state_pending;
	} else {
		wqe->state = wqe_state_processing;
	}
}

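/* Advance the psn accounting for the packet just built: record the
 * first and last psn of the message on its first packet, and for reads
 * advance past the psns reserved for the expected response packets.
 */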
static void update_wqe_psn(struct rxe_qp *qp,
			   struct rxe_send_wqe *wqe,
			   struct rxe_pkt_info *pkt,
			   u32 payload)
{
	/* number of packets left to send including current one */
	int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;

	/* handle zero length packet case */
	if (num_pkt == 0)
		num_pkt = 1;

	if (pkt->mask & RXE_START_MASK) {
		wqe->first_psn = qp->req.psn;
		wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
	}

	if (pkt->mask & RXE_READ_MASK)
		qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
	else
		qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}

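/* Snapshot the wqe state and request psn before transmitting so they
 * can be rolled back if rxe_xmit_packet() fails.
 */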
static void save_state(struct rxe_send_wqe *wqe,
		       struct rxe_qp *qp,
		       struct rxe_send_wqe *rollback_wqe,
		       u32 *rollback_psn)
{
	rollback_wqe->state = wqe->state;
	rollback_wqe->first_psn = wqe->first_psn;
	rollback_wqe->last_psn = wqe->last_psn;
	*rollback_psn = qp->req.psn;
}

static void rollback_state(struct rxe_send_wqe *wqe,
			   struct rxe_qp *qp,
			   struct rxe_send_wqe *rollback_wqe,
			   u32 rollback_psn)
{
	wqe->state = rollback_wqe->state;
	wqe->first_psn = rollback_wqe->first_psn;
	wqe->last_psn = rollback_wqe->last_psn;
	qp->req.psn = rollback_psn;
}

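/* Commit the per-packet requester state after a successful transmit
 * and (re)arm the retransmit timer if needed.
 */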
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	qp->req.opcode = pkt->opcode;

	if (pkt->mask & RXE_END_MASK)
		qp->req.wqe_index = queue_next_index(qp->sq.queue,
						     qp->req.wqe_index);

	qp->need_req_skb = 0;

	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}

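/* Execute work requests that complete locally without sending a packet
 * (local invalidate, fast-reg MR, bind MW) and then kick the completer,
 * since no ack will arrive for them.
 */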
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rkey = wqe->wr.ex.invalidate_rkey;
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_REG_MR:
		ret = rxe_reg_fast_mr(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return ret;
		}
		break;
	case IB_WR_BIND_MW:
		ret = rxe_bind_mw(qp, wqe);
		if (unlikely(ret)) {
			wqe->status = IB_WC_MW_BIND_ERR;
			return ret;
		}
		break;
	default:
		pr_err("Unexpected send wqe opcode %d\n", opcode);
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		return -EINVAL;
	}

	wqe->state = wqe_state_done;
	wqe->status = IB_WC_SUCCESS;
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);

	/* There is no ack coming for local work requests
	 * which can lead to a deadlock. So go ahead and complete
	 * it now.
	 */
	rxe_run_task(&qp->comp.task, 1);

	return 0;
}

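/* Main requester work function, run from the requester task. Builds and
 * transmits at most one request packet per invocation. Returns 0 to
 * keep the task looping or -EAGAIN to stop.
 */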
int rxe_requester(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info pkt;
	struct sk_buff *skb;
	struct rxe_send_wqe *wqe;
	enum rxe_hdr_mask mask;
	u32 payload;
	int mtu;
	int opcode;
	int err;
	int ret;
	struct rxe_send_wqe rollback_wqe;
	u32 rollback_psn;
	struct rxe_queue *q = qp->sq.queue;
	struct rxe_ah *ah;
	struct rxe_av *av;

	if (!rxe_get(qp))
		return -EAGAIN;

	if (unlikely(!qp->valid))
		goto exit;

	if (unlikely(qp->req.state == QP_STATE_ERROR)) {
		wqe = req_next_wqe(qp);
		if (wqe)
			/*
			 * Generate an error completion for error qp state
			 */
			goto err;
		else
			goto exit;
	}

	if (unlikely(qp->req.state == QP_STATE_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
		qp->req.need_rd_atomic = 0;
		qp->req.wait_psn = 0;
		qp->req.need_retry = 0;
		qp->req.wait_for_rnr_timer = 0;
		goto exit;
	}

	/* we come here if the retransmit timer has fired
	 * or if the rnr timer has fired. If the retransmit
	 * timer fires while we are processing an RNR NAK wait
	 * until the rnr timer has fired before starting the
	 * retry flow
	 */
	if (unlikely(qp->req.need_retry && !qp->req.wait_for_rnr_timer)) {
		req_retry(qp);
		qp->req.need_retry = 0;
	}

	wqe = req_next_wqe(qp);
	if (unlikely(!wqe))
		goto exit;

	if (rxe_wqe_is_fenced(qp, wqe)) {
		qp->req.wait_fence = 1;
		goto exit;
	}

	if (wqe->mask & WR_LOCAL_OP_MASK) {
		err = rxe_do_local_ops(qp, wqe);
		if (unlikely(err))
			goto err;
		else
			goto done;
	}

	if (unlikely(qp_type(qp) == IB_QPT_RC &&
		     psn_compare(qp->req.psn, (qp->comp.psn +
					       RXE_MAX_UNACKED_PSNS)) > 0)) {
		qp->req.wait_psn = 1;
		goto exit;
	}

	/* Limit the number of inflight SKBs per QP */
	if (unlikely(atomic_read(&qp->skb_out) >
		     RXE_INFLIGHT_SKBS_PER_QP_HIGH)) {
		qp->need_req_skb = 1;
		goto exit;
	}

	opcode = next_opcode(qp, wqe, wqe->wr.opcode);
	if (unlikely(opcode < 0)) {
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	mask = rxe_opcode[opcode].mask;
	if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) {
		if (check_init_depth(qp, wqe))
			goto exit;
	}

	mtu = get_mtu(qp);
	payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;
	if (payload > mtu) {
		if (qp_type(qp) == IB_QPT_UD) {
			/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
			 * UD message exceeds the MTU of the port as returned by QueryHCA, the CI
			 * shall not emit any packets for this message. Further, the CI shall not
			 * generate an error due to this condition.
			 */

			/* fake a successful UD send */
			wqe->first_psn = qp->req.psn;
			wqe->last_psn = qp->req.psn;
			qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
			qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
			qp->req.wqe_index = queue_next_index(qp->sq.queue,
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			rxe_run_task(&qp->comp.task, 0);
			goto done;
		}
		payload = mtu;
	}

	pkt.rxe = rxe;
	pkt.opcode = opcode;
	pkt.qp = qp;
	pkt.psn = qp->req.psn;
	pkt.mask = rxe_opcode[opcode].mask;
	pkt.wqe = wqe;

	av = rxe_get_av(&pkt, &ah);
	if (unlikely(!av)) {
		pr_err("qp#%d Failed no address vector\n", qp_num(qp));
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	skb = init_req_packet(qp, av, wqe, opcode, payload, &pkt);
	if (unlikely(!skb)) {
		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
		wqe->status = IB_WC_LOC_QP_OP_ERR;
		if (ah)
			rxe_put(ah);
		goto err;
	}

	err = finish_packet(qp, av, wqe, &pkt, skb, payload);
	if (unlikely(err)) {
		pr_debug("qp#%d Error during finish packet\n", qp_num(qp));
		if (err == -EFAULT)
			wqe->status = IB_WC_LOC_PROT_ERR;
		else
			wqe->status = IB_WC_LOC_QP_OP_ERR;
		kfree_skb(skb);
		if (ah)
			rxe_put(ah);
		goto err;
	}

	if (ah)
		rxe_put(ah);

	/*
	 * To prevent a race on wqe access between requester and completer,
	 * wqe members state and psn need to be set before calling
	 * rxe_xmit_packet().
	 * Otherwise, completer might initiate an unjustified retry flow.
	 */
	save_state(wqe, qp, &rollback_wqe, &rollback_psn);
	update_wqe_state(qp, wqe, &pkt);
	update_wqe_psn(qp, wqe, &pkt, payload);

	err = rxe_xmit_packet(qp, &pkt, skb);
	if (err) {
		qp->need_req_skb = 1;

		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (err == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			goto exit;
		}

		wqe->status = IB_WC_LOC_QP_OP_ERR;
		goto err;
	}

	update_state(qp, &pkt);

	/* A non-zero return value will cause rxe_do_task to
	 * exit its loop and end the tasklet. A zero return
	 * will continue looping and return to rxe_requester
	 */
done:
	ret = 0;
	goto out;
err:
	/* update wqe_index for each wqe completion */
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
	wqe->state = wqe_state_error;
	qp->req.state = QP_STATE_ERROR;
	rxe_run_task(&qp->comp.task, 0);
exit:
	ret = -EAGAIN;
out:
	rxe_put(qp);

	return ret;
}