/* Excerpts from SoftiWARP QP handling (drivers/infiniband/sw/siw/siw_qp.c); elided code is marked with "..." */
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Copyright (c) 2008-2019, IBM Corporation */
/*
 * iWARP (RDMAP, DDP and MPA) parameters as well as Softiwarp settings on a
 * per-RDMAP message basis. Please keep order of initializer. All MPA len
 * is initialized with minimum packet length (header-only messages; the
 * other fields of each entry are elided here):
 */
struct iwarp_pktinfo iwarp_pktinfo[RDMAP_TERMINATE + 1] = {
	.ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_write) - 2),	/* RDMAP_RDMA_WRITE */
	.ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rreq) - 2),	/* RDMAP_RDMA_READ_REQ */
	.ctrl.mpa_len = htons(sizeof(struct iwarp_rdma_rresp) - 2),	/* RDMAP_RDMA_READ_RESP */
	.ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),		/* RDMAP_SEND */
	.ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),	/* RDMAP_SEND_INVAL */
	.ctrl.mpa_len = htons(sizeof(struct iwarp_send) - 2),		/* RDMAP_SEND_SE */
	.ctrl.mpa_len = htons(sizeof(struct iwarp_send_inv) - 2),	/* RDMAP_SEND_SE_INVAL */
	.ctrl.mpa_len = htons(sizeof(struct iwarp_terminate) - 2),	/* RDMAP_TERMINATE */
};
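/*
 * Aside (illustration, not part of siw_qp.c): the MPA length field counts
 * the DDP segment but excludes the 2-byte length field itself, which is why
 * each header-only prototype above uses sizeof(hdr) - 2. A user-space
 * sketch of that rule; the helper name is hypothetical:
 */
#include <stdint.h>
#include <arpa/inet.h>

static inline uint16_t mpa_len_for(size_t hdr_len, size_t payload_len)
{
	/* hdr_len includes the 2-byte MPA length field; subtract it */
	return htons((uint16_t)(hdr_len - 2 + payload_len));
}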
/* in siw_qp_llp_data_ready() */
	read_lock(&sk->sk_callback_lock);

	if (unlikely(!sk->sk_user_data || !sk_to_qp(sk)))
		goto done;

	if (likely(!qp->rx_stream.rx_suspend &&
		   down_read_trylock(&qp->state_lock))) {
		...
		if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
			...
		up_read(&qp->state_lock);
	} else
		siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",
			   qp->rx_stream.rx_suspend);
done:
	read_unlock(&sk->sk_callback_lock);
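/*
 * Aside (illustration, not part of siw_qp.c): a ULP read hook such as the
 * callback above is typically installed on a kernel TCP socket like this;
 * the helper name is hypothetical:
 */
static void ulp_hook_data_ready(struct sock *sk, void *user_data,
				void (*data_ready)(struct sock *))
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = user_data;	/* mapped back via sk_to_qp() above */
	sk->sk_data_ready = data_ready;	/* runs in softirq context */
	write_unlock_bh(&sk->sk_callback_lock);
}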
/* in siw_qp_llp_close() */
	siw_dbg_qp(qp, "enter llp close, state %s\n",
		   siw_qp_state_to_string[qp->attrs.state]);

	down_write(&qp->state_lock);
	qp->rx_stream.rx_suspend = 1;
	qp->tx_ctx.tx_suspend = 1;
	qp->attrs.sk = NULL;

	switch (qp->attrs.state) {
	case SIW_QP_STATE_RTS:
	...
		qp->attrs.state = SIW_QP_STATE_ERROR;
		break;
	/* A forced close: shall the QP be moved to ERROR or IDLE? */
	case SIW_QP_STATE_CLOSING:
		if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
			qp->attrs.state = SIW_QP_STATE_ERROR;
		else
			qp->attrs.state = SIW_QP_STATE_IDLE;
		break;
	default:
		siw_dbg_qp(qp, "llp close: no state transition needed: %s\n",
			   siw_qp_state_to_string[qp->attrs.state]);
		break;
	}
	...
	/* dereference closing CEP */
	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}
	up_write(&qp->state_lock);

	siw_dbg_qp(qp, "llp close exit: state %s\n",
		   siw_qp_state_to_string[qp->attrs.state]);
/* in siw_qp_llp_write_space() */
	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (cep) {
		cep->sk_write_space(sk);

		if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
			(void)siw_sq_start(cep->qp);
	}
	read_unlock(&sk->sk_callback_lock);
/* in siw_qp_readq_init() */
	qp->attrs.irq_size = irq_size;
	qp->attrs.orq_size = orq_size;

	qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
	if (!qp->irq) {
		...
		qp->attrs.irq_size = 0;
		return -ENOMEM;
	}
	qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
	if (!qp->orq) {
		...
		qp->attrs.orq_size = 0;
		qp->attrs.irq_size = 0;
		vfree(qp->irq);
		return -ENOMEM;
	}
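/*
 * Aside: the open-coded size multiplications above can overflow for very
 * large queue sizes; an equivalent overflow-safe form would use the
 * kernel's array_size() helper from <linux/overflow.h>:
 *
 *	qp->irq = vzalloc(array_size(irq_size, sizeof(struct siw_sqe)));
 */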
/* in siw_qp_enable_crc() */
	struct siw_rx_stream *c_rx = &qp->rx_stream;
	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
	int size;

	if (siw_crypto_shash == NULL)
		return -ENOENT;

	size = crypto_shash_descsize(siw_crypto_shash) +
	       sizeof(struct shash_desc);

	c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
	c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
	if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
		kfree(c_tx->mpa_crc_hd);
		kfree(c_rx->mpa_crc_hd);
		c_tx->mpa_crc_hd = NULL;
		c_rx->mpa_crc_hd = NULL;
		return -ENOMEM;
	}
	c_tx->mpa_crc_hd->tfm = siw_crypto_shash;
	c_rx->mpa_crc_hd->tfm = siw_crypto_shash;
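/*
 * Aside (illustration, not part of siw_qp.c): with mpa_crc_hd set up as
 * above, a byte run is folded into the MPA CRC via the generic shash
 * calls; the helper name is hypothetical:
 */
static int mpa_crc_over(struct shash_desc *desc, const u8 *data,
			unsigned int len, u32 *crc_out)
{
	int rv = crypto_shash_init(desc);

	if (!rv)
		rv = crypto_shash_update(desc, data, len);
	if (!rv)
		rv = crypto_shash_final(desc, (u8 *)crc_out);
	return rv;
}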
/* in siw_qp_mpa_rts() */
	struct siw_wqe *wqe = tx_wqe(qp);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		return -EIO;
	}
	memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);

	wqe->wr_status = SIW_WR_QUEUED;
	wqe->sqe.flags = 0;
	wqe->sqe.num_sge = 1;
	wqe->sqe.sge[0].length = 0;
	wqe->sqe.sge[0].laddr = 0;
	wqe->sqe.sge[0].lkey = 0;
	/*
	 * While it must not be checked for inbound zero length
	 * READ/WRITE, some HW may treat STag 0 special.
	 */
	wqe->sqe.rkey = 1;
	wqe->sqe.raddr = 0;
	wqe->processed = 0;

	if (ctrl & MPA_V2_RDMA_WRITE_RTR)
		wqe->sqe.opcode = SIW_OP_WRITE;
	else if (ctrl & MPA_V2_RDMA_READ_RTR) {
		struct siw_sqe *rreq = NULL;

		wqe->sqe.opcode = SIW_OP_READ;

		spin_lock(&qp->orq_lock);
		...
		if (rreq) {
			siw_read_to_orq(rreq, &wqe->sqe);
			qp->orq_put++;
		} else
			rv = -EIO;
		spin_unlock(&qp->orq_lock);
	} else
		rv = -EINVAL;

	if (rv)
		wqe->wr_status = SIW_WR_IDLE;

	spin_unlock_irqrestore(&qp->sq_lock, flags);
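/*
 * Aside: siw_qp_mpa_rts() queues the MPA-v2 "ready to receive" (RTR) frame
 * agreed during connection setup: a zero-length RDMA WRITE if
 * MPA_V2_RDMA_WRITE_RTR was negotiated, else a zero-length READ, which must
 * also reserve an ORQ slot since its response will consume one.
 */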
/* in siw_init_terminate() */
	if (!qp->term_info.valid) {
		memset(&qp->term_info, 0, sizeof(qp->term_info));
		qp->term_info.layer = layer;
		qp->term_info.etype = etype;
		qp->term_info.ecode = ecode;
		qp->term_info.in_tx = in_tx;
		qp->term_info.valid = 1;
	}
	siw_dbg_qp(qp, "init TERM: layer %d, type %d, code %d, in tx %s\n",
		   layer, etype, ecode, in_tx ? "yes" : "no");
/*
 * Send a TERMINATE message, as defined in RFC's 5040/5041/5044.
 * Sending TERMINATE messages is best effort - such messages
 * may only be sent if the QP is still connected and it does
 * not have another outbound message in-progress, i.e. the
 * TERMINATE message must not interfere with an incomplete current
 * transmit operation.
 */
/* in siw_send_terminate() */
	struct iwarp_terminate *term = NULL;
	union iwarp_hdr *err_hdr = NULL;
	struct socket *s = qp->attrs.sk;
	struct siw_rx_stream *srx = &qp->rx_stream;
	union iwarp_hdr *rx_hdr = &srx->hdr;
	...
	if (!qp->term_info.valid)
		return;

	qp->term_info.valid = 0;

	if (tx_wqe(qp)->wr_status == SIW_WR_INPROGRESS) {
		...
		return;
	}
	if (!s && qp->cep)
		/* QP not yet in RTS, take socket from connection endpoint */
		s = qp->cep->sock;
	...
	term = kzalloc(sizeof(*term), GFP_KERNEL);
	if (!term)
		return;

	term->ddp_qn = cpu_to_be32(RDMAP_UNTAGGED_QN_TERMINATE);
	term->ddp_mo = 0;
	term->ddp_msn = cpu_to_be32(1);

	iov[0].iov_base = term;
	iov[0].iov_len = sizeof(*term);

	if ((qp->term_info.layer == TERM_ERROR_LAYER_DDP) ||
	    ((qp->term_info.layer == TERM_ERROR_LAYER_RDMAP) &&
	     (qp->term_info.etype != RDMAP_ETYPE_CATASTROPHIC))) {
		err_hdr = kzalloc(sizeof(*err_hdr), GFP_KERNEL);
		if (!err_hdr) {
			kfree(term);
			return;
		}
	}
	memcpy(&term->ctrl, &iwarp_pktinfo[RDMAP_TERMINATE].ctrl,
	       sizeof(struct iwarp_ctrl));

	__rdmap_term_set_layer(term, qp->term_info.layer);
	__rdmap_term_set_etype(term, qp->term_info.etype);
	__rdmap_term_set_ecode(term, qp->term_info.ecode);

	switch (qp->term_info.layer) {
	case TERM_ERROR_LAYER_RDMAP:
		if (qp->term_info.etype == RDMAP_ETYPE_CATASTROPHIC)
			/* No additional DDP/RDMAP header to be included */
			break;

		if (qp->term_info.etype == RDMAP_ETYPE_REMOTE_PROTECTION) {
			/* Complete RDMAP frame gets attached; DDP segment
			 * length is valid.
			 */
			term->flag_m = 1;
			term->flag_d = 1;
			term->flag_r = 1;

			if (qp->term_info.in_tx) {
				struct iwarp_rdma_rreq *rreq;
				struct siw_wqe *wqe = tx_wqe(qp);

				/* reconstruct peer's RREQ from current TX WQE */
				rreq = (struct iwarp_rdma_rreq *)err_hdr;
				memcpy(&rreq->ctrl,
				       &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
				       sizeof(struct iwarp_ctrl));

				rreq->rsvd = 0;
				rreq->ddp_qn =
					htonl(RDMAP_UNTAGGED_QN_RDMA_READ);
				/* Provide RREQ's MSN as kept aside */
				rreq->ddp_msn = htonl(wqe->sqe.sge[0].length);
				rreq->ddp_mo = htonl(wqe->processed);
				rreq->sink_stag = htonl(wqe->sqe.rkey);
				rreq->sink_to = cpu_to_be64(wqe->sqe.raddr);
				rreq->read_size = htonl(wqe->sqe.sge[0].length);
				rreq->source_stag = htonl(wqe->sqe.sge[0].lkey);
				rreq->source_to =
					cpu_to_be64(wqe->sqe.sge[0].laddr);
				...
				rx_hdr = (union iwarp_hdr *)rreq;
			} else {
				/* use RDMAP/DDP info from failed inbound frame */
				iov[1].iov_base = rx_hdr;
				if (__rdmap_get_opcode(&rx_hdr->ctrl) ==
				    RDMAP_RDMA_READ_REQ)
					iov[1].iov_len = sizeof(struct iwarp_rdma_rreq);
				else
					iov[1].iov_len = sizeof(struct iwarp_send);
			}
		} else {
			/* no DDP hdr info if packet layout is unknown */
			if ((qp->term_info.ecode == RDMAP_ECODE_VERSION) ||
			    (qp->term_info.ecode == RDMAP_ECODE_OPCODE))
				break;

			iov[1].iov_base = rx_hdr;
			/* Only DDP frame will get attached */
			if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
				iov[1].iov_len = sizeof(struct iwarp_rdma_write);
			else
				iov[1].iov_len = sizeof(struct iwarp_send);

			term->flag_m = 1;
			term->flag_d = 1;
		}
		term->ctrl.mpa_len = cpu_to_be16(iov[1].iov_len);
		break;

	case TERM_ERROR_LAYER_DDP:
		/* no DDP hdr info if packet layout is unknown */
		if (((qp->term_info.etype == DDP_ETYPE_TAGGED_BUF) &&
		     (qp->term_info.ecode == DDP_ECODE_T_VERSION)) ||
		    ((qp->term_info.etype == DDP_ETYPE_UNTAGGED_BUF) &&
		     (qp->term_info.ecode == DDP_ECODE_UT_VERSION)))
			break;

		iov[1].iov_base = rx_hdr;
		if (rx_hdr->ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED)
			iov[1].iov_len = sizeof(struct iwarp_rdma_write);
		else
			iov[1].iov_len = sizeof(struct iwarp_send);

		term->flag_m = 1;
		term->flag_d = 1;
		break;
	...
	}
	if (term->flag_m || term->flag_d || term->flag_r) {
		...
		len_terminate = sizeof(*term) + iov[1].iov_len + MPA_CRC_SIZE;
	} else {
		...
		len_terminate = sizeof(*term) + MPA_CRC_SIZE;
	}
	/* Adjust DDP Segment Length parameter, if valid */
	if (term->flag_m) {
		u32 real_ddp_len = be16_to_cpu(rx_hdr->ctrl.mpa_len);
		enum rdma_opcode op = __rdmap_get_opcode(&rx_hdr->ctrl);

		real_ddp_len -= iwarp_pktinfo[op].hdr_len - MPA_HDR_SIZE;
		rx_hdr->ctrl.mpa_len = cpu_to_be16(real_ddp_len);
	}
	term->ctrl.mpa_len =
		cpu_to_be16(len_terminate - (MPA_HDR_SIZE + MPA_CRC_SIZE));
	if (qp->tx_ctx.mpa_crc_hd) {
		crypto_shash_init(qp->tx_ctx.mpa_crc_hd);
		if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
					(u8 *)iov[0].iov_base,
					iov[0].iov_len))
			goto out;
		...
		if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
					(u8 *)iov[1].iov_base,
					iov[1].iov_len))
			goto out;
		crypto_shash_final(qp->tx_ctx.mpa_crc_hd, (u8 *)&crc);
	}
	...
	siw_dbg_qp(qp, "sent TERM: %s, layer %d, type %d, code %d (%d bytes)\n",
		   rv == len_terminate ? "success" : "failure",
		   __rdmap_term_layer(term), __rdmap_term_etype(term),
		   __rdmap_term_ecode(term), rv);
out:
	kfree(term);
	kfree(err_hdr);
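/*
 * Aside (illustration, not part of siw_qp.c): the elided transmit step
 * above hands iov[] to the socket via kernel_sendmsg(); a minimal sketch
 * of that call pattern, helper name hypothetical:
 */
static int send_term_iov(struct socket *s, struct kvec *iov, size_t nr_frags,
			 size_t total_len)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };

	return kernel_sendmsg(s, &msg, iov, nr_frags, total_len);
}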
/* in siw_qp_modify_nonstate() */
	if (mask & SIW_QP_ATTR_ACCESS_FLAGS) {
		if (attrs->flags & SIW_RDMA_BIND_ENABLED)
			qp->attrs.flags |= SIW_RDMA_BIND_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_BIND_ENABLED;

		if (attrs->flags & SIW_RDMA_WRITE_ENABLED)
			qp->attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_WRITE_ENABLED;

		if (attrs->flags & SIW_RDMA_READ_ENABLED)
			qp->attrs.flags |= SIW_RDMA_READ_ENABLED;
		else
			qp->attrs.flags &= ~SIW_RDMA_READ_ENABLED;
	}
/* in siw_qp_nextstate_from_idle() */
	switch (attrs->state) {
	case SIW_QP_STATE_RTS:
		if (attrs->flags & SIW_MPA_CRC) {
			rv = siw_qp_enable_crc(qp);
			if (rv)
				break;
		}
		if (!(mask & SIW_QP_ATTR_LLP_HANDLE))
			rv = -EINVAL;	/* no socket */
		if (!(mask & SIW_QP_ATTR_MPA))
			rv = -EINVAL;	/* no MPA */
		if (rv)
			break;
		/* Initialize iWARP TX state */
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 0;
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 0;
		qp->tx_ctx.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 0;
		/* Initialize iWARP RX state */
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_SEND] = 1;
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ] = 1;
		qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] = 1;

		/* init IRD free queue, caller has already checked limits */
		rv = siw_qp_readq_init(qp, attrs->irq_size,
				       attrs->orq_size);
		if (rv)
			break;

		qp->attrs.sk = attrs->sk;
		qp->attrs.state = SIW_QP_STATE_RTS;
		siw_dbg_qp(qp, "enter RTS: crc=%s, ord=%u, ird=%u\n",
			   attrs->flags & SIW_MPA_CRC ? "y" : "n",
			   qp->attrs.orq_size, qp->attrs.irq_size);
		break;

	case SIW_QP_STATE_ERROR:
		siw_rq_flush(qp);
		qp->attrs.state = SIW_QP_STATE_ERROR;
		if (qp->cep) {
			siw_cep_put(qp->cep);
			qp->cep = NULL;
		}
		break;
	...
	}
/* in siw_qp_nextstate_from_rts() */
	switch (attrs->state) {
	case SIW_QP_STATE_CLOSING:
		...
		if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
			qp->attrs.state = SIW_QP_STATE_CLOSING;
		else
			qp->attrs.state = SIW_QP_STATE_ERROR;
		...
	case SIW_QP_STATE_TERMINATE:
		qp->attrs.state = SIW_QP_STATE_TERMINATE;
		...
	case SIW_QP_STATE_ERROR:
		/* emergency close; esp., how to handle the non-empty IRQ case? */
		...
		qp->attrs.state = SIW_QP_STATE_ERROR;
	}
/* in siw_qp_nextstate_from_term() */
	switch (attrs->state) {
	case SIW_QP_STATE_ERROR:
		...
		qp->attrs.state = SIW_QP_STATE_ERROR;
		if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
			siw_sq_flush(qp);
	}
/* in siw_qp_nextstate_from_close() */
	switch (attrs->state) {
	case SIW_QP_STATE_IDLE:
		WARN_ON(tx_wqe(qp)->wr_status != SIW_WR_IDLE);
		qp->attrs.state = SIW_QP_STATE_IDLE;
		break;
	...
	case SIW_QP_STATE_ERROR:
		...
		qp->attrs.state = SIW_QP_STATE_ERROR;
		if (tx_wqe(qp)->wr_status != SIW_WR_IDLE)
			siw_sq_flush(qp);
		break;
	default:
		siw_dbg_qp(qp, "state transition undefined: %s => %s\n",
			   siw_qp_state_to_string[qp->attrs.state],
			   siw_qp_state_to_string[attrs->state]);
		rv = -ECONNABORTED;
	}
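/*
 * Aside: taken together, the siw_qp_nextstate_from_*() helpers above
 * implement the iWARP QP state machine:
 *
 *   IDLE      -> RTS | ERROR
 *   RTS       -> CLOSING | TERMINATE | ERROR
 *   TERMINATE -> ERROR
 *   CLOSING   -> IDLE | ERROR   (IDLE only with an idle SQ)
 */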
/* in siw_qp_modify(); caller must hold qp->state_lock */
	siw_dbg_qp(qp, "state: %s => %s\n",
		   siw_qp_state_to_string[qp->attrs.state],
		   siw_qp_state_to_string[attrs->state]);
	...
	switch (qp->attrs.state) {
	...
	}
/* in siw_read_to_orq() */
	rreq->id = sqe->id;
	rreq->opcode = sqe->opcode;
	rreq->sge[0].laddr = sqe->sge[0].laddr;
	rreq->sge[0].length = sqe->sge[0].length;
	rreq->sge[0].lkey = sqe->sge[0].lkey;
	rreq->sge[1].lkey = sqe->sge[1].lkey;
	rreq->flags = sqe->flags | SIW_WQE_VALID;
	rreq->num_sge = 1;
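/*
 * Aside: the ORQ/IRQ rings filled by helpers like the one above are
 * indexed with free-running put/get counters reduced modulo the ring size,
 * so the fill level is simply put - get. Illustration, helper name
 * hypothetical:
 */
static inline bool orq_has_room(struct siw_qp *qp)
{
	return qp->orq_put - qp->orq_get < qp->attrs.orq_size;
}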
/*
 * Must be called with SQ locked.
 * To avoid complete SQ starvation by constant inbound READ requests,
 * the active IRQ will not be served after qp->irq_burst, if the
 * SQ has pending work.
 */
/* in siw_activate_tx() */
	irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];

	if (irqe->flags & SIW_WQE_VALID) {
		sqe = sq_get_next(qp);

		if (sqe && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
			qp->irq_burst = 0;
			goto skip_irq;
		}
		memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
		wqe->wr_status = SIW_WR_QUEUED;

		/* start READ RESPONSE */
		wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
		wqe->sqe.flags = 0;
		if (irqe->num_sge) {
			wqe->sqe.num_sge = 1;
			wqe->sqe.sge[0].length = irqe->sge[0].length;
			wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
			wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
		} else {
			wqe->sqe.num_sge = 0;
		}
		/* Retain original RREQ's message sequence number for
		 * potential error reporting cases.
		 */
		wqe->sqe.sge[1].length = irqe->sge[1].length;

		wqe->sqe.rkey = irqe->rkey;
		wqe->sqe.raddr = irqe->raddr;
		wqe->processed = 0;
		qp->irq_get++;

		/* mark current IRQ entry free */
		smp_store_mb(irqe->flags, 0);
		goto out;
	}
	sqe = sq_get_next(qp);
	if (sqe) {
skip_irq:
		memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
		wqe->wr_status = SIW_WR_QUEUED;

		/* First copy SQE to kernel private memory */
		memcpy(&wqe->sqe, sqe, sizeof(*sqe));

		if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
			rv = -EINVAL;
			goto out;
		}
		if (wqe->sqe.flags & SIW_WQE_INLINE) {
			if (wqe->sqe.opcode != SIW_OP_SEND &&
			    wqe->sqe.opcode != SIW_OP_WRITE) {
				rv = -EINVAL;
				goto out;
			}
			if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
				rv = -EINVAL;
				goto out;
			}
			/* inline data lives in the SQE itself */
			wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
			wqe->sqe.sge[0].lkey = 0;
			wqe->sqe.num_sge = 1;
		}
		if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
			/* A READ cannot be fenced */
			if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
				     wqe->sqe.opcode ==
					     SIW_OP_READ_LOCAL_INV)) {
				rv = -EINVAL;
				goto out;
			}
			spin_lock(&qp->orq_lock);
			if (!siw_orq_empty(qp)) {
				qp->tx_ctx.orq_fence = 1;
				rv = 0;
			}
			spin_unlock(&qp->orq_lock);

		} else if (wqe->sqe.opcode == SIW_OP_READ ||
			   wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
			struct siw_sqe *rreq;

			wqe->sqe.num_sge = 1;

			spin_lock(&qp->orq_lock);
			if (!siw_orq_full(qp)) {
				/* each READ gets its own ORQ entry */
				rreq = orq_get_free(qp);
				siw_read_to_orq(rreq, &wqe->sqe);
				qp->orq_put++;
			} else {
				qp->tx_ctx.orq_fence = 1;
				rv = 0;
			}
			spin_unlock(&qp->orq_lock);
		}
		/* Clear SQE, can be re-used by application */
		smp_store_mb(sqe->flags, 0);
		qp->sq_get++;
	}
out:
	if (unlikely(rv < 0))
		wqe->wr_status = SIW_WR_IDLE;
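/*
 * Aside (illustration, not part of siw_qp.c): for SIW_WQE_INLINE the
 * payload is stashed in the unused tail of the SQE's own SGE array, so
 * sge[0].laddr can point at &sge[1] with no lkey. A hypothetical helper
 * building such an SQE could look like:
 */
static void sqe_set_inline(struct siw_sqe *sqe, const void *buf, u32 len)
{
	/* assumes len <= SIW_MAX_INLINE, which fits in sge[1..] */
	memcpy(&sqe->sge[1], buf, len);
	sqe->sge[0].laddr = (uintptr_t)&sqe->sge[1];
	sqe->sge[0].length = len;
	sqe->sge[0].lkey = 0;
	sqe->num_sge = 1;
	sqe->flags |= SIW_WQE_INLINE;
}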
/* in siw_cq_notify_now() */
	if (!cq->base_cq.comp_handler)
		return false;

	/* Read application shared notification state */
	cq_notify = READ_ONCE(cq->notify->flags);
	...
		/* CQ notification is one-shot: since the current CQE
		 * causes user notification, the CQ gets dis-armed and
		 * must be re-armed by the user for a new notification.
		 */
		WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
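/*
 * Aside (user-space illustration, not part of siw_qp.c): the one-shot
 * semantics above mirror the verbs API, where the CQ must be re-armed
 * after every completion event:
 */
#include <infiniband/verbs.h>

static int wait_event_and_rearm(struct ibv_comp_channel *ch)
{
	struct ibv_cq *cq;
	void *ctx;

	if (ibv_get_cq_event(ch, &cq, &ctx))
		return -1;
	ibv_ack_cq_events(cq, 1);		/* acknowledge the event */
	return ibv_req_notify_cq(cq, 0);	/* re-arm for the next one */
}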
/* in siw_sqe_complete() */
	struct siw_cq *cq = qp->scq;
	int rv = 0;

	if (cq) {
		u32 sqe_flags = sqe->flags;
		struct siw_cqe *cqe;
		u32 idx;
		unsigned long flags;

		spin_lock_irqsave(&cq->lock, flags);

		idx = cq->cq_put % cq->num_cqe;
		cqe = &cq->queue[idx];

		if (!READ_ONCE(cqe->flags)) {
			bool notify;

			cqe->id = sqe->id;
			cqe->opcode = sqe->opcode;
			cqe->status = status;
			cqe->imm_data = 0;
			cqe->bytes = bytes;

			if (rdma_is_kernel_res(&cq->base_cq.res))
				cqe->base_qp = &qp->base_qp;
			else
				cqe->qp_id = qp_id(qp);

			/* mark CQE valid for application */
			WRITE_ONCE(cqe->flags, SIW_WQE_VALID);
			/* recycle SQE */
			smp_store_mb(sqe->flags, 0);

			cq->cq_put++;
			notify = siw_cq_notify_now(cq, sqe_flags);

			spin_unlock_irqrestore(&cq->lock, flags);

			if (notify)
				cq->base_cq.comp_handler(&cq->base_cq,
							 cq->base_cq.cq_context);
		} else {
			spin_unlock_irqrestore(&cq->lock, flags);
			rv = -ENOMEM;
			...
		}
	} else {
		/* recycle SQE */
		smp_store_mb(sqe->flags, 0);
	}
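/*
 * Aside (illustration, not part of siw_qp.c): the consumer side of the
 * CQE handshake above polls the same ring; a simplified sketch, field
 * names assumed:
 */
static bool reap_one_cqe(struct siw_cq *cq, struct siw_cqe *out)
{
	struct siw_cqe *cqe = &cq->queue[cq->cq_get % cq->num_cqe];

	if (!READ_ONCE(cqe->flags))
		return false;		/* slot not yet produced */
	*out = *cqe;			/* copy out before releasing slot */
	WRITE_ONCE(cqe->flags, 0);	/* hand the slot back */
	cq->cq_get++;
	return true;
}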
/* in siw_rqe_complete() */
	struct siw_cq *cq = qp->rcq;
	int rv = 0;

	if (cq) {
		struct siw_cqe *cqe;
		u32 idx;
		unsigned long flags;

		spin_lock_irqsave(&cq->lock, flags);

		idx = cq->cq_put % cq->num_cqe;
		cqe = &cq->queue[idx];

		if (!READ_ONCE(cqe->flags)) {
			bool notify;
			u8 cqe_flags = SIW_WQE_VALID;

			cqe->id = rqe->id;
			cqe->opcode = SIW_OP_RECEIVE;
			cqe->status = status;
			cqe->imm_data = 0;
			cqe->bytes = bytes;

			if (rdma_is_kernel_res(&cq->base_cq.res)) {
				cqe->base_qp = &qp->base_qp;
				if (inval_stag) {
					cqe_flags |= SIW_WQE_REM_INVAL;
					cqe->inval_stag = inval_stag;
				}
			} else {
				cqe->qp_id = qp_id(qp);
			}
			WRITE_ONCE(cqe->flags, cqe_flags);
			/* recycle RQE */
			smp_store_mb(rqe->flags, 0);

			cq->cq_put++;
			notify = siw_cq_notify_now(cq, SIW_WQE_VALID);

			spin_unlock_irqrestore(&cq->lock, flags);

			if (notify)
				cq->base_cq.comp_handler(&cq->base_cq,
							 cq->base_cq.cq_context);
		} else {
			spin_unlock_irqrestore(&cq->lock, flags);
			rv = -ENOMEM;
			...
		}
	} else {
		/* recycle RQE */
		smp_store_mb(rqe->flags, 0);
	}
/* in siw_sq_flush() */
	/* Start with completing any work currently on the ORQ */
	while (qp->attrs.orq_size) {
		sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size];
		if (!READ_ONCE(sqe->flags))
			break;
		...
		WRITE_ONCE(sqe->flags, 0);
		qp->orq_get++;
	}
	/*
	 * Flush an in-progress WQE if present
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		siw_dbg_qp(qp, "flush current SQE, type %d, status %d\n",
			   tx_type(wqe), wqe->wr_status);
		...
		if (tx_type(wqe) != SIW_OP_READ_RESPONSE &&
		    ((tx_type(wqe) != SIW_OP_READ &&
		      tx_type(wqe) != SIW_OP_READ_LOCAL_INV) ||
		     wqe->wr_status == SIW_WR_QUEUED))
			/*
			 * An in-progress Read Request is already in
			 * the ORQ
			 */
			siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
					 SIW_WC_WR_FLUSH_ERR);
		wqe->wr_status = SIW_WR_IDLE;
	}
	/* Flush the Send Queue */
	while (qp->attrs.sq_size) {
		sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
		if (!READ_ONCE(sqe->flags))
			break;
		...
		WRITE_ONCE(sqe->flags, 0);
		qp->sq_get++;
	}
/* in siw_rq_flush() */
	struct siw_wqe *wqe = &qp->rx_untagged.wqe_active;

	/*
	 * Flush an in-progress untagged operation if present
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		siw_dbg_qp(qp, "flush current rqe, type %d, status %d\n",
			   rx_type(wqe), wqe->wr_status);
		...
		if (rx_type(wqe) == SIW_OP_RECEIVE)
			siw_rqe_complete(qp, &wqe->rqe, wqe->bytes,
					 0, SIW_WC_WR_FLUSH_ERR);
		else if (rx_type(wqe) != SIW_OP_READ &&
			 rx_type(wqe) != SIW_OP_READ_RESPONSE &&
			 rx_type(wqe) != SIW_OP_WRITE)
			siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR);

		wqe->wr_status = SIW_WR_IDLE;
	}
	wqe = &qp->rx_tagged.wqe_active;
	if (wqe->wr_status != SIW_WR_IDLE) {
		...
		wqe->wr_status = SIW_WR_IDLE;
	}
	/* Flush the Receive Queue */
	while (qp->attrs.rq_size) {
		struct siw_rqe *rqe =
			&qp->recvq[qp->rq_get % qp->attrs.rq_size];

		if (!READ_ONCE(rqe->flags))
			break;
		...
		WRITE_ONCE(rqe->flags, 0);
		qp->rq_get++;
	}
/* in siw_qp_add() */
	int rv = xa_alloc(&sdev->qp_xa, &qp->base_qp.qp_num, qp, xa_limit_32b,
			  GFP_KERNEL);

	if (!rv) {
		kref_init(&qp->ref);
		qp->sdev = sdev;
	}
/* in siw_free_qp() */
	struct siw_device *sdev = qp->sdev;
	unsigned long flags;

	if (qp->cep)
		siw_cep_put(qp->cep);
	...
	found = xa_erase(&sdev->qp_xa, qp_id(qp));
	WARN_ON(found != qp);
	spin_lock_irqsave(&sdev->lock, flags);
	list_del(&qp->devq);
	spin_unlock_irqrestore(&sdev->lock, flags);

	vfree(qp->sendq);
	vfree(qp->recvq);
	vfree(qp->irq);
	vfree(qp->orq);

	siw_put_tx_cpu(qp->tx_cpu);
	atomic_dec(&sdev->num_qp);