Lines matching "long-term" (search tokens: +full:long +full:- +full:term) in drivers/infiniband/sw/siw/siw_qp_rx.c

   1  // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
   4  /* Copyright (c) 2008-2019, IBM Corporation */
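
Only the matching lines of each function are shown below, grouped under the function they belong to. The short note ahead of each group is an editorial summary, inferred from the matched lines and the upstream siw driver sources.

siw_rx_umem() places received payload into a user-mapped memory region, copying at most one page per pass (bytes = min(len, PAGE_SIZE - pg_off)) and keeping the stream's skb accounting current; for kernel-resident QPs with an MPA CRC context, each copied chunk is also folded into the running CRC. A failed skb_copy_bits() yields -EFAULT.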

In siw_rx_umem():
  44  (void *)(uintptr_t)umem->fp_addr);
  46  srx->skb_copied += copied;
  47  srx->skb_new -= copied;
  49  return -EFAULT;
  52  bytes = min(len, (int)PAGE_SIZE - pg_off);
  57  rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
  62  srx->skb_copied += copied;
  63  srx->skb_new -= copied;
  68  return -EFAULT;
  70  if (srx->mpa_crc_hd) {
  71  if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
  72  crypto_shash_update(srx->mpa_crc_hd,
  92  srx->skb_offset += bytes;
  94  len -= bytes;
  98  srx->skb_copied += copied;
  99  srx->skb_new -= copied;
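
siw_rx_kva() is the simpler variant for a kernel virtual address: a single skb_copy_bits() covers the whole length, followed by an optional CRC update and the usual offset/copied/new bookkeeping.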

In siw_rx_kva():
 110  rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
 117  if (srx->mpa_crc_hd)
 118  crypto_shash_update(srx->mpa_crc_hd, (u8 *)kva, len);
 120  srx->skb_offset += len;
 121  srx->skb_copied += len;
 122  srx->skb_new -= len;
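
siw_rx_pbl() receives into a region backed by a physical buffer list; the target offset is computed relative to the region's base VA (addr - mem->va), and the copy proceeds chunk by chunk.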

In siw_rx_pbl():
 130  struct siw_pbl *pbl = mem->pbl;
 131  u64 offset = addr - mem->va;
 146  len -= bytes;
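
siw_rresp_check_ntoh() validates an inbound RDMA READ RESPONSE against the active read WQE: the first DDP segment latches the sink STag and TO from the local SGE, every later segment must carry the same values, and the final segment must complete exactly wqe->bytes; any mismatch returns -EINVAL.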

In siw_rresp_check_ntoh():
 168  struct iwarp_rdma_rresp *rresp = &srx->hdr.rresp;
 169  struct siw_wqe *wqe = &frx->wqe_active;
 172  u32 sink_stag = be32_to_cpu(rresp->sink_stag);
 173  u64 sink_to = be64_to_cpu(rresp->sink_to);
 175  if (frx->first_ddp_seg) {
 176  srx->ddp_stag = wqe->sqe.sge[0].lkey;
 177  srx->ddp_to = wqe->sqe.sge[0].laddr;
 178  frx->pbl_idx = 0;
 190  if (unlikely(srx->ddp_stag != sink_stag)) {
 192  qp_id(rx_qp(srx)), sink_stag, srx->ddp_stag);
 196  if (unlikely(srx->ddp_to != sink_to)) {
 198  qp_id(rx_qp(srx)), (unsigned long long)sink_to,
 199  (unsigned long long)srx->ddp_to);
 203  if (unlikely(!frx->more_ddp_segs &&
 204  (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) {
 207  wqe->processed + srx->fpdu_part_rem, wqe->bytes);
 215  return -EINVAL;
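
siw_write_check_ntoh() applies the same discipline to RDMA WRITE headers: the first segment establishes srx->ddp_stag and srx->ddp_to, and all following segments must match them.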

In siw_write_check_ntoh():
 232  struct iwarp_rdma_write *write = &srx->hdr.rwrite;
 235  u32 sink_stag = be32_to_cpu(write->sink_stag);
 236  u64 sink_to = be64_to_cpu(write->sink_to);
 238  if (frx->first_ddp_seg) {
 239  srx->ddp_stag = sink_stag;
 240  srx->ddp_to = sink_to;
 241  frx->pbl_idx = 0;
 243  if (unlikely(srx->ddp_stag != sink_stag)) {
 246  srx->ddp_stag);
 250  if (unlikely(srx->ddp_to != sink_to)) {
 253  (unsigned long long)sink_to,
 254  (unsigned long long)srx->ddp_to);
 263  return -EINVAL;
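
siw_send_check_ntoh() checks untagged SEND ordering: the DDP MSN must equal the expected sequence number and the MO must equal the bytes already placed. The first segment resets the SGE cursor, a SEND with invalidate records srx->inval_stag, and a receive buffer too small for the remaining payload is flagged as SIW_WC_LOC_LEN_ERR.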

In siw_send_check_ntoh():
 280  struct iwarp_send_inv *send = &srx->hdr.send_inv;
 281  struct siw_wqe *wqe = &frx->wqe_active;
 284  u32 ddp_msn = be32_to_cpu(send->ddp_msn);
 285  u32 ddp_mo = be32_to_cpu(send->ddp_mo);
 286  u32 ddp_qn = be32_to_cpu(send->ddp_qn);
 294  if (unlikely(ddp_msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND])) {
 297  srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
 301  if (unlikely(ddp_mo != wqe->processed)) {
 303  qp_id(rx_qp(srx)), ddp_mo, wqe->processed);
 307  if (frx->first_ddp_seg) {
 309  frx->sge_idx = 0;
 310  frx->sge_off = 0;
 311  frx->pbl_idx = 0;
 314  srx->inval_stag = be32_to_cpu(send->inval_stag);
 316  if (unlikely(wqe->bytes < wqe->processed + srx->fpdu_part_rem)) {
 317  siw_dbg_qp(rx_qp(srx), "receive space short: %d - %d < %d\n",
 318  wqe->bytes, wqe->processed, srx->fpdu_part_rem);
 319  wqe->wc_status = SIW_WC_LOC_LEN_ERR;
 327  return -EINVAL;
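
siw_rqe_get() pops the next valid receive queue element, from the SRQ under srq->lock when one is attached, otherwise from the QP's own recvq. The RQE's SGEs are copied into the untagged rx WQE before the slot is handed back to the application with smp_store_mb(); for an armed SRQ, the limit watermark is then checked, presumably to raise an SRQ-limit event once crossed.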

In siw_rqe_get():
 336  unsigned long flags;
 338  srq = qp->srq;
 340  spin_lock_irqsave(&srq->lock, flags);
 341  if (unlikely(!srq->num_rqe))
 344  rqe = &srq->recvq[srq->rq_get % srq->num_rqe];
 346  if (unlikely(!qp->recvq))
 349  rqe = &qp->recvq[qp->rq_get % qp->attrs.rq_size];
 351  if (likely(rqe->flags == SIW_WQE_VALID)) {
 352  int num_sge = rqe->num_sge;
 357  wqe = rx_wqe(&qp->rx_untagged);
 359  wqe->wr_status = SIW_WR_INPROGRESS;
 360  wqe->bytes = 0;
 361  wqe->processed = 0;
 363  wqe->rqe.id = rqe->id;
 364  wqe->rqe.num_sge = num_sge;
 367  wqe->rqe.sge[i].laddr = rqe->sge[i].laddr;
 368  wqe->rqe.sge[i].lkey = rqe->sge[i].lkey;
 369  wqe->rqe.sge[i].length = rqe->sge[i].length;
 370  wqe->bytes += wqe->rqe.sge[i].length;
 371  wqe->mem[i] = NULL;
 374  /* can be re-used by appl */
 375  smp_store_mb(rqe->flags, 0);
 377  siw_dbg_qp(qp, "too many sge's: %d\n", rqe->num_sge);
 379  spin_unlock_irqrestore(&srq->lock, flags);
 383  qp->rq_get++;
 385  if (srq->armed) {
 387  u32 off = (srq->rq_get + srq->limit) %
 388  srq->num_rqe;
 389  struct siw_rqe *rqe2 = &srq->recvq[off];
 391  if (!(rqe2->flags & SIW_WQE_VALID)) {
 392  srq->armed = false;
 396  srq->rq_get++;
 401  spin_unlock_irqrestore(&srq->lock, flags);
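
siw_proc_send() consumes the payload of one SEND DDP segment: the first segment fetches an RQE (-ENOENT if none is posted), then the loop walks the WQE's SGEs, validates each target against the PD (the SRQ's PD when present), and copies via siw_rx_kva(), siw_rx_umem() or siw_rx_pbl() depending on the memory type. -EAGAIN signals that more stream data is needed to finish the segment.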

In siw_proc_send():
 419   * -EAGAIN: to be called again to finish the DDP segment
 423  struct siw_rx_stream *srx = &qp->rx_stream;
 424  struct siw_rx_fpdu *frx = &qp->rx_untagged;
 430  if (frx->first_ddp_seg) {
 436  return -ENOENT;
 441  if (srx->state == SIW_GET_DATA_START) {
 447  if (!srx->fpdu_part_rem) /* zero length SEND */
 450  data_bytes = min(srx->fpdu_part_rem, srx->skb_new);
 460  sge = &wqe->rqe.sge[frx->sge_idx];
 462  if (!sge->length) {
 464  frx->sge_idx++;
 465  frx->sge_off = 0;
 466  frx->pbl_idx = 0;
 469  sge_bytes = min(data_bytes, sge->length - frx->sge_off);
 470  mem = &wqe->mem[frx->sge_idx];
 475  pd = qp->srq == NULL ? qp->pd : qp->srq->base_srq.pd;
 478  frx->sge_off, sge_bytes);
 488  if (mem_p->mem_obj == NULL)
 490  (void *)(uintptr_t)(sge->laddr + frx->sge_off),
 492  else if (!mem_p->is_pbl)
 493  rv = siw_rx_umem(srx, mem_p->umem,
 494  sge->laddr + frx->sge_off, sge_bytes);
 496  rv = siw_rx_pbl(srx, &frx->pbl_idx, mem_p,
 497  sge->laddr + frx->sge_off, sge_bytes);
 500  wqe->processed += rcvd_bytes;
 505  return -EINVAL;
 507  frx->sge_off += rv;
 509  if (frx->sge_off == sge->length) {
 510  frx->sge_idx++;
 511  frx->sge_off = 0;
 512  frx->pbl_idx = 0;
 514  data_bytes -= rv;
 517  srx->fpdu_part_rem -= rv;
 518  srx->fpdu_part_rcvd += rv;
 520  wqe->processed += rcvd_bytes;
 522  if (!srx->fpdu_part_rem)
 525  return (rv < 0) ? rv : -EAGAIN;
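
siw_proc_write() places one RDMA WRITE segment: the first segment resolves the target MR from the STag index (srx->ddp_stag >> 8), guards against the application having re-registered the memory under a different STag, and bounds-checks the access with siw_check_mem(); data lands at srx->ddp_to + srx->fpdu_part_rcvd.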

In siw_proc_write():
 538   * -EAGAIN: to be called again to finish the DDP segment
 542  struct siw_rx_stream *srx = &qp->rx_stream;
 543  struct siw_rx_fpdu *frx = &qp->rx_tagged;
 547  if (srx->state == SIW_GET_DATA_START) {
 548  if (!srx->fpdu_part_rem) /* zero length WRITE */
 557  bytes = min(srx->fpdu_part_rem, srx->skb_new);
 559  if (frx->first_ddp_seg) {
 562  rx_mem(frx) = siw_mem_id2obj(qp->sdev, srx->ddp_stag >> 8);
 566  srx->ddp_stag);
 571  return -EINVAL;
 573  wqe->rqe.num_sge = 1;
 575  wqe->wr_status = SIW_WR_INPROGRESS;
 580   * Check if application re-registered memory with different
 583  if (unlikely(mem->stag != srx->ddp_stag)) {
 587  return -EINVAL;
 589  rv = siw_check_mem(qp->pd, mem, srx->ddp_to + srx->fpdu_part_rcvd,
 593  DDP_ETYPE_TAGGED_BUF, siw_tagged_error(-rv),
 598  return -EINVAL;
 601  if (mem->mem_obj == NULL)
 603  (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
 605  else if (!mem->is_pbl)
 606  rv = siw_rx_umem(srx, mem->umem,
 607  srx->ddp_to + srx->fpdu_part_rcvd, bytes);
 609  rv = siw_rx_pbl(srx, &frx->pbl_idx, mem,
 610  srx->ddp_to + srx->fpdu_part_rcvd, bytes);
 616  return -EINVAL;
 618  srx->fpdu_part_rem -= rv;
 619  srx->fpdu_part_rcvd += rv;
 621  if (!srx->fpdu_part_rem) {
 622  srx->ddp_to += srx->fpdu_part_rcvd;
 625  return -EAGAIN;
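
siw_proc_rreq() only sanity-checks an inbound RDMA READ REQUEST, which must carry no payload; a non-zero remainder is answered with -EPROTO.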

In siw_proc_rreq():
 633  struct siw_rx_stream *srx = &qp->rx_stream;
 635  if (!srx->fpdu_part_rem)
 639  be16_to_cpu(srx->hdr.ctrl.mpa_len));
 641  return -EPROTO;
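
siw_init_rresp() turns the READ REQUEST into a READ RESPONSE work element: after the untagged MSN check it fills in a SIW_OP_READ_RESPONSE SQE, using the idle tx slot when available and an IRQ (inbound read queue) entry otherwise; sge[1].length is apparently reused to carry the MSN, and IRQ exhaustion is treated as a protocol error.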

In siw_init_rresp():
 665  uint64_t raddr = be64_to_cpu(srx->hdr.rreq.sink_to),
 666  laddr = be64_to_cpu(srx->hdr.rreq.source_to);
 667  uint32_t length = be32_to_cpu(srx->hdr.rreq.read_size),
 668  lkey = be32_to_cpu(srx->hdr.rreq.source_stag),
 669  rkey = be32_to_cpu(srx->hdr.rreq.sink_stag),
 670  msn = be32_to_cpu(srx->hdr.rreq.ddp_msn);
 673  unsigned long flags;
 675  if (unlikely(msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ])) {
 679  return -EPROTO;
 681  spin_lock_irqsave(&qp->sq_lock, flags);
 683  if (unlikely(!qp->attrs.irq_size)) {
 687  if (tx_work->wr_status == SIW_WR_IDLE) {
 692  tx_work->processed = 0;
 693  tx_work->mem[0] = NULL;
 694  tx_work->wr_status = SIW_WR_QUEUED;
 695  resp = &tx_work->sqe;
 701  resp->opcode = SIW_OP_READ_RESPONSE;
 703  resp->sge[0].length = length;
 704  resp->sge[0].laddr = laddr;
 705  resp->sge[0].lkey = lkey;
 710  resp->sge[1].length = msn;
 712  resp->raddr = raddr;
 713  resp->rkey = rkey;
 714  resp->num_sge = length ? 1 : 0;
 717  smp_store_mb(resp->flags, SIW_WQE_VALID);
 721  qp_id(qp), qp->attrs.irq_size);
 726  rv = -EPROTO;
 729  spin_unlock_irqrestore(&qp->sq_lock, flags);
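
siw_orqe_start_rx() matches an incoming READ RESPONSE against the current outstanding-read-queue entry and initializes the tagged rx WQE from it; a missing ORQ or invalid ORQE yields -EPROTO.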

In siw_orqe_start_rx():
 748  if (unlikely(!qp->attrs.orq_size))
 749  return -EPROTO;
 755  if (READ_ONCE(orqe->flags) & SIW_WQE_VALID) {
 757  wqe = rx_wqe(&qp->rx_tagged);
 758  wqe->sqe.id = orqe->id;
 759  wqe->sqe.opcode = orqe->opcode;
 760  wqe->sqe.sge[0].laddr = orqe->sge[0].laddr;
 761  wqe->sqe.sge[0].lkey = orqe->sge[0].lkey;
 762  wqe->sqe.sge[0].length = orqe->sge[0].length;
 763  wqe->sqe.flags = orqe->flags;
 764  wqe->sqe.num_sge = 1;
 765  wqe->bytes = orqe->sge[0].length;
 766  wqe->processed = 0;
 767  wqe->mem[0] = NULL;
 770  wqe->wr_status = SIW_WR_INPROGRESS;
 774  return -EPROTO;
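
siw_proc_rresp() receives READ RESPONSE payload: the first segment promotes the ORQ entry to an in-progress WQE and checks the single local SGE for IB_ACCESS_LOCAL_WRITE; placement then follows the usual kva/umem/pbl split, and -EAGAIN is returned until the segment is complete.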

In siw_proc_rresp():
 788  struct siw_rx_stream *srx = &qp->rx_stream;
 789  struct siw_rx_fpdu *frx = &qp->rx_tagged;
 795  if (frx->first_ddp_seg) {
 796  if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
 798  qp_id(qp), wqe->wr_status, wqe->sqe.opcode);
 799  rv = -EPROTO;
 808  qp_id(qp), qp->attrs.orq_size);
 817  if (unlikely(wqe->wr_status != SIW_WR_INPROGRESS)) {
 819  qp_id(qp), wqe->wr_status);
 820  rv = -EPROTO;
 824  if (!srx->fpdu_part_rem) /* zero length RRESPONSE */
 827  sge = wqe->sqe.sge; /* there is only one */
 828  mem = &wqe->mem[0];
 834  rv = siw_check_sge(qp->pd, sge, mem, IB_ACCESS_LOCAL_WRITE, 0,
 835  wqe->bytes);
 838  wqe->wc_status = SIW_WC_LOC_PROT_ERR;
 842  siw_tagged_error(-rv), 0);
 846  return -EINVAL;
 851  bytes = min(srx->fpdu_part_rem, srx->skb_new);
 853  if (mem_p->mem_obj == NULL)
 855  (void *)(uintptr_t)(sge->laddr + wqe->processed),
 857  else if (!mem_p->is_pbl)
 858  rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
 861  rv = siw_rx_pbl(srx, &frx->pbl_idx, mem_p,
 862  sge->laddr + wqe->processed, bytes);
 864  wqe->wc_status = SIW_WC_GENERAL_ERR;
 865  rv = -EINVAL;
 868  srx->fpdu_part_rem -= rv;
 869  srx->fpdu_part_rcvd += rv;
 870  wqe->processed += rv;
 872  if (!srx->fpdu_part_rem) {
 873  srx->ddp_to += srx->fpdu_part_rcvd;
 876  return -EAGAIN;
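
siw_proc_terminate() parses a peer TERMINATE message: QN, MSN and MO are validated, and when flag_m is set the embedded DDP/RDMAP header is copied out so its type and length can be reported; network-fragmented TERMs are explicitly not supported. Every path ends in -ECONNRESET, since the connection is being torn down anyway.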

In siw_proc_terminate():
 886  struct siw_rx_stream *srx = &qp->rx_stream;
 887  struct sk_buff *skb = srx->skb;
 888  struct iwarp_terminate *term = &srx->hdr.terminate;
 895  __rdmap_term_layer(term), __rdmap_term_etype(term),
 896  __rdmap_term_ecode(term));
 898  if (be32_to_cpu(term->ddp_qn) != RDMAP_UNTAGGED_QN_TERMINATE ||
 899  be32_to_cpu(term->ddp_msn) !=
 900  qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] ||
 901  be32_to_cpu(term->ddp_mo) != 0) {
 902  pr_warn("siw: rx bogus TERM [QN x%08x, MSN x%08x, MO x%08x]\n",
 903  be32_to_cpu(term->ddp_qn), be32_to_cpu(term->ddp_msn),
 904  be32_to_cpu(term->ddp_mo));
 905  return -ECONNRESET;
 908   * Receive remaining pieces of TERM if indicated
 910  if (!term->flag_m)
 911  return -ECONNRESET;
 914   * TERM message
 916  if (srx->skb_new < sizeof(struct iwarp_ctrl_tagged))
 917  return -ECONNRESET;
 921  skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
 928  srx->skb_offset += to_copy;
 929  srx->skb_new -= to_copy;
 930  srx->skb_copied += to_copy;
 931  srx->fpdu_part_rcvd += to_copy;
 932  srx->fpdu_part_rem -= to_copy;
 934  to_copy = iwarp_pktinfo[op].hdr_len - to_copy;
 936  /* Again, no network fragmented TERM's */
 937  if (to_copy + MPA_CRC_SIZE > srx->skb_new)
 938  return -ECONNRESET;
 940  skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
 942  if (term->flag_r) {
 943  siw_dbg_qp(qp, "TERM reports RDMAP hdr type %u, len %u (%s)\n",
 945  term->flag_m ? "valid" : "invalid");
 946  } else if (term->flag_d) {
 947  siw_dbg_qp(qp, "TERM reports DDP hdr type %u, len %u (%s)\n",
 949  term->flag_m ? "valid" : "invalid");
 952  srx->skb_new -= to_copy;
 953  srx->skb_offset += to_copy;
 954  srx->skb_copied += to_copy;
 955  srx->fpdu_part_rcvd += to_copy;
 956  srx->fpdu_part_rem -= to_copy;
 958  return -ECONNRESET;
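
siw_get_trailer() consumes the pad and MPA CRC: pad bytes are folded into the CRC before crypto_shash_final(), and the computed value is compared with the received trailer. A short skb returns -EAGAIN; a CRC mismatch returns -EINVAL.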

In siw_get_trailer():
 963  struct sk_buff *skb = srx->skb;
 964  u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
 968  srx->fpdu_part_rem, srx->skb_new, srx->pad);
 970  if (srx->skb_new < srx->fpdu_part_rem)
 971  return -EAGAIN;
 973  skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);
 975  if (srx->mpa_crc_hd && srx->pad)
 976  crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
 978  srx->skb_new -= srx->fpdu_part_rem;
 979  srx->skb_offset += srx->fpdu_part_rem;
 980  srx->skb_copied += srx->fpdu_part_rem;
 982  if (!srx->mpa_crc_hd)
 989  crypto_shash_final(srx->mpa_crc_hd, (u8 *)&crc_own);
 990  crc_in = (__force __wsum)srx->trailer.crc;
 994  crc_in, crc_own, qp->rx_stream.rdmap_op);
 999  return -EINVAL;
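
siw_get_hdr() assembles the MPA/DDP/RDMAP header, possibly across several skbs: first the MIN_DDP_HDR bytes, then the opcode-specific remainder, with the CRC restarted over the complete header. DDP segment interleaving of tagged and untagged messages is allowed only at message boundaries, so an opcode change in mid-message restores the previous FPDU context and fails with -EPROTO.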

In siw_get_hdr():
1008  struct sk_buff *skb = srx->skb;
1010  struct iwarp_ctrl *c_hdr = &srx->hdr.ctrl;
1015  if (srx->fpdu_part_rcvd < MIN_DDP_HDR) {
1019  bytes = min_t(int, srx->skb_new,
1020  MIN_DDP_HDR - srx->fpdu_part_rcvd);
1022  skb_copy_bits(skb, srx->skb_offset,
1023  (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
1025  srx->fpdu_part_rcvd += bytes;
1027  srx->skb_new -= bytes;
1028  srx->skb_offset += bytes;
1029  srx->skb_copied += bytes;
1031  if (srx->fpdu_part_rcvd < MIN_DDP_HDR)
1032  return -EAGAIN;
1041  if (c_hdr->ddp_rdmap_ctrl & DDP_FLAG_TAGGED) {
1050  return -EINVAL;
1059  return -EINVAL;
1070  return -EINVAL;
1077  frx = qp->rx_fpdu;
1086  bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR;
1088  if (srx->skb_new < bytes)
1089  return -EAGAIN;
1091  skb_copy_bits(skb, srx->skb_offset,
1092  (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
1094  srx->fpdu_part_rcvd += bytes;
1096  srx->skb_new -= bytes;
1097  srx->skb_offset += bytes;
1098  srx->skb_copied += bytes;
1107   * tagged and untagged RDMAP messages is supported, as long as
1113  if (srx->mpa_crc_hd) {
1117  crypto_shash_init(srx->mpa_crc_hd);
1118  crypto_shash_update(srx->mpa_crc_hd, (u8 *)c_hdr,
1119  srx->fpdu_part_rcvd);
1121  if (frx->more_ddp_segs) {
1122  frx->first_ddp_seg = 0;
1123  if (frx->prev_rdmap_op != opcode) {
1125  frx->prev_rdmap_op, opcode);
1134  set_rx_fpdu_context(qp, frx->prev_rdmap_op);
1135  __rdmap_set_opcode(c_hdr, frx->prev_rdmap_op);
1136  return -EPROTO;
1139  frx->prev_rdmap_op = opcode;
1140  frx->first_ddp_seg = 1;
1142  frx->more_ddp_segs = c_hdr->ddp_rdmap_ctrl & DDP_FLAG_LAST ? 0 : 1;
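
siw_check_tx_fence() runs under orq_lock once a READ RESPONSE completes: the finished ORQ entry is cleared and, if the transmit path was fenced on a full ORQ, a waiting READ is moved into the freed slot via siw_read_to_orq(), or the fence is simply lifted for other work.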

In siw_check_tx_fence():
1152  unsigned long flags;
1154  spin_lock_irqsave(&qp->orq_lock, flags);
1159  WRITE_ONCE(rreq->flags, 0);
1161  if (qp->tx_ctx.orq_fence) {
1162  if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
1164  qp_id(qp), tx_waiting->wr_status);
1165  rv = -EPROTO;
1169  if (tx_waiting->sqe.opcode == SIW_OP_READ ||
1170  tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
1174  rv = -EPROTO;
1177  siw_read_to_orq(rreq, &tx_waiting->sqe);
1179  qp->orq_put++;
1180  qp->tx_ctx.orq_fence = 0;
1184  qp->tx_ctx.orq_fence = 0;
1188  qp_id(qp), qp->orq_get, qp->orq_put);
1189  rv = -EPROTO;
1192  qp->orq_get++;
1194  spin_unlock_irqrestore(&qp->orq_lock, flags);
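
siw_rdmap_complete() finalizes an RDMAP message, or aborts it on error: it advances the relevant untagged MSN, completes the RQE or SQE with the matching WC status, performs STag invalidation for SEND with invalidate and READ_LOCAL_INV, releases the consumed ORQ slot, and drops the MR reference held for a tagged WRITE.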

In siw_rdmap_complete():
1216  struct siw_rx_stream *srx = &qp->rx_stream;
1217  struct siw_wqe *wqe = rx_wqe(qp->rx_fpdu);
1218  enum siw_wc_status wc_status = wqe->wc_status;
1219  u8 opcode = __rdmap_get_opcode(&srx->hdr.ctrl);
1225  wqe->rqe.flags |= SIW_WQE_SOLICITED;
1230  if (wqe->wr_status == SIW_WR_IDLE)
1233  srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]++;
1243  rv = siw_invalidate_stag(qp->pd, srx->inval_stag);
1247  rv == -EACCES ?
1254  rv = siw_rqe_complete(qp, &wqe->rqe, wqe->processed,
1255  rv ? 0 : srx->inval_stag,
1258  rv = siw_rqe_complete(qp, &wqe->rqe, wqe->processed,
1265  if (wqe->wr_status == SIW_WR_IDLE)
1269  if ((srx->state == SIW_GET_HDR &&
1270  qp->rx_fpdu->first_ddp_seg) || error == -ENODATA)
1276  } else if (rdma_is_kernel_res(&qp->base_qp.res) &&
1281  rv = siw_invalidate_stag(qp->pd, wqe->sqe.sge[0].lkey);
1296  if ((wqe->sqe.flags & SIW_WQE_SIGNALLED) || error != 0)
1297  rv = siw_sqe_complete(qp, &wqe->sqe, wqe->processed,
1305  if (qp->attrs.orq_size)
1306  WRITE_ONCE(orq_get_current(qp)->flags, 0);
1313  srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]++;
1318  if (wqe->wr_status == SIW_WR_IDLE)
1324   * While a zero-length WRITE is allowed,
1327  if (rx_mem(&qp->rx_tagged)) {
1328  siw_mem_put(rx_mem(&qp->rx_tagged));
1329  rx_mem(&qp->rx_tagged) = NULL;
1336  wqe->wr_status = SIW_WR_IDLE;
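
siw_tcp_rx_data() is the tcp_read_sock() callback that drives the whole receive state machine: per FPDU it cycles through SIW_GET_HDR, SIW_GET_DATA_START/MORE and SIW_GET_TRAILER, derives the payload remainder and pad from the MPA length field, hands payload to the opcode-specific iwarp_pktinfo[].rx_data() handler, and returns the number of bytes consumed from the skb.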

In siw_tcp_rx_data():
1349   * @len: skb->len - offset : payload in skb
1354  struct siw_qp *qp = rd_desc->arg.data;
1355  struct siw_rx_stream *srx = &qp->rx_stream;
1358  srx->skb = skb;
1359  srx->skb_new = skb->len - off;
1360  srx->skb_offset = off;
1361  srx->skb_copied = 0;
1363  siw_dbg_qp(qp, "new data, len %d\n", srx->skb_new);
1365  while (srx->skb_new) {
1368  if (unlikely(srx->rx_suspend)) {
1370  srx->skb_copied += srx->skb_new;
1373  switch (srx->state) {
1377  srx->fpdu_part_rem =
1378  be16_to_cpu(srx->hdr.ctrl.mpa_len) -
1379  srx->fpdu_part_rcvd + MPA_HDR_SIZE;
1381  if (srx->fpdu_part_rem)
1382  srx->pad = -srx->fpdu_part_rem & 0x3;
1384  srx->pad = 0;
1386  srx->state = SIW_GET_DATA_START;
1387  srx->fpdu_part_rcvd = 0;
1398  qp->rx_fpdu->first_ddp_seg = 0;
1403   * Headers will be checked by the opcode-specific
1406  rv = iwarp_pktinfo[qp->rx_stream.rdmap_op].rx_data(qp);
1409  be16_to_cpu(srx->hdr.ctrl.mpa_len)
1412  srx->fpdu_part_rem = (-mpa_len & 0x3)
1414  srx->fpdu_part_rcvd = 0;
1415  srx->state = SIW_GET_TRAILER;
1417  if (unlikely(rv == -ECONNRESET))
1420  srx->state = SIW_GET_DATA_MORE;
1434  srx->state = SIW_GET_HDR;
1435  srx->fpdu_part_rcvd = 0;
1437  if (!(srx->hdr.ctrl.ddp_rdmap_ctrl &
1449  rv = -EPROTO;
1452  if (unlikely(rv != 0 && rv != -EAGAIN)) {
1453  if ((srx->state > SIW_GET_HDR ||
1454  qp->rx_fpdu->more_ddp_segs) && run_completion)
1458  srx->state);
1466  srx->state, srx->fpdu_part_rem);
1470  return srx->skb_copied;