Lines matching "long", "-", "term"

// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/* Copyright (c) 2008-2019, IBM Corporation */
/* siw_rx_umem() */
			/* ... */
			(void *)(uintptr_t)umem->fp_addr);
			/* ... */
			srx->skb_copied += copied;
			srx->skb_new -= copied;

			return -EFAULT;
		/* ... */
		bytes = min(len, (int)PAGE_SIZE - pg_off);
		/* ... */
		rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
		/* ... */
			srx->skb_copied += copied;
			srx->skb_new -= copied;
			/* ... */
			return -EFAULT;
		/* ... */
		if (srx->mpa_crc_hd) {
			if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
				crypto_shash_update(srx->mpa_crc_hd,
		/* ... */
		srx->skb_offset += bytes;
		/* ... */
		len -= bytes;
	/* ... */
	srx->skb_copied += copied;
	srx->skb_new -= copied;
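
/*
 * The loop above places payload into user memory one page at a time:
 * each pass copies at most PAGE_SIZE - pg_off bytes out of the skb via
 * skb_copy_bits() and, if an MPA CRC is being computed and the QP is a
 * kernel resource, feeds the same bytes into crypto_shash_update()
 * before advancing skb_offset.
 */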
/* siw_rx_kva() */
	rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
	/* ... */
	if (srx->mpa_crc_hd)
		crypto_shash_update(srx->mpa_crc_hd, (u8 *)kva, len);

	srx->skb_offset += len;
	srx->skb_copied += len;
	srx->skb_new -= len;
/* siw_rx_pbl() */
	struct siw_pbl *pbl = mem->pbl;
	u64 offset = addr - mem->va;
	/* ... */
		len -= bytes;
/* siw_rresp_check_ntoh() */
	struct iwarp_rdma_rresp *rresp = &srx->hdr.rresp;
	struct siw_wqe *wqe = &frx->wqe_active;
	/* ... */
	u32 sink_stag = be32_to_cpu(rresp->sink_stag);
	u64 sink_to = be64_to_cpu(rresp->sink_to);

	if (frx->first_ddp_seg) {
		srx->ddp_stag = wqe->sqe.sge[0].lkey;
		srx->ddp_to = wqe->sqe.sge[0].laddr;
		frx->pbl_idx = 0;
	/* ... */
	if (unlikely(srx->ddp_stag != sink_stag)) {
		/* ... */
			qp_id(rx_qp(srx)), sink_stag, srx->ddp_stag);
		/* ... */
	if (unlikely(srx->ddp_to != sink_to)) {
		/* ... */
			qp_id(rx_qp(srx)), (unsigned long long)sink_to,
			(unsigned long long)srx->ddp_to);
		/* ... */
	if (unlikely(!frx->more_ddp_segs &&
		     (wqe->processed + srx->fpdu_part_rem != wqe->bytes))) {
		/* ... */
			wqe->processed + srx->fpdu_part_rem, wqe->bytes);
	/* ... */
	return -EINVAL;
/* siw_write_check_ntoh() */
	struct iwarp_rdma_write *write = &srx->hdr.rwrite;
	/* ... */
	u32 sink_stag = be32_to_cpu(write->sink_stag);
	u64 sink_to = be64_to_cpu(write->sink_to);

	if (frx->first_ddp_seg) {
		srx->ddp_stag = sink_stag;
		srx->ddp_to = sink_to;
		frx->pbl_idx = 0;
	/* ... */
	if (unlikely(srx->ddp_stag != sink_stag)) {
		/* ... */
			srx->ddp_stag);
		/* ... */
	if (unlikely(srx->ddp_to != sink_to)) {
		/* ... */
			(unsigned long long)sink_to,
			(unsigned long long)srx->ddp_to);
	/* ... */
	return -EINVAL;
/* siw_send_check_ntoh() */
	struct iwarp_send_inv *send = &srx->hdr.send_inv;
	struct siw_wqe *wqe = &frx->wqe_active;
	/* ... */
	u32 ddp_msn = be32_to_cpu(send->ddp_msn);
	u32 ddp_mo = be32_to_cpu(send->ddp_mo);
	u32 ddp_qn = be32_to_cpu(send->ddp_qn);
	/* ... */
	if (unlikely(ddp_msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND])) {
		/* ... */
			srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
		/* ... */
	if (unlikely(ddp_mo != wqe->processed)) {
		/* ... */
			qp_id(rx_qp(srx)), ddp_mo, wqe->processed);
		/* ... */
	if (frx->first_ddp_seg) {
		/* ... */
		frx->sge_idx = 0;
		frx->sge_off = 0;
		frx->pbl_idx = 0;
		/* ... */
		srx->inval_stag = be32_to_cpu(send->inval_stag);
	/* ... */
	if (unlikely(wqe->bytes < wqe->processed + srx->fpdu_part_rem)) {
		siw_dbg_qp(rx_qp(srx), "receive space short: %d - %d < %d\n",
			   wqe->bytes, wqe->processed, srx->fpdu_part_rem);
		wqe->wc_status = SIW_WC_LOC_LEN_ERR;
	/* ... */
	return -EINVAL;
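
/*
 * The checks above enforce DDP ordering for untagged SENDs: the
 * message sequence number must match the next expected MSN on the
 * SEND queue, and the message offset must equal the byte count
 * already placed for this WQE, i.e. segments of one message arrive
 * strictly in order and without gaps.
 */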
/* siw_rqe_get() */
	unsigned long flags;
	/* ... */
	srq = qp->srq;
	/* ... */
		spin_lock_irqsave(&srq->lock, flags);
		if (unlikely(!srq->num_rqe))
			/* ... */
		rqe = &srq->recvq[srq->rq_get % srq->num_rqe];
	/* ... */
		if (unlikely(!qp->recvq))
			/* ... */
		rqe = &qp->recvq[qp->rq_get % qp->attrs.rq_size];
	/* ... */
	if (likely(rqe->flags == SIW_WQE_VALID)) {
		int num_sge = rqe->num_sge;
		/* ... */
			wqe = rx_wqe(&qp->rx_untagged);
			/* ... */
			wqe->wr_status = SIW_WR_INPROGRESS;
			wqe->bytes = 0;
			wqe->processed = 0;
			/* ... */
			wqe->rqe.id = rqe->id;
			wqe->rqe.num_sge = num_sge;
			/* ... */
				wqe->rqe.sge[i].laddr = rqe->sge[i].laddr;
				wqe->rqe.sge[i].lkey = rqe->sge[i].lkey;
				wqe->rqe.sge[i].length = rqe->sge[i].length;
				wqe->bytes += wqe->rqe.sge[i].length;
				wqe->mem[i] = NULL;
			/* ... */
			/* can be re-used by appl */
			smp_store_mb(rqe->flags, 0);
		/* ... */
			siw_dbg_qp(qp, "too many sge's: %d\n", rqe->num_sge);
			/* ... */
				spin_unlock_irqrestore(&srq->lock, flags);
			/* ... */
		qp->rq_get++;
	/* ... */
		if (srq->armed) {
			/* ... */
			u32 off = (srq->rq_get + srq->limit) %
				  srq->num_rqe;
			struct siw_rqe *rqe2 = &srq->recvq[off];

			if (!(rqe2->flags & SIW_WQE_VALID)) {
				srq->armed = false;
			/* ... */
		srq->rq_get++;
	/* ... */
	spin_unlock_irqrestore(&srq->lock, flags);
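
/*
 * SRQ limit arming, as visible above: while srq->armed is set, the
 * RQE sitting srq->limit slots ahead of the consumer index is peeked
 * after a dequeue; once it is no longer SIW_WQE_VALID the queue has
 * drained below the application's low watermark and the SRQ is
 * disarmed (the limit-event generation itself is elided here).
 */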
/* siw_proc_send() */
/*
 * ...
 *	-EAGAIN: to be called again to finish the DDP segment
 */
	struct siw_rx_stream *srx = &qp->rx_stream;
	struct siw_rx_fpdu *frx = &qp->rx_untagged;
	/* ... */
	if (frx->first_ddp_seg) {
		/* ... */
			return -ENOENT;
		/* ... */
	if (srx->state == SIW_GET_DATA_START) {
		/* ... */
		if (!srx->fpdu_part_rem) /* zero length SEND */
			/* ... */
	data_bytes = min(srx->fpdu_part_rem, srx->skb_new);
	/* ... */
		sge = &wqe->rqe.sge[frx->sge_idx];

		if (!sge->length) {
			/* ... */
			frx->sge_idx++;
			frx->sge_off = 0;
			frx->pbl_idx = 0;
			/* ... */
		sge_bytes = min(data_bytes, sge->length - frx->sge_off);
		mem = &wqe->mem[frx->sge_idx];
		/* ... */
		pd = qp->srq == NULL ? qp->pd : qp->srq->base_srq.pd;
		/* ... */
				   frx->sge_off, sge_bytes);
		/* ... */
		if (mem_p->mem_obj == NULL)
			/* ... */
				(void *)(uintptr_t)(sge->laddr + frx->sge_off),
				/* ... */
		else if (!mem_p->is_pbl)
			rv = siw_rx_umem(srx, mem_p->umem,
					 sge->laddr + frx->sge_off, sge_bytes);
		else
			rv = siw_rx_pbl(srx, &frx->pbl_idx, mem_p,
					sge->laddr + frx->sge_off, sge_bytes);
		/* ... */
			wqe->processed += rcvd_bytes;
			/* ... */
			return -EINVAL;
		/* ... */
		frx->sge_off += rv;

		if (frx->sge_off == sge->length) {
			frx->sge_idx++;
			frx->sge_off = 0;
			frx->pbl_idx = 0;
		/* ... */
		data_bytes -= rv;
		/* ... */
		srx->fpdu_part_rem -= rv;
		srx->fpdu_part_rcvd += rv;
	/* ... */
	wqe->processed += rcvd_bytes;

	if (!srx->fpdu_part_rem)
		/* ... */
	return (rv < 0) ? rv : -EAGAIN;
/* siw_proc_write() */
/*
 * ...
 *	-EAGAIN: to be called again to finish the DDP segment
 */
	struct siw_rx_stream *srx = &qp->rx_stream;
	struct siw_rx_fpdu *frx = &qp->rx_tagged;
	/* ... */
	if (srx->state == SIW_GET_DATA_START) {
		if (!srx->fpdu_part_rem) /* zero length WRITE */
			/* ... */
	bytes = min(srx->fpdu_part_rem, srx->skb_new);

	if (frx->first_ddp_seg) {
		/* ... */
		rx_mem(frx) = siw_mem_id2obj(qp->sdev, srx->ddp_stag >> 8);
		/* ... */
				srx->ddp_stag);
			/* ... */
			return -EINVAL;
		/* ... */
		wqe->rqe.num_sge = 1;
		/* ... */
		wqe->wr_status = SIW_WR_INPROGRESS;
	/* ... */
	/*
	 * Check if application re-registered memory with different
	 * ...
	 */
	if (unlikely(mem->stag != srx->ddp_stag)) {
		/* ... */
		return -EINVAL;
	}
	rv = siw_check_mem(qp->pd, mem, srx->ddp_to + srx->fpdu_part_rcvd,
	/* ... */
				   DDP_ETYPE_TAGGED_BUF, siw_tagged_error(-rv),
		/* ... */
		return -EINVAL;
	}
	if (mem->mem_obj == NULL)
		/* ... */
			(void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
			/* ... */
	else if (!mem->is_pbl)
		rv = siw_rx_umem(srx, mem->umem,
				 srx->ddp_to + srx->fpdu_part_rcvd, bytes);
	else
		rv = siw_rx_pbl(srx, &frx->pbl_idx, mem,
				srx->ddp_to + srx->fpdu_part_rcvd, bytes);
	/* ... */
		return -EINVAL;
	/* ... */
	srx->fpdu_part_rem -= rv;
	srx->fpdu_part_rcvd += rv;

	if (!srx->fpdu_part_rem) {
		srx->ddp_to += srx->fpdu_part_rcvd;
		/* ... */
	return -EAGAIN;
/* siw_proc_rreq() */
	struct siw_rx_stream *srx = &qp->rx_stream;

	if (!srx->fpdu_part_rem)
		/* ... */
		   be16_to_cpu(srx->hdr.ctrl.mpa_len));

	return -EPROTO;
/* siw_init_rresp() */
	uint64_t raddr = be64_to_cpu(srx->hdr.rreq.sink_to),
		 laddr = be64_to_cpu(srx->hdr.rreq.source_to);
	uint32_t length = be32_to_cpu(srx->hdr.rreq.read_size),
		 lkey = be32_to_cpu(srx->hdr.rreq.source_stag),
		 rkey = be32_to_cpu(srx->hdr.rreq.sink_stag),
		 msn = be32_to_cpu(srx->hdr.rreq.ddp_msn);
	/* ... */
	unsigned long flags;

	if (unlikely(msn != srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ])) {
		/* ... */
		return -EPROTO;
	}
	spin_lock_irqsave(&qp->sq_lock, flags);
	/* ... */
	if (tx_work->wr_status == SIW_WR_IDLE) {
		/* ... */
		tx_work->processed = 0;
		tx_work->mem[0] = NULL;
		tx_work->wr_status = SIW_WR_QUEUED;
		resp = &tx_work->sqe;
		/* ... */
		resp->opcode = SIW_OP_READ_RESPONSE;
		/* ... */
		resp->sge[0].length = length;
		resp->sge[0].laddr = laddr;
		resp->sge[0].lkey = lkey;
		/* ... */
		resp->sge[1].length = msn;

		resp->raddr = raddr;
		resp->rkey = rkey;
		resp->num_sge = length ? 1 : 0;
		/* ... */
		smp_store_mb(resp->flags, SIW_WQE_VALID);
		/* ... */
			qp->irq_put % qp->attrs.irq_size, qp->attrs.irq_size);
		/* ... */
		rv = -EPROTO;
	/* ... */
	spin_unlock_irqrestore(&qp->sq_lock, flags);
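
/*
 * An inbound READ request is answered by queueing a READ_RESPONSE
 * work element for the transmit side. Two details are visible above:
 * the request's MSN is parked in the otherwise unused sge[1].length,
 * and a zero-length read yields num_sge == 0.
 */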
/* siw_orqe_start_rx() */
	if (READ_ONCE(orqe->flags) & SIW_WQE_VALID) {
		/* ... */
		wqe = rx_wqe(&qp->rx_tagged);
		wqe->sqe.id = orqe->id;
		wqe->sqe.opcode = orqe->opcode;
		wqe->sqe.sge[0].laddr = orqe->sge[0].laddr;
		wqe->sqe.sge[0].lkey = orqe->sge[0].lkey;
		wqe->sqe.sge[0].length = orqe->sge[0].length;
		wqe->sqe.flags = orqe->flags;
		wqe->sqe.num_sge = 1;
		wqe->bytes = orqe->sge[0].length;
		wqe->processed = 0;
		wqe->mem[0] = NULL;
		/* ... */
		wqe->wr_status = SIW_WR_INPROGRESS;
	/* ... */
	return -EPROTO;
/* siw_proc_rresp() */
	struct siw_rx_stream *srx = &qp->rx_stream;
	struct siw_rx_fpdu *frx = &qp->rx_tagged;
	/* ... */
	if (frx->first_ddp_seg) {
		if (unlikely(wqe->wr_status != SIW_WR_IDLE)) {
			/* ... */
				qp_id(qp), wqe->wr_status, wqe->sqe.opcode);
			rv = -EPROTO;
			/* ... */
				qp_id(qp), qp->orq_get % qp->attrs.orq_size);
			/* ... */
		if (unlikely(wqe->wr_status != SIW_WR_INPROGRESS)) {
			/* ... */
				qp_id(qp), wqe->wr_status);
			rv = -EPROTO;
			/* ... */
	if (!srx->fpdu_part_rem) /* zero length RRESPONSE */
		/* ... */
	sge = wqe->sqe.sge; /* there is only one */
	mem = &wqe->mem[0];
	/* ... */
		rv = siw_check_sge(qp->pd, sge, mem, IB_ACCESS_LOCAL_WRITE, 0,
				   wqe->bytes);
		/* ... */
			wqe->wc_status = SIW_WC_LOC_PROT_ERR;
			/* ... */
					   siw_tagged_error(-rv), 0);
			/* ... */
			return -EINVAL;
	/* ... */
	bytes = min(srx->fpdu_part_rem, srx->skb_new);
	/* ... */
	if (mem_p->mem_obj == NULL)
		/* ... */
			(void *)(uintptr_t)(sge->laddr + wqe->processed),
			/* ... */
	else if (!mem_p->is_pbl)
		rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
		/* ... */
	else
		rv = siw_rx_pbl(srx, &frx->pbl_idx, mem_p,
				sge->laddr + wqe->processed, bytes);
	/* ... */
		wqe->wc_status = SIW_WC_GENERAL_ERR;
		rv = -EINVAL;
		/* ... */
	srx->fpdu_part_rem -= rv;
	srx->fpdu_part_rcvd += rv;
	wqe->processed += rv;

	if (!srx->fpdu_part_rem) {
		srx->ddp_to += srx->fpdu_part_rcvd;
		/* ... */
	return -EAGAIN;
/* siw_proc_terminate() */
	struct siw_rx_stream *srx = &qp->rx_stream;
	struct sk_buff *skb = srx->skb;
	struct iwarp_terminate *term = &srx->hdr.terminate;
	/* ... */
		   __rdmap_term_layer(term), __rdmap_term_etype(term),
		   __rdmap_term_ecode(term));
	/* ... */
	if (be32_to_cpu(term->ddp_qn) != RDMAP_UNTAGGED_QN_TERMINATE ||
	    be32_to_cpu(term->ddp_msn) !=
		    qp->rx_stream.ddp_msn[RDMAP_UNTAGGED_QN_TERMINATE] ||
	    be32_to_cpu(term->ddp_mo) != 0) {
		pr_warn("siw: rx bogus TERM [QN x%08x, MSN x%08x, MO x%08x]\n",
			be32_to_cpu(term->ddp_qn), be32_to_cpu(term->ddp_msn),
			be32_to_cpu(term->ddp_mo));
		return -ECONNRESET;
	}
	/*
	 * Receive remaining pieces of TERM if indicated
	 */
	if (!term->flag_m)
		return -ECONNRESET;
	/*
	 * ...
	 * TERM message
	 */
	if (srx->skb_new < sizeof(struct iwarp_ctrl_tagged))
		return -ECONNRESET;
	/* ... */
	skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
	/* ... */
	srx->skb_offset += to_copy;
	srx->skb_new -= to_copy;
	srx->skb_copied += to_copy;
	srx->fpdu_part_rcvd += to_copy;
	srx->fpdu_part_rem -= to_copy;

	to_copy = iwarp_pktinfo[op].hdr_len - to_copy;

	/* Again, no network fragmented TERMs */
	if (to_copy + MPA_CRC_SIZE > srx->skb_new)
		return -ECONNRESET;

	skb_copy_bits(skb, srx->skb_offset, infop, to_copy);
	/* ... */
	if (term->flag_r) {
		siw_dbg_qp(qp, "TERM reports RDMAP hdr type %u, len %u (%s)\n",
			/* ... */
			   term->flag_m ? "valid" : "invalid");
	} else if (term->flag_d) {
		siw_dbg_qp(qp, "TERM reports DDP hdr type %u, len %u (%s)\n",
			/* ... */
			   term->flag_m ? "valid" : "invalid");
	/* ... */
	srx->skb_new -= to_copy;
	srx->skb_offset += to_copy;
	srx->skb_copied += to_copy;
	srx->fpdu_part_rcvd += to_copy;
	srx->fpdu_part_rem -= to_copy;

	return -ECONNRESET;
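
/*
 * Every path through TERMINATE processing in this excerpt returns
 * -ECONNRESET: a received TERM always ends the connection, and the
 * parsing above only salvages diagnostic detail for the debug log.
 */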
/* siw_get_trailer() */
	struct sk_buff *skb = srx->skb;
	u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
	/* ... */
		   srx->fpdu_part_rem, srx->skb_new, srx->pad);

	if (srx->skb_new < srx->fpdu_part_rem)
		return -EAGAIN;

	skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);

	if (srx->mpa_crc_hd && srx->pad)
		crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);

	srx->skb_new -= srx->fpdu_part_rem;
	srx->skb_offset += srx->fpdu_part_rem;
	srx->skb_copied += srx->fpdu_part_rem;

	if (!srx->mpa_crc_hd)
		/* ... */
	crypto_shash_final(srx->mpa_crc_hd, (u8 *)&crc_own);
	crc_in = (__force __wsum)srx->trailer.crc;
	/* ... */
		crc_in, crc_own, qp->rx_stream.rdmap_op);
	/* ... */
	return -EINVAL;
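
/*
 * For illustration only (not part of this file): the MPA CRC checked
 * above is a CRC32C driven through the kernel shash API. A minimal
 * sketch of the same init/update/final sequence over a single buffer;
 * the function name is made up for the example:
 */
#include <crypto/hash.h>

static int mpa_crc32c_example(const u8 *data, unsigned int len, u32 *crc)
{
	struct crypto_shash *tfm = crypto_alloc_shash("crc32c", 0, 0);
	int rv;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	{
		/* one-shot descriptor on the stack */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		rv = crypto_shash_init(desc);
		if (!rv)
			rv = crypto_shash_update(desc, data, len);
		if (!rv)
			rv = crypto_shash_final(desc, (u8 *)crc);
	}
	crypto_free_shash(tfm);
	return rv;
}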
/* siw_get_hdr() */
	struct sk_buff *skb = srx->skb;
	/* ... */
	struct iwarp_ctrl *c_hdr = &srx->hdr.ctrl;
	/* ... */
	if (srx->fpdu_part_rcvd < MIN_DDP_HDR) {
		/* ... */
		bytes = min_t(int, srx->skb_new,
			      MIN_DDP_HDR - srx->fpdu_part_rcvd);

		skb_copy_bits(skb, srx->skb_offset,
			      (char *)c_hdr + srx->fpdu_part_rcvd, bytes);

		srx->fpdu_part_rcvd += bytes;

		srx->skb_new -= bytes;
		srx->skb_offset += bytes;
		srx->skb_copied += bytes;

		if (srx->fpdu_part_rcvd < MIN_DDP_HDR)
			return -EAGAIN;
		/* ... */
		if (c_hdr->ddp_rdmap_ctrl & DDP_FLAG_TAGGED) {
			/* ... */
				return -EINVAL;
			/* ... */
				return -EINVAL;
			/* ... */
				return -EINVAL;
	/* ... */
	frx = qp->rx_fpdu;
	/* ... */
	bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR;

	if (srx->skb_new < bytes)
		return -EAGAIN;

	skb_copy_bits(skb, srx->skb_offset,
		      (char *)c_hdr + srx->fpdu_part_rcvd, bytes);

	srx->fpdu_part_rcvd += bytes;

	srx->skb_new -= bytes;
	srx->skb_offset += bytes;
	srx->skb_copied += bytes;

	/*
	 * ...
	 * tagged and untagged RDMAP messages is supported, as long as
	 * ...
	 */
	if (srx->mpa_crc_hd) {
		/* ... */
		crypto_shash_init(srx->mpa_crc_hd);
		crypto_shash_update(srx->mpa_crc_hd, (u8 *)c_hdr,
				    srx->fpdu_part_rcvd);
	}
	if (frx->more_ddp_segs) {
		frx->first_ddp_seg = 0;
		if (frx->prev_rdmap_op != opcode) {
			/* ... */
				frx->prev_rdmap_op, opcode);
			/* ... */
			set_rx_fpdu_context(qp, frx->prev_rdmap_op);
			__rdmap_set_opcode(c_hdr, frx->prev_rdmap_op);
			return -EPROTO;
		}
	} else {
		frx->prev_rdmap_op = opcode;
		frx->first_ddp_seg = 1;
	}
	frx->more_ddp_segs = c_hdr->ddp_rdmap_ctrl & DDP_FLAG_LAST ? 0 : 1;
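
/*
 * Header parsing keeps per-opcode reassembly state: once a fragmented
 * message is in flight (more_ddp_segs), a DDP segment carrying a
 * different opcode is rejected, and the previous FPDU context and
 * opcode are restored before returning -EPROTO.
 */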
/* siw_check_tx_fence() */
	unsigned long flags;
	/* ... */
	spin_lock_irqsave(&qp->orq_lock, flags);
	/* ... */
	WRITE_ONCE(rreq->flags, 0);
	/* ... */
	if (qp->tx_ctx.orq_fence) {
		if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
			/* ... */
				qp_id(qp), tx_waiting->wr_status);
			rv = -EPROTO;
			/* ... */
		if (tx_waiting->sqe.opcode == SIW_OP_READ ||
		    tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
			/* ... */
				rv = -EPROTO;
				/* ... */
			siw_read_to_orq(rreq, &tx_waiting->sqe);
			/* ... */
			qp->orq_put++;
			qp->tx_ctx.orq_fence = 0;
			/* ... */
			qp->tx_ctx.orq_fence = 0;
	/* ... */
			qp_id(qp), qp->orq_get, qp->orq_put);
		rv = -EPROTO;
	/* ... */
	qp->orq_get++;
	/* ... */
	spin_unlock_irqrestore(&qp->orq_lock, flags);
/* siw_rdmap_complete() */
	struct siw_rx_stream *srx = &qp->rx_stream;
	struct siw_wqe *wqe = rx_wqe(qp->rx_fpdu);
	enum siw_wc_status wc_status = wqe->wc_status;
	u8 opcode = __rdmap_get_opcode(&srx->hdr.ctrl);
	/* ... */
			wqe->rqe.flags |= SIW_WQE_SOLICITED;
		/* ... */
		if (wqe->wr_status == SIW_WR_IDLE)
			/* ... */
		srx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]++;
		/* ... */
			rv = siw_invalidate_stag(qp->pd, srx->inval_stag);
			/* ... */
				rv == -EACCES ?
				/* ... */
			rv = siw_rqe_complete(qp, &wqe->rqe, wqe->processed,
					      rv ? 0 : srx->inval_stag,
			/* ... */
			rv = siw_rqe_complete(qp, &wqe->rqe, wqe->processed,
		/* ... */
		if (wqe->wr_status == SIW_WR_IDLE)
			/* ... */
		if ((srx->state == SIW_GET_HDR &&
		     qp->rx_fpdu->first_ddp_seg) || error == -ENODATA)
			/* ... */
		} else if (rdma_is_kernel_res(&qp->base_qp.res) &&
			/* ... */
			rv = siw_invalidate_stag(qp->pd, wqe->sqe.sge[0].lkey);
			/* ... */
		if ((wqe->sqe.flags & SIW_WQE_SIGNALLED) || error != 0)
			rv = siw_sqe_complete(qp, &wqe->sqe, wqe->processed,
			/* ... */
		WRITE_ONCE(orq_get_current(qp)->flags, 0);
		/* ... */
		srx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]++;
		/* ... */
		if (wqe->wr_status == SIW_WR_IDLE)
			/* ... */
		/*
		 * While a zero-length WRITE is allowed,
		 * ...
		 */
		if (rx_mem(&qp->rx_tagged)) {
			siw_mem_put(rx_mem(&qp->rx_tagged));
			rx_mem(&qp->rx_tagged) = NULL;
		}
	/* ... */
	wqe->wr_status = SIW_WR_IDLE;
/* siw_tcp_rx_data() */
/*
 * ...
 * @len: skb->len - offset : payload in skb
 */
	struct siw_qp *qp = rd_desc->arg.data;
	struct siw_rx_stream *srx = &qp->rx_stream;
	/* ... */
	srx->skb = skb;
	srx->skb_new = skb->len - off;
	srx->skb_offset = off;
	srx->skb_copied = 0;

	siw_dbg_qp(qp, "new data, len %d\n", srx->skb_new);

	while (srx->skb_new) {
		/* ... */
		if (unlikely(srx->rx_suspend)) {
			/* ... */
			srx->skb_copied += srx->skb_new;
		/* ... */
		switch (srx->state) {
		/* ... */
			srx->fpdu_part_rem =
				be16_to_cpu(srx->hdr.ctrl.mpa_len) -
				srx->fpdu_part_rcvd + MPA_HDR_SIZE;

			if (srx->fpdu_part_rem)
				srx->pad = -srx->fpdu_part_rem & 0x3;
			else
				srx->pad = 0;
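			/*
			 * Worked example for the pad computation: MPA
			 * pads each FPDU to a 4-byte boundary and, in
			 * two's complement, -x & 0x3 == (4 - x % 4) % 4.
			 * A remainder of 5 payload bytes gives
			 * -5 & 3 = 3 pad bytes; a multiple of 4 gives 0.
			 */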
			srx->state = SIW_GET_DATA_START;
			srx->fpdu_part_rcvd = 0;
			/* ... */
			qp->rx_fpdu->first_ddp_seg = 0;
			/* ... */
			/*
			 * Headers will be checked by the opcode-specific
			 * ...
			 */
			rv = iwarp_pktinfo[qp->rx_stream.rdmap_op].rx_data(qp);
			/* ... */
					be16_to_cpu(srx->hdr.ctrl.mpa_len)
					/* ... */
				srx->fpdu_part_rem = (-mpa_len & 0x3)
					/* ... */
				srx->fpdu_part_rcvd = 0;
				srx->state = SIW_GET_TRAILER;
			/* ... */
				if (unlikely(rv == -ECONNRESET))
					/* ... */
					srx->state = SIW_GET_DATA_MORE;
			/* ... */
			srx->state = SIW_GET_HDR;
			srx->fpdu_part_rcvd = 0;
			/* ... */
			if (!(srx->hdr.ctrl.ddp_rdmap_ctrl &
			/* ... */
			rv = -EPROTO;
		/* ... */
		if (unlikely(rv != 0 && rv != -EAGAIN)) {
			if ((srx->state > SIW_GET_HDR ||
			     qp->rx_fpdu->more_ddp_segs) && run_completion)
				/* ... */
				   srx->state);
			/* ... */
	/* ... */
		   srx->state, srx->fpdu_part_rem);
	/* ... */
	return srx->skb_copied;
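
/*
 * For illustration only (not part of this file): siw_tcp_rx_data()
 * matches the sk_read_actor_t signature, i.e. the callback type that
 * tcp_read_sock() invokes per skb from a socket's data_ready handler.
 * A hedged sketch; recovering the qp from sk_user_data is an
 * assumption made for the example:
 */
#include <net/tcp.h>

static void example_data_ready(struct sock *sk)
{
	struct siw_qp *qp = sk->sk_user_data;	/* assumed association */
	read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };

	/* walks the receive queue, calling siw_tcp_rx_data() for each
	 * skb until the actor stops consuming or the queue is empty */
	tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);
}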