Lines matching full:send (kernel source: net/rds/ib_send.c)
151 struct rds_ib_send_work *send, in rds_ib_send_unmap_op() argument
157 switch (send->s_wr.opcode) { in rds_ib_send_unmap_op()
159 if (send->s_op) { in rds_ib_send_unmap_op()
160 rm = container_of(send->s_op, struct rds_message, data); in rds_ib_send_unmap_op()
161 rds_ib_send_unmap_data(ic, send->s_op, wc_status); in rds_ib_send_unmap_op()
166 if (send->s_op) { in rds_ib_send_unmap_op()
167 rm = container_of(send->s_op, struct rds_message, rdma); in rds_ib_send_unmap_op()
168 rds_ib_send_unmap_rdma(ic, send->s_op, wc_status); in rds_ib_send_unmap_op()
173 if (send->s_op) { in rds_ib_send_unmap_op()
174 rm = container_of(send->s_op, struct rds_message, atomic); in rds_ib_send_unmap_op()
175 rds_ib_send_unmap_atomic(ic, send->s_op, wc_status); in rds_ib_send_unmap_op()
181 __func__, send->s_wr.opcode); in rds_ib_send_unmap_op()
185 send->s_wr.opcode = 0xdead; in rds_ib_send_unmap_op()
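The hits above come from rds_ib_send_unmap_op(): for each completed work request it recovers the owning rds_message from the embedded operation with container_of(), dispatches the per-type unmap on the WR opcode, and finally poisons the opcode with 0xdead so rds_ib_send_clear_ring() (next group) can skip slots that were already unmapped. A minimal userspace sketch of the container_of() recovery; the struct layouts here are toy stand-ins, not the kernel's real definitions:

#include <stddef.h>
#include <stdio.h>

/* userspace restatement of the kernel's container_of() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* toy stand-ins: a message embeds one op per operation type, and the
 * send work entry keeps a pointer to whichever op it is carrying */
struct rm_data_op { int op_nents; };

struct rds_message {
        int m_refcount;
        struct rm_data_op data;
};

int main(void)
{
        struct rds_message rm = { .m_refcount = 1 };
        struct rm_data_op *s_op = &rm.data;  /* what send->s_op points at */

        /* same recovery the IB_WR_SEND case performs:
         * rm = container_of(send->s_op, struct rds_message, data); */
        struct rds_message *owner =
                container_of(s_op, struct rds_message, data);

        printf("owner %p, expected %p\n", (void *)owner, (void *)&rm);
        return 0;
}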
192 struct rds_ib_send_work *send; in rds_ib_send_init_ring() local
195 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { in rds_ib_send_init_ring()
198 send->s_op = NULL; in rds_ib_send_init_ring()
200 send->s_wr.wr_id = i; in rds_ib_send_init_ring()
201 send->s_wr.sg_list = send->s_sge; in rds_ib_send_init_ring()
202 send->s_wr.ex.imm_data = 0; in rds_ib_send_init_ring()
204 sge = &send->s_sge[0]; in rds_ib_send_init_ring()
210 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey; in rds_ib_send_init_ring()
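rds_ib_send_init_ring() stamps each slot once at connection setup: wr_id holds the slot's ring index so a completion can be traced back to its entry, sg_list points at the slot's private two-element SGE array (element 0 for the RDS header, element 1 for payload), and the SGEs get the protection domain's local_dma_lkey. A compilable sketch of that one-time stamping; struct send_work, struct sge and init_ring are simplified stand-ins for the kernel types:

#include <stdint.h>
#include <stdio.h>

struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };
struct send_wr { uint64_t wr_id; struct sge *sg_list; uint32_t imm_data; };
struct send_work {
        struct send_wr s_wr;
        struct sge s_sge[2];  /* [0] = RDS header, [1] = payload */
        void *s_op;
};

static void init_ring(struct send_work *ring, unsigned int w_nr,
                      uint32_t local_dma_lkey)
{
        unsigned int i;

        for (i = 0; i < w_nr; i++) {
                struct send_work *send = &ring[i];

                send->s_op = NULL;
                send->s_wr.wr_id = i;  /* slot index; read back by the CQE handler */
                send->s_wr.sg_list = send->s_sge;
                send->s_wr.imm_data = 0;
                send->s_sge[0].lkey = local_dma_lkey;
                send->s_sge[1].lkey = local_dma_lkey;
        }
}

int main(void)
{
        struct send_work ring[4];

        init_ring(ring, 4, 0x1234);
        printf("slot 2 wr_id=%llu\n", (unsigned long long)ring[2].s_wr.wr_id);
        return 0;
}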
216 struct rds_ib_send_work *send; in rds_ib_send_clear_ring() local
219 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { in rds_ib_send_clear_ring()
220 if (send->s_op && send->s_wr.opcode != 0xdead) in rds_ib_send_clear_ring()
221 rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR); in rds_ib_send_clear_ring()
239 * operations performed in the send path. As the sender allocs and potentially
247 struct rds_ib_send_work *send; in rds_ib_send_cqe_handler() local
272 send = &ic->i_sends[oldest]; in rds_ib_send_cqe_handler()
273 if (send->s_wr.send_flags & IB_SEND_SIGNALED) in rds_ib_send_cqe_handler()
276 rm = rds_ib_send_unmap_op(ic, send, wc->status); in rds_ib_send_cqe_handler()
278 if (time_after(jiffies, send->s_queued + HZ / 2)) in rds_ib_send_cqe_handler()
281 if (send->s_op) { in rds_ib_send_cqe_handler()
282 if (send->s_op == rm->m_final_op) { in rds_ib_send_cqe_handler()
289 send->s_op = NULL; in rds_ib_send_cqe_handler()
304 …rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, … in rds_ib_send_cqe_handler()
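rds_ib_send_cqe_handler() retires slots in batches: because only some WRs carry IB_SEND_SIGNALED (see rds_ib_set_wr_signal_state() further down), one completion covers every slot from the oldest outstanding entry up to and including the one named by wc->wr_id, each unmapped in turn. A sketch of the wraparound arithmetic behind that walk; W_NR and ring_completed are hypothetical stand-ins, assuming a power-of-two ring size:

#include <stdint.h>
#include <stdio.h>

#define W_NR 16u  /* assumption: ring size, power of two */

/* slots retired by one completion: from `oldest` through wr_id,
 * inclusive, modulo the ring size */
static unsigned int ring_completed(unsigned int oldest, uint64_t wr_id)
{
        return (((unsigned int)wr_id - oldest) & (W_NR - 1)) + 1;
}

int main(void)
{
        /* oldest outstanding slot is 14; a completion for wr_id 1
         * retires slots 14, 15, 0 and 1 */
        printf("%u slots retired\n", ring_completed(14, 1));
        return 0;
}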
316 * - send credits: this tells us how many WRs we're allowed
318 * each SEND WR we post, we decrement this by one.
327 * exhausted their send credits, and are unable to send new credits
328 * to the peer. We achieve this by requiring that we send at least
334 * The RDS send code is essentially single-threaded; rds_send_xmit
335 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
339 * In the send path, we need to update the counters for send credits
345 * Spinlocks shared between the send and the receive path are bad,
374 /* The last credit must be used to send a credit update. */ in rds_ib_send_grab_credits()
392 * the posted regardless of whether any send credits are in rds_ib_send_grab_credits()
438 /* Decide whether to send an update to the peer now. in rds_ib_advertise_credits()
439 * If we would send a credit update for every single buffer we in rds_ib_advertise_credits()
441 * consumes buffer, we refill the ring, send ACK to remote in rds_ib_advertise_credits()
444 * Performance pretty much depends on how often we send in rds_ib_advertise_credits()
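The comment fragments above describe the credit scheme: in the kernel, both counters are packed into a single atomic word (the IB_SET/GET_*_CREDITS helpers in net/rds/ib.h), send credits in the low 16 bits and posted credits in the high 16, so rds_ib_send_grab_credits() can read and update them together without a lock. A userspace sketch of that encoding and of the keep-one-credit-in-reserve rule, using C11 atomics; the macro names here mirror but simplify the kernel's:

#include <stdatomic.h>
#include <stdio.h>

/* send credits in the low 16 bits, posted credits in the high 16 */
#define SET_SEND_CREDITS(v)     ((v) & 0xffff)
#define SET_POST_CREDITS(v)     ((unsigned int)(v) << 16)
#define GET_SEND_CREDITS(v)     ((v) & 0xffff)
#define GET_POST_CREDITS(v)     ((v) >> 16)

int main(void)
{
        _Atomic unsigned int i_credits =
                SET_SEND_CREDITS(1) | SET_POST_CREDITS(0);
        unsigned int v = atomic_load(&i_credits);
        unsigned int avail = GET_SEND_CREDITS(v);

        /* "The last credit must be used to send a credit update": if the
         * outgoing message would carry no posted credits, hold one send
         * credit back so a pure credit update can always still be sent,
         * preventing both peers wedging at zero credits. */
        if (avail && !GET_POST_CREDITS(v))
                avail--;

        printf("send=%u posted=%u usable=%u\n",
               GET_SEND_CREDITS(v), GET_POST_CREDITS(v), avail);
        return 0;
}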
455 struct rds_ib_send_work *send, in rds_ib_set_wr_signal_state() argument
465 send->s_wr.send_flags |= IB_SEND_SIGNALED; in rds_ib_set_wr_signal_state()
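rds_ib_set_wr_signal_state() implements delayed completion signaling: only every Nth work request, or one the caller explicitly wants notification for, gets IB_SEND_SIGNALED, which is what lets the CQE handler above retire whole batches. A standalone sketch of the countdown; max_unsig_wrs stands in for the kernel's rds_ib_sysctl_max_unsig_wrs knob, and the types are simplified:

#include <stdbool.h>
#include <stdio.h>

#define SEND_SIGNALED 0x1

struct wr { unsigned int send_flags; };

static int set_wr_signal_state(unsigned int *unsignaled_wrs,
                               unsigned int max_unsig_wrs,
                               struct wr *wr, bool notify)
{
        /* signal when the unsignaled budget runs out or on request,
         * then refill the budget */
        if ((*unsignaled_wrs)-- == 0 || notify) {
                *unsignaled_wrs = max_unsig_wrs;
                wr->send_flags |= SEND_SIGNALED;
                return 1;  /* caller counts signaled WRs */
        }
        return 0;
}

int main(void)
{
        unsigned int budget = 4, i;

        for (i = 0; i < 10; i++) {
                struct wr wr = { 0 };

                printf("wr %u signaled=%d\n", i,
                       set_wr_signal_state(&budget, 4, &wr, false));
        }
        return 0;
}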
478 * once we send the final fragment.
489 struct rds_ib_send_work *send = NULL; in rds_ib_xmit() local
509 /* Do not send cong updates to IB loopback */ in rds_ib_xmit()
594 * sticking the header into the send ring. Which is why we in rds_ib_xmit()
610 * READ and the following SEND. in rds_ib_xmit()
619 send = &ic->i_sends[pos]; in rds_ib_xmit()
620 first = send; in rds_ib_xmit()
628 send->s_wr.send_flags = send_flags; in rds_ib_xmit()
629 send->s_wr.opcode = IB_WR_SEND; in rds_ib_xmit()
630 send->s_wr.num_sge = 1; in rds_ib_xmit()
631 send->s_wr.next = NULL; in rds_ib_xmit()
632 send->s_queued = jiffies; in rds_ib_xmit()
633 send->s_op = NULL; in rds_ib_xmit()
635 send->s_sge[0].addr = ic->i_send_hdrs_dma[pos]; in rds_ib_xmit()
637 send->s_sge[0].length = sizeof(struct rds_header); in rds_ib_xmit()
638 send->s_sge[0].lkey = ic->i_pd->local_dma_lkey; in rds_ib_xmit()
653 send->s_wr.num_sge = 2; in rds_ib_xmit()
655 send->s_sge[1].addr = sg_dma_address(scat); in rds_ib_xmit()
656 send->s_sge[1].addr += rm->data.op_dmaoff; in rds_ib_xmit()
657 send->s_sge[1].length = len; in rds_ib_xmit()
658 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey; in rds_ib_xmit()
669 rds_ib_set_wr_signal_state(ic, send, false); in rds_ib_xmit()
675 rds_ib_set_wr_signal_state(ic, send, true); in rds_ib_xmit()
676 send->s_wr.send_flags |= IB_SEND_SOLICITED; in rds_ib_xmit()
679 if (send->s_wr.send_flags & IB_SEND_SIGNALED) in rds_ib_xmit()
682 rdsdebug("send %p wr %p num_sge %u next %p\n", send, in rds_ib_xmit()
683 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); in rds_ib_xmit()
700 prev->s_wr.next = &send->s_wr; in rds_ib_xmit()
701 prev = send; in rds_ib_xmit()
704 send = &ic->i_sends[pos]; in rds_ib_xmit()
715 /* if we finished the message then send completion owns it */ in rds_ib_xmit()
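Inside rds_ib_xmit() each message fragment claims one ring slot: SGE 0 always carries the DMA-mapped RDS header for that slot, SGE 1 (when there is payload) covers a slice of the mapped scatterlist via op_dmaoff, and consecutive slots are linked through s_wr.next so the whole message reaches the HCA with a single post of the first WR. A toy sketch of that chain building; the struct and field names are illustrative only:

#include <stdio.h>

struct wr { struct wr *next; int frag; };

int main(void)
{
        struct wr ring[3];
        struct wr *first = NULL, *prev = NULL, *send;
        int i;

        /* each fragment takes the next slot; linking prev->next to the
         * new slot mirrors prev->s_wr.next = &send->s_wr above */
        for (i = 0; i < 3; i++) {
                send = &ring[i];
                send->frag = i;
                send->next = NULL;
                if (!first)
                        first = send;
                if (prev)
                        prev->next = send;
                prev = send;
        }

        /* posting `first` submits the whole chain in one verb call */
        for (send = first; send; send = send->next)
                printf("fragment %d posted\n", send->frag);
        return 0;
}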
769 struct rds_ib_send_work *send = NULL; in rds_ib_xmit_atomic() local
783 /* address of send request in ring */ in rds_ib_xmit_atomic()
784 send = &ic->i_sends[pos]; in rds_ib_xmit_atomic()
785 send->s_queued = jiffies; in rds_ib_xmit_atomic()
788 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP; in rds_ib_xmit_atomic()
789 send->s_atomic_wr.compare_add = op->op_m_cswp.compare; in rds_ib_xmit_atomic()
790 send->s_atomic_wr.swap = op->op_m_cswp.swap; in rds_ib_xmit_atomic()
791 send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask; in rds_ib_xmit_atomic()
792 send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask; in rds_ib_xmit_atomic()
794 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD; in rds_ib_xmit_atomic()
795 send->s_atomic_wr.compare_add = op->op_m_fadd.add; in rds_ib_xmit_atomic()
796 send->s_atomic_wr.swap = 0; in rds_ib_xmit_atomic()
797 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask; in rds_ib_xmit_atomic()
798 send->s_atomic_wr.swap_mask = 0; in rds_ib_xmit_atomic()
800 send->s_wr.send_flags = 0; in rds_ib_xmit_atomic()
801 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); in rds_ib_xmit_atomic()
802 send->s_atomic_wr.wr.num_sge = 1; in rds_ib_xmit_atomic()
803 send->s_atomic_wr.wr.next = NULL; in rds_ib_xmit_atomic()
804 send->s_atomic_wr.remote_addr = op->op_remote_addr; in rds_ib_xmit_atomic()
805 send->s_atomic_wr.rkey = op->op_rkey; in rds_ib_xmit_atomic()
806 send->s_op = op; in rds_ib_xmit_atomic()
807 rds_message_addref(container_of(send->s_op, struct rds_message, atomic)); in rds_ib_xmit_atomic()
820 send->s_sge[0].addr = sg_dma_address(op->op_sg); in rds_ib_xmit_atomic()
821 send->s_sge[0].length = sg_dma_len(op->op_sg); in rds_ib_xmit_atomic()
822 send->s_sge[0].lkey = ic->i_pd->local_dma_lkey; in rds_ib_xmit_atomic()
825 send->s_sge[0].addr, send->s_sge[0].length); in rds_ib_xmit_atomic()
830 failed_wr = &send->s_atomic_wr.wr; in rds_ib_xmit_atomic()
831 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr); in rds_ib_xmit_atomic()
832 rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic, in rds_ib_xmit_atomic()
833 send, &send->s_atomic_wr, ret, failed_wr); in rds_ib_xmit_atomic()
834 BUG_ON(failed_wr != &send->s_atomic_wr.wr); in rds_ib_xmit_atomic()
843 if (unlikely(failed_wr != &send->s_atomic_wr.wr)) { in rds_ib_xmit_atomic()
845 BUG_ON(failed_wr != &send->s_atomic_wr.wr); in rds_ib_xmit_atomic()
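rds_ib_xmit_atomic() fills one ib_atomic_wr: for IB_WR_MASKED_ATOMIC_CMP_AND_SWP the compare/swap values and their masks come from op_m_cswp, for IB_WR_MASKED_ATOMIC_FETCH_AND_ADD the addend and no-carry mask come from op_m_fadd, and the failed_wr out-parameter of ib_post_send() reports which WR was rejected. Below is one plausible userspace model of the masked compare-and-swap semantics those mask fields request from the HCA; treat it as illustrative of the masking, not as the device's definitive behavior:

#include <stdint.h>
#include <stdio.h>

/* masked compare-and-swap: compare only the bits under compare_mask,
 * and on a match replace only the bits under swap_mask; the HCA does
 * this atomically against remote memory and returns the old value */
static uint64_t masked_cswp(uint64_t *target, uint64_t compare,
                            uint64_t swap, uint64_t compare_mask,
                            uint64_t swap_mask)
{
        uint64_t old = *target;

        if ((old & compare_mask) == (compare & compare_mask))
                *target = (old & ~swap_mask) | (swap & swap_mask);
        return old;
}

int main(void)
{
        uint64_t word = 0xdeadbeef12345678ull;

        /* compare only the low byte (0x78), swap only the high byte */
        uint64_t old = masked_cswp(&word, 0x78, 0xAA00000000000000ull,
                                   0xffull, 0xff00000000000000ull);

        printf("old=%#llx new=%#llx\n",
               (unsigned long long)old, (unsigned long long)word);
        return 0;
}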
855 struct rds_ib_send_work *send = NULL; in rds_ib_xmit_rdma() local
898 * be enough work requests to send the entire message. in rds_ib_xmit_rdma()
910 send = &ic->i_sends[pos]; in rds_ib_xmit_rdma()
911 first = send; in rds_ib_xmit_rdma()
918 send->s_wr.send_flags = 0; in rds_ib_xmit_rdma()
919 send->s_queued = jiffies; in rds_ib_xmit_rdma()
920 send->s_op = NULL; in rds_ib_xmit_rdma()
923 nr_sig += rds_ib_set_wr_signal_state(ic, send, in rds_ib_xmit_rdma()
926 send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ; in rds_ib_xmit_rdma()
927 send->s_rdma_wr.remote_addr = remote_addr; in rds_ib_xmit_rdma()
928 send->s_rdma_wr.rkey = op->op_rkey; in rds_ib_xmit_rdma()
931 send->s_rdma_wr.wr.num_sge = max_sge; in rds_ib_xmit_rdma()
934 send->s_rdma_wr.wr.num_sge = num_sge; in rds_ib_xmit_rdma()
937 send->s_rdma_wr.wr.next = NULL; in rds_ib_xmit_rdma()
940 prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr; in rds_ib_xmit_rdma()
942 for (j = 0; j < send->s_rdma_wr.wr.num_sge && in rds_ib_xmit_rdma()
946 send->s_sge[j].addr = sg_dma_address(scat); in rds_ib_xmit_rdma()
947 send->s_sge[j].lkey = ic->i_pd->local_dma_lkey; in rds_ib_xmit_rdma()
949 send->s_sge[j].addr = odp_addr; in rds_ib_xmit_rdma()
950 send->s_sge[j].lkey = odp_lkey; in rds_ib_xmit_rdma()
952 send->s_sge[j].length = len; in rds_ib_xmit_rdma()
962 rdsdebug("send %p wr %p num_sge %u next %p\n", send, in rds_ib_xmit_rdma()
963 &send->s_rdma_wr.wr, in rds_ib_xmit_rdma()
964 send->s_rdma_wr.wr.num_sge, in rds_ib_xmit_rdma()
965 send->s_rdma_wr.wr.next); in rds_ib_xmit_rdma()
967 prev = send; in rds_ib_xmit_rdma()
968 if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) in rds_ib_xmit_rdma()
969 send = ic->i_sends; in rds_ib_xmit_rdma()
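rds_ib_xmit_rdma() fans one RDMA op out across the ring: the opcode is chosen by op->op_write (write vs. read), each WR carries at most max_sge scatterlist entries with the remainder spilling into the next chained slot (wrapping at the end of the ring, as the two lines above show), and each SGE copies one scatterlist element's DMA address, lkey and length. A sketch of the WR/SGE split arithmetic; the num_sge and max_sge values are made-up inputs:

#include <stdio.h>

int main(void)
{
        int num_sge = 21;  /* assumption: entries left in op->op_sg */
        int max_sge = 8;   /* assumption: device SGE limit per WR */
        int wrs = 0;

        /* mirrors the "num_sge > max_sge ? take max_sge : take the rest"
         * choice in the matched lines: ceil(num_sge / max_sge) WRs */
        while (num_sge > 0) {
                int take = num_sge > max_sge ? max_sge : num_sge;

                printf("WR %d carries %d sge(s)\n", wrs++, take);
                num_sge -= take;
        }
        printf("total WRs: %d\n", wrs);
        return 0;
}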
1015 * to send previously (due to flow control). Try again. */ in rds_ib_xmit_path_complete()