Lines Matching "no-memory-wc" (full-text tokens: no, memory, wc)

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (c) 2014-2017 Oracle. All rights reserved.
4 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
9 * COPYING in the main directory of this source tree, or the BSD-type
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
49 * o buffer memory
58 #include <asm-generic/barrier.h>
101 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_xprt_drain()
102 struct rdma_cm_id *id = ep->re_id; in rpcrdma_xprt_drain()
107 if (atomic_inc_return(&ep->re_receiving) > 1) in rpcrdma_xprt_drain()
108 wait_for_completion(&ep->re_done); in rpcrdma_xprt_drain()
113 ib_drain_rq(id->qp); in rpcrdma_xprt_drain()
118 ib_drain_sq(id->qp); in rpcrdma_xprt_drain()
129 if (atomic_add_unless(&ep->re_force_disconnect, 1, 1)) in rpcrdma_force_disconnect()
130 xprt_force_disconnect(ep->re_xprt); in rpcrdma_force_disconnect()
134 * rpcrdma_flush_disconnect - Disconnect on flushed completion
136 * @wc: work completion entry
140 void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc) in rpcrdma_flush_disconnect() argument
142 if (wc->status != IB_WC_SUCCESS) in rpcrdma_flush_disconnect()
143 rpcrdma_force_disconnect(r_xprt->rx_ep); in rpcrdma_flush_disconnect()
147 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
149 * @wc: WCE for a completed Send WR
152 static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) in rpcrdma_wc_send() argument
154 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_send()
157 struct rpcrdma_xprt *r_xprt = cq->cq_context; in rpcrdma_wc_send()
160 trace_xprtrdma_wc_send(wc, &sc->sc_cid); in rpcrdma_wc_send()
162 rpcrdma_flush_disconnect(r_xprt, wc); in rpcrdma_wc_send()
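
The Send handler above (and the Receive handler that follows) uses the kernel's ib_cqe dispatch pattern: the provider hands back wc->wr_cqe, and the handler recovers its per-operation context with container_of() rather than decoding a wr_id cookie. A minimal sketch of that pattern, assuming a hypothetical my_ctx structure (the names below are illustrative, not taken from this file):

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct my_ctx {
	struct ib_cqe cqe;	/* embeds the completion callback */
	/* ... per-operation state ... */
};

/* Invoked by the CQ polling code for each completed WR that was
 * posted with wr_cqe pointing at &ctx->cqe.
 */
static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_ctx *ctx = container_of(wc->wr_cqe, struct my_ctx, cqe);

	if (wc->status != IB_WC_SUCCESS)
		return;	/* flushed/failed: only wr_cqe and status are reliable */
	/* ... handle the completed operation via ctx ... */
}

static void my_prepare_send(struct my_ctx *ctx, struct ib_send_wr *wr)
{
	ctx->cqe.done = my_done;
	wr->wr_cqe = &ctx->cqe;	/* the WC carries this pointer back */
}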
166 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
168 * @wc: WCE for a completed Receive WR
171 static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) in rpcrdma_wc_receive() argument
173 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_receive()
176 struct rpcrdma_xprt *r_xprt = cq->cq_context; in rpcrdma_wc_receive()
179 trace_xprtrdma_wc_receive(wc, &rep->rr_cid); in rpcrdma_wc_receive()
180 --r_xprt->rx_ep->re_receive_count; in rpcrdma_wc_receive()
181 if (wc->status != IB_WC_SUCCESS) in rpcrdma_wc_receive()
184 /* status == SUCCESS means all fields in wc are trustworthy */ in rpcrdma_wc_receive()
185 rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len); in rpcrdma_wc_receive()
186 rep->rr_wc_flags = wc->wc_flags; in rpcrdma_wc_receive()
187 rep->rr_inv_rkey = wc->ex.invalidate_rkey; in rpcrdma_wc_receive()
189 ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf), in rpcrdma_wc_receive()
190 rdmab_addr(rep->rr_rdmabuf), in rpcrdma_wc_receive()
191 wc->byte_len, DMA_FROM_DEVICE); in rpcrdma_wc_receive()
197 rpcrdma_flush_disconnect(r_xprt, wc); in rpcrdma_wc_receive()
198 rpcrdma_rep_put(&r_xprt->rx_buf, rep); in rpcrdma_wc_receive()
204 const struct rpcrdma_connect_private *pmsg = param->private_data; in rpcrdma_update_cm_private()
207 /* Default settings for RPC-over-RDMA Version One */ in rpcrdma_update_cm_private()
208 ep->re_implicit_roundup = xprt_rdma_pad_optimize; in rpcrdma_update_cm_private()
213 pmsg->cp_magic == rpcrdma_cmp_magic && in rpcrdma_update_cm_private()
214 pmsg->cp_version == RPCRDMA_CMP_VERSION) { in rpcrdma_update_cm_private()
215 ep->re_implicit_roundup = true; in rpcrdma_update_cm_private()
216 rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size); in rpcrdma_update_cm_private()
217 wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); in rpcrdma_update_cm_private()
220 if (rsize < ep->re_inline_recv) in rpcrdma_update_cm_private()
221 ep->re_inline_recv = rsize; in rpcrdma_update_cm_private()
222 if (wsize < ep->re_inline_send) in rpcrdma_update_cm_private()
223 ep->re_inline_send = wsize; in rpcrdma_update_cm_private()
229 * rpcrdma_cm_event_handler - Handle RDMA CM events
239 struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr; in rpcrdma_cm_event_handler()
240 struct rpcrdma_ep *ep = id->context; in rpcrdma_cm_event_handler()
244 switch (event->event) { in rpcrdma_cm_event_handler()
247 ep->re_async_rc = 0; in rpcrdma_cm_event_handler()
248 complete(&ep->re_done); in rpcrdma_cm_event_handler()
251 ep->re_async_rc = -EPROTO; in rpcrdma_cm_event_handler()
252 complete(&ep->re_done); in rpcrdma_cm_event_handler()
255 ep->re_async_rc = -ENETUNREACH; in rpcrdma_cm_event_handler()
256 complete(&ep->re_done); in rpcrdma_cm_event_handler()
260 ep->re_id->device->name, sap); in rpcrdma_cm_event_handler()
263 ep->re_connect_status = -ENODEV; in rpcrdma_cm_event_handler()
267 ep->re_connect_status = 1; in rpcrdma_cm_event_handler()
268 rpcrdma_update_cm_private(ep, &event->param.conn); in rpcrdma_cm_event_handler()
270 wake_up_all(&ep->re_connect_wait); in rpcrdma_cm_event_handler()
273 ep->re_connect_status = -ENOTCONN; in rpcrdma_cm_event_handler()
276 ep->re_connect_status = -ENETUNREACH; in rpcrdma_cm_event_handler()
280 sap, rdma_reject_msg(id, event->status)); in rpcrdma_cm_event_handler()
281 ep->re_connect_status = -ECONNREFUSED; in rpcrdma_cm_event_handler()
282 if (event->status == IB_CM_REJ_STALE_CONN) in rpcrdma_cm_event_handler()
283 ep->re_connect_status = -ENOTCONN; in rpcrdma_cm_event_handler()
285 wake_up_all(&ep->re_connect_wait); in rpcrdma_cm_event_handler()
288 ep->re_connect_status = -ECONNABORTED; in rpcrdma_cm_event_handler()
297 ep->re_id->device->name, rdma_event_msg(event->event)); in rpcrdma_cm_event_handler()
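
rpcrdma_cm_event_handler() is the callback that rpcrdma_create_id() (below) registers with the RDMA CM: address/route-resolution outcomes complete re_done so the creating thread can wait synchronously, while connection events record a status and wake re_connect_wait. A bare-bones skeleton of the same shape, using only event names from <rdma/rdma_cm.h> (my_ep and its fields are illustrative stand-ins; the real handler covers more events, such as rejection and device removal):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <rdma/rdma_cm.h>

struct my_ep {
	int			async_rc;	/* result of addr/route resolution */
	int			connect_status;	/* 1 = connected, <0 = errno */
	struct completion	done;
	wait_queue_head_t	connect_wait;
};

static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct my_ep *ep = id->context;	/* context passed to rdma_create_id() */

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ep->async_rc = 0;
		complete(&ep->done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ep->async_rc = -EPROTO;
		complete(&ep->done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ep->async_rc = -ENETUNREACH;
		complete(&ep->done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		ep->connect_status = 1;
		wake_up_all(&ep->connect_wait);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		ep->connect_status = -ECONNABORTED;
		wake_up_all(&ep->connect_wait);
		break;
	default:
		break;
	}
	return 0;	/* a non-zero return tells the CM to destroy the cm_id */
}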
305 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_create_id()
309 init_completion(&ep->re_done); in rpcrdma_create_id()
311 id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep, in rpcrdma_create_id()
316 ep->re_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
317 rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr, in rpcrdma_create_id()
321 rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout); in rpcrdma_create_id()
325 rc = ep->re_async_rc; in rpcrdma_create_id()
329 ep->re_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
333 rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout); in rpcrdma_create_id()
336 rc = ep->re_async_rc; in rpcrdma_create_id()
351 if (ep->re_id->qp) { in rpcrdma_ep_destroy()
352 rdma_destroy_qp(ep->re_id); in rpcrdma_ep_destroy()
353 ep->re_id->qp = NULL; in rpcrdma_ep_destroy()
356 if (ep->re_attr.recv_cq) in rpcrdma_ep_destroy()
357 ib_free_cq(ep->re_attr.recv_cq); in rpcrdma_ep_destroy()
358 ep->re_attr.recv_cq = NULL; in rpcrdma_ep_destroy()
359 if (ep->re_attr.send_cq) in rpcrdma_ep_destroy()
360 ib_free_cq(ep->re_attr.send_cq); in rpcrdma_ep_destroy()
361 ep->re_attr.send_cq = NULL; in rpcrdma_ep_destroy()
363 if (ep->re_pd) in rpcrdma_ep_destroy()
364 ib_dealloc_pd(ep->re_pd); in rpcrdma_ep_destroy()
365 ep->re_pd = NULL; in rpcrdma_ep_destroy()
373 kref_get(&ep->re_kref); in rpcrdma_ep_get()
382 return kref_put(&ep->re_kref, rpcrdma_ep_destroy); in rpcrdma_ep_put()
395 return -ENOTCONN; in rpcrdma_ep_create()
396 ep->re_xprt = &r_xprt->rx_xprt; in rpcrdma_ep_create()
397 kref_init(&ep->re_kref); in rpcrdma_ep_create()
405 device = id->device; in rpcrdma_ep_create()
406 ep->re_id = id; in rpcrdma_ep_create()
407 reinit_completion(&ep->re_done); in rpcrdma_ep_create()
409 ep->re_max_requests = r_xprt->rx_xprt.max_reqs; in rpcrdma_ep_create()
410 ep->re_inline_send = xprt_rdma_max_inline_write; in rpcrdma_ep_create()
411 ep->re_inline_recv = xprt_rdma_max_inline_read; in rpcrdma_ep_create()
416 r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests); in rpcrdma_ep_create()
418 ep->re_attr.srq = NULL; in rpcrdma_ep_create()
419 ep->re_attr.cap.max_inline_data = 0; in rpcrdma_ep_create()
420 ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR; in rpcrdma_ep_create()
421 ep->re_attr.qp_type = IB_QPT_RC; in rpcrdma_ep_create()
422 ep->re_attr.port_num = ~0; in rpcrdma_ep_create()
427 ep->re_attr.cap.max_send_wr, in rpcrdma_ep_create()
428 ep->re_attr.cap.max_recv_wr, in rpcrdma_ep_create()
429 ep->re_attr.cap.max_send_sge, in rpcrdma_ep_create()
430 ep->re_attr.cap.max_recv_sge); in rpcrdma_ep_create()
432 ep->re_send_batch = ep->re_max_requests >> 3; in rpcrdma_ep_create()
433 ep->re_send_count = ep->re_send_batch; in rpcrdma_ep_create()
434 init_waitqueue_head(&ep->re_connect_wait); in rpcrdma_ep_create()
436 ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt, in rpcrdma_ep_create()
437 ep->re_attr.cap.max_send_wr, in rpcrdma_ep_create()
439 if (IS_ERR(ep->re_attr.send_cq)) { in rpcrdma_ep_create()
440 rc = PTR_ERR(ep->re_attr.send_cq); in rpcrdma_ep_create()
444 ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt, in rpcrdma_ep_create()
445 ep->re_attr.cap.max_recv_wr, in rpcrdma_ep_create()
447 if (IS_ERR(ep->re_attr.recv_cq)) { in rpcrdma_ep_create()
448 rc = PTR_ERR(ep->re_attr.recv_cq); in rpcrdma_ep_create()
451 ep->re_receive_count = 0; in rpcrdma_ep_create()
454 memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma)); in rpcrdma_ep_create()
456 /* Prepare RDMA-CM private message */ in rpcrdma_ep_create()
457 pmsg = &ep->re_cm_private; in rpcrdma_ep_create()
458 pmsg->cp_magic = rpcrdma_cmp_magic; in rpcrdma_ep_create()
459 pmsg->cp_version = RPCRDMA_CMP_VERSION; in rpcrdma_ep_create()
460 pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK; in rpcrdma_ep_create()
461 pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send); in rpcrdma_ep_create()
462 pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv); in rpcrdma_ep_create()
463 ep->re_remote_cma.private_data = pmsg; in rpcrdma_ep_create()
464 ep->re_remote_cma.private_data_len = sizeof(*pmsg); in rpcrdma_ep_create()
467 ep->re_remote_cma.initiator_depth = 0; in rpcrdma_ep_create()
468 ep->re_remote_cma.responder_resources = in rpcrdma_ep_create()
469 min_t(int, U8_MAX, device->attrs.max_qp_rd_atom); in rpcrdma_ep_create()
472 * GID changes quickly. RPC layer handles re-establishing in rpcrdma_ep_create()
475 ep->re_remote_cma.retry_count = 6; in rpcrdma_ep_create()
477 /* RPC-over-RDMA handles its own flow control. In addition, in rpcrdma_ep_create()
478 * make all RNR NAKs visible so we know that RPC-over-RDMA in rpcrdma_ep_create()
479 * flow control is working correctly (no NAKs should be seen). in rpcrdma_ep_create()
481 ep->re_remote_cma.flow_control = 0; in rpcrdma_ep_create()
482 ep->re_remote_cma.rnr_retry_count = 0; in rpcrdma_ep_create()
484 ep->re_pd = ib_alloc_pd(device, 0); in rpcrdma_ep_create()
485 if (IS_ERR(ep->re_pd)) { in rpcrdma_ep_create()
486 rc = PTR_ERR(ep->re_pd); in rpcrdma_ep_create()
490 rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr); in rpcrdma_ep_create()
494 r_xprt->rx_ep = ep; in rpcrdma_ep_create()
504 * rpcrdma_xprt_connect - Connect an unconnected transport
511 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_xprt_connect()
518 ep = r_xprt->rx_ep; in rpcrdma_xprt_connect()
529 rc = rdma_connect(ep->re_id, &ep->re_remote_cma); in rpcrdma_xprt_connect()
533 if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO) in rpcrdma_xprt_connect()
534 xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO; in rpcrdma_xprt_connect()
535 wait_event_interruptible(ep->re_connect_wait, in rpcrdma_xprt_connect()
536 ep->re_connect_status != 0); in rpcrdma_xprt_connect()
537 if (ep->re_connect_status <= 0) { in rpcrdma_xprt_connect()
538 rc = ep->re_connect_status; in rpcrdma_xprt_connect()
544 rc = -ENOTCONN; in rpcrdma_xprt_connect()
550 rc = -ENOTCONN; in rpcrdma_xprt_connect()
561 * rpcrdma_xprt_disconnect - Disconnect underlying transport
568 * resources and prepared for the next ->connect operation.
572 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_xprt_disconnect()
579 id = ep->re_id; in rpcrdma_xprt_disconnect()
592 r_xprt->rx_ep = NULL; in rpcrdma_xprt_disconnect()
595 /* Fixed-size circular FIFO queue. This implementation is wait-free and
596 * lock-free.
601 * ->send_request call at a time.
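
The sendctx queue this comment describes is a fixed-size ring with a single consumer (the send path, serialized by the transport lock, which advances the head) and a single producer (the Send completion path, which advances the tail); the only cross-CPU ordering required is a release store on the index the other side reads, which is what the READ_ONCE()/smp_store_release() pair further down provides. A toy version of that index discipline, assuming a hypothetical my_ring whose slots are pre-filled with context pointers (not the rpcrdma structures):

#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/barrier.h>	/* smp_store_release() */

#define MY_RING_SLOTS 8			/* illustrative fixed size */

struct my_ring {
	unsigned long	head;		/* advanced only by the consumer */
	unsigned long	tail;		/* advanced only by the producer */
	void		*slot[MY_RING_SLOTS];	/* pre-filled contexts */
};

static unsigned long my_ring_next(unsigned long i)
{
	return i + 1 < MY_RING_SLOTS ? i + 1 : 0;
}

/* Consumer (externally serialized, e.g. by a transport lock): the ring
 * of free contexts is exhausted when advancing head would reach tail.
 */
static void *my_ring_get(struct my_ring *r)
{
	unsigned long next = my_ring_next(r->head);

	if (next == READ_ONCE(r->tail))
		return NULL;		/* all contexts in use; caller waits */
	r->head = next;			/* the caller's lock release publishes this */
	return r->slot[next];
}

/* Producer: called as the oldest outstanding context is retired. The
 * release store orders the retirement work before the consumer can
 * observe the new tail.
 */
static void my_ring_put(struct my_ring *r)
{
	smp_store_release(&r->tail, my_ring_next(r->tail));
}

The put side in rpcrdma_sendctx_put_locked() below generalizes this by walking rb_sc_tail across a whole batch of retired contexts, since Sends are only signaled every re_send_batch posts.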
614 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctxs_destroy()
617 if (!buf->rb_sc_ctxs) in rpcrdma_sendctxs_destroy()
619 for (i = 0; i <= buf->rb_sc_last; i++) in rpcrdma_sendctxs_destroy()
620 kfree(buf->rb_sc_ctxs[i]); in rpcrdma_sendctxs_destroy()
621 kfree(buf->rb_sc_ctxs); in rpcrdma_sendctxs_destroy()
622 buf->rb_sc_ctxs = NULL; in rpcrdma_sendctxs_destroy()
629 sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge), in rpcrdma_sendctx_create()
634 sc->sc_cqe.done = rpcrdma_wc_send; in rpcrdma_sendctx_create()
635 sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id; in rpcrdma_sendctx_create()
636 sc->sc_cid.ci_completion_id = in rpcrdma_sendctx_create()
637 atomic_inc_return(&ep->re_completion_ids); in rpcrdma_sendctx_create()
643 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctxs_create()
649 * the ->send_request call to fail temporarily before too many in rpcrdma_sendctxs_create()
652 i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS; in rpcrdma_sendctxs_create()
653 buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL); in rpcrdma_sendctxs_create()
654 if (!buf->rb_sc_ctxs) in rpcrdma_sendctxs_create()
655 return -ENOMEM; in rpcrdma_sendctxs_create()
657 buf->rb_sc_last = i - 1; in rpcrdma_sendctxs_create()
658 for (i = 0; i <= buf->rb_sc_last; i++) { in rpcrdma_sendctxs_create()
659 sc = rpcrdma_sendctx_create(r_xprt->rx_ep); in rpcrdma_sendctxs_create()
661 return -ENOMEM; in rpcrdma_sendctxs_create()
663 buf->rb_sc_ctxs[i] = sc; in rpcrdma_sendctxs_create()
666 buf->rb_sc_head = 0; in rpcrdma_sendctxs_create()
667 buf->rb_sc_tail = 0; in rpcrdma_sendctxs_create()
678 return likely(item < buf->rb_sc_last) ? item + 1 : 0; in rpcrdma_sendctx_next()
682 * rpcrdma_sendctx_get_locked - Acquire a send context
691 * provides an effective memory barrier that flushes the new value
696 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctx_get_locked()
700 next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head); in rpcrdma_sendctx_get_locked()
702 if (next_head == READ_ONCE(buf->rb_sc_tail)) in rpcrdma_sendctx_get_locked()
706 sc = buf->rb_sc_ctxs[next_head]; in rpcrdma_sendctx_get_locked()
708 /* Releasing the lock in the caller acts as a memory in rpcrdma_sendctx_get_locked()
711 buf->rb_sc_head = next_head; in rpcrdma_sendctx_get_locked()
720 xprt_wait_for_buffer_space(&r_xprt->rx_xprt); in rpcrdma_sendctx_get_locked()
721 r_xprt->rx_stats.empty_sendctx_q++; in rpcrdma_sendctx_get_locked()
726 * rpcrdma_sendctx_put_locked - Release a send context
738 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctx_put_locked()
744 next_tail = buf->rb_sc_tail; in rpcrdma_sendctx_put_locked()
749 rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]); in rpcrdma_sendctx_put_locked()
751 } while (buf->rb_sc_ctxs[next_tail] != sc); in rpcrdma_sendctx_put_locked()
754 smp_store_release(&buf->rb_sc_tail, next_tail); in rpcrdma_sendctx_put_locked()
756 xprt_write_space(&r_xprt->rx_xprt); in rpcrdma_sendctx_put_locked()
762 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_create()
763 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_mrs_create()
766 for (count = 0; count < ep->re_max_rdma_segs; count++) { in rpcrdma_mrs_create()
780 spin_lock(&buf->rb_lock); in rpcrdma_mrs_create()
781 rpcrdma_mr_push(mr, &buf->rb_mrs); in rpcrdma_mrs_create()
782 list_add(&mr->mr_all, &buf->rb_all_mrs); in rpcrdma_mrs_create()
783 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_create()
786 r_xprt->rx_stats.mrs_allocated += count; in rpcrdma_mrs_create()
799 xprt_write_space(&r_xprt->rx_xprt); in rpcrdma_mr_refresh_worker()
803 * rpcrdma_mrs_refresh - Wake the MR refresh worker
809 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_refresh()
810 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_mrs_refresh()
812 /* If there is no underlying connection, it's no use in rpcrdma_mrs_refresh()
815 if (ep->re_connect_status == 1) { in rpcrdma_mrs_refresh()
820 queue_work(xprtiod_workqueue, &buf->rb_refresh_worker); in rpcrdma_mrs_refresh()
825 * rpcrdma_req_create - Allocate an rpcrdma_req object
828 * @flags: GFP flags passed to memory allocators
835 struct rpcrdma_buffer *buffer = &r_xprt->rx_buf; in rpcrdma_req_create()
842 req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags); in rpcrdma_req_create()
843 if (!req->rl_sendbuf) in rpcrdma_req_create()
846 req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags); in rpcrdma_req_create()
847 if (!req->rl_recvbuf) in rpcrdma_req_create()
850 INIT_LIST_HEAD(&req->rl_free_mrs); in rpcrdma_req_create()
851 INIT_LIST_HEAD(&req->rl_registered); in rpcrdma_req_create()
852 spin_lock(&buffer->rb_lock); in rpcrdma_req_create()
853 list_add(&req->rl_all, &buffer->rb_allreqs); in rpcrdma_req_create()
854 spin_unlock(&buffer->rb_lock); in rpcrdma_req_create()
858 kfree(req->rl_sendbuf); in rpcrdma_req_create()
866 * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
879 r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz; in rpcrdma_req_setup()
889 req->rl_rdmabuf = rb; in rpcrdma_req_setup()
890 xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb)); in rpcrdma_req_setup()
896 return -ENOMEM; in rpcrdma_req_setup()
906 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reqs_setup()
910 list_for_each_entry(req, &buf->rb_allreqs, rl_all) { in rpcrdma_reqs_setup()
921 req->rl_slot.rq_cong = 0; in rpcrdma_req_reset()
923 rpcrdma_regbuf_free(req->rl_rdmabuf); in rpcrdma_req_reset()
924 req->rl_rdmabuf = NULL; in rpcrdma_req_reset()
926 rpcrdma_regbuf_dma_unmap(req->rl_sendbuf); in rpcrdma_req_reset()
927 rpcrdma_regbuf_dma_unmap(req->rl_recvbuf); in rpcrdma_req_reset()
939 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reqs_reset()
942 list_for_each_entry(req, &buf->rb_allreqs, rl_all) in rpcrdma_reqs_reset()
950 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_rep_create()
957 rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv, in rpcrdma_rep_create()
959 if (!rep->rr_rdmabuf) in rpcrdma_rep_create()
962 if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) in rpcrdma_rep_create()
965 rep->rr_cid.ci_completion_id = in rpcrdma_rep_create()
966 atomic_inc_return(&r_xprt->rx_ep->re_completion_ids); in rpcrdma_rep_create()
968 xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf), in rpcrdma_rep_create()
969 rdmab_length(rep->rr_rdmabuf)); in rpcrdma_rep_create()
970 rep->rr_cqe.done = rpcrdma_wc_receive; in rpcrdma_rep_create()
971 rep->rr_rxprt = r_xprt; in rpcrdma_rep_create()
972 rep->rr_recv_wr.next = NULL; in rpcrdma_rep_create()
973 rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; in rpcrdma_rep_create()
974 rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; in rpcrdma_rep_create()
975 rep->rr_recv_wr.num_sge = 1; in rpcrdma_rep_create()
976 rep->rr_temp = temp; in rpcrdma_rep_create()
978 spin_lock(&buf->rb_lock); in rpcrdma_rep_create()
979 list_add(&rep->rr_all, &buf->rb_all_reps); in rpcrdma_rep_create()
980 spin_unlock(&buf->rb_lock); in rpcrdma_rep_create()
984 rpcrdma_regbuf_free(rep->rr_rdmabuf); in rpcrdma_rep_create()
993 rpcrdma_regbuf_free(rep->rr_rdmabuf); in rpcrdma_rep_free()
999 struct rpcrdma_buffer *buf = &rep->rr_rxprt->rx_buf; in rpcrdma_rep_destroy()
1001 spin_lock(&buf->rb_lock); in rpcrdma_rep_destroy()
1002 list_del(&rep->rr_all); in rpcrdma_rep_destroy()
1003 spin_unlock(&buf->rb_lock); in rpcrdma_rep_destroy()
1013 node = llist_del_first(&buf->rb_free_reps); in rpcrdma_rep_get_locked()
1020 * rpcrdma_rep_put - Release rpcrdma_rep back to free list
1027 llist_add(&rep->rr_node, &buf->rb_free_reps); in rpcrdma_rep_put()
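
The rep free list here is a kernel llist: llist_add() may be called concurrently by any number of producers without locking, while llist_del_first() tolerates concurrent adds but allows only one remover at a time, which is presumably why the dequeue side is the "_locked" helper above. A tiny self-contained sketch of that usage (my_item and friends are illustrative names):

#include <linux/llist.h>

struct my_item {
	struct llist_node node;
	/* ... payload ... */
};

static LLIST_HEAD(my_free_list);

/* Producer side: lock-free, safe from many contexts concurrently. */
static void my_item_put(struct my_item *item)
{
	llist_add(&item->node, &my_free_list);
}

/* Consumer side: llist_del_first() copes with concurrent llist_add(),
 * but callers must ensure only one remover runs at a time (by holding
 * a lock, or by having a single consuming thread by design).
 */
static struct my_item *my_item_get(void)
{
	struct llist_node *node = llist_del_first(&my_free_list);

	return node ? llist_entry(node, struct my_item, node) : NULL;
}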
1036 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reps_unmap()
1039 list_for_each_entry(rep, &buf->rb_all_reps, rr_all) { in rpcrdma_reps_unmap()
1040 rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf); in rpcrdma_reps_unmap()
1041 rep->rr_temp = true; /* Mark this rep for destruction */ in rpcrdma_reps_unmap()
1049 spin_lock(&buf->rb_lock); in rpcrdma_reps_destroy()
1050 while ((rep = list_first_entry_or_null(&buf->rb_all_reps, in rpcrdma_reps_destroy()
1053 list_del(&rep->rr_all); in rpcrdma_reps_destroy()
1054 spin_unlock(&buf->rb_lock); in rpcrdma_reps_destroy()
1058 spin_lock(&buf->rb_lock); in rpcrdma_reps_destroy()
1060 spin_unlock(&buf->rb_lock); in rpcrdma_reps_destroy()
1064 * rpcrdma_buffer_create - Create initial set of req/rep objects
1071 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_buffer_create()
1074 buf->rb_bc_srv_max_requests = 0; in rpcrdma_buffer_create()
1075 spin_lock_init(&buf->rb_lock); in rpcrdma_buffer_create()
1076 INIT_LIST_HEAD(&buf->rb_mrs); in rpcrdma_buffer_create()
1077 INIT_LIST_HEAD(&buf->rb_all_mrs); in rpcrdma_buffer_create()
1078 INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker); in rpcrdma_buffer_create()
1080 INIT_LIST_HEAD(&buf->rb_send_bufs); in rpcrdma_buffer_create()
1081 INIT_LIST_HEAD(&buf->rb_allreqs); in rpcrdma_buffer_create()
1082 INIT_LIST_HEAD(&buf->rb_all_reps); in rpcrdma_buffer_create()
1084 rc = -ENOMEM; in rpcrdma_buffer_create()
1085 for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) { in rpcrdma_buffer_create()
1092 list_add(&req->rl_list, &buf->rb_send_bufs); in rpcrdma_buffer_create()
1095 init_llist_head(&buf->rb_free_reps); in rpcrdma_buffer_create()
1104 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
1108 * removing req->rl_all from buf->rb_all_reqs safely.
1114 list_del(&req->rl_all); in rpcrdma_req_destroy()
1116 while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) { in rpcrdma_req_destroy()
1117 struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf; in rpcrdma_req_destroy()
1119 spin_lock(&buf->rb_lock); in rpcrdma_req_destroy()
1120 list_del(&mr->mr_all); in rpcrdma_req_destroy()
1121 spin_unlock(&buf->rb_lock); in rpcrdma_req_destroy()
1126 rpcrdma_regbuf_free(req->rl_recvbuf); in rpcrdma_req_destroy()
1127 rpcrdma_regbuf_free(req->rl_sendbuf); in rpcrdma_req_destroy()
1128 rpcrdma_regbuf_free(req->rl_rdmabuf); in rpcrdma_req_destroy()
1133 * rpcrdma_mrs_destroy - Release all of a transport's MRs
1137 * removing mr->mr_list from req->rl_free_mrs safely.
1141 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_destroy()
1144 cancel_work_sync(&buf->rb_refresh_worker); in rpcrdma_mrs_destroy()
1146 spin_lock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1147 while ((mr = list_first_entry_or_null(&buf->rb_all_mrs, in rpcrdma_mrs_destroy()
1150 list_del(&mr->mr_list); in rpcrdma_mrs_destroy()
1151 list_del(&mr->mr_all); in rpcrdma_mrs_destroy()
1152 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1156 spin_lock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1158 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1162 * rpcrdma_buffer_destroy - Release all hw resources
1166 * - No more Send or Receive completions can occur
1167 * - All MRs, reps, and reqs are returned to their free lists
1174 while (!list_empty(&buf->rb_send_bufs)) { in rpcrdma_buffer_destroy()
1177 req = list_first_entry(&buf->rb_send_bufs, in rpcrdma_buffer_destroy()
1179 list_del(&req->rl_list); in rpcrdma_buffer_destroy()
1185 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
1188 * Returns an initialized rpcrdma_mr or NULL if no free
1194 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mr_get()
1197 spin_lock(&buf->rb_lock); in rpcrdma_mr_get()
1198 mr = rpcrdma_mr_pop(&buf->rb_mrs); in rpcrdma_mr_get()
1199 spin_unlock(&buf->rb_lock); in rpcrdma_mr_get()
1204 * rpcrdma_reply_put - Put reply buffers back into pool
1211 if (req->rl_reply) { in rpcrdma_reply_put()
1212 rpcrdma_rep_put(buffers, req->rl_reply); in rpcrdma_reply_put()
1213 req->rl_reply = NULL; in rpcrdma_reply_put()
1218 * rpcrdma_buffer_get - Get a request buffer
1228 spin_lock(&buffers->rb_lock); in rpcrdma_buffer_get()
1229 req = list_first_entry_or_null(&buffers->rb_send_bufs, in rpcrdma_buffer_get()
1232 list_del_init(&req->rl_list); in rpcrdma_buffer_get()
1233 spin_unlock(&buffers->rb_lock); in rpcrdma_buffer_get()
1238 * rpcrdma_buffer_put - Put request/reply buffers back into pool
1247 spin_lock(&buffers->rb_lock); in rpcrdma_buffer_put()
1248 list_add(&req->rl_list, &buffers->rb_send_bufs); in rpcrdma_buffer_put()
1249 spin_unlock(&buffers->rb_lock); in rpcrdma_buffer_put()
1267 rb->rg_data = kmalloc(size, flags); in rpcrdma_regbuf_alloc()
1268 if (!rb->rg_data) { in rpcrdma_regbuf_alloc()
1273 rb->rg_device = NULL; in rpcrdma_regbuf_alloc()
1274 rb->rg_direction = direction; in rpcrdma_regbuf_alloc()
1275 rb->rg_iov.length = size; in rpcrdma_regbuf_alloc()
1280 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
1297 kfree(rb->rg_data); in rpcrdma_regbuf_realloc()
1299 rb->rg_data = buf; in rpcrdma_regbuf_realloc()
1300 rb->rg_iov.length = size; in rpcrdma_regbuf_realloc()
1305 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
1314 struct ib_device *device = r_xprt->rx_ep->re_id->device; in __rpcrdma_regbuf_dma_map()
1316 if (rb->rg_direction == DMA_NONE) in __rpcrdma_regbuf_dma_map()
1319 rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb), in __rpcrdma_regbuf_dma_map()
1320 rdmab_length(rb), rb->rg_direction); in __rpcrdma_regbuf_dma_map()
1326 rb->rg_device = device; in __rpcrdma_regbuf_dma_map()
1327 rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey; in __rpcrdma_regbuf_dma_map()
1339 ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb), in rpcrdma_regbuf_dma_unmap()
1340 rb->rg_direction); in rpcrdma_regbuf_dma_unmap()
1341 rb->rg_device = NULL; in rpcrdma_regbuf_dma_unmap()
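
__rpcrdma_regbuf_dma_map() and its unmap counterpart follow the standard ib_dma_* discipline: map the buffer for the device, verify the result with ib_dma_mapping_error(), use the PD's local_dma_lkey in the SGE, and remember the device so the buffer can be unmapped later. A hedged sketch of that discipline with illustrative names (my_buf is not an rpcrdma type):

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

struct my_buf {
	struct ib_device	*device;	/* set only while mapped */
	struct ib_sge		sge;
	void			*data;
	size_t			len;
};

static bool my_buf_dma_map(struct ib_device *device, struct ib_pd *pd,
			   struct my_buf *rb, enum dma_data_direction dir)
{
	rb->sge.addr = ib_dma_map_single(device, rb->data, rb->len, dir);
	if (ib_dma_mapping_error(device, rb->sge.addr))
		return false;		/* leave the buffer unmapped */

	rb->sge.length = rb->len;
	rb->sge.lkey = pd->local_dma_lkey;	/* kernel ULPs use the PD's DMA lkey */
	rb->device = device;
	return true;
}

static void my_buf_dma_unmap(struct my_buf *rb, enum dma_data_direction dir)
{
	if (!rb->device)
		return;
	ib_dma_unmap_single(rb->device, rb->sge.addr, rb->len, dir);
	rb->device = NULL;
}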
1348 kfree(rb->rg_data); in rpcrdma_regbuf_free()
1353 * rpcrdma_post_recvs - Refill the Receive Queue
1361 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_post_recvs()
1362 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_post_recvs()
1370 if (likely(ep->re_receive_count > needed)) in rpcrdma_post_recvs()
1372 needed -= ep->re_receive_count; in rpcrdma_post_recvs()
1376 if (atomic_inc_return(&ep->re_receiving) > 1) in rpcrdma_post_recvs()
1383 if (rep && rep->rr_temp) { in rpcrdma_post_recvs()
1392 rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id; in rpcrdma_post_recvs()
1394 rep->rr_recv_wr.next = wr; in rpcrdma_post_recvs()
1395 wr = &rep->rr_recv_wr; in rpcrdma_post_recvs()
1396 --needed; in rpcrdma_post_recvs()
1402 rc = ib_post_recv(ep->re_id->qp, wr, in rpcrdma_post_recvs()
1410 wr = wr->next; in rpcrdma_post_recvs()
1412 --count; in rpcrdma_post_recvs()
1415 if (atomic_dec_return(&ep->re_receiving) > 0) in rpcrdma_post_recvs()
1416 complete(&ep->re_done); in rpcrdma_post_recvs()
1420 ep->re_receive_count += count; in rpcrdma_post_recvs()
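
rpcrdma_post_recvs() links each rep's rr_recv_wr.next to the previously collected WR and hands the whole chain to a single ib_post_recv() call; on failure, bad_wr points at the first WR the provider did not consume, so the number of Receives actually posted can still be tallied. A sketch of that chained-post pattern (my_get_recv_wr() is an assumed helper that returns a WR with wr_cqe, sg_list and num_sge already filled in; it is not part of the rpcrdma code):

#include <rdma/ib_verbs.h>

static struct ib_recv_wr *my_get_recv_wr(void);	/* assumed helper */

/* Post up to "needed" Receives as one chained ib_post_recv() call.
 * Returns the number of WRs the provider actually accepted.
 */
static int my_post_recv_chain(struct ib_qp *qp, int needed)
{
	const struct ib_recv_wr *bad_wr;
	struct ib_recv_wr *wr = NULL;
	int count = 0, rc;

	while (needed--) {
		struct ib_recv_wr *this = my_get_recv_wr();

		if (!this)
			break;
		this->next = wr;	/* chain is built back to front */
		wr = this;
		++count;
	}
	if (!wr)
		return 0;

	rc = ib_post_recv(qp, wr, &bad_wr);
	if (rc) {
		/* Everything from bad_wr onward was not posted. */
		for (; bad_wr; bad_wr = bad_wr->next)
			--count;
	}
	return count;
}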