Lines matching refs:r_xprt
130 static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, in rpcrdma_args_inline() argument
134 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_args_inline()
162 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt, in rpcrdma_results_inline() argument
165 return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv; in rpcrdma_results_inline()
173 rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt, in rpcrdma_nonpayload_inline() argument
179 r_xprt->rx_ep->re_max_inline_recv; in rpcrdma_nonpayload_inline()
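
The three helpers above (rpcrdma_args_inline, rpcrdma_results_inline, rpcrdma_nonpayload_inline) all reduce to comparing a buffer size against the endpoint's negotiated inline threshold. Below is a minimal user-space sketch of that check; the structs are simplified stand-ins, and only the field name re_max_inline_recv is taken from the listing.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's rpcrdma_ep / rpcrdma_xprt. */
struct ep_model {
	size_t re_max_inline_recv;	/* negotiated inline receive threshold */
};

struct xprt_model {
	struct ep_model *rx_ep;
};

/* Mirrors the shape of rpcrdma_results_inline(): the reply can arrive
 * inline when the receive buffer is no larger than the threshold. */
static bool results_fit_inline(const struct xprt_model *r_xprt, size_t rcv_buflen)
{
	return rcv_buflen <= r_xprt->rx_ep->re_max_inline_recv;
}

int main(void)
{
	struct ep_model ep = { .re_max_inline_recv = 4096 };
	struct xprt_model xprt = { .rx_ep = &ep };

	printf("1KB reply inline?  %d\n", results_fit_inline(&xprt, 1024));
	printf("64KB reply inline? %d\n", results_fit_inline(&xprt, 65536));
	return 0;
}

When a check like this fails, the marshaling code falls back to registering chunks, which is what the encode_*_list helpers further down handle.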
220 rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, in rpcrdma_convert_iovs() argument
258 if (type == rpcrdma_readch && r_xprt->rx_ep->re_implicit_roundup) in rpcrdma_convert_iovs()
266 if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup) in rpcrdma_convert_iovs()
307 static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt, in rpcrdma_mr_prepare() argument
315 *mr = rpcrdma_mr_get(r_xprt); in rpcrdma_mr_prepare()
323 return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr); in rpcrdma_mr_prepare()
327 xprt_wait_for_buffer_space(&r_xprt->rx_xprt); in rpcrdma_mr_prepare()
328 rpcrdma_mrs_refresh(r_xprt); in rpcrdma_mr_prepare()
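
rpcrdma_mr_prepare() shows an allocate-or-retry pattern: take an MR from the transport's pool, map the segments with frwr_map(), and on exhaustion wait for buffer space and refresh the pool. The toy sketch below models only that control flow; the array-backed pool is a hypothetical stand-in for rpcrdma_mr_get() and rpcrdma_mrs_refresh().

#include <errno.h>
#include <stdio.h>

#define POOL_SIZE 4

struct mr_pool {
	int free_mrs;
};

/* Hypothetical stand-in for rpcrdma_mr_get(): fail when the pool is dry. */
static int pool_get(struct mr_pool *p)
{
	if (p->free_mrs == 0)
		return -EAGAIN;
	p->free_mrs--;
	return 0;
}

/* Hypothetical stand-in for rpcrdma_mrs_refresh(). */
static void pool_refresh(struct mr_pool *p)
{
	p->free_mrs = POOL_SIZE;
}

static int prepare_segment(struct mr_pool *p)
{
	int rc = pool_get(p);

	if (rc == -EAGAIN) {
		/* In the listing, this is where xprt_wait_for_buffer_space()
		 * and rpcrdma_mrs_refresh() are called so the marshaling
		 * path can back off and retry later. */
		pool_refresh(p);
	}
	return rc;
}

int main(void)
{
	struct mr_pool pool = { .free_mrs = 1 };

	printf("first:  %d\n", prepare_segment(&pool));	/* 0: got an MR */
	printf("second: %d\n", prepare_segment(&pool));	/* -EAGAIN: pool empty */
	printf("third:  %d\n", prepare_segment(&pool));	/* 0 again after refresh */
	return 0;
}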
346 static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, in rpcrdma_encode_read_list() argument
364 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos, in rpcrdma_encode_read_list()
370 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr); in rpcrdma_encode_read_list()
378 r_xprt->rx_stats.read_chunk_count++; in rpcrdma_encode_read_list()
403 static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, in rpcrdma_encode_write_list() argument
418 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, in rpcrdma_encode_write_list()
433 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr); in rpcrdma_encode_write_list()
441 r_xprt->rx_stats.write_chunk_count++; in rpcrdma_encode_write_list()
442 r_xprt->rx_stats.total_rdma_request += mr->mr_length; in rpcrdma_encode_write_list()
468 static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, in rpcrdma_encode_reply_chunk() argument
486 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg); in rpcrdma_encode_reply_chunk()
499 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr); in rpcrdma_encode_reply_chunk()
507 r_xprt->rx_stats.reply_chunk_count++; in rpcrdma_encode_reply_chunk()
508 r_xprt->rx_stats.total_rdma_request += mr->mr_length; in rpcrdma_encode_reply_chunk()
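
The three chunk encoders share a common shape: convert the xdr_buf into segments, register each range via rpcrdma_mr_prepare(), and account the registered bytes in rx_stats. Below is a simplified model of that accounting; exactly where the kernel bumps each counter relative to the registration loop is not visible in the listing, so this sketch just totals the bytes and counts the chunk once.

#include <stdio.h>

struct stats_model {
	unsigned long write_chunk_count;
	unsigned long long total_rdma_request;
};

struct seg_model {
	unsigned int length;	/* bytes covered by one registered MR */
};

static void encode_write_chunk(struct stats_model *stats,
			       const struct seg_model *segs, int nsegs)
{
	for (int i = 0; i < nsegs; i++) {
		/* In the kernel, rpcrdma_mr_prepare() registers this range
		 * and the MR's length feeds total_rdma_request. */
		stats->total_rdma_request += segs[i].length;
	}
	stats->write_chunk_count++;
}

int main(void)
{
	struct seg_model segs[] = { { 4096 }, { 4096 }, { 1024 } };
	struct stats_model stats = { 0 };

	encode_write_chunk(&stats, segs, 3);
	printf("chunks=%lu bytes=%llu\n",
	       stats.write_chunk_count, stats.total_rdma_request);
	return 0;
}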
556 static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_hdr_sge() argument
574 static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_head_iov() argument
581 if (!rpcrdma_regbuf_dma_map(r_xprt, rb)) in rpcrdma_prepare_head_iov()
662 static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt, in rpcrdma_pullup_tail_iov() argument
671 r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len; in rpcrdma_pullup_tail_iov()
676 static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt, in rpcrdma_pullup_pagelist() argument
694 r_xprt->rx_stats.pullup_copy_count += len; in rpcrdma_pullup_pagelist()
712 static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_noch_pullup() argument
717 rpcrdma_pullup_tail_iov(r_xprt, req, xdr); in rpcrdma_prepare_noch_pullup()
720 rpcrdma_pullup_pagelist(r_xprt, req, xdr); in rpcrdma_prepare_noch_pullup()
723 return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len); in rpcrdma_prepare_noch_pullup()
726 static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_noch_mapped() argument
732 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len)) in rpcrdma_prepare_noch_mapped()
748 static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_readch() argument
752 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len)) in rpcrdma_prepare_readch()
790 inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_send_sges() argument
798 req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt); in rpcrdma_prepare_send_sges()
809 rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen); in rpcrdma_prepare_send_sges()
814 if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr)) in rpcrdma_prepare_send_sges()
818 if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr)) in rpcrdma_prepare_send_sges()
822 if (!rpcrdma_prepare_readch(r_xprt, req, xdr)) in rpcrdma_prepare_send_sges()
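
rpcrdma_prepare_send_sges() dispatches to one of the prepare helpers above according to the chunk type chosen during marshaling. The sketch below models only that dispatch; the enum values echo the noch_pullup / noch_mapped / readch cases implied by the listing, and the behavior comments are summaries inferred from the calls visible above (only the head-iov mapping actually appears in the listing for the mapped and readch cases).

#include <stdbool.h>
#include <stdio.h>

enum chunktype_model { NOCH_PULLUP, NOCH_MAPPED, READCH };

static bool prepare_send(enum chunktype_model rtype)
{
	/* kernel: rpcrdma_prepare_hdr_sge() sets up the header SGE first */
	switch (rtype) {
	case NOCH_PULLUP:
		/* tail and pagelist are copied into the head buffer, then a
		 * single head iovec is mapped (rpcrdma_prepare_noch_pullup) */
		return true;
	case NOCH_MAPPED:
		/* the head is mapped as-is and the remaining buffer parts are
		 * carried as further SGEs (rpcrdma_prepare_noch_mapped) */
		return true;
	case READCH:
		/* the payload travels in the Read list, so only the head
		 * (and tail, if any) goes inline (rpcrdma_prepare_readch) */
		return true;
	}
	return false;
}

int main(void)
{
	printf("readch prepared: %d\n", prepare_send(READCH));
	return 0;
}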
860 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst) in rpcrdma_marshal_req() argument
881 *p++ = r_xprt->rx_buf.rb_max_requests; in rpcrdma_marshal_req()
899 if (rpcrdma_results_inline(r_xprt, rqst)) in rpcrdma_marshal_req()
902 rpcrdma_nonpayload_inline(r_xprt, rqst)) in rpcrdma_marshal_req()
921 if (rpcrdma_args_inline(r_xprt, rqst)) { in rpcrdma_marshal_req()
929 r_xprt->rx_stats.nomsg_call_count++; in rpcrdma_marshal_req()
956 ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype); in rpcrdma_marshal_req()
959 ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype); in rpcrdma_marshal_req()
962 ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype); in rpcrdma_marshal_req()
966 ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len, in rpcrdma_marshal_req()
976 r_xprt->rx_stats.failed_marshal_count++; in rpcrdma_marshal_req()
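
rpcrdma_marshal_req() first decides how the reply will come back (inline, via Write chunks, or via a Reply chunk) and how the call will go out (inline, or with the arguments moved by a Read chunk, the path that bumps nomsg_call_count). The sketch below models only those two decisions; it is a simplification, since the real function also distinguishes pulled-up vs. mapped inline sends and checks whether the buffers actually carry a bulk payload.

#include <stdbool.h>
#include <stdio.h>

enum wtype_model { REPLY_INLINE, WRITE_CHUNKS, REPLY_CHUNK };
enum rtype_model { CALL_INLINE, READ_CHUNK };

/* Reply direction: prefer inline, then Write chunks for the payload only,
 * otherwise hand the whole reply over via a Reply chunk. */
static enum wtype_model pick_wtype(bool results_inline, bool nonpayload_inline)
{
	if (results_inline)
		return REPLY_INLINE;
	if (nonpayload_inline)
		return WRITE_CHUNKS;
	return REPLY_CHUNK;
}

/* Call direction: send inline when the arguments fit, otherwise move them
 * via a Read chunk. */
static enum rtype_model pick_rtype(bool args_inline)
{
	return args_inline ? CALL_INLINE : READ_CHUNK;
}

int main(void)
{
	printf("wtype=%d rtype=%d\n",
	       pick_wtype(false, true), pick_rtype(false));
	return 0;
}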
989 static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant) in rpcrdma_update_cwnd() argument
991 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_update_cwnd()
994 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant); in rpcrdma_update_cwnd()
1005 void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt) in rpcrdma_reset_cwnd() argument
1007 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_reset_cwnd()
1011 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1); in rpcrdma_reset_cwnd()
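
rpcrdma_update_cwnd() and rpcrdma_reset_cwnd() translate the peer's credit grant into RPC congestion window, with a reset dropping back to a single credit (the literal 1 at line 1011). Below is a sketch of that mapping using a hypothetical scale factor; the kernel's own congestion-window scaling is not reproduced here.

#include <stdio.h>

#define CWND_SCALE 256u	/* hypothetical per-credit window weight */

struct xprt_cwnd_model {
	unsigned long cwnd;
};

static void update_cwnd(struct xprt_cwnd_model *xprt, unsigned int grant)
{
	xprt->cwnd = (unsigned long)grant * CWND_SCALE;
}

static void reset_cwnd(struct xprt_cwnd_model *xprt)
{
	update_cwnd(xprt, 1);	/* mirrors the grant of 1 at line 1011 above */
}

int main(void)
{
	struct xprt_cwnd_model xprt = { 0 };

	reset_cwnd(&xprt);
	printf("after reset: %lu\n", xprt.cwnd);
	update_cwnd(&xprt, 32);
	printf("after grant of 32: %lu\n", xprt.cwnd);
	return 0;
}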
1115 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) in rpcrdma_is_bcall() argument
1148 rpcrdma_bc_receive_call(r_xprt, rep); in rpcrdma_is_bcall()
1255 rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, in rpcrdma_decode_msg() argument
1277 r_xprt->rx_stats.fixup_copy_count += in rpcrdma_decode_msg()
1280 r_xprt->rx_stats.total_rdma_reply += writelist; in rpcrdma_decode_msg()
1285 rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) in rpcrdma_decode_nomsg() argument
1305 r_xprt->rx_stats.total_rdma_reply += replychunk; in rpcrdma_decode_nomsg()
1310 rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, in rpcrdma_decode_error() argument
1350 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; in rpcrdma_complete_rqst() local
1351 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_complete_rqst()
1357 status = rpcrdma_decode_msg(r_xprt, rep, rqst); in rpcrdma_complete_rqst()
1360 status = rpcrdma_decode_nomsg(r_xprt, rep); in rpcrdma_complete_rqst()
1363 status = rpcrdma_decode_error(r_xprt, rep, rqst); in rpcrdma_complete_rqst()
1380 r_xprt->rx_stats.bad_reply_count++; in rpcrdma_complete_rqst()
1403 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; in rpcrdma_reply_handler() local
1404 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_reply_handler()
1405 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reply_handler()
1431 if (rpcrdma_is_bcall(r_xprt, rep)) in rpcrdma_reply_handler()
1446 else if (credits > r_xprt->rx_ep->re_max_requests) in rpcrdma_reply_handler()
1447 credits = r_xprt->rx_ep->re_max_requests; in rpcrdma_reply_handler()
1449 rpcrdma_update_cwnd(r_xprt, credits); in rpcrdma_reply_handler()
1450 rpcrdma_post_recvs(r_xprt, false); in rpcrdma_reply_handler()
1465 frwr_unmap_async(r_xprt, req); in rpcrdma_reply_handler()
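
rpcrdma_reply_handler() clamps the server's credit grant to the endpoint maximum before updating the congestion window and reposting receives, and rpcrdma_complete_rqst() then decodes the reply according to its procedure before the registered MRs are invalidated via frwr_unmap_async(). The sketch below covers the clamp and the decode dispatch; the zero-grant case is an inference from the "else if" at line 1446, and the enum names are stand-ins for the RPC-over-RDMA procedure values.

#include <stdio.h>

enum rdma_proc_model { RDMA_MSG_M, RDMA_NOMSG_M, RDMA_ERROR_M };

struct ep_model {
	unsigned int re_max_requests;
};

static unsigned int clamp_credits(const struct ep_model *ep, unsigned int credits)
{
	if (credits == 0)
		credits = 1;			/* keep at least one slot open */
	else if (credits > ep->re_max_requests)
		credits = ep->re_max_requests;	/* lines 1446-1447 above */
	return credits;
}

static int decode_reply(enum rdma_proc_model proc)
{
	switch (proc) {
	case RDMA_MSG_M:
		return 0;	/* kernel: rpcrdma_decode_msg() */
	case RDMA_NOMSG_M:
		return 0;	/* kernel: rpcrdma_decode_nomsg() */
	case RDMA_ERROR_M:
	default:
		return -1;	/* kernel: rpcrdma_decode_error() */
	}
}

int main(void)
{
	struct ep_model ep = { .re_max_requests = 128 };

	printf("credits=%u status=%d\n",
	       clamp_credits(&ep, 512), decode_reply(RDMA_MSG_M));
	return 0;
}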