Lines Matching refs:r_xprt

130 static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, in rpcrdma_args_inline() argument
134 struct rpcrdma_ep *ep = r_xprt->rx_ep; in rpcrdma_args_inline()
162 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt, in rpcrdma_results_inline() argument
165 return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv; in rpcrdma_results_inline()
173 rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt, in rpcrdma_nonpayload_inline() argument
179 r_xprt->rx_ep->re_max_inline_recv; in rpcrdma_nonpayload_inline()
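The three predicates above gate the whole marshaling path: a call can go inline when the send buffer fits under re_max_inline_send, and the reply is expected inline when the receive buffer fits under re_max_inline_recv. Below is a minimal standalone sketch of that threshold test; the structs are simplified stand-ins, not the kernel's rpcrdma_xprt / rpcrdma_ep layouts.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ep_limits {                      /* stand-in for rpcrdma_ep */
        size_t max_inline_send;         /* cf. re_max_inline_send */
        size_t max_inline_recv;         /* cf. re_max_inline_recv */
};

/* Call message fits the send inline threshold: no Read chunks needed. */
static bool args_inline(const struct ep_limits *ep, size_t snd_len)
{
        return snd_len <= ep->max_inline_send;
}

/* Expected reply fits the receive inline threshold: no Reply chunk needed. */
static bool results_inline(const struct ep_limits *ep, size_t rcv_buflen)
{
        return rcv_buflen <= ep->max_inline_recv;
}

int main(void)
{
        struct ep_limits ep = { .max_inline_send = 4096, .max_inline_recv = 4096 };

        printf("512-byte call inline?  %d\n", args_inline(&ep, 512));
        printf("64KB reply inline?     %d\n", results_inline(&ep, 65536));
        return 0;
}

The in-kernel rpcrdma_args_inline additionally checks that the call's page list can be covered by the device's send SGE limit; that refinement is omitted here.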
232 rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, in rpcrdma_convert_iovs() argument
266 if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup) in rpcrdma_convert_iovs()
307 static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt, in rpcrdma_mr_prepare() argument
315 *mr = rpcrdma_mr_get(r_xprt); in rpcrdma_mr_prepare()
322 return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr); in rpcrdma_mr_prepare()
325 trace_xprtrdma_nomrs_err(r_xprt, req); in rpcrdma_mr_prepare()
326 xprt_wait_for_buffer_space(&r_xprt->rx_xprt); in rpcrdma_mr_prepare()
327 rpcrdma_mrs_refresh(r_xprt); in rpcrdma_mr_prepare()
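rpcrdma_mr_prepare takes an MR from the transport's free list and hands the next run of segments to frwr_map; when no MR is available it asks the RPC layer to wait for buffer space and kicks off an MR refresh so the marshal can be retried. A hedged sketch of that acquire-or-back-off shape follows; all names here are illustrative, not kernel APIs.

#include <errno.h>
#include <stdio.h>

struct mr { int id; };

struct mr_pool {                        /* stand-in for the transport's MR free list */
        struct mr *mrs;
        int nfree;
};

/* Take one MR, or signal back-pressure so the caller retries the whole
 * marshal later (the kernel path returns -EAGAIN in this case after
 * waiting for buffer space and scheduling an MR refresh). */
static struct mr *mr_prepare(struct mr_pool *pool, int *err)
{
        if (pool->nfree == 0) {
                *err = -EAGAIN;
                return NULL;
        }
        *err = 0;
        return &pool->mrs[--pool->nfree];
}

int main(void)
{
        struct mr mrs[2] = { { .id = 1 }, { .id = 2 } };
        struct mr_pool pool = { .mrs = mrs, .nfree = 2 };
        int err;

        for (int i = 0; i < 3; i++) {
                struct mr *mr = mr_prepare(&pool, &err);
                if (!mr)
                        printf("attempt %d: no MR, err=%d, retry later\n", i, err);
                else
                        printf("attempt %d: got MR %d\n", i, mr->id);
        }
        return 0;
}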
345 static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, in rpcrdma_encode_read_list() argument
363 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos, in rpcrdma_encode_read_list()
369 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr); in rpcrdma_encode_read_list()
377 r_xprt->rx_stats.read_chunk_count++; in rpcrdma_encode_read_list()
402 static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, in rpcrdma_encode_write_list() argument
417 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, in rpcrdma_encode_write_list()
432 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr); in rpcrdma_encode_write_list()
440 r_xprt->rx_stats.write_chunk_count++; in rpcrdma_encode_write_list()
441 r_xprt->rx_stats.total_rdma_request += mr->mr_length; in rpcrdma_encode_write_list()
467 static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, in rpcrdma_encode_reply_chunk() argument
485 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg); in rpcrdma_encode_reply_chunk()
498 seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr); in rpcrdma_encode_reply_chunk()
506 r_xprt->rx_stats.reply_chunk_count++; in rpcrdma_encode_reply_chunk()
507 r_xprt->rx_stats.total_rdma_request += mr->mr_length; in rpcrdma_encode_reply_chunk()
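Each encoder above walks the prepared segments, emits them as RPC-over-RDMA chunk entries, and bumps the matching rx_stats counter. On the wire (RFC 8166) a segment is a handle/length/offset triple, and a Write chunk is a counted array of such segments. The sketch below hand-rolls that XDR layout as an assumed illustration; it is not lifted from the kernel's encoders.

#include <stdint.h>
#include <stdio.h>

/* One registered segment: RDMA handle, byte length, remote offset. */
struct rdma_segment {
        uint32_t handle;
        uint32_t length;
        uint64_t offset;
};

static unsigned char *xdr_put_u32(unsigned char *p, uint32_t v)
{
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
        return p + 4;
}

static unsigned char *xdr_put_u64(unsigned char *p, uint64_t v)
{
        p = xdr_put_u32(p, (uint32_t)(v >> 32));
        return xdr_put_u32(p, (uint32_t)v);
}

/* A Write chunk is a counted array of segments: segment count first,
 * then one handle/length/offset triple per segment. */
static unsigned char *encode_write_chunk(unsigned char *p,
                                         const struct rdma_segment *segs,
                                         uint32_t nsegs)
{
        p = xdr_put_u32(p, nsegs);
        for (uint32_t i = 0; i < nsegs; i++) {
                p = xdr_put_u32(p, segs[i].handle);
                p = xdr_put_u32(p, segs[i].length);
                p = xdr_put_u64(p, segs[i].offset);
        }
        return p;
}

int main(void)
{
        unsigned char buf[64];
        struct rdma_segment seg = { .handle = 0x1234, .length = 8192,
                                    .offset = 0xdeadbeef00ULL };
        unsigned char *end = encode_write_chunk(buf, &seg, 1);

        printf("encoded %zu bytes\n", (size_t)(end - buf));
        return 0;
}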
555 static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_hdr_sge() argument
573 static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_head_iov() argument
580 if (!rpcrdma_regbuf_dma_map(r_xprt, rb)) in rpcrdma_prepare_head_iov()
661 static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt, in rpcrdma_pullup_tail_iov() argument
670 r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len; in rpcrdma_pullup_tail_iov()
675 static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt, in rpcrdma_pullup_pagelist() argument
693 r_xprt->rx_stats.pullup_copy_count += len; in rpcrdma_pullup_pagelist()
711 static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_noch_pullup() argument
716 rpcrdma_pullup_tail_iov(r_xprt, req, xdr); in rpcrdma_prepare_noch_pullup()
719 rpcrdma_pullup_pagelist(r_xprt, req, xdr); in rpcrdma_prepare_noch_pullup()
722 return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len); in rpcrdma_prepare_noch_pullup()
725 static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_noch_mapped() argument
731 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len)) in rpcrdma_prepare_noch_mapped()
747 static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_readch() argument
751 if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len)) in rpcrdma_prepare_readch()
789 inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_send_sges() argument
797 req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt); in rpcrdma_prepare_send_sges()
808 rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen); in rpcrdma_prepare_send_sges()
813 if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr)) in rpcrdma_prepare_send_sges()
817 if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr)) in rpcrdma_prepare_send_sges()
821 if (!rpcrdma_prepare_readch(r_xprt, req, xdr)) in rpcrdma_prepare_send_sges()
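rpcrdma_prepare_send_sges picks one of three strategies for the Send payload: pull everything up into the head buffer so a single SGE covers it (accounted in pullup_copy_count), DMA-map head, page list, and tail separately, or, for a Read-chunked call, send only the head. A simplified sketch of the pull-up copy follows, using a flat stand-in for struct xdr_buf.

#include <stdio.h>
#include <string.h>

/* Flattened stand-in for struct xdr_buf: head iovec, page region, tail iovec. */
struct flat_xdr_buf {
        const char *head;  size_t head_len;
        const char *pages; size_t page_len;
        const char *tail;  size_t tail_len;
};

/* Copy page and tail bytes so they sit directly behind the head in one
 * contiguous buffer; the result can be posted with a single send SGE. */
static size_t pullup(const struct flat_xdr_buf *xdr, char *dst, size_t dst_len)
{
        size_t total = xdr->head_len + xdr->page_len + xdr->tail_len;

        if (total > dst_len)
                return 0;       /* caller falls back to mapped SGEs */
        memcpy(dst, xdr->head, xdr->head_len);
        memcpy(dst + xdr->head_len, xdr->pages, xdr->page_len);
        memcpy(dst + xdr->head_len + xdr->page_len, xdr->tail, xdr->tail_len);
        return total;
}

int main(void)
{
        char out[64];
        struct flat_xdr_buf xdr = {
                .head = "HEAD",   .head_len = 4,
                .pages = "PAGES", .page_len = 5,
                .tail = "TAIL",   .tail_len = 4,
        };
        size_t n = pullup(&xdr, out, sizeof(out));

        printf("pulled up %zu bytes: %.13s\n", n, out);
        return 0;
}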
859 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst) in rpcrdma_marshal_req() argument
886 *p++ = r_xprt->rx_buf.rb_max_requests; in rpcrdma_marshal_req()
904 if (rpcrdma_results_inline(r_xprt, rqst)) in rpcrdma_marshal_req()
907 rpcrdma_nonpayload_inline(r_xprt, rqst)) in rpcrdma_marshal_req()
926 if (rpcrdma_args_inline(r_xprt, rqst)) { in rpcrdma_marshal_req()
934 r_xprt->rx_stats.nomsg_call_count++; in rpcrdma_marshal_req()
961 ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype); in rpcrdma_marshal_req()
964 ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype); in rpcrdma_marshal_req()
967 ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype); in rpcrdma_marshal_req()
971 ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len, in rpcrdma_marshal_req()
981 r_xprt->rx_stats.failed_marshal_count++; in rpcrdma_marshal_req()
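rpcrdma_marshal_req ties the pieces together: it writes the transport header, picks the reply chunk type from the inline checks, picks the call chunk type, then encodes the Read list, Write list, and Reply chunk before building the send SGEs. The sketch below approximates the two selection steps; the enum and the single ddp_payload flag are simplifications of the kernel's chunk-type values and the separate XDRBUF_READ / XDRBUF_WRITE hints.

#include <stdbool.h>
#include <stdio.h>

enum chunktype { noch, readch, areadch, writech, replych };

/* Inputs that drive the choice; in the kernel these come from the inline
 * checks above and from xdr_buf flags set by the upper layer. */
struct marshal_hints {
        bool args_inline;        /* call fits re_max_inline_send */
        bool results_inline;     /* reply fits re_max_inline_recv */
        bool nonpayload_inline;  /* everything but the data payload fits */
        bool ddp_payload;        /* upper layer marked a direct-placement payload */
};

static enum chunktype pick_wtype(const struct marshal_hints *h)
{
        if (h->results_inline)
                return noch;                    /* reply arrives inline */
        if (h->ddp_payload && h->nonpayload_inline)
                return writech;                 /* payload via Write chunk */
        return replych;                         /* whole reply via Reply chunk */
}

static enum chunktype pick_rtype(const struct marshal_hints *h)
{
        if (h->args_inline)
                return noch;                    /* RDMA_MSG, no Read chunks */
        if (h->ddp_payload)
                return readch;                  /* payload via Read chunk */
        return areadch;                         /* RDMA_NOMSG, whole call read */
}

int main(void)
{
        struct marshal_hints h = { .args_inline = true, .results_inline = false,
                                   .nonpayload_inline = true, .ddp_payload = true };

        printf("rtype=%d wtype=%d\n", pick_rtype(&h), pick_wtype(&h));
        return 0;
}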
994 static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant) in rpcrdma_update_cwnd() argument
996 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_update_cwnd()
999 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant); in rpcrdma_update_cwnd()
1010 void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt) in rpcrdma_reset_cwnd() argument
1012 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_reset_cwnd()
1016 __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1); in rpcrdma_reset_cwnd()
1120 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) in rpcrdma_is_bcall() argument
1153 rpcrdma_bc_receive_call(r_xprt, rep); in rpcrdma_is_bcall()
1256 rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, in rpcrdma_decode_msg() argument
1278 r_xprt->rx_stats.fixup_copy_count += in rpcrdma_decode_msg()
1281 r_xprt->rx_stats.total_rdma_reply += writelist; in rpcrdma_decode_msg()
1286 rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) in rpcrdma_decode_nomsg() argument
1306 r_xprt->rx_stats.total_rdma_reply += replychunk; in rpcrdma_decode_nomsg()
1311 rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, in rpcrdma_decode_error() argument
1370 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; in rpcrdma_complete_rqst() local
1371 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_complete_rqst()
1377 status = rpcrdma_decode_msg(r_xprt, rep, rqst); in rpcrdma_complete_rqst()
1380 status = rpcrdma_decode_nomsg(r_xprt, rep); in rpcrdma_complete_rqst()
1383 status = rpcrdma_decode_error(r_xprt, rep, rqst); in rpcrdma_complete_rqst()
1400 r_xprt->rx_stats.bad_reply_count++; in rpcrdma_complete_rqst()
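rpcrdma_complete_rqst dispatches on the procedure carried in the reply's transport header: RDMA_MSG replies are decoded in place, RDMA_NOMSG replies arrive entirely via the Reply chunk, RDMA_ERROR reports a version or chunk problem, and anything else bumps bad_reply_count. A stubbed sketch of that dispatch, using the RFC 8166 wire values:

#include <errno.h>
#include <stdio.h>

/* RPC-over-RDMA procedure values defined by RFC 8166. */
enum rdma_proc { RDMA_MSG = 0, RDMA_NOMSG = 1, RDMA_ERROR = 4 };

static int decode_msg(void)   { return 0; }     /* RPC message follows inline */
static int decode_nomsg(void) { return 0; }     /* body delivered via Reply chunk */
static int decode_error(void) { return -EIO; }  /* remote reported an error */

static int complete_reply(enum rdma_proc proc, unsigned long *bad_replies)
{
        switch (proc) {
        case RDMA_MSG:
                return decode_msg();
        case RDMA_NOMSG:
                return decode_nomsg();
        case RDMA_ERROR:
                return decode_error();
        default:
                (*bad_replies)++;               /* cf. bad_reply_count++ */
                return -EIO;
        }
}

int main(void)
{
        unsigned long bad = 0;

        printf("RDMA_MSG   -> %d\n", complete_reply(RDMA_MSG, &bad));
        printf("unknown(9) -> %d\n", complete_reply((enum rdma_proc)9, &bad));
        printf("bad replies: %lu\n", bad);
        return 0;
}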
1423 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; in rpcrdma_reply_handler() local
1424 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_reply_handler()
1425 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reply_handler()
1451 if (rpcrdma_is_bcall(r_xprt, rep)) in rpcrdma_reply_handler()
1466 else if (credits > r_xprt->rx_ep->re_max_requests) in rpcrdma_reply_handler()
1467 credits = r_xprt->rx_ep->re_max_requests; in rpcrdma_reply_handler()
1468 rpcrdma_post_recvs(r_xprt, credits + (buf->rb_bc_srv_max_requests << 1), in rpcrdma_reply_handler()
1471 rpcrdma_update_cwnd(r_xprt, credits); in rpcrdma_reply_handler()
1484 frwr_unmap_async(r_xprt, req); in rpcrdma_reply_handler()
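rpcrdma_reply_handler screens for backchannel calls, clamps the credit grant from the reply header, replenishes Receive buffers, and updates the RPC congestion window before handing the reply off for decoding. The arithmetic visible above (clamp to at least 1 and at most re_max_requests, then post credits plus twice the backchannel depth) is sketched below in simplified form; the actual posting and locking are elided.

#include <stdint.h>
#include <stdio.h>

/* Clamp the server's credit grant: at least 1, at most the endpoint's
 * configured request limit (cf. re_max_requests above). */
static uint32_t clamp_credits(uint32_t granted, uint32_t max_requests)
{
        if (granted == 0)
                return 1;
        if (granted > max_requests)
                return max_requests;
        return granted;
}

int main(void)
{
        uint32_t max_requests = 128;
        uint32_t bc_srv_max_requests = 8;       /* backchannel depth */
        uint32_t granted = 200;                 /* from the reply header */

        uint32_t credits = clamp_credits(granted, max_requests);
        /* Keep enough Receives posted for the new credit limit plus twice
         * the backchannel depth (cf. the "<< 1" above). */
        uint32_t to_post = credits + (bc_srv_max_requests << 1);

        printf("credits=%u, receives to keep posted=%u\n", credits, to_post);
        return 0;
}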