Lines matching refs: r_xprt
110 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt) in rpcrdma_set_max_header_sizes() argument
112 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data; in rpcrdma_set_max_header_sizes()
113 struct rpcrdma_ia *ia = &r_xprt->rx_ia; in rpcrdma_set_max_header_sizes()
130 static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt, in rpcrdma_args_inline() argument
136 if (xdr->len > r_xprt->rx_ia.ri_max_inline_write) in rpcrdma_args_inline()
147 if (++count > r_xprt->rx_ia.ri_max_send_sges) in rpcrdma_args_inline()
161 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt, in rpcrdma_results_inline() argument
164 struct rpcrdma_ia *ia = &r_xprt->rx_ia; in rpcrdma_results_inline()
207 rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf, in rpcrdma_convert_iovs() argument
244 if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup) in rpcrdma_convert_iovs()
252 if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup) in rpcrdma_convert_iovs()
342 rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, in rpcrdma_encode_read_list() argument
355 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos, in rpcrdma_encode_read_list()
361 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, in rpcrdma_encode_read_list()
371 r_xprt->rx_stats.read_chunk_count++; in rpcrdma_encode_read_list()
394 rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, in rpcrdma_encode_write_list() argument
404 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, in rpcrdma_encode_write_list()
419 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, in rpcrdma_encode_write_list()
429 r_xprt->rx_stats.write_chunk_count++; in rpcrdma_encode_write_list()
430 r_xprt->rx_stats.total_rdma_request += mr->mr_length; in rpcrdma_encode_write_list()
454 rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, in rpcrdma_encode_reply_chunk() argument
464 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg); in rpcrdma_encode_reply_chunk()
477 seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, in rpcrdma_encode_reply_chunk()
487 r_xprt->rx_stats.reply_chunk_count++; in rpcrdma_encode_reply_chunk()
488 r_xprt->rx_stats.total_rdma_request += mr->mr_length; in rpcrdma_encode_reply_chunk()
687 rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt, in rpcrdma_prepare_send_sges() argument
691 req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf); in rpcrdma_prepare_send_sges()
699 if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen)) in rpcrdma_prepare_send_sges()
703 if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype)) in rpcrdma_prepare_send_sges()
729 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst) in rpcrdma_marshal_req() argument
749 *p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests); in rpcrdma_marshal_req()
767 if (rpcrdma_results_inline(r_xprt, rqst)) in rpcrdma_marshal_req()
788 if (rpcrdma_args_inline(r_xprt, rqst)) { in rpcrdma_marshal_req()
795 r_xprt->rx_stats.nomsg_call_count++; in rpcrdma_marshal_req()
834 ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype); in rpcrdma_marshal_req()
843 ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype); in rpcrdma_marshal_req()
854 ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype); in rpcrdma_marshal_req()
860 ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr), in rpcrdma_marshal_req()
874 r_xprt->rx_stats.failed_marshal_count++; in rpcrdma_marshal_req()
980 rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) in rpcrdma_is_bcall() argument
1013 rpcrdma_bc_receive_call(r_xprt, rep); in rpcrdma_is_bcall()
1123 rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, in rpcrdma_decode_msg() argument
1145 r_xprt->rx_stats.fixup_copy_count += in rpcrdma_decode_msg()
1148 r_xprt->rx_stats.total_rdma_reply += writelist; in rpcrdma_decode_msg()
1153 rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) in rpcrdma_decode_nomsg() argument
1173 r_xprt->rx_stats.total_rdma_reply += replychunk; in rpcrdma_decode_nomsg()
1178 rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep, in rpcrdma_decode_error() argument
1206 r_xprt->rx_stats.bad_reply_count++; in rpcrdma_decode_error()
1216 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; in rpcrdma_complete_rqst() local
1217 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_complete_rqst()
1226 status = rpcrdma_decode_msg(r_xprt, rep, rqst); in rpcrdma_complete_rqst()
1229 status = rpcrdma_decode_nomsg(r_xprt, rep); in rpcrdma_complete_rqst()
1232 status = rpcrdma_decode_error(r_xprt, rep, rqst); in rpcrdma_complete_rqst()
1243 xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT; in rpcrdma_complete_rqst()
1258 r_xprt->rx_stats.bad_reply_count++; in rpcrdma_complete_rqst()
1263 void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) in rpcrdma_release_rqst() argument
1273 r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, in rpcrdma_release_rqst()
1283 r_xprt->rx_stats.reply_waits_for_send++; in rpcrdma_release_rqst()
1299 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; in rpcrdma_deferred_completion() local
1303 r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered); in rpcrdma_deferred_completion()
1304 rpcrdma_release_rqst(r_xprt, req); in rpcrdma_deferred_completion()
1315 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; in rpcrdma_reply_handler() local
1316 struct rpc_xprt *xprt = &r_xprt->rx_xprt; in rpcrdma_reply_handler()
1317 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reply_handler()
1342 if (rpcrdma_is_bcall(r_xprt, rep)) in rpcrdma_reply_handler()
1369 rpcrdma_post_recvs(r_xprt, false); in rpcrdma_reply_handler()
1392 rpcrdma_post_recvs(r_xprt, false); in rpcrdma_reply_handler()
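
All of the references above sit in the RPC-over-RDMA marshaling and reply-handling path (rpc_rdma.c) and touch only a handful of fields of struct rpcrdma_xprt. As a rough orientation, below is a sketch of the structure those references imply; the field names are taken from the listing itself, but the member types and comments are reconstructed rather than copied from xprt_rdma.h, so treat the details as approximate.

	/* Sketch only: reconstructed from the fields referenced in the listing.
	 * See xprt_rdma.h in the kernel tree for the authoritative definition. */
	struct rpcrdma_xprt {
		struct rpc_xprt rx_xprt;	/* generic transport (cwnd, rqst lookup) */
		struct rpcrdma_ia rx_ia;	/* adapter: ri_max_inline_write,
						 * ri_max_send_sges, ri_implicit_roundup,
						 * ri_ops->{ro_map, ro_unmap_sync, ro_reminv} */
		struct rpcrdma_buffer rx_buf;	/* rb_max_requests, rb_credits,
						 * send contexts */
		struct rpcrdma_create_data_internal rx_data;	/* connection create data */
		struct rpcrdma_stats rx_stats;	/* read/write/reply chunk counts,
						 * total_rdma_request/reply,
						 * nomsg_call_count, failed_marshal_count,
						 * bad_reply_count, fixup_copy_count,
						 * reply_waits_for_send */
	};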