// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled with fresh pages
 * after the first svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */
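
/* To make the two-call flow above concrete, here is a simplified,
 * hypothetical sketch of the caller's loop. It is not part of this
 * file; svc_recv() is the real caller, and handle_call() stands in
 * for Upper Layer processing of the completed Call in rq_arg.
 *
 *	for (;;) {
 *		int len = svc_rdma_recvfrom(rqstp);
 *
 *		if (len < 0)
 *			break;		// transport or decode error
 *		if (len == 0)
 *			continue;	// no Call ready yet, or RDMA
 *					// Reads were posted; wait for
 *					// the next "Data Ready"
 *		handle_call(rqstp);	// len bytes of RPC Call message
 *	}
 */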

#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

/* Allocate a recv_ctxt and DMA-map its persistent receive buffer. */
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

/* Pull a recv_ctxt off the free list, or allocate a fresh one. */
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		goto out_empty;
	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);

out:
	ctxt->rc_page_count = 0;
	return ctxt;

out_empty:
	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	if (!ctxt->rc_temp)
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}

/* The xprt reference taken here pins the transport until the Receive
 * completes; it is dropped in svc_rdma_wc_receive() or, on a posting
 * error, right here.
 */
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	int ret;

	svc_xprt_get(&rdma->sc_xprt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
	if (ret)
		goto err_post;
	return 0;

err_post:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_put(&rdma->sc_xprt);
	return ret;
}

static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret)
			return false;
	}
	return true;
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	trace_svcrdma_wc_receive(wc);

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	goto out;

flushed:
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
out:
	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))
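
/* Worked example (the values here are assumptions, not definitions
 * from this file): on a system with 4KB pages (PAGE_SHIFT == 12)
 * where RPCSVC_MAXPAGES works out to 259, MAX_BYTES_WRITE_SEG is
 * 259 << 12 == 1060864 bytes, and MAX_BYTES_SPECIAL_SEG is
 * (259 + 2) << 12 == 1069056 bytes. The two extra pages in the
 * special case leave room for head and tail iovecs around a
 * maximally-sized data payload.
 */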

/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
	u32 position;
	bool first;

	first = true;
	while (*p++ != xdr_zero) {
		if (first) {
			position = be32_to_cpup(p++);
			first = false;
		} else if (be32_to_cpup(p++) != position) {
			return NULL;
		}
		p++;	/* handle */
		if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}
	return p;
}
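
/* For reference, the XDR layout each loop iteration above consumes
 * (one 32-bit word per field unless noted; names follow RFC 8166,
 * and the struct is illustrative only, not used by this code):
 *
 *	struct rpcrdma_read_segment {
 *		__be32	rs_position;	// Read list only: where the
 *					// chunk fits in the RPC message
 *		__be32	rs_handle;	// R_key for the remote region
 *		__be32	rs_length;	// segment length, in bytes
 *		__be64	rs_offset;	// remote starting address
 *	};
 *
 * Each list item is preceded by a one-word discriminator; a single
 * xdr_zero word terminates the list. Write and Reply chunk segments
 * use the same layout minus the position field.
 */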

/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
				     u32 maxlen)
{
	u32 i, segcount;

	segcount = be32_to_cpup(p++);
	for (i = 0; i < segcount; i++) {
		p++;	/* handle */
		if (be32_to_cpup(p++) > maxlen)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}

	return p;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
	u32 chcount;

	chcount = 0;
	while (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
		if (!p)
			return NULL;
		if (chcount++ > 1)
			return NULL;
	}
	return p;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
	if (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
		if (!p)
			return NULL;
	}
	return p;
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 *
 * Perform this operation while the received transport header is
 * still in the CPU cache.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	__be32 inv_rkey, *p;
	u32 i, segcount;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = xdr_zero;
	p = ctxt->rc_recv_buf;
	p += rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero) {
		p++;	/* position */
		if (inv_rkey == xdr_zero)
			inv_rkey = *p;
		else if (inv_rkey != *p)
			return;
		p += 4;
	}

	/* Write list */
	while (*p++ != xdr_zero) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	/* Reply chunk */
	if (*p++ != xdr_zero) {
		segcount = be32_to_cpup(p++);
		for (i = 0; i < segcount; i++) {
			if (inv_rkey == xdr_zero)
				inv_rkey = *p;
			else if (inv_rkey != *p)
				return;
			p += 4;
		}
	}

	ctxt->rc_inv_rkey = be32_to_cpu(inv_rkey);
}
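
/* A hedged sketch of how rc_inv_rkey is typically consumed on the
 * Send side (the real logic lives in the sendto path, not in this
 * file): when the field is non-zero, the Send that completes the
 * RPC can use Send With Invalidate, sparing the client a separate
 * local invalidation.
 *
 *	if (ctxt->rc_inv_rkey) {
 *		send_wr->opcode = IB_WR_SEND_WITH_INV;
 *		send_wr->ex.invalidate_rkey = ctxt->rc_inv_rkey;
 *	} else {
 *		send_wr->opcode = IB_WR_SEND;
 *	}
 */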

/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
	__be32 *p, *end, *rdma_argp;
	unsigned int hdr_len;

	/* Verify that there are enough bytes for header + something */
	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
		goto out_short;

	rdma_argp = rq_arg->head[0].iov_base;
	if (*(rdma_argp + 1) != rpcrdma_version)
		goto out_version;

	switch (*(rdma_argp + 3)) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;

	case rdma_done:
		goto out_drop;

	case rdma_error:
		goto out_drop;

	default:
		goto out_proc;
	}

	end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
	p = xdr_check_read_list(rdma_argp + 4, end);
	if (!p)
		goto out_inval;
	p = xdr_check_write_list(p, end);
	if (!p)
		goto out_inval;
	p = xdr_check_reply_chunk(p, end);
	if (!p)
		goto out_inval;
	if (p > end)
		goto out_inval;

	rq_arg->head[0].iov_base = p;
	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short(rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers(rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop(rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc(rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse(rdma_argp);
	return -EINVAL;
}
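
/* For reference, the fixed portion of the transport header that
 * svc_rdma_xdr_decode_req() examines, one XDR word each (field
 * names per RFC 8166):
 *
 *	word 0: rdma_xid	- copied from the RPC Call's XID
 *	word 1: rdma_vers	- must be rpcrdma_version (one)
 *	word 2: rdma_credit	- requested credit limit
 *	word 3: rdma_proc	- rdma_msg, rdma_nomsg, and so on
 *
 * The chunk lists checked above begin at word 4 (rdma_argp + 4).
 */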

static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
				__be32 *rdma_argp, int status)
{
	struct svc_rdma_send_ctxt *ctxt;
	unsigned int length;
	__be32 *p;
	int ret;

	ctxt = svc_rdma_send_ctxt_get(xprt);
	if (!ctxt)
		return;

	p = ctxt->sc_xprt_buf;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = xprt->sc_fc_credits;
	*p++ = rdma_error;
	switch (status) {
	case -EPROTONOSUPPORT:
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p++ = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		*p++ = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}
	length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
	svc_rdma_sync_reply_hdr(xprt, ctxt, length);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
	if (ret)
		svc_rdma_send_ctxt_put(xprt, ctxt);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
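
/* Illustrative layout of the fixed-size header that the check above
 * walks (word indices relative to rdma_resp; this is a description,
 * not a struct used by this code):
 *
 *	word 0:    rdma_xid	- matches the forward channel XID
 *	word 1:    rdma_vers
 *	word 2:    rdma_credit
 *	word 3:    rdma_proc	- must be rdma_msg
 *	words 4-6: empty Read list, Write list, and Reply chunk
 *		   (three xdr_zero discriminators)
 *	word 7:    RPC XID	- must equal word 0
 *	word 8:    msg_type	- must not be RPC_CALL
 */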

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	/* Prevent svc_xprt_release from releasing pages in rq_pages
	 * if we return 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
					       &rqstp->rq_arg);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		return ret;
	}
	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}