/*
 * linux/fs/9p/trans_rdma.c
 *
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 * Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(1024*1024)	/* 1MB */

/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @timeout: Number of msecs to wait for connection management events
 * @privport: Whether a privileged port may be used
 * @port: The port to use
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_sem: Semaphore for the RQ
 * @excess_rc: Amount of posted Receive Contexts without a pending request.
 *	       See rdma_request()
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	long timeout;
	bool privport;
	u16 port;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	struct semaphore rq_sem;
	atomic_t excess_rc;
	struct sockaddr_in addr;
	spinlock_t req_lock;

	struct completion cm_done;
};
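
/*
 * Typical state progression for a transport instance (a sketch derived
 * from the code below, not an exhaustive diagram):
 *
 *   P9_RDMA_INIT -> P9_RDMA_ADDR_RESOLVED -> P9_RDMA_ROUTE_RESOLVED
 *		  -> P9_RDMA_CONNECTED
 *
 * and on tear down or error:
 *
 *   P9_RDMA_CONNECTED -> P9_RDMA_FLUSHING / P9_RDMA_CLOSING -> P9_RDMA_CLOSED
 *
 * Forward transitions are driven by p9_cm_event_handler(); the I/O paths
 * move to FLUSHING (recv_done()) or CLOSING (rdma_request()) on error.
 */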

/**
 * p9_rdma_context - Keeps track of in-process WR
 *
 * @cqe: completion queue entry
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_req;
struct p9_rdma_context {
	struct ib_cqe cqe;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall *rc;
	};
};

/**
 * p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @privport: Whether a privileged port may be used
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 *	      to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	bool privport;
	int sq_depth;
	int rq_depth;
	long timeout;
};

/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout,
	/* Options that take no argument */
	Opt_privport,
	Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};
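
/*
 * Example (hypothetical values) of the transport-specific portion of a
 * mount option string as matched against the table above:
 *
 *   "port=5640,sq=16,rq=32,timeout=30000,privport"
 *
 * Tokens not in this table fall through to Opt_err and are skipped here;
 * they are consumed by the 9p client core instead.
 */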

static int p9_rdma_show_options(struct seq_file *m, struct p9_client *clnt)
{
	struct p9_trans_rdma *rdma = clnt->trans;

	if (rdma->port != P9_PORT)
		seq_printf(m, ",port=%u", rdma->port);
	if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)
		seq_printf(m, ",sq=%u", rdma->sq_depth);
	if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
		seq_printf(m, ",rq=%u", rdma->rq_depth);
	if (rdma->timeout != P9_RDMA_TIMEOUT)
		seq_printf(m, ",timeout=%lu", rdma->timeout);
	if (rdma->privport)
		seq_puts(m, ",privport");
	return 0;
}

/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;
	opts->privport = false;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		case Opt_privport:
			opts->privport = true;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
	kfree(tmp_options);
	return 0;
}

static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		if (c)
			c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}

static void
recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (wc->status != IB_WC_SUCCESS)
		goto err_out;

	c->rc->size = wc->byte_len;
	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	/* Check that we have not yet received a reply for this request. */
	if (unlikely(req->rc)) {
		pr_err("Duplicate reply for request %d\n", tag);
		goto err_out;
	}

	req->rc = c->rc;
	p9_client_cb(client, req, REQ_STATUS_RCVD);

 out:
	up(&rdma->rq_sem);
	kfree(c);
	return;

 err_out:
	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		 req, err, wc->status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
	goto out;
}

static void
send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct p9_client *client = cq->cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	struct p9_rdma_context *c =
		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);

	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc->size,
			    DMA_TO_DEVICE);
	up(&rdma->sq_sem);
	kfree(c);
}

static void qp_event_handler(struct ib_event *event, void *context)
{
	p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n",
		 event->event, context);
}

static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_free_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}

static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	c->cqe.done = recv_done;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, NULL);

 error:
	p9_debug(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}

static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* When an error occurs between posting the recv and the send,
	 * there will be a receive context posted without a pending request.
	 * Since there is no way to "un-post" it, we remember it and skip
	 * post_recv() for the next request.
	 * So here,
	 * see if we are this `next request' and need to absorb an excess rc.
	 * If yes, then drop and free our own, and do not recv_post().
	 */
	if (unlikely(atomic_read(&rdma->excess_rc) > 0)) {
		if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
			/* Got one ! */
			kfree(req->rc);
			req->rc = NULL;
			goto dont_need_post_recv;
		} else {
			/* We raced and lost. */
			atomic_inc(&rdma->excess_rc);
		}
	}

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
	if (!rpl_context) {
		err = -ENOMEM;
		goto recv_error;
	}
	rpl_context->rc = req->rc;

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (down_interruptible(&rdma->rq_sem)) {
		err = -EINTR;
		goto recv_error;
	}

	err = post_recv(client, rpl_context);
	if (err) {
		p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
		goto recv_error;
	}
	/* remove posted receive buffer from request structure */
	req->rc = NULL;

 dont_need_post_recv:
	/* Post the request */
	c = kmalloc(sizeof *c, GFP_NOFS);
	if (!c) {
		err = -ENOMEM;
		goto send_error;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
		err = -EIO;
		goto send_error;
	}

	c->cqe.done = send_done;

	sge.addr = c->busa;
	sge.length = c->req->tc->size;
	sge.lkey = rdma->pd->local_dma_lkey;

	wr.next = NULL;
	wr.wr_cqe = &c->cqe;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (down_interruptible(&rdma->sq_sem)) {
		err = -EINTR;
		goto send_error;
	}

	/* Mark request as `sent' *before* we actually send it,
	 * because doing it after could erase the REQ_STATUS_RCVD
	 * status in case of a very fast reply.
	 */
	req->status = REQ_STATUS_SENT;
	err = ib_post_send(rdma->qp, &wr, NULL);
	if (err)
		goto send_error;

	/* Success */
	return 0;

	/* Handle errors that happened during or while preparing the send: */
 send_error:
	req->status = REQ_STATUS_ERROR;
	kfree(c);
	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);

	/* Ach.
	 * We did recv_post(), but not send. We have one recv_post in excess.
	 */
	atomic_inc(&rdma->excess_rc);
	return err;

	/* Handle errors that happened during or while preparing post_recv(): */
 recv_error:
	kfree(rpl_context);
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}

static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}

/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->port = opts->port;
	rdma->privport = opts->privport;
	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	sema_init(&rdma->rq_sem, rdma->rq_depth);
	atomic_set(&rdma->excess_rc, 0);

	return rdma;
}

static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	/* Nothing to do here.
	 * We will take care of it (if we have to) in rdma_cancelled()
	 */
	return 1;
}

/* A request has been fully flushed without a reply.
 * That means we have posted one buffer in excess.
 */
static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	atomic_inc(&rdma->excess_rc);
	return 0;
}
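
/*
 * Worked example (a sketch): if a Tflush resolves a request and its reply
 * never arrives, the receive buffer posted for it in rdma_request() stays
 * outstanding. Bumping excess_rc above lets a later rdma_request() skip
 * its own post_recv() and absorb that slot instead, keeping the RQ
 * accounting balanced.
 */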

static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
{
	struct sockaddr_in cl = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int port, err = -EINVAL;

	for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) {
		cl.sin_port = htons((ushort)port);
		err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl);
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}

/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;

	if (addr == NULL)
		return -EINVAL;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Bind to a privileged port if we need to */
	if (opts.privport) {
		err = p9_rdma_bind_privport(rdma);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport: %d\n",
			       __func__, task_pid_nr(current), -err);
			goto error;
		}
	}

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
			       opts.sq_depth + opts.rq_depth + 1,
			       0, IB_POLL_SOFTIRQ);
	if (IS_ERR(rdma->cq))
		goto error;

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

 error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}

static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
	.cancelled = rdma_cancelled,
	.show_options = p9_rdma_show_options,
};

/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");
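
/*
 * Example usage (a sketch; the address and mount point are hypothetical):
 *
 *   # load the module so the "rdma" transport is registered
 *   modprobe 9pnet_rdma
 *   # mount a 9p export over RDMA, selecting this transport by name
 *   mount -t 9p -o trans=rdma,port=5640 192.168.0.2 /mnt/9p
 */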