/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR = 0,
	NVMF_OPT_WWNN = 1 << 0,
	NVMF_OPT_WWPN = 1 << 1,
	NVMF_OPT_ROLES = 1 << 2,
	NVMF_OPT_FCADDR = 1 << 3,
	NVMF_OPT_LPWWNN = 1 << 4,
	NVMF_OPT_LPWWPN = 1 << 5,
};

struct fcloop_ctrl_options {
	int mask;
	u64 wwnn;
	u64 wwpn;
	u32 roles;
	u32 fcaddr;
	u64 lpwwnn;
	u64 lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN, "wwnn=%s" },
	{ NVMF_OPT_WWPN, "wwpn=%s" },
	{ NVMF_OPT_ROLES, "roles=%d" },
	{ NVMF_OPT_FCADDR, "fcaddr=%x" },
	{ NVMF_OPT_LPWWNN, "lpwwnn=%s" },
	{ NVMF_OPT_LPWWPN, "lpwwpn=%s" },
	{ NVMF_OPT_ERR, NULL }
};

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};
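
/*
 * Relationship of the structures above, as wired up by the sysfs
 * handlers below: an fcloop_nport pairs one remote-port view
 * (fcloop_rport, seen by the NVMe FC host side) with one target-port
 * view (fcloop_tport, seen by the NVMe FC target side) of the same
 * WWNN/WWPN, and records the fcloop_lport both are attached to. The
 * nport is refcounted; the rport and tport each hold a reference,
 * dropped from their respective *_delete callbacks.
 */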

struct fcloop_lsreq {
	struct fcloop_tport *tport;
	struct nvmefc_ls_req *lsreq;
	struct work_struct work;
	struct nvmefc_tgt_ls_req tgt_ls_req;
	int status;
};

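/*
 * Initiator-side I/O state, tracked in fcloop_fcpreq->inistate:
 * START (queued to fcp_rcv_work) -> ACTIVE (handed to the target
 * transport) -> COMPLETED, with ABORTED reachable from START or
 * ACTIVE when the host aborts before the target completes the I/O.
 */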
enum {
	INI_IO_START = 0,
	INI_IO_ACTIVE = 1,
	INI_IO_ABORTED = 2,
	INI_IO_COMPLETED = 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport *tport;
	struct nvmefc_fcp_req *fcpreq;
	spinlock_t reqlock;
	u16 status;
	u32 inistate;
	bool active;
	bool aborted;
	struct kref ref;
	struct work_struct fcp_rcv_work;
	struct work_struct abort_rcv_work;
	struct work_struct tio_done_work;
	struct nvmefc_tgt_fcp_req tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req *fcpreq;
	struct fcloop_fcpreq *tfcp_req;
	spinlock_t inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}


/*
 * LS response transmit is complete (e.g. the buffers are all set);
 * call back up the initiator's "done" flow.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (!tport || tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

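/*
 * LS request path: the initiator's LS request is handed directly to
 * the paired target port via nvmet_fc_rcv_ls_req(). If no target port
 * is bound to the remote port, the request is failed back to the host
 * asynchronously (via the work item above) with -ECONNREFUSED.
 */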
static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		tls_req->tport = NULL;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

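/*
 * Deliver a host FCP command to the target transport from work-queue
 * context. Deferring the delivery (rather than calling into the
 * target from the fcp_io downcall itself) leaves a window in which a
 * host-issued abort can mark the I/O INI_IO_ABORTED before it ever
 * reaches the target; that case is caught below and failed with
 * -ECANCELED.
 */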
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * The target transport has completed the FCP I/O; call back up the
 * initiator's "done" flow from work-queue context.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


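/*
 * Initiator FCP entry point: allocate the shared fcloop_fcpreq that
 * links the host's nvmefc_fcp_req to the target side, then defer
 * delivery to fcp_rcv_work. The reference taken by kref_init() here
 * is the "original io" reference released in fcloop_call_host_done().
 */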
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}

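/*
 * Copy between the target-side data scatterlist (data_sg) and the
 * host I/O scatterlist (io_sg): skip "offset" bytes into io_sg, then
 * move "length" bytes element by element, advancing whichever list
 * runs out. Direction follows the op: WRITEDATA copies from the host
 * buffers into the target's; reads copy the other way.
 */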
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	while (offset) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else {
			io_p += tlen;
		}
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	while (length) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if (!io_len && length) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else {
			io_p += tlen;
		}

		data_len -= tlen;
		if (!data_len && length) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else {
			data_p += tlen;
		}
	}
}

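/*
 * Target-side data/response op. If the host side has already gone
 * away (tfcp_req->fcpreq is NULL because the initiator aborted the
 * I/O), the op is still completed back to the target transport as if
 * all were well, just without moving any data.
 */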
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * Mark the I/O aborted. This matters only when two threads are
	 * in the transport at once (one doing I/O, the other aborting)
	 * and kills only ops posted after the abort request.
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * Nothing more to do here. If the I/O wasn't active, the
	 * transport should immediately call req_release. If it was
	 * active, the op will complete and the LLDD should call
	 * req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

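/*
 * Host-side abort: take a temporary reference on the shared tfcp_req
 * under inilock, then resolve the race against normal completion via
 * inistate. If the I/O has already completed, just drop the
 * reference; otherwise mark it aborted and let abort_rcv_work carry
 * the reference into the target transport.
 */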
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (abortio) {
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	} else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define FCLOOP_HW_QUEUES	4
#define FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete = fcloop_localport_delete,
	.remoteport_delete = fcloop_remoteport_delete,
	.create_queue = fcloop_create_queue,
	.delete_queue = fcloop_delete_queue,
	.ls_req = fcloop_ls_req,
	.fcp_io = fcloop_fcp_req,
	.ls_abort = fcloop_ls_abort,
	.fcp_abort = fcloop_fcp_abort,
	.max_hw_queues = FCLOOP_HW_QUEUES,
	.max_sgl_segments = FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
	.dma_boundary = FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz = sizeof(struct fcloop_lport_priv),
	.remote_priv_sz = sizeof(struct fcloop_rport),
	.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete = fcloop_targetport_delete,
	.xmt_ls_rsp = fcloop_xmt_ls_rsp,
	.fcp_op = fcloop_fcp_op,
	.fcp_abort = fcloop_tgt_fcp_abort,
	.fcp_req_release = fcloop_fcp_req_release,
	.max_hw_queues = FCLOOP_HW_QUEUES,
	.max_sgl_segments = FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
	.dma_boundary = FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features = 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct fcloop_tport),
};

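/*
 * The store handlers below implement the fcloop control interface.
 * Assuming the class/device names registered in fcloop_init() below
 * ("fcloop"/"ctl"), a loopback port pairing might be set up from the
 * shell roughly as follows (WWN values illustrative):
 *
 *	echo "wwnn=0x...,wwpn=0x..." > /sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x...,wwpn=0x..." > /sys/class/fcloop/ctl/add_target_port
 *	echo "wwnn=0x...,wwpn=0x...,lpwwnn=0x...,lpwwpn=0x..." > \
 *		/sys/class/fcloop/ctl/add_remote_port
 *
 * where lpwwnn/lpwwpn name the previously created local port.
 */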
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;	/* was accidentally "static" */
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

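/*
 * Control files exposed on the "ctl" device. All are write-only
 * (mode 0200) and parse the same option syntax as the handlers above.
 */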
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs = fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");