1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
4 */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/parser.h>
8 #include <uapi/scsi/fc/fc_fs.h>
9
10 #include "../host/nvme.h"
11 #include "../target/nvmet.h"
12 #include <linux/nvme-fc-driver.h>
13 #include <linux/nvme-fc.h>
14
15
16 enum {
17 NVMF_OPT_ERR = 0,
18 NVMF_OPT_WWNN = 1 << 0,
19 NVMF_OPT_WWPN = 1 << 1,
20 NVMF_OPT_ROLES = 1 << 2,
21 NVMF_OPT_FCADDR = 1 << 3,
22 NVMF_OPT_LPWWNN = 1 << 4,
23 NVMF_OPT_LPWWPN = 1 << 5,
24 };
25
26 struct fcloop_ctrl_options {
27 int mask;
28 u64 wwnn;
29 u64 wwpn;
30 u32 roles;
31 u32 fcaddr;
32 u64 lpwwnn;
33 u64 lpwwpn;
34 };
35
36 static const match_table_t opt_tokens = {
37 { NVMF_OPT_WWNN, "wwnn=%s" },
38 { NVMF_OPT_WWPN, "wwpn=%s" },
39 { NVMF_OPT_ROLES, "roles=%d" },
40 { NVMF_OPT_FCADDR, "fcaddr=%x" },
41 { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
42 { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
43 { NVMF_OPT_ERR, NULL }
44 };
45
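/*
 * wwnn/wwpn/lpwwnn/lpwwpn values must be written as "0x" followed by
 * NVME_FC_TRADDR_HEXNAMELEN (16) hex digits, e.g. "0x20000090fa942779";
 * anything else is rejected here.
 */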
46 static int fcloop_verify_addr(substring_t *s)
47 {
48 size_t blen = s->to - s->from + 1;
49
50 if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
51 strncmp(s->from, "0x", 2))
52 return -EINVAL;
53
54 return 0;
55 }
56
57 static int
58 fcloop_parse_options(struct fcloop_ctrl_options *opts,
59 const char *buf)
60 {
61 substring_t args[MAX_OPT_ARGS];
62 char *options, *o, *p;
63 int token, ret = 0;
64 u64 token64;
65
66 options = o = kstrdup(buf, GFP_KERNEL);
67 if (!options)
68 return -ENOMEM;
69
70 while ((p = strsep(&o, ",\n")) != NULL) {
71 if (!*p)
72 continue;
73
74 token = match_token(p, opt_tokens, args);
75 opts->mask |= token;
76 switch (token) {
77 case NVMF_OPT_WWNN:
78 if (fcloop_verify_addr(args) ||
79 match_u64(args, &token64)) {
80 ret = -EINVAL;
81 goto out_free_options;
82 }
83 opts->wwnn = token64;
84 break;
85 case NVMF_OPT_WWPN:
86 if (fcloop_verify_addr(args) ||
87 match_u64(args, &token64)) {
88 ret = -EINVAL;
89 goto out_free_options;
90 }
91 opts->wwpn = token64;
92 break;
93 case NVMF_OPT_ROLES:
94 if (match_int(args, &token)) {
95 ret = -EINVAL;
96 goto out_free_options;
97 }
98 opts->roles = token;
99 break;
100 case NVMF_OPT_FCADDR:
101 if (match_hex(args, &token)) {
102 ret = -EINVAL;
103 goto out_free_options;
104 }
105 opts->fcaddr = token;
106 break;
107 case NVMF_OPT_LPWWNN:
108 if (fcloop_verify_addr(args) ||
109 match_u64(args, &token64)) {
110 ret = -EINVAL;
111 goto out_free_options;
112 }
113 opts->lpwwnn = token64;
114 break;
115 case NVMF_OPT_LPWWPN:
116 if (fcloop_verify_addr(args) ||
117 match_u64(args, &token64)) {
118 ret = -EINVAL;
119 goto out_free_options;
120 }
121 opts->lpwwpn = token64;
122 break;
123 default:
124 pr_warn("unknown parameter or missing value '%s'\n", p);
125 ret = -EINVAL;
126 goto out_free_options;
127 }
128 }
129
130 out_free_options:
131 kfree(options);
132 return ret;
133 }
134
135
136 static int
137 fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
138 const char *buf)
139 {
140 substring_t args[MAX_OPT_ARGS];
141 char *options, *o, *p;
142 int token, ret = 0;
143 u64 token64;
144
145 *nname = -1;
146 *pname = -1;
147
148 options = o = kstrdup(buf, GFP_KERNEL);
149 if (!options)
150 return -ENOMEM;
151
152 while ((p = strsep(&o, ",\n")) != NULL) {
153 if (!*p)
154 continue;
155
156 token = match_token(p, opt_tokens, args);
157 switch (token) {
158 case NVMF_OPT_WWNN:
159 if (fcloop_verify_addr(args) ||
160 match_u64(args, &token64)) {
161 ret = -EINVAL;
162 goto out_free_options;
163 }
164 *nname = token64;
165 break;
166 case NVMF_OPT_WWPN:
167 if (fcloop_verify_addr(args) ||
168 match_u64(args, &token64)) {
169 ret = -EINVAL;
170 goto out_free_options;
171 }
172 *pname = token64;
173 break;
174 default:
175 pr_warn("unknown parameter or missing value '%s'\n", p);
176 ret = -EINVAL;
177 goto out_free_options;
178 }
179 }
180
181 out_free_options:
182 kfree(options);
183
184 if (!ret) {
185 if (*nname == -1)
186 return -EINVAL;
187 if (*pname == -1)
188 return -EINVAL;
189 }
190
191 return ret;
192 }
193
194
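/* option sets required by the add_local_port/add_remote_port/add_target_port interfaces */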
195 #define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
196
197 #define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
198 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
199
200 #define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
201
202
203 static DEFINE_SPINLOCK(fcloop_lock);
204 static LIST_HEAD(fcloop_lports);
205 static LIST_HEAD(fcloop_nports);
206
207 struct fcloop_lport {
208 struct nvme_fc_local_port *localport;
209 struct list_head lport_list;
210 struct completion unreg_done;
211 };
212
213 struct fcloop_lport_priv {
214 struct fcloop_lport *lport;
215 };
216
217 struct fcloop_rport {
218 struct nvme_fc_remote_port *remoteport;
219 struct nvmet_fc_target_port *targetport;
220 struct fcloop_nport *nport;
221 struct fcloop_lport *lport;
222 spinlock_t lock;
223 struct list_head ls_list;
224 struct work_struct ls_work;
225 };
226
227 struct fcloop_tport {
228 struct nvmet_fc_target_port *targetport;
229 struct nvme_fc_remote_port *remoteport;
230 struct fcloop_nport *nport;
231 struct fcloop_lport *lport;
232 spinlock_t lock;
233 struct list_head ls_list;
234 struct work_struct ls_work;
235 };
236
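/*
 * An nport ties together the host-side remote port (rport) and the
 * nvmet-side target port (tport) that share the same wwnn/wwpn, so the
 * loopback can route LS and FCP traffic between the two.
 */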
237 struct fcloop_nport {
238 struct fcloop_rport *rport;
239 struct fcloop_tport *tport;
240 struct fcloop_lport *lport;
241 struct list_head nport_list;
242 struct kref ref;
243 u64 node_name;
244 u64 port_name;
245 u32 port_role;
246 u32 port_id;
247 };
248
249 struct fcloop_lsreq {
250 struct nvmefc_ls_req *lsreq;
251 struct nvmefc_ls_rsp ls_rsp;
252 int lsdir; /* H2T or T2H */
253 int status;
254 struct list_head ls_list; /* fcloop_rport->ls_list */
255 };
256
257 struct fcloop_rscn {
258 struct fcloop_tport *tport;
259 struct work_struct work;
260 };
261
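/*
 * Initiator-side state of an fcloop FCP request (fcloop_fcpreq->inistate,
 * protected by reqlock): START = queued for delivery to the target,
 * ACTIVE = handed to nvmet-fc, ABORTED = host abort seen first,
 * COMPLETED = target completion already delivered back to the host.
 */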
262 enum {
263 INI_IO_START = 0,
264 INI_IO_ACTIVE = 1,
265 INI_IO_ABORTED = 2,
266 INI_IO_COMPLETED = 3,
267 };
268
269 struct fcloop_fcpreq {
270 struct fcloop_tport *tport;
271 struct nvmefc_fcp_req *fcpreq;
272 spinlock_t reqlock;
273 u16 status;
274 u32 inistate;
275 bool active;
276 bool aborted;
277 struct kref ref;
278 struct work_struct fcp_rcv_work;
279 struct work_struct abort_rcv_work;
280 struct work_struct tio_done_work;
281 struct nvmefc_tgt_fcp_req tgt_fcp_req;
282 };
283
284 struct fcloop_ini_fcpreq {
285 struct nvmefc_fcp_req *fcpreq;
286 struct fcloop_fcpreq *tfcp_req;
287 spinlock_t inilock;
288 };
289
290 static inline struct fcloop_lsreq *
291 ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
292 {
293 return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
294 }
295
296 static inline struct fcloop_fcpreq *
297 tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
298 {
299 return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
300 }
301
302
303 static int
304 fcloop_create_queue(struct nvme_fc_local_port *localport,
305 unsigned int qidx, u16 qsize,
306 void **handle)
307 {
308 *handle = localport;
309 return 0;
310 }
311
312 static void
313 fcloop_delete_queue(struct nvme_fc_local_port *localport,
314 unsigned int idx, void *handle)
315 {
316 }
317
318 static void
319 fcloop_rport_lsrqst_work(struct work_struct *work)
320 {
321 struct fcloop_rport *rport =
322 container_of(work, struct fcloop_rport, ls_work);
323 struct fcloop_lsreq *tls_req;
324
325 spin_lock(&rport->lock);
326 for (;;) {
327 tls_req = list_first_entry_or_null(&rport->ls_list,
328 struct fcloop_lsreq, ls_list);
329 if (!tls_req)
330 break;
331
332 list_del(&tls_req->ls_list);
333 spin_unlock(&rport->lock);
334
335 tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
336 /*
337 * callee may free memory containing tls_req.
338 * do not reference lsreq after this.
339 */
340
341 spin_lock(&rport->lock);
342 }
343 spin_unlock(&rport->lock);
344 }
345
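/*
 * Host->target LS request: hand the request buffer to nvmet-fc. If no
 * target port is attached, queue a -ECONNREFUSED completion on the
 * rport's ls_work so the host still gets its done() callback.
 */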
346 static int
347 fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
348 struct nvme_fc_remote_port *remoteport,
349 struct nvmefc_ls_req *lsreq)
350 {
351 struct fcloop_lsreq *tls_req = lsreq->private;
352 struct fcloop_rport *rport = remoteport->private;
353 int ret = 0;
354
355 tls_req->lsreq = lsreq;
356 INIT_LIST_HEAD(&tls_req->ls_list);
357
358 if (!rport->targetport) {
359 tls_req->status = -ECONNREFUSED;
360 spin_lock(&rport->lock);
361 list_add_tail(&rport->ls_list, &tls_req->ls_list);
362 spin_unlock(&rport->lock);
363 queue_work(nvmet_wq, &rport->ls_work);
364 return ret;
365 }
366
367 tls_req->status = 0;
368 ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
369 &tls_req->ls_rsp,
370 lsreq->rqstaddr, lsreq->rqstlen);
371
372 return ret;
373 }
374
375 static int
376 fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
377 struct nvmefc_ls_rsp *lsrsp)
378 {
379 struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
380 struct nvmefc_ls_req *lsreq = tls_req->lsreq;
381 struct fcloop_tport *tport = targetport->private;
382 struct nvme_fc_remote_port *remoteport = tport->remoteport;
383 struct fcloop_rport *rport;
384
385 memcpy(lsreq->rspaddr, lsrsp->rspbuf,
386 ((lsreq->rsplen < lsrsp->rsplen) ?
387 lsreq->rsplen : lsrsp->rsplen));
388
389 lsrsp->done(lsrsp);
390
391 if (remoteport) {
392 rport = remoteport->private;
393 spin_lock(&rport->lock);
394 list_add_tail(&rport->ls_list, &tls_req->ls_list);
395 spin_unlock(&rport->lock);
396 queue_work(nvmet_wq, &rport->ls_work);
397 }
398
399 return 0;
400 }
401
402 static void
403 fcloop_tport_lsrqst_work(struct work_struct *work)
404 {
405 struct fcloop_tport *tport =
406 container_of(work, struct fcloop_tport, ls_work);
407 struct fcloop_lsreq *tls_req;
408
409 spin_lock(&tport->lock);
410 for (;;) {
411 tls_req = list_first_entry_or_null(&tport->ls_list,
412 struct fcloop_lsreq, ls_list);
413 if (!tls_req)
414 break;
415
416 list_del(&tls_req->ls_list);
417 spin_unlock(&tport->lock);
418
419 tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
420 /*
421 * callee may free memory containing tls_req.
422 * do not reference lsreq after this.
423 */
424
425 spin_lock(&tport->lock);
426 }
427 spin_unlock(&tport->lock);
428 }
429
430 static int
431 fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
432 struct nvmefc_ls_req *lsreq)
433 {
434 struct fcloop_lsreq *tls_req = lsreq->private;
435 struct fcloop_tport *tport = targetport->private;
436 int ret = 0;
437
438 /*
439 * hosthandle should be the dst.rport value.
440 * hosthandle ignored as fcloop currently is
441 * 1:1 tgtport vs remoteport
442 */
443 tls_req->lsreq = lsreq;
444 INIT_LIST_HEAD(&tls_req->ls_list);
445
446 if (!tport->remoteport) {
447 tls_req->status = -ECONNREFUSED;
448 spin_lock(&tport->lock);
449 list_add_tail(&tport->ls_list, &tls_req->ls_list);
450 spin_unlock(&tport->lock);
451 queue_work(nvmet_wq, &tport->ls_work);
452 return ret;
453 }
454
455 tls_req->status = 0;
456 ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
457 lsreq->rqstaddr, lsreq->rqstlen);
458
459 return ret;
460 }
461
462 static int
463 fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
464 struct nvme_fc_remote_port *remoteport,
465 struct nvmefc_ls_rsp *lsrsp)
466 {
467 struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
468 struct nvmefc_ls_req *lsreq = tls_req->lsreq;
469 struct fcloop_rport *rport = remoteport->private;
470 struct nvmet_fc_target_port *targetport = rport->targetport;
471 struct fcloop_tport *tport;
472
473 memcpy(lsreq->rspaddr, lsrsp->rspbuf,
474 ((lsreq->rsplen < lsrsp->rsplen) ?
475 lsreq->rsplen : lsrsp->rsplen));
476 lsrsp->done(lsrsp);
477
478 if (targetport) {
479 tport = targetport->private;
480 spin_lock(&tport->lock);
481 list_add_tail(&tport->ls_list, &tls_req->ls_list);
482 spin_unlock(&tport->lock);
483 queue_work(nvmet_wq, &tport->ls_work);
484 }
485
486 return 0;
487 }
488
489 static void
490 fcloop_t2h_host_release(void *hosthandle)
491 {
492 /* host handle ignored for now */
493 }
494
495 /*
496 * Simulate reception of an RSCN and convert it into an initiator
497 * transport call to rescan the remote port.
498 */
499 static void
500 fcloop_tgt_rscn_work(struct work_struct *work)
501 {
502 struct fcloop_rscn *tgt_rscn =
503 container_of(work, struct fcloop_rscn, work);
504 struct fcloop_tport *tport = tgt_rscn->tport;
505
506 if (tport->remoteport)
507 nvme_fc_rescan_remoteport(tport->remoteport);
508 kfree(tgt_rscn);
509 }
510
511 static void
512 fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
513 {
514 struct fcloop_rscn *tgt_rscn;
515
516 tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
517 if (!tgt_rscn)
518 return;
519
520 tgt_rscn->tport = tgtport->private;
521 INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
522
523 queue_work(nvmet_wq, &tgt_rscn->work);
524 }
525
526 static void
527 fcloop_tfcp_req_free(struct kref *ref)
528 {
529 struct fcloop_fcpreq *tfcp_req =
530 container_of(ref, struct fcloop_fcpreq, ref);
531
532 kfree(tfcp_req);
533 }
534
535 static void
536 fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
537 {
538 kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
539 }
540
541 static int
542 fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
543 {
544 return kref_get_unless_zero(&tfcp_req->ref);
545 }
546
547 static void
548 fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
549 struct fcloop_fcpreq *tfcp_req, int status)
550 {
551 struct fcloop_ini_fcpreq *inireq = NULL;
552
553 if (fcpreq) {
554 inireq = fcpreq->private;
555 spin_lock(&inireq->inilock);
556 inireq->tfcp_req = NULL;
557 spin_unlock(&inireq->inilock);
558
559 fcpreq->status = status;
560 fcpreq->done(fcpreq);
561 }
562
563 /* release original io reference on tgt struct */
564 fcloop_tfcp_req_put(tfcp_req);
565 }
566
567 static bool drop_fabric_opcode;
568 #define DROP_OPCODE_MASK 0x00FF
569 /* a fabrics opcode is selected by setting a bit above the 1st byte */
570 static int drop_opcode = -1;
571 static int drop_instance;
572 static int drop_amount;
573 static int drop_current_cnt;
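/*
 * These are configured through the set_cmd_drop attribute (see
 * fcloop_set_cmd_drop() below), e.g. assuming the default "ctl" device:
 *   echo "<opcode>:<nth instance>:<count>" > /sys/class/fcloop/ctl/set_cmd_drop
 * An opcode with a bit set above the low byte selects a fabrics fctype
 * instead of an admin/io opcode.
 */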
574
575 /*
576 * Parse the io cmd and decide whether it should be dropped.
577 * Returns:
578 * 0 if the io is not to be dropped
579 * 1 if the io is to be dropped
580 */
581 static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
582 {
583 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
584 struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
585 struct nvme_command *sqe = &cmdiu->sqe;
586
587 if (drop_opcode == -1)
588 return 0;
589
590 pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
591 "inst %d start %d amt %d\n",
592 __func__, sqe->common.opcode, sqe->fabrics.fctype,
593 drop_fabric_opcode ? "y" : "n",
594 drop_opcode, drop_current_cnt, drop_instance, drop_amount);
595
596 if ((drop_fabric_opcode &&
597 (sqe->common.opcode != nvme_fabrics_command ||
598 sqe->fabrics.fctype != drop_opcode)) ||
599 (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
600 return 0;
601
602 if (++drop_current_cnt >= drop_instance) {
603 if (drop_current_cnt >= drop_instance + drop_amount)
604 drop_opcode = -1;
605 return 1;
606 }
607
608 return 0;
609 }
610
611 static void
612 fcloop_fcp_recv_work(struct work_struct *work)
613 {
614 struct fcloop_fcpreq *tfcp_req =
615 container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
616 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
617 unsigned long flags;
618 int ret = 0;
619 bool aborted = false;
620
621 spin_lock_irqsave(&tfcp_req->reqlock, flags);
622 switch (tfcp_req->inistate) {
623 case INI_IO_START:
624 tfcp_req->inistate = INI_IO_ACTIVE;
625 break;
626 case INI_IO_ABORTED:
627 aborted = true;
628 break;
629 default:
630 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
631 WARN_ON(1);
632 return;
633 }
634 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
635
636 if (unlikely(aborted))
637 ret = -ECANCELED;
638 else {
639 if (likely(!check_for_drop(tfcp_req)))
640 ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
641 &tfcp_req->tgt_fcp_req,
642 fcpreq->cmdaddr, fcpreq->cmdlen);
643 else
644 pr_info("%s: dropped command ********\n", __func__);
645 }
646 if (ret)
647 fcloop_call_host_done(fcpreq, tfcp_req, ret);
648 }
649
650 static void
651 fcloop_fcp_abort_recv_work(struct work_struct *work)
652 {
653 struct fcloop_fcpreq *tfcp_req =
654 container_of(work, struct fcloop_fcpreq, abort_rcv_work);
655 struct nvmefc_fcp_req *fcpreq;
656 bool completed = false;
657 unsigned long flags;
658
659 spin_lock_irqsave(&tfcp_req->reqlock, flags);
660 fcpreq = tfcp_req->fcpreq;
661 switch (tfcp_req->inistate) {
662 case INI_IO_ABORTED:
663 break;
664 case INI_IO_COMPLETED:
665 completed = true;
666 break;
667 default:
668 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
669 WARN_ON(1);
670 return;
671 }
672 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
673
674 if (unlikely(completed)) {
675 /* remove reference taken in original abort downcall */
676 fcloop_tfcp_req_put(tfcp_req);
677 return;
678 }
679
680 if (tfcp_req->tport->targetport)
681 nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
682 &tfcp_req->tgt_fcp_req);
683
684 spin_lock_irqsave(&tfcp_req->reqlock, flags);
685 tfcp_req->fcpreq = NULL;
686 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
687
688 fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
689 /* call_host_done releases reference for abort downcall */
690 }
691
692 /*
693 * The FCP io has been completed by the target;
694 * call back up the initiator "done" flow.
695 */
696 static void
697 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
698 {
699 struct fcloop_fcpreq *tfcp_req =
700 container_of(work, struct fcloop_fcpreq, tio_done_work);
701 struct nvmefc_fcp_req *fcpreq;
702 unsigned long flags;
703
704 spin_lock_irqsave(&tfcp_req->reqlock, flags);
705 fcpreq = tfcp_req->fcpreq;
706 tfcp_req->inistate = INI_IO_COMPLETED;
707 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
708
709 fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
710 }
711
712
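/*
 * Host FCP io entry point: allocate the loopback request, link it to the
 * initiator request, and bounce it to the target side via fcp_rcv_work.
 * The kref taken here is the "original io" reference dropped in
 * fcloop_call_host_done().
 */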
713 static int
714 fcloop_fcp_req(struct nvme_fc_local_port *localport,
715 struct nvme_fc_remote_port *remoteport,
716 void *hw_queue_handle,
717 struct nvmefc_fcp_req *fcpreq)
718 {
719 struct fcloop_rport *rport = remoteport->private;
720 struct fcloop_ini_fcpreq *inireq = fcpreq->private;
721 struct fcloop_fcpreq *tfcp_req;
722
723 if (!rport->targetport)
724 return -ECONNREFUSED;
725
726 tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
727 if (!tfcp_req)
728 return -ENOMEM;
729
730 inireq->fcpreq = fcpreq;
731 inireq->tfcp_req = tfcp_req;
732 spin_lock_init(&inireq->inilock);
733
734 tfcp_req->fcpreq = fcpreq;
735 tfcp_req->tport = rport->targetport->private;
736 tfcp_req->inistate = INI_IO_START;
737 spin_lock_init(&tfcp_req->reqlock);
738 INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
739 INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
740 INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
741 kref_init(&tfcp_req->ref);
742
743 queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);
744
745 return 0;
746 }
747
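/*
 * Copy between the target-side scatterlist (data_sg) and the initiator
 * io scatterlist (io_sg), after skipping "offset" bytes of the io sgl.
 * For NVMET_FCOP_WRITEDATA the copy is io -> data; for read data it is
 * data -> io.
 */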
748 static void
749 fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
750 struct scatterlist *io_sg, u32 offset, u32 length)
751 {
752 void *data_p, *io_p;
753 u32 data_len, io_len, tlen;
754
755 io_p = sg_virt(io_sg);
756 io_len = io_sg->length;
757
758 for ( ; offset; ) {
759 tlen = min_t(u32, offset, io_len);
760 offset -= tlen;
761 io_len -= tlen;
762 if (!io_len) {
763 io_sg = sg_next(io_sg);
764 io_p = sg_virt(io_sg);
765 io_len = io_sg->length;
766 } else
767 io_p += tlen;
768 }
769
770 data_p = sg_virt(data_sg);
771 data_len = data_sg->length;
772
773 for ( ; length; ) {
774 tlen = min_t(u32, io_len, data_len);
775 tlen = min_t(u32, tlen, length);
776
777 if (op == NVMET_FCOP_WRITEDATA)
778 memcpy(data_p, io_p, tlen);
779 else
780 memcpy(io_p, data_p, tlen);
781
782 length -= tlen;
783
784 io_len -= tlen;
785 if ((!io_len) && (length)) {
786 io_sg = sg_next(io_sg);
787 io_p = sg_virt(io_sg);
788 io_len = io_sg->length;
789 } else
790 io_p += tlen;
791
792 data_len -= tlen;
793 if ((!data_len) && (length)) {
794 data_sg = sg_next(data_sg);
795 data_p = sg_virt(data_sg);
796 data_len = data_sg->length;
797 } else
798 data_p += tlen;
799 }
800 }
801
802 static int
803 fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
804 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
805 {
806 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
807 struct nvmefc_fcp_req *fcpreq;
808 u32 rsplen = 0, xfrlen = 0;
809 int fcp_err = 0, active, aborted;
810 u8 op = tgt_fcpreq->op;
811 unsigned long flags;
812
813 spin_lock_irqsave(&tfcp_req->reqlock, flags);
814 fcpreq = tfcp_req->fcpreq;
815 active = tfcp_req->active;
816 aborted = tfcp_req->aborted;
817 tfcp_req->active = true;
818 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
819
820 if (unlikely(active))
821 /* illegal - call while i/o active */
822 return -EALREADY;
823
824 if (unlikely(aborted)) {
825 /* target transport has aborted i/o prior */
826 spin_lock_irqsave(&tfcp_req->reqlock, flags);
827 tfcp_req->active = false;
828 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
829 tgt_fcpreq->transferred_length = 0;
830 tgt_fcpreq->fcp_error = -ECANCELED;
831 tgt_fcpreq->done(tgt_fcpreq);
832 return 0;
833 }
834
835 /*
836 * if fcpreq is NULL, the I/O has been aborted (from
837 * initiator side). For the target side, act as if all is well
838 * but don't actually move data.
839 */
840
841 switch (op) {
842 case NVMET_FCOP_WRITEDATA:
843 xfrlen = tgt_fcpreq->transfer_length;
844 if (fcpreq) {
845 fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
846 fcpreq->first_sgl, tgt_fcpreq->offset,
847 xfrlen);
848 fcpreq->transferred_length += xfrlen;
849 }
850 break;
851
852 case NVMET_FCOP_READDATA:
853 case NVMET_FCOP_READDATA_RSP:
854 xfrlen = tgt_fcpreq->transfer_length;
855 if (fcpreq) {
856 fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
857 fcpreq->first_sgl, tgt_fcpreq->offset,
858 xfrlen);
859 fcpreq->transferred_length += xfrlen;
860 }
861 if (op == NVMET_FCOP_READDATA)
862 break;
863
864 /* Fall-Thru to RSP handling */
865 fallthrough;
866
867 case NVMET_FCOP_RSP:
868 if (fcpreq) {
869 rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
870 fcpreq->rsplen : tgt_fcpreq->rsplen);
871 memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
872 if (rsplen < tgt_fcpreq->rsplen)
873 fcp_err = -E2BIG;
874 fcpreq->rcv_rsplen = rsplen;
875 fcpreq->status = 0;
876 }
877 tfcp_req->status = 0;
878 break;
879
880 default:
881 fcp_err = -EINVAL;
882 break;
883 }
884
885 spin_lock_irqsave(&tfcp_req->reqlock, flags);
886 tfcp_req->active = false;
887 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
888
889 tgt_fcpreq->transferred_length = xfrlen;
890 tgt_fcpreq->fcp_error = fcp_err;
891 tgt_fcpreq->done(tgt_fcpreq);
892
893 return 0;
894 }
895
896 static void
897 fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
898 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
899 {
900 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
901 unsigned long flags;
902
903 /*
904 * Mark the io aborted only to handle the case where two threads are
905 * in the transport at once (one doing io, the other doing abort); it
906 * only kills ops posted after the abort request.
907 */
908 spin_lock_irqsave(&tfcp_req->reqlock, flags);
909 tfcp_req->aborted = true;
910 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
911
912 tfcp_req->status = NVME_SC_INTERNAL;
913
914 /*
915 * nothing more to do. If io wasn't active, the transport should
916 * immediately call the req_release. If it was active, the op
917 * will complete, and the lldd should call req_release.
918 */
919 }
920
921 static void
922 fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
923 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
924 {
925 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
926
927 queue_work(nvmet_wq, &tfcp_req->tio_done_work);
928 }
929
930 static void
931 fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
932 struct nvme_fc_remote_port *remoteport,
933 struct nvmefc_ls_req *lsreq)
934 {
935 }
936
937 static void
938 fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
939 void *hosthandle, struct nvmefc_ls_req *lsreq)
940 {
941 }
942
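/*
 * Host-side abort of an outstanding FCP io: detach it from the initiator
 * request and, unless the io already completed, kick abort_rcv_work to
 * deliver the abort to the target side.
 */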
943 static void
944 fcloop_fcp_abort(struct nvme_fc_local_port *localport,
945 struct nvme_fc_remote_port *remoteport,
946 void *hw_queue_handle,
947 struct nvmefc_fcp_req *fcpreq)
948 {
949 struct fcloop_ini_fcpreq *inireq = fcpreq->private;
950 struct fcloop_fcpreq *tfcp_req;
951 bool abortio = true;
952 unsigned long flags;
953
954 spin_lock(&inireq->inilock);
955 tfcp_req = inireq->tfcp_req;
956 if (tfcp_req)
957 fcloop_tfcp_req_get(tfcp_req);
958 spin_unlock(&inireq->inilock);
959
960 if (!tfcp_req)
961 /* abort has already been called */
962 return;
963
964 /* break initiator/target relationship for io */
965 spin_lock_irqsave(&tfcp_req->reqlock, flags);
966 switch (tfcp_req->inistate) {
967 case INI_IO_START:
968 case INI_IO_ACTIVE:
969 tfcp_req->inistate = INI_IO_ABORTED;
970 break;
971 case INI_IO_COMPLETED:
972 abortio = false;
973 break;
974 default:
975 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
976 WARN_ON(1);
977 return;
978 }
979 spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
980
981 if (abortio)
982 /* leave the reference while the work item is scheduled */
983 WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
984 else {
985 /*
986 * the io has already had its done callback made; nothing more to
987 * do, so drop the reference taken above.
988 */
989 fcloop_tfcp_req_put(tfcp_req);
990 }
991 }
992
993 static void
994 fcloop_nport_free(struct kref *ref)
995 {
996 struct fcloop_nport *nport =
997 container_of(ref, struct fcloop_nport, ref);
998 unsigned long flags;
999
1000 spin_lock_irqsave(&fcloop_lock, flags);
1001 list_del(&nport->nport_list);
1002 spin_unlock_irqrestore(&fcloop_lock, flags);
1003
1004 kfree(nport);
1005 }
1006
1007 static void
1008 fcloop_nport_put(struct fcloop_nport *nport)
1009 {
1010 kref_put(&nport->ref, fcloop_nport_free);
1011 }
1012
1013 static int
1014 fcloop_nport_get(struct fcloop_nport *nport)
1015 {
1016 return kref_get_unless_zero(&nport->ref);
1017 }
1018
1019 static void
1020 fcloop_localport_delete(struct nvme_fc_local_port *localport)
1021 {
1022 struct fcloop_lport_priv *lport_priv = localport->private;
1023 struct fcloop_lport *lport = lport_priv->lport;
1024
1025 /* release any threads waiting for the unreg to complete */
1026 complete(&lport->unreg_done);
1027 }
1028
1029 static void
1030 fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
1031 {
1032 struct fcloop_rport *rport = remoteport->private;
1033
1034 flush_work(&rport->ls_work);
1035 fcloop_nport_put(rport->nport);
1036 }
1037
1038 static void
1039 fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
1040 {
1041 struct fcloop_tport *tport = targetport->private;
1042
1043 flush_work(&tport->ls_work);
1044 fcloop_nport_put(tport->nport);
1045 }
1046
1047 #define FCLOOP_HW_QUEUES 4
1048 #define FCLOOP_SGL_SEGS 256
1049 #define FCLOOP_DMABOUND_4G 0xFFFFFFFF
1050
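/*
 * LLDD-facing templates: fctemplate is registered with the nvme-fc host
 * transport, tgttemplate with nvmet-fc, wiring up both ends of the
 * loopback.
 */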
1051 static struct nvme_fc_port_template fctemplate = {
1052 .localport_delete = fcloop_localport_delete,
1053 .remoteport_delete = fcloop_remoteport_delete,
1054 .create_queue = fcloop_create_queue,
1055 .delete_queue = fcloop_delete_queue,
1056 .ls_req = fcloop_h2t_ls_req,
1057 .fcp_io = fcloop_fcp_req,
1058 .ls_abort = fcloop_h2t_ls_abort,
1059 .fcp_abort = fcloop_fcp_abort,
1060 .xmt_ls_rsp = fcloop_t2h_xmt_ls_rsp,
1061 .max_hw_queues = FCLOOP_HW_QUEUES,
1062 .max_sgl_segments = FCLOOP_SGL_SEGS,
1063 .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
1064 .dma_boundary = FCLOOP_DMABOUND_4G,
1065 /* sizes of additional private data for data structures */
1066 .local_priv_sz = sizeof(struct fcloop_lport_priv),
1067 .remote_priv_sz = sizeof(struct fcloop_rport),
1068 .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
1069 .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
1070 };
1071
1072 static struct nvmet_fc_target_template tgttemplate = {
1073 .targetport_delete = fcloop_targetport_delete,
1074 .xmt_ls_rsp = fcloop_h2t_xmt_ls_rsp,
1075 .fcp_op = fcloop_fcp_op,
1076 .fcp_abort = fcloop_tgt_fcp_abort,
1077 .fcp_req_release = fcloop_fcp_req_release,
1078 .discovery_event = fcloop_tgt_discovery_evt,
1079 .ls_req = fcloop_t2h_ls_req,
1080 .ls_abort = fcloop_t2h_ls_abort,
1081 .host_release = fcloop_t2h_host_release,
1082 .max_hw_queues = FCLOOP_HW_QUEUES,
1083 .max_sgl_segments = FCLOOP_SGL_SEGS,
1084 .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
1085 .dma_boundary = FCLOOP_DMABOUND_4G,
1086 /* optional features */
1087 .target_features = 0,
1088 /* sizes of additional private data for data structures */
1089 .target_priv_sz = sizeof(struct fcloop_tport),
1090 .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
1091 };
1092
1093 static ssize_t
1094 fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
1095 const char *buf, size_t count)
1096 {
1097 struct nvme_fc_port_info pinfo;
1098 struct fcloop_ctrl_options *opts;
1099 struct nvme_fc_local_port *localport;
1100 struct fcloop_lport *lport;
1101 struct fcloop_lport_priv *lport_priv;
1102 unsigned long flags;
1103 int ret = -ENOMEM;
1104
1105 lport = kzalloc(sizeof(*lport), GFP_KERNEL);
1106 if (!lport)
1107 return -ENOMEM;
1108
1109 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
1110 if (!opts)
1111 goto out_free_lport;
1112
1113 ret = fcloop_parse_options(opts, buf);
1114 if (ret)
1115 goto out_free_opts;
1116
1117 /* everything there ? */
1118 if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
1119 ret = -EINVAL;
1120 goto out_free_opts;
1121 }
1122
1123 memset(&pinfo, 0, sizeof(pinfo));
1124 pinfo.node_name = opts->wwnn;
1125 pinfo.port_name = opts->wwpn;
1126 pinfo.port_role = opts->roles;
1127 pinfo.port_id = opts->fcaddr;
1128
1129 ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
1130 if (!ret) {
1131 /* success */
1132 lport_priv = localport->private;
1133 lport_priv->lport = lport;
1134
1135 lport->localport = localport;
1136 INIT_LIST_HEAD(&lport->lport_list);
1137
1138 spin_lock_irqsave(&fcloop_lock, flags);
1139 list_add_tail(&lport->lport_list, &fcloop_lports);
1140 spin_unlock_irqrestore(&fcloop_lock, flags);
1141 }
1142
1143 out_free_opts:
1144 kfree(opts);
1145 out_free_lport:
1146 /* free only if we're going to fail */
1147 if (ret)
1148 kfree(lport);
1149
1150 return ret ? ret : count;
1151 }
1152
1153
1154 static void
1155 __unlink_local_port(struct fcloop_lport *lport)
1156 {
1157 list_del(&lport->lport_list);
1158 }
1159
1160 static int
1161 __wait_localport_unreg(struct fcloop_lport *lport)
1162 {
1163 int ret;
1164
1165 init_completion(&lport->unreg_done);
1166
1167 ret = nvme_fc_unregister_localport(lport->localport);
1168
1169 if (!ret)
1170 wait_for_completion(&lport->unreg_done);
1171
1172 kfree(lport);
1173
1174 return ret;
1175 }
1176
1177
1178 static ssize_t
1179 fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
1180 const char *buf, size_t count)
1181 {
1182 struct fcloop_lport *tlport, *lport = NULL;
1183 u64 nodename, portname;
1184 unsigned long flags;
1185 int ret;
1186
1187 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1188 if (ret)
1189 return ret;
1190
1191 spin_lock_irqsave(&fcloop_lock, flags);
1192
1193 list_for_each_entry(tlport, &fcloop_lports, lport_list) {
1194 if (tlport->localport->node_name == nodename &&
1195 tlport->localport->port_name == portname) {
1196 lport = tlport;
1197 __unlink_local_port(lport);
1198 break;
1199 }
1200 }
1201 spin_unlock_irqrestore(&fcloop_lock, flags);
1202
1203 if (!lport)
1204 return -ENOENT;
1205
1206 ret = __wait_localport_unreg(lport);
1207
1208 return ret ? ret : count;
1209 }
1210
1211 static struct fcloop_nport *
1212 fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
1213 {
1214 struct fcloop_nport *newnport, *nport = NULL;
1215 struct fcloop_lport *tmplport, *lport = NULL;
1216 struct fcloop_ctrl_options *opts;
1217 unsigned long flags;
1218 u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
1219 int ret;
1220
1221 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
1222 if (!opts)
1223 return NULL;
1224
1225 ret = fcloop_parse_options(opts, buf);
1226 if (ret)
1227 goto out_free_opts;
1228
1229 /* everything there ? */
1230 if ((opts->mask & opts_mask) != opts_mask) {
1231 ret = -EINVAL;
1232 goto out_free_opts;
1233 }
1234
1235 newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
1236 if (!newnport)
1237 goto out_free_opts;
1238
1239 INIT_LIST_HEAD(&newnport->nport_list);
1240 newnport->node_name = opts->wwnn;
1241 newnport->port_name = opts->wwpn;
1242 if (opts->mask & NVMF_OPT_ROLES)
1243 newnport->port_role = opts->roles;
1244 if (opts->mask & NVMF_OPT_FCADDR)
1245 newnport->port_id = opts->fcaddr;
1246 kref_init(&newnport->ref);
1247
1248 spin_lock_irqsave(&fcloop_lock, flags);
1249
1250 list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
1251 if (tmplport->localport->node_name == opts->wwnn &&
1252 tmplport->localport->port_name == opts->wwpn)
1253 goto out_invalid_opts;
1254
1255 if (tmplport->localport->node_name == opts->lpwwnn &&
1256 tmplport->localport->port_name == opts->lpwwpn)
1257 lport = tmplport;
1258 }
1259
1260 if (remoteport) {
1261 if (!lport)
1262 goto out_invalid_opts;
1263 newnport->lport = lport;
1264 }
1265
1266 list_for_each_entry(nport, &fcloop_nports, nport_list) {
1267 if (nport->node_name == opts->wwnn &&
1268 nport->port_name == opts->wwpn) {
1269 if ((remoteport && nport->rport) ||
1270 (!remoteport && nport->tport)) {
1271 nport = NULL;
1272 goto out_invalid_opts;
1273 }
1274
1275 fcloop_nport_get(nport);
1276
1277 spin_unlock_irqrestore(&fcloop_lock, flags);
1278
1279 if (remoteport)
1280 nport->lport = lport;
1281 if (opts->mask & NVMF_OPT_ROLES)
1282 nport->port_role = opts->roles;
1283 if (opts->mask & NVMF_OPT_FCADDR)
1284 nport->port_id = opts->fcaddr;
1285 goto out_free_newnport;
1286 }
1287 }
1288
1289 list_add_tail(&newnport->nport_list, &fcloop_nports);
1290
1291 spin_unlock_irqrestore(&fcloop_lock, flags);
1292
1293 kfree(opts);
1294 return newnport;
1295
1296 out_invalid_opts:
1297 spin_unlock_irqrestore(&fcloop_lock, flags);
1298 out_free_newnport:
1299 kfree(newnport);
1300 out_free_opts:
1301 kfree(opts);
1302 return nport;
1303 }
1304
1305 static ssize_t
1306 fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
1307 const char *buf, size_t count)
1308 {
1309 struct nvme_fc_remote_port *remoteport;
1310 struct fcloop_nport *nport;
1311 struct fcloop_rport *rport;
1312 struct nvme_fc_port_info pinfo;
1313 int ret;
1314
1315 nport = fcloop_alloc_nport(buf, count, true);
1316 if (!nport)
1317 return -EIO;
1318
1319 memset(&pinfo, 0, sizeof(pinfo));
1320 pinfo.node_name = nport->node_name;
1321 pinfo.port_name = nport->port_name;
1322 pinfo.port_role = nport->port_role;
1323 pinfo.port_id = nport->port_id;
1324
1325 ret = nvme_fc_register_remoteport(nport->lport->localport,
1326 &pinfo, &remoteport);
1327 if (ret || !remoteport) {
1328 fcloop_nport_put(nport);
1329 return ret;
1330 }
1331
1332 /* success */
1333 rport = remoteport->private;
1334 rport->remoteport = remoteport;
1335 rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
1336 if (nport->tport) {
1337 nport->tport->remoteport = remoteport;
1338 nport->tport->lport = nport->lport;
1339 }
1340 rport->nport = nport;
1341 rport->lport = nport->lport;
1342 nport->rport = rport;
1343 spin_lock_init(&rport->lock);
1344 INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
1345 INIT_LIST_HEAD(&rport->ls_list);
1346
1347 return count;
1348 }
1349
1350
1351 static struct fcloop_rport *
1352 __unlink_remote_port(struct fcloop_nport *nport)
1353 {
1354 struct fcloop_rport *rport = nport->rport;
1355
1356 if (rport && nport->tport)
1357 nport->tport->remoteport = NULL;
1358 nport->rport = NULL;
1359
1360 return rport;
1361 }
1362
1363 static int
1364 __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
1365 {
1366 if (!rport)
1367 return -EALREADY;
1368
1369 return nvme_fc_unregister_remoteport(rport->remoteport);
1370 }
1371
1372 static ssize_t
1373 fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1374 const char *buf, size_t count)
1375 {
1376 struct fcloop_nport *nport = NULL, *tmpport;
1377 static struct fcloop_rport *rport;
1378 u64 nodename, portname;
1379 unsigned long flags;
1380 int ret;
1381
1382 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1383 if (ret)
1384 return ret;
1385
1386 spin_lock_irqsave(&fcloop_lock, flags);
1387
1388 list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1389 if (tmpport->node_name == nodename &&
1390 tmpport->port_name == portname && tmpport->rport) {
1391 nport = tmpport;
1392 rport = __unlink_remote_port(nport);
1393 break;
1394 }
1395 }
1396
1397 spin_unlock_irqrestore(&fcloop_lock, flags);
1398
1399 if (!nport)
1400 return -ENOENT;
1401
1402 ret = __remoteport_unreg(nport, rport);
1403
1404 return ret ? ret : count;
1405 }
1406
1407 static ssize_t
1408 fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
1409 const char *buf, size_t count)
1410 {
1411 struct nvmet_fc_target_port *targetport;
1412 struct fcloop_nport *nport;
1413 struct fcloop_tport *tport;
1414 struct nvmet_fc_port_info tinfo;
1415 int ret;
1416
1417 nport = fcloop_alloc_nport(buf, count, false);
1418 if (!nport)
1419 return -EIO;
1420
1421 tinfo.node_name = nport->node_name;
1422 tinfo.port_name = nport->port_name;
1423 tinfo.port_id = nport->port_id;
1424
1425 ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
1426 &targetport);
1427 if (ret) {
1428 fcloop_nport_put(nport);
1429 return ret;
1430 }
1431
1432 /* success */
1433 tport = targetport->private;
1434 tport->targetport = targetport;
1435 tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
1436 if (nport->rport)
1437 nport->rport->targetport = targetport;
1438 tport->nport = nport;
1439 tport->lport = nport->lport;
1440 nport->tport = tport;
1441 spin_lock_init(&tport->lock);
1442 INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
1443 INIT_LIST_HEAD(&tport->ls_list);
1444
1445 return count;
1446 }
1447
1448
1449 static struct fcloop_tport *
1450 __unlink_target_port(struct fcloop_nport *nport)
1451 {
1452 struct fcloop_tport *tport = nport->tport;
1453
1454 if (tport && nport->rport)
1455 nport->rport->targetport = NULL;
1456 nport->tport = NULL;
1457
1458 return tport;
1459 }
1460
1461 static int
1462 __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
1463 {
1464 if (!tport)
1465 return -EALREADY;
1466
1467 return nvmet_fc_unregister_targetport(tport->targetport);
1468 }
1469
1470 static ssize_t
1471 fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1472 const char *buf, size_t count)
1473 {
1474 struct fcloop_nport *nport = NULL, *tmpport;
1475 struct fcloop_tport *tport = NULL;
1476 u64 nodename, portname;
1477 unsigned long flags;
1478 int ret;
1479
1480 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1481 if (ret)
1482 return ret;
1483
1484 spin_lock_irqsave(&fcloop_lock, flags);
1485
1486 list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1487 if (tmpport->node_name == nodename &&
1488 tmpport->port_name == portname && tmpport->tport) {
1489 nport = tmpport;
1490 tport = __unlink_target_port(nport);
1491 break;
1492 }
1493 }
1494
1495 spin_unlock_irqrestore(&fcloop_lock, flags);
1496
1497 if (!nport)
1498 return -ENOENT;
1499
1500 ret = __targetport_unreg(nport, tport);
1501
1502 return ret ? ret : count;
1503 }
1504
1505 static ssize_t
1506 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
1507 const char *buf, size_t count)
1508 {
1509 unsigned int opcode;
1510 int starting, amount;
1511
1512 if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
1513 return -EBADRQC;
1514
1515 drop_current_cnt = 0;
1516 drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
1517 drop_opcode = (opcode & DROP_OPCODE_MASK);
1518 drop_instance = starting;
1519 /* check_for_drop() uses instance + count to know when to stop.
1520 * Thus, to drop a single instance the count must be 0, so
1521 * subtract 1 from the amount given.
1522 */
1523 drop_amount = amount - 1;
1524
1525 pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
1526 "instances\n",
1527 __func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
1528 drop_opcode, drop_amount);
1529
1530 return count;
1531 }
1532
1533
1534 static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
1535 static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
1536 static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
1537 static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
1538 static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
1539 static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
1540 static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
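/*
 * Typical setup sequence through the "ctl" device (paths assume the
 * default class/device names created in fcloop_init()):
 *   echo "wwnn=0x...,wwpn=0x..." > /sys/class/fcloop/ctl/add_local_port
 *   echo "wwnn=0x...,wwpn=0x..." > /sys/class/fcloop/ctl/add_target_port
 *   echo "wwnn=0x...,wwpn=0x...,lpwwnn=0x...,lpwwpn=0x..." > /sys/class/fcloop/ctl/add_remote_port
 * where the remote port's wwnn/wwpn match the target port and
 * lpwwnn/lpwwpn name the local port it attaches to.
 */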
1541
1542 static struct attribute *fcloop_dev_attrs[] = {
1543 &dev_attr_add_local_port.attr,
1544 &dev_attr_del_local_port.attr,
1545 &dev_attr_add_remote_port.attr,
1546 &dev_attr_del_remote_port.attr,
1547 &dev_attr_add_target_port.attr,
1548 &dev_attr_del_target_port.attr,
1549 &dev_attr_set_cmd_drop.attr,
1550 NULL
1551 };
1552
1553 static const struct attribute_group fclopp_dev_attrs_group = {
1554 .attrs = fcloop_dev_attrs,
1555 };
1556
1557 static const struct attribute_group *fcloop_dev_attr_groups[] = {
1558 &fclopp_dev_attrs_group,
1559 NULL,
1560 };
1561
1562 static struct class *fcloop_class;
1563 static struct device *fcloop_device;
1564
1565
1566 static int __init fcloop_init(void)
1567 {
1568 int ret;
1569
1570 fcloop_class = class_create("fcloop");
1571 if (IS_ERR(fcloop_class)) {
1572 pr_err("couldn't register class fcloop\n");
1573 ret = PTR_ERR(fcloop_class);
1574 return ret;
1575 }
1576
1577 fcloop_device = device_create_with_groups(
1578 fcloop_class, NULL, MKDEV(0, 0), NULL,
1579 fcloop_dev_attr_groups, "ctl");
1580 if (IS_ERR(fcloop_device)) {
1581 pr_err("couldn't create ctl device!\n");
1582 ret = PTR_ERR(fcloop_device);
1583 goto out_destroy_class;
1584 }
1585
1586 get_device(fcloop_device);
1587
1588 return 0;
1589
1590 out_destroy_class:
1591 class_destroy(fcloop_class);
1592 return ret;
1593 }
1594
1595 static void __exit fcloop_exit(void)
1596 {
1597 struct fcloop_lport *lport = NULL;
1598 struct fcloop_nport *nport = NULL;
1599 struct fcloop_tport *tport;
1600 struct fcloop_rport *rport;
1601 unsigned long flags;
1602 int ret;
1603
1604 spin_lock_irqsave(&fcloop_lock, flags);
1605
1606 for (;;) {
1607 nport = list_first_entry_or_null(&fcloop_nports,
1608 typeof(*nport), nport_list);
1609 if (!nport)
1610 break;
1611
1612 tport = __unlink_target_port(nport);
1613 rport = __unlink_remote_port(nport);
1614
1615 spin_unlock_irqrestore(&fcloop_lock, flags);
1616
1617 ret = __targetport_unreg(nport, tport);
1618 if (ret)
1619 pr_warn("%s: Failed deleting target port\n", __func__);
1620
1621 ret = __remoteport_unreg(nport, rport);
1622 if (ret)
1623 pr_warn("%s: Failed deleting remote port\n", __func__);
1624
1625 spin_lock_irqsave(&fcloop_lock, flags);
1626 }
1627
1628 for (;;) {
1629 lport = list_first_entry_or_null(&fcloop_lports,
1630 typeof(*lport), lport_list);
1631 if (!lport)
1632 break;
1633
1634 __unlink_local_port(lport);
1635
1636 spin_unlock_irqrestore(&fcloop_lock, flags);
1637
1638 ret = __wait_localport_unreg(lport);
1639 if (ret)
1640 pr_warn("%s: Failed deleting local port\n", __func__);
1641
1642 spin_lock_irqsave(&fcloop_lock, flags);
1643 }
1644
1645 spin_unlock_irqrestore(&fcloop_lock, flags);
1646
1647 put_device(fcloop_device);
1648
1649 device_destroy(fcloop_class, MKDEV(0, 0));
1650 class_destroy(fcloop_class);
1651 }
1652
1653 module_init(fcloop_init);
1654 module_exit(fcloop_exit);
1655
1656 MODULE_LICENSE("GPL v2");
1657