1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 /* See Fibre Channel protocol T11 FC-LS for details */
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <uapi/scsi/fc/fc_fs.h>
34 #include <uapi/scsi/fc/fc_els.h>
35
36 #include "lpfc_hw4.h"
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_nl.h"
41 #include "lpfc_disc.h"
42 #include "lpfc_scsi.h"
43 #include "lpfc.h"
44 #include "lpfc_logmsg.h"
45 #include "lpfc_crtn.h"
46 #include "lpfc_vport.h"
47 #include "lpfc_debugfs.h"
48
49 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
50 struct lpfc_iocbq *);
51 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
52 struct lpfc_iocbq *);
53 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
54 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
55 struct lpfc_nodelist *ndlp, uint8_t retry);
56 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
57 struct lpfc_iocbq *iocb);
58
59 static int lpfc_max_els_tries = 3;
60
61 /**
62 * lpfc_els_chk_latt - Check host link attention event for a vport
63 * @vport: pointer to a host virtual N_Port data structure.
64 *
65 * This routine checks whether there is an outstanding host link
66 * attention event during the discovery process with the @vport. It is done
67 * by reading the HBA's Host Attention (HA) register. If any host link
68 * attention event occurs during this @vport's discovery process, the @vport
69 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
70 * be issued if the link state is not already in host link cleared state,
71 * and a return code shall indicate whether the host link attention event
72 * had happened.
73 *
74 * Note that if the host link is in state LPFC_LINK_DOWN, the @vport state
75 * is at or beyond LPFC_VPORT_READY, or the HBA is an SLI-4 port, the request
76 * to check for a host link attention event is ignored and the return code
77 * indicates that no host link attention event happened.
78 *
79 * Return codes
80 * 0 - no host link attention event happened
81 * 1 - host link attention event happened
82 **/
83 int
84 lpfc_els_chk_latt(struct lpfc_vport *vport)
85 {
86 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
87 struct lpfc_hba *phba = vport->phba;
88 uint32_t ha_copy;
89
90 if (vport->port_state >= LPFC_VPORT_READY ||
91 phba->link_state == LPFC_LINK_DOWN ||
92 phba->sli_rev > LPFC_SLI_REV3)
93 return 0;
94
95 /* Read the HBA Host Attention Register */
96 if (lpfc_readl(phba->HAregaddr, &ha_copy))
97 return 1;
98
99 if (!(ha_copy & HA_LATT))
100 return 0;
101
102 /* Pending Link Event during Discovery */
103 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
104 "0237 Pending Link Event during "
105 "Discovery: State x%x\n",
106 phba->pport->port_state);
107
108 /* CLEAR_LA should re-enable link attention events and
109 * we should then immediately take a LATT event. The
110 * LATT processing should call lpfc_linkdown() which
111 * will cleanup any left over in-progress discovery
112 * events.
113 */
114 spin_lock_irq(shost->host_lock);
115 vport->fc_flag |= FC_ABORT_DISCOVERY;
116 spin_unlock_irq(shost->host_lock);
117
118 if (phba->link_state != LPFC_CLEAR_LA)
119 lpfc_issue_clear_la(phba, vport);
120
121 return 1;
122 }
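/*
 * Illustrative sketch only (not driver code): a typical ELS completion
 * handler checks for a pending link attention event first and, if one
 * occurred, drops its node reference and skips further processing:
 *
 *	if (lpfc_els_chk_latt(vport)) {
 *		lpfc_nlp_put(ndlp);
 *		goto out;
 *	}
 *
 * This mirrors the pattern used in lpfc_cmpl_els_flogi() later in this file.
 */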
123
124 /**
125 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
126 * @vport: pointer to a host virtual N_Port data structure.
127 * @expectRsp: flag indicating whether response is expected.
128 * @cmdSize: size of the ELS command.
129 * @retry: number of retries to the command IOCB when it fails.
130 * @ndlp: pointer to a node-list data structure.
131 * @did: destination identifier.
132 * @elscmd: the ELS command code.
133 *
134 * This routine allocates a lpfc-IOCB data structure from the driver's
135 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed in,
136 * so the discovery state machine can issue an Extended Link Service (ELS)
137 * command. It is a generic lpfc-IOCB allocation and preparation routine
138 * used by all the discovery state machine routines; the ELS
139 * command-specific fields are set up later by the individual discovery
140 * routines after this routine has allocated and prepared the generic
141 * IOCB data structure. It fills in the
142 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
143 * payload and response payload (if expected). The reference count on the
144 * ndlp is incremented by 1 and the reference to the ndlp is put into
145 * context1 of the IOCB data structure for this IOCB to hold the ndlp
146 * reference for the command's callback function to access later.
147 *
148 * Return code
149 * Pointer to the newly allocated/prepared els iocb data structure
150 * NULL - when els iocb data structure allocation/preparation failed
151 **/
152 struct lpfc_iocbq *
153 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
154 uint16_t cmdSize, uint8_t retry,
155 struct lpfc_nodelist *ndlp, uint32_t did,
156 uint32_t elscmd)
157 {
158 struct lpfc_hba *phba = vport->phba;
159 struct lpfc_iocbq *elsiocb;
160 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
161 struct ulp_bde64 *bpl;
162 IOCB_t *icmd;
163
164
165 if (!lpfc_is_link_up(phba))
166 return NULL;
167
168 /* Allocate buffer for command iocb */
169 elsiocb = lpfc_sli_get_iocbq(phba);
170
171 if (elsiocb == NULL)
172 return NULL;
173
174 /*
175 * If this command is for fabric controller and HBA running
176 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
177 */
178 if ((did == Fabric_DID) &&
179 (phba->hba_flag & HBA_FIP_SUPPORT) &&
180 ((elscmd == ELS_CMD_FLOGI) ||
181 (elscmd == ELS_CMD_FDISC) ||
182 (elscmd == ELS_CMD_LOGO)))
183 switch (elscmd) {
184 case ELS_CMD_FLOGI:
185 elsiocb->iocb_flag |=
186 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
187 & LPFC_FIP_ELS_ID_MASK);
188 break;
189 case ELS_CMD_FDISC:
190 elsiocb->iocb_flag |=
191 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
192 & LPFC_FIP_ELS_ID_MASK);
193 break;
194 case ELS_CMD_LOGO:
195 elsiocb->iocb_flag |=
196 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
197 & LPFC_FIP_ELS_ID_MASK);
198 break;
199 }
200 else
201 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
202
203 icmd = &elsiocb->iocb;
204
205 /* fill in BDEs for command */
206 /* Allocate buffer for command payload */
207 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
208 if (pcmd)
209 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
210 if (!pcmd || !pcmd->virt)
211 goto els_iocb_free_pcmb_exit;
212
213 INIT_LIST_HEAD(&pcmd->list);
214
215 /* Allocate buffer for response payload */
216 if (expectRsp) {
217 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
218 if (prsp)
219 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
220 &prsp->phys);
221 if (!prsp || !prsp->virt)
222 goto els_iocb_free_prsp_exit;
223 INIT_LIST_HEAD(&prsp->list);
224 } else
225 prsp = NULL;
226
227 /* Allocate buffer for Buffer ptr list */
228 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
229 if (pbuflist)
230 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
231 &pbuflist->phys);
232 if (!pbuflist || !pbuflist->virt)
233 goto els_iocb_free_pbuf_exit;
234
235 INIT_LIST_HEAD(&pbuflist->list);
236
237 if (expectRsp) {
238 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
239 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
240 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
241 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
242
243 icmd->un.elsreq64.remoteID = did; /* DID */
244 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
245 if (elscmd == ELS_CMD_FLOGI)
246 icmd->ulpTimeout = FF_DEF_RATOV * 2;
247 else if (elscmd == ELS_CMD_LOGO)
248 icmd->ulpTimeout = phba->fc_ratov;
249 else
250 icmd->ulpTimeout = phba->fc_ratov * 2;
251 } else {
252 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
253 icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
254 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
255 icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
256 icmd->un.xseq64.xmit_els_remoteID = did; /* DID */
257 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
258 }
259 icmd->ulpBdeCount = 1;
260 icmd->ulpLe = 1;
261 icmd->ulpClass = CLASS3;
262
263 /*
264 * If we have NPIV enabled, we want to send ELS traffic by VPI.
265 * For SLI4, since the driver controls VPIs we also want to include
266 * all ELS pt2pt protocol traffic as well.
267 */
268 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
269 ((phba->sli_rev == LPFC_SLI_REV4) &&
270 (vport->fc_flag & FC_PT2PT))) {
271
272 if (expectRsp) {
273 icmd->un.elsreq64.myID = vport->fc_myDID;
274
275 /* For ELS_REQUEST64_CR, use the VPI by default */
276 icmd->ulpContext = phba->vpi_ids[vport->vpi];
277 }
278
279 icmd->ulpCt_h = 0;
280 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
281 if (elscmd == ELS_CMD_ECHO)
282 icmd->ulpCt_l = 0; /* context = invalid RPI */
283 else
284 icmd->ulpCt_l = 1; /* context = VPI */
285 }
286
287 bpl = (struct ulp_bde64 *) pbuflist->virt;
288 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
289 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
290 bpl->tus.f.bdeSize = cmdSize;
291 bpl->tus.f.bdeFlags = 0;
292 bpl->tus.w = le32_to_cpu(bpl->tus.w);
293
294 if (expectRsp) {
295 bpl++;
296 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
297 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
298 bpl->tus.f.bdeSize = FCELSSIZE;
299 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
300 bpl->tus.w = le32_to_cpu(bpl->tus.w);
301 }
302
303 /* prevent preparing iocb with NULL ndlp reference */
304 elsiocb->context1 = lpfc_nlp_get(ndlp);
305 if (!elsiocb->context1)
306 goto els_iocb_free_pbuf_exit;
307 elsiocb->context2 = pcmd;
308 elsiocb->context3 = pbuflist;
309 elsiocb->retry = retry;
310 elsiocb->vport = vport;
311 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
312
313 if (prsp) {
314 list_add(&prsp->list, &pcmd->list);
315 }
316 if (expectRsp) {
317 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
318 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
319 "0116 Xmit ELS command x%x to remote "
320 "NPORT x%x I/O tag: x%x, port state:x%x "
321 "rpi x%x fc_flag:x%x\n",
322 elscmd, did, elsiocb->iotag,
323 vport->port_state, ndlp->nlp_rpi,
324 vport->fc_flag);
325 } else {
326 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
327 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
328 "0117 Xmit ELS response x%x to remote "
329 "NPORT x%x I/O tag: x%x, size: x%x "
330 "port_state x%x rpi x%x fc_flag x%x\n",
331 elscmd, ndlp->nlp_DID, elsiocb->iotag,
332 cmdSize, vport->port_state,
333 ndlp->nlp_rpi, vport->fc_flag);
334 }
335 return elsiocb;
336
337 els_iocb_free_pbuf_exit:
338 if (expectRsp)
339 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
340 kfree(pbuflist);
341
342 els_iocb_free_prsp_exit:
343 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
344 kfree(prsp);
345
346 els_iocb_free_pcmb_exit:
347 kfree(pcmd);
348 lpfc_sli_release_iocbq(phba, elsiocb);
349 return NULL;
350 }
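/*
 * Illustrative sketch only (not driver code): callers allocate the generic
 * ELS IOCB here, fill in the command-specific payload behind the first
 * command word, and hook up their completion handler:
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
 *	*((uint32_t *)pcmd) = ELS_CMD_FLOGI;
 *	... copy service parameters after the command word ...
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
 *
 * lpfc_issue_els_flogi() later in this file follows exactly this pattern.
 */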
351
352 /**
353 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
354 * @vport: pointer to a host virtual N_Port data structure.
355 *
356 * This routine issues a fabric registration login for a @vport. An
357 * active ndlp node with Fabric_DID must already exist for this @vport.
358 * The routine invokes two mailbox commands to carry out fabric registration
359 * login through the HBA firmware: the first mailbox command requests the
360 * HBA to perform link configuration for the @vport; and the second mailbox
361 * command requests the HBA to perform the actual fabric registration login
362 * with the @vport.
363 *
364 * Return code
365 * 0 - successfully issued fabric registration login for @vport
366 * -ENXIO -- failed to issue fabric registration login for @vport
367 **/
368 int
369 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
370 {
371 struct lpfc_hba *phba = vport->phba;
372 LPFC_MBOXQ_t *mbox;
373 struct lpfc_dmabuf *mp;
374 struct lpfc_nodelist *ndlp;
375 struct serv_parm *sp;
376 int rc;
377 int err = 0;
378
379 sp = &phba->fc_fabparam;
380 ndlp = lpfc_findnode_did(vport, Fabric_DID);
381 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
382 err = 1;
383 goto fail;
384 }
385
386 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
387 if (!mbox) {
388 err = 2;
389 goto fail;
390 }
391
392 vport->port_state = LPFC_FABRIC_CFG_LINK;
393 lpfc_config_link(phba, mbox);
394 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
395 mbox->vport = vport;
396
397 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
398 if (rc == MBX_NOT_FINISHED) {
399 err = 3;
400 goto fail_free_mbox;
401 }
402
403 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
404 if (!mbox) {
405 err = 4;
406 goto fail;
407 }
408 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
409 ndlp->nlp_rpi);
410 if (rc) {
411 err = 5;
412 goto fail_free_mbox;
413 }
414
415 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
416 mbox->vport = vport;
417 /* increment the reference count on ndlp to hold reference
418 * for the callback routine.
419 */
420 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
421
422 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
423 if (rc == MBX_NOT_FINISHED) {
424 err = 6;
425 goto fail_issue_reg_login;
426 }
427
428 return 0;
429
430 fail_issue_reg_login:
431 /* decrement the reference count on ndlp just incremented
432 * for the failed mbox command.
433 */
434 lpfc_nlp_put(ndlp);
435 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
436 lpfc_mbuf_free(phba, mp->virt, mp->phys);
437 kfree(mp);
438 fail_free_mbox:
439 mempool_free(mbox, phba->mbox_mem_pool);
440
441 fail:
442 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
443 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
444 "0249 Cannot issue Register Fabric login: Err %d\n",
445 err);
446 return -ENXIO;
447 }
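/*
 * Orientation sketch only: the mailbox sequence driven above is
 *
 *	CONFIG_LINK -> lpfc_config_link() + lpfc_sli_issue_mbox(MBX_NOWAIT)
 *	REG_LOGIN   -> lpfc_reg_rpi() for Fabric_DID, completed by
 *		       lpfc_mbx_cmpl_fabric_reg_login()
 *
 * A failure at any step drops the ndlp reference taken for the second
 * mailbox (if taken) and marks the vport FC_VPORT_FAILED.
 */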
448
449 /**
450 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
451 * @vport: pointer to a host virtual N_Port data structure.
452 *
453 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
454 * the @vport. This mailbox command is necessary for SLI4 port only.
455 *
456 * Return code
457 * 0 - successfully issued REG_VFI for @vport
458 * A failure code otherwise.
459 **/
460 int
461 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
462 {
463 struct lpfc_hba *phba = vport->phba;
464 LPFC_MBOXQ_t *mboxq = NULL;
465 struct lpfc_nodelist *ndlp;
466 struct lpfc_dmabuf *dmabuf = NULL;
467 int rc = 0;
468
469 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */
470 if ((phba->sli_rev == LPFC_SLI_REV4) &&
471 !(phba->link_flag & LS_LOOPBACK_MODE) &&
472 !(vport->fc_flag & FC_PT2PT)) {
473 ndlp = lpfc_findnode_did(vport, Fabric_DID);
474 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
475 rc = -ENODEV;
476 goto fail;
477 }
478 }
479
480 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
481 if (!mboxq) {
482 rc = -ENOMEM;
483 goto fail;
484 }
485
486 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */
487 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
488 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
489 if (!dmabuf) {
490 rc = -ENOMEM;
491 goto fail;
492 }
493 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
494 if (!dmabuf->virt) {
495 rc = -ENOMEM;
496 goto fail;
497 }
498 memcpy(dmabuf->virt, &phba->fc_fabparam,
499 sizeof(struct serv_parm));
500 }
501
502 vport->port_state = LPFC_FABRIC_CFG_LINK;
503 if (dmabuf)
504 lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
505 else
506 lpfc_reg_vfi(mboxq, vport, 0);
507
508 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
509 mboxq->vport = vport;
510 mboxq->ctx_buf = dmabuf;
511 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
512 if (rc == MBX_NOT_FINISHED) {
513 rc = -ENXIO;
514 goto fail;
515 }
516 return 0;
517
518 fail:
519 if (mboxq)
520 mempool_free(mboxq, phba->mbox_mem_pool);
521 if (dmabuf) {
522 if (dmabuf->virt)
523 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
524 kfree(dmabuf);
525 }
526
527 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
528 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
529 "0289 Issue Register VFI failed: Err %d\n", rc);
530 return rc;
531 }
532
533 /**
534 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
535 * @vport: pointer to a host virtual N_Port data structure.
536 *
537 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
538 * the @vport. This mailbox command is necessary for SLI4 port only.
539 *
540 * Return code
541 * 0 - successfully issued UNREG_VFI for @vport
542 * A failure code otherwise.
543 **/
544 int
545 lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
546 {
547 struct lpfc_hba *phba = vport->phba;
548 struct Scsi_Host *shost;
549 LPFC_MBOXQ_t *mboxq;
550 int rc;
551
552 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
553 if (!mboxq) {
554 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
555 "2556 UNREG_VFI mbox allocation failed"
556 "HBA state x%x\n", phba->pport->port_state);
557 return -ENOMEM;
558 }
559
560 lpfc_unreg_vfi(mboxq, vport);
561 mboxq->vport = vport;
562 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
563
564 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
565 if (rc == MBX_NOT_FINISHED) {
566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
567 "2557 UNREG_VFI issue mbox failed rc x%x "
568 "HBA state x%x\n",
569 rc, phba->pport->port_state);
570 mempool_free(mboxq, phba->mbox_mem_pool);
571 return -EIO;
572 }
573
574 shost = lpfc_shost_from_vport(vport);
575 spin_lock_irq(shost->host_lock);
576 vport->fc_flag &= ~FC_VFI_REGISTERED;
577 spin_unlock_irq(shost->host_lock);
578 return 0;
579 }
580
581 /**
582 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
583 * @vport: pointer to a host virtual N_Port data structure.
584 * @sp: pointer to service parameter data structure.
585 *
586 * This routine is called from the FLOGI/FDISC completion handler functions.
587 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
588 * Fabric nodename has changed in the completion service parameters;
589 * otherwise it returns 0. This routine also sets a flag in the vport data
590 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
591 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
592 * FCID, Fabric portname, or Fabric nodename has changed.
593 *
594 * Return code
595 * 0 - FCID, Fabric nodename, and Fabric portname are unchanged.
596 * 1 - FCID, Fabric nodename, or Fabric portname has changed.
597 *
598 **/
599 static uint8_t
600 lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
601 struct serv_parm *sp)
602 {
603 struct lpfc_hba *phba = vport->phba;
604 uint8_t fabric_param_changed = 0;
605 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
606
607 if ((vport->fc_prevDID != vport->fc_myDID) ||
608 memcmp(&vport->fabric_portname, &sp->portName,
609 sizeof(struct lpfc_name)) ||
610 memcmp(&vport->fabric_nodename, &sp->nodeName,
611 sizeof(struct lpfc_name)) ||
612 (vport->vport_flag & FAWWPN_PARAM_CHG)) {
613 fabric_param_changed = 1;
614 vport->vport_flag &= ~FAWWPN_PARAM_CHG;
615 }
616 /*
617 * Word 1 Bit 31 in common service parameter is overloaded.
618 * Word 1 Bit 31 in FLOGI request is multiple NPort request
619 * Word 1 Bit 31 in FLOGI response is clean address bit
620 *
621 * If fabric parameter is changed and clean address bit is
622 * cleared delay nport discovery if
623 * - vport->fc_prevDID != 0 (not initial discovery) OR
624 * - lpfc_delay_discovery module parameter is set.
625 */
626 if (fabric_param_changed && !sp->cmn.clean_address_bit &&
627 (vport->fc_prevDID || phba->cfg_delay_discovery)) {
628 spin_lock_irq(shost->host_lock);
629 vport->fc_flag |= FC_DISC_DELAYED;
630 spin_unlock_irq(shost->host_lock);
631 }
632
633 return fabric_param_changed;
634 }
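/*
 * Worked example (illustrative only): after a fabric reconfiguration the
 * FLOGI/FDISC completion may carry a different fabric port/node name with
 * the Clean Address bit cleared. The routine then returns 1 and, if this is
 * not the initial discovery (vport->fc_prevDID != 0) or lpfc_delay_discovery
 * is configured, also sets FC_DISC_DELAYED so N_Port discovery is postponed:
 *
 *	fabric_param_changed = 1, clean_address_bit = 0, fc_prevDID != 0
 *		=> FC_DISC_DELAYED set, return 1
 *	fabric_param_changed = 0 (same FCID and fabric names)
 *		=> nothing delayed, return 0
 */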
635
636
637 /**
638 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
639 * @vport: pointer to a host virtual N_Port data structure.
640 * @ndlp: pointer to a node-list data structure.
641 * @sp: pointer to service parameter data structure.
642 * @irsp: pointer to the IOCB within the lpfc response IOCB.
643 *
644 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
645 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
646 * port in a fabric topology. It properly sets up the parameters to the @ndlp
647 * from the IOCB response. It also checks the newly assigned N_Port ID for the
648 * @vport against the previously assigned N_Port ID. If it is different from
649 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
650 * is invoked on all the remaining nodes with the @vport to unregister the
651 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
652 * is invoked to register login to the fabric.
653 *
654 * Return code
655 * 0 - Success (currently, always return 0)
656 **/
657 static int
658 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
659 struct serv_parm *sp, IOCB_t *irsp)
660 {
661 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
662 struct lpfc_hba *phba = vport->phba;
663 struct lpfc_nodelist *np;
664 struct lpfc_nodelist *next_np;
665 uint8_t fabric_param_changed;
666
667 spin_lock_irq(shost->host_lock);
668 vport->fc_flag |= FC_FABRIC;
669 spin_unlock_irq(shost->host_lock);
670
671 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
672 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
673 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
674
675 phba->fc_edtovResol = sp->cmn.edtovResolution;
676 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
677
678 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
679 spin_lock_irq(shost->host_lock);
680 vport->fc_flag |= FC_PUBLIC_LOOP;
681 spin_unlock_irq(shost->host_lock);
682 }
683
684 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
685 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
686 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
687 ndlp->nlp_class_sup = 0;
688 if (sp->cls1.classValid)
689 ndlp->nlp_class_sup |= FC_COS_CLASS1;
690 if (sp->cls2.classValid)
691 ndlp->nlp_class_sup |= FC_COS_CLASS2;
692 if (sp->cls3.classValid)
693 ndlp->nlp_class_sup |= FC_COS_CLASS3;
694 if (sp->cls4.classValid)
695 ndlp->nlp_class_sup |= FC_COS_CLASS4;
696 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
697 sp->cmn.bbRcvSizeLsb;
698
699 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
700 if (fabric_param_changed) {
701 /* Reset FDMI attribute masks based on config parameter */
702 if (phba->cfg_enable_SmartSAN ||
703 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
704 /* Setup appropriate attribute masks */
705 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
706 if (phba->cfg_enable_SmartSAN)
707 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
708 else
709 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
710 } else {
711 vport->fdmi_hba_mask = 0;
712 vport->fdmi_port_mask = 0;
713 }
714
715 }
716 memcpy(&vport->fabric_portname, &sp->portName,
717 sizeof(struct lpfc_name));
718 memcpy(&vport->fabric_nodename, &sp->nodeName,
719 sizeof(struct lpfc_name));
720 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
721
722 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
723 if (sp->cmn.response_multiple_NPort) {
724 lpfc_printf_vlog(vport, KERN_WARNING,
725 LOG_ELS | LOG_VPORT,
726 "1816 FLOGI NPIV supported, "
727 "response data 0x%x\n",
728 sp->cmn.response_multiple_NPort);
729 spin_lock_irq(&phba->hbalock);
730 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
731 spin_unlock_irq(&phba->hbalock);
732 } else {
733 /* Because we asked f/w for NPIV it still expects us
734 to call reg_vnpid at least for the physical host */
735 lpfc_printf_vlog(vport, KERN_WARNING,
736 LOG_ELS | LOG_VPORT,
737 "1817 Fabric does not support NPIV "
738 "- configuring single port mode.\n");
739 spin_lock_irq(&phba->hbalock);
740 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
741 spin_unlock_irq(&phba->hbalock);
742 }
743 }
744
745 /*
746 * For FC we need to do some special processing because of the SLI
747 * Port's default settings of the Common Service Parameters.
748 */
749 if ((phba->sli_rev == LPFC_SLI_REV4) &&
750 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
751 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
752 if (fabric_param_changed)
753 lpfc_unregister_fcf_prep(phba);
754
755 /* This should just update the VFI CSPs*/
756 if (vport->fc_flag & FC_VFI_REGISTERED)
757 lpfc_issue_reg_vfi(vport);
758 }
759
760 if (fabric_param_changed &&
761 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
762
763 /* If our NportID changed, we need to ensure all
764 * remaining NPORTs get unreg_login'ed.
765 */
766 list_for_each_entry_safe(np, next_np,
767 &vport->fc_nodes, nlp_listp) {
768 if (!NLP_CHK_NODE_ACT(np))
769 continue;
770 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
771 !(np->nlp_flag & NLP_NPR_ADISC))
772 continue;
773 spin_lock_irq(shost->host_lock);
774 np->nlp_flag &= ~NLP_NPR_ADISC;
775 spin_unlock_irq(shost->host_lock);
776 lpfc_unreg_rpi(vport, np);
777 }
778 lpfc_cleanup_pending_mbox(vport);
779
780 if (phba->sli_rev == LPFC_SLI_REV4) {
781 lpfc_sli4_unreg_all_rpis(vport);
782 lpfc_mbx_unreg_vpi(vport);
783 spin_lock_irq(shost->host_lock);
784 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
785 spin_unlock_irq(shost->host_lock);
786 }
787
788 /*
789 * For SLI3 and SLI4, the VPI needs to be reregistered in
790 * response to this fabric parameter change event.
791 */
792 spin_lock_irq(shost->host_lock);
793 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
794 spin_unlock_irq(shost->host_lock);
795 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
796 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
797 /*
798 * Driver needs to re-reg VPI in order for f/w
799 * to update the MAC address.
800 */
801 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
802 lpfc_register_new_vport(phba, vport, ndlp);
803 return 0;
804 }
805
806 if (phba->sli_rev < LPFC_SLI_REV4) {
807 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
808 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
809 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
810 lpfc_register_new_vport(phba, vport, ndlp);
811 else
812 lpfc_issue_fabric_reglogin(vport);
813 } else {
814 ndlp->nlp_type |= NLP_FABRIC;
815 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
816 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
817 (vport->vpi_state & LPFC_VPI_REGISTERED)) {
818 lpfc_start_fdiscs(phba);
819 lpfc_do_scr_ns_plogi(phba, vport);
820 } else if (vport->fc_flag & FC_VFI_REGISTERED)
821 lpfc_issue_init_vpi(vport);
822 else {
823 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
824 "3135 Need register VFI: (x%x/%x)\n",
825 vport->fc_prevDID, vport->fc_myDID);
826 lpfc_issue_reg_vfi(vport);
827 }
828 }
829 return 0;
830 }
831
832 /**
833 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
834 * @vport: pointer to a host virtual N_Port data structure.
835 * @ndlp: pointer to a node-list data structure.
836 * @sp: pointer to service parameter data structure.
837 *
838 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
839 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
840 * in a point-to-point topology. First, the @vport's N_Port Name is compared
841 * with the received N_Port Name: if the @vport's N_Port Name is greater than
842 * the received N_Port Name lexicographically, this node shall assign local
843 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
844 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
845 * this node shall just wait for the remote node to issue PLOGI and assign
846 * N_Port IDs.
847 *
848 * Return code
849 * 0 - Success
850 * -ENXIO - Fail
851 **/
852 static int
853 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
854 struct serv_parm *sp)
855 {
856 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
857 struct lpfc_hba *phba = vport->phba;
858 LPFC_MBOXQ_t *mbox;
859 int rc;
860
861 spin_lock_irq(shost->host_lock);
862 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
863 vport->fc_flag |= FC_PT2PT;
864 spin_unlock_irq(shost->host_lock);
865
866 /* If we are pt2pt with another NPort, force NPIV off! */
867 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
868
869 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
870 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
871 lpfc_unregister_fcf_prep(phba);
872
873 spin_lock_irq(shost->host_lock);
874 vport->fc_flag &= ~FC_VFI_REGISTERED;
875 spin_unlock_irq(shost->host_lock);
876 phba->fc_topology_changed = 0;
877 }
878
879 rc = memcmp(&vport->fc_portname, &sp->portName,
880 sizeof(vport->fc_portname));
881
882 if (rc >= 0) {
883 /* This side will initiate the PLOGI */
884 spin_lock_irq(shost->host_lock);
885 vport->fc_flag |= FC_PT2PT_PLOGI;
886 spin_unlock_irq(shost->host_lock);
887
888 /*
889 * N_Port ID cannot be 0, set our Id to LocalID
890 * the other side will be RemoteID.
891 */
892
893 /* not equal */
894 if (rc)
895 vport->fc_myDID = PT2PT_LocalID;
896
897 /* Decrement ndlp reference count indicating that ndlp can be
898 * safely released when other references to it are done.
899 */
900 lpfc_nlp_put(ndlp);
901
902 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
903 if (!ndlp) {
904 /*
905 * Cannot find existing Fabric ndlp, so allocate a
906 * new one
907 */
908 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
909 if (!ndlp)
910 goto fail;
911 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
912 ndlp = lpfc_enable_node(vport, ndlp,
913 NLP_STE_UNUSED_NODE);
914 if (!ndlp)
915 goto fail;
916 }
917
918 memcpy(&ndlp->nlp_portname, &sp->portName,
919 sizeof(struct lpfc_name));
920 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
921 sizeof(struct lpfc_name));
922 /* Set state will put ndlp onto node list if not already done */
923 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
924 spin_lock_irq(shost->host_lock);
925 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
926 spin_unlock_irq(shost->host_lock);
927
928 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
929 if (!mbox)
930 goto fail;
931
932 lpfc_config_link(phba, mbox);
933
934 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
935 mbox->vport = vport;
936 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
937 if (rc == MBX_NOT_FINISHED) {
938 mempool_free(mbox, phba->mbox_mem_pool);
939 goto fail;
940 }
941 } else {
942 /* This side will wait for the PLOGI, decrement ndlp reference
943 * count indicating that ndlp can be released when other
944 * references to it are done.
945 */
946 lpfc_nlp_put(ndlp);
947
948 /* Start discovery - this should just do CLEAR_LA */
949 lpfc_disc_start(vport);
950 }
951
952 return 0;
953 fail:
954 return -ENXIO;
955 }
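/*
 * Illustrative example only (not driver code): in point-to-point mode the
 * local and remote WWPNs decide who assigns the N_Port IDs. If
 *
 *	memcmp(&vport->fc_portname, &sp->portName,
 *	       sizeof(vport->fc_portname)) >= 0
 *
 * this side initiates the PLOGI (FC_PT2PT_PLOGI set), assigning itself
 * PT2PT_LocalID (1) and treating the peer as PT2PT_RemoteID (2); otherwise
 * it simply starts discovery and waits for the peer's PLOGI.
 */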
956
957 /**
958 * lpfc_cmpl_els_flogi - Completion callback function for flogi
959 * @phba: pointer to lpfc hba data structure.
960 * @cmdiocb: pointer to lpfc command iocb data structure.
961 * @rspiocb: pointer to lpfc response iocb data structure.
962 *
963 * This routine is the top-level completion callback function for issuing
964 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
965 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
966 * retry has been made (either immediately or delayed with lpfc_els_retry()
967 * returning 1), the command IOCB will be released and function returned.
968 * If the retry attempt has been given up (possibly reach the maximum
969 * number of retries), one additional decrement of ndlp reference shall be
970 * invoked before going out after releasing the command IOCB. This will
971 * actually release the remote node (Note, lpfc_els_free_iocb() will also
972 * invoke one decrement of ndlp reference count). If no error is reported in
973 * the IOCB status, the F_Port bit in the FLOGI response common service
974 * parameters is used to determine the topology: if the F_Port bit is set,
975 * the login completed with a fabric port; otherwise this is a
976 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
977 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
978 * specific topology completion conditions.
979 **/
980 static void
981 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
982 struct lpfc_iocbq *rspiocb)
983 {
984 struct lpfc_vport *vport = cmdiocb->vport;
985 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
986 IOCB_t *irsp = &rspiocb->iocb;
987 struct lpfc_nodelist *ndlp = cmdiocb->context1;
988 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
989 struct serv_parm *sp;
990 uint16_t fcf_index;
991 int rc;
992
993 /* Check to see if link went down during discovery */
994 if (lpfc_els_chk_latt(vport)) {
995 /* One additional decrement on node reference count to
996 * trigger the release of the node
997 */
998 lpfc_nlp_put(ndlp);
999 goto out;
1000 }
1001
1002 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1003 "FLOGI cmpl: status:x%x/x%x state:x%x",
1004 irsp->ulpStatus, irsp->un.ulpWord[4],
1005 vport->port_state);
1006
1007 if (irsp->ulpStatus) {
1008 /*
1009 * In case of FIP mode, perform roundrobin FCF failover
1010 * due to new FCF discovery
1011 */
1012 if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
1013 (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
1014 if (phba->link_state < LPFC_LINK_UP)
1015 goto stop_rr_fcf_flogi;
1016 if ((phba->fcoe_cvl_eventtag_attn ==
1017 phba->fcoe_cvl_eventtag) &&
1018 (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1019 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1020 IOERR_SLI_ABORTED))
1021 goto stop_rr_fcf_flogi;
1022 else
1023 phba->fcoe_cvl_eventtag_attn =
1024 phba->fcoe_cvl_eventtag;
1025 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1026 "2611 FLOGI failed on FCF (x%x), "
1027 "status:x%x/x%x, tmo:x%x, perform "
1028 "roundrobin FCF failover\n",
1029 phba->fcf.current_rec.fcf_indx,
1030 irsp->ulpStatus, irsp->un.ulpWord[4],
1031 irsp->ulpTimeout);
1032 lpfc_sli4_set_fcf_flogi_fail(phba,
1033 phba->fcf.current_rec.fcf_indx);
1034 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
1035 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
1036 if (rc)
1037 goto out;
1038 }
1039
1040 stop_rr_fcf_flogi:
1041 /* FLOGI failure */
1042 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1043 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1044 IOERR_LOOP_OPEN_FAILURE)))
1045 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1046 "2858 FLOGI failure Status:x%x/x%x TMO"
1047 ":x%x Data x%x x%x\n",
1048 irsp->ulpStatus, irsp->un.ulpWord[4],
1049 irsp->ulpTimeout, phba->hba_flag,
1050 phba->fcf.fcf_flag);
1051
1052 /* Check for retry */
1053 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1054 goto out;
1055
1056 lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
1057 "0150 FLOGI failure Status:x%x/x%x "
1058 "xri x%x TMO:x%x\n",
1059 irsp->ulpStatus, irsp->un.ulpWord[4],
1060 cmdiocb->sli4_xritag, irsp->ulpTimeout);
1061
1062 /* If this is not a loop open failure, bail out */
1063 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1064 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1065 IOERR_LOOP_OPEN_FAILURE)))
1066 goto flogifail;
1067
1068 /* FLOGI failed, so there is no fabric */
1069 spin_lock_irq(shost->host_lock);
1070 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1071 spin_unlock_irq(shost->host_lock);
1072
1073 /* If private loop, then allow max outstanding els to be
1074 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1075 * alpa map would take too long otherwise.
1076 */
1077 if (phba->alpa_map[0] == 0)
1078 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1079 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1080 (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1081 (vport->fc_prevDID != vport->fc_myDID) ||
1082 phba->fc_topology_changed)) {
1083 if (vport->fc_flag & FC_VFI_REGISTERED) {
1084 if (phba->fc_topology_changed) {
1085 lpfc_unregister_fcf_prep(phba);
1086 spin_lock_irq(shost->host_lock);
1087 vport->fc_flag &= ~FC_VFI_REGISTERED;
1088 spin_unlock_irq(shost->host_lock);
1089 phba->fc_topology_changed = 0;
1090 } else {
1091 lpfc_sli4_unreg_all_rpis(vport);
1092 }
1093 }
1094
1095 /* Do not register VFI if the driver aborted FLOGI */
1096 if (!lpfc_error_lost_link(irsp))
1097 lpfc_issue_reg_vfi(vport);
1098 lpfc_nlp_put(ndlp);
1099 goto out;
1100 }
1101 goto flogifail;
1102 }
1103 spin_lock_irq(shost->host_lock);
1104 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1105 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1106 spin_unlock_irq(shost->host_lock);
1107
1108 /*
1109 * The FLogI succeeded. Sync the data for the CPU before
1110 * accessing it.
1111 */
1112 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1113 if (!prsp)
1114 goto out;
1115 sp = prsp->virt + sizeof(uint32_t);
1116
1117 /* FLOGI completes successfully */
1118 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1119 "0101 FLOGI completes successfully, I/O tag:x%x, "
1120 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
1121 cmdiocb->iotag, cmdiocb->sli4_xritag,
1122 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1123 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1124 vport->port_state, vport->fc_flag);
1125
1126 if (vport->port_state == LPFC_FLOGI) {
1127 /*
1128 * If Common Service Parameters indicate Nport
1129 * we are point to point, if Fport we are Fabric.
1130 */
1131 if (sp->cmn.fPort)
1132 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
1133 else if (!(phba->hba_flag & HBA_FCOE_MODE))
1134 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1135 else {
1136 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1137 "2831 FLOGI response with cleared Fabric "
1138 "bit fcf_index 0x%x "
1139 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1140 "Fabric Name "
1141 "%02x%02x%02x%02x%02x%02x%02x%02x\n",
1142 phba->fcf.current_rec.fcf_indx,
1143 phba->fcf.current_rec.switch_name[0],
1144 phba->fcf.current_rec.switch_name[1],
1145 phba->fcf.current_rec.switch_name[2],
1146 phba->fcf.current_rec.switch_name[3],
1147 phba->fcf.current_rec.switch_name[4],
1148 phba->fcf.current_rec.switch_name[5],
1149 phba->fcf.current_rec.switch_name[6],
1150 phba->fcf.current_rec.switch_name[7],
1151 phba->fcf.current_rec.fabric_name[0],
1152 phba->fcf.current_rec.fabric_name[1],
1153 phba->fcf.current_rec.fabric_name[2],
1154 phba->fcf.current_rec.fabric_name[3],
1155 phba->fcf.current_rec.fabric_name[4],
1156 phba->fcf.current_rec.fabric_name[5],
1157 phba->fcf.current_rec.fabric_name[6],
1158 phba->fcf.current_rec.fabric_name[7]);
1159 lpfc_nlp_put(ndlp);
1160 spin_lock_irq(&phba->hbalock);
1161 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1162 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1163 spin_unlock_irq(&phba->hbalock);
1164 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1165 goto out;
1166 }
1167 if (!rc) {
1168 /* Mark the FCF discovery process done */
1169 if (phba->hba_flag & HBA_FIP_SUPPORT)
1170 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1171 LOG_ELS,
1172 "2769 FLOGI to FCF (x%x) "
1173 "completed successfully\n",
1174 phba->fcf.current_rec.fcf_indx);
1175 spin_lock_irq(&phba->hbalock);
1176 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1177 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1178 spin_unlock_irq(&phba->hbalock);
1179 phba->fcf.fcf_redisc_attempted = 0; /* reset */
1180 goto out;
1181 }
1182 }
1183
1184 flogifail:
1185 spin_lock_irq(&phba->hbalock);
1186 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1187 spin_unlock_irq(&phba->hbalock);
1188
1189 lpfc_nlp_put(ndlp);
1190
1191 if (!lpfc_error_lost_link(irsp)) {
1192 /* FLOGI failed, so just use loop map to make discovery list */
1193 lpfc_disc_list_loopmap(vport);
1194
1195 /* Start discovery */
1196 lpfc_disc_start(vport);
1197 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1198 (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1199 IOERR_SLI_ABORTED) &&
1200 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1201 IOERR_SLI_DOWN))) &&
1202 (phba->link_state != LPFC_CLEAR_LA)) {
1203 /* If FLOGI failed enable link interrupt. */
1204 lpfc_issue_clear_la(phba, vport);
1205 }
1206 out:
1207 lpfc_els_free_iocb(phba, cmdiocb);
1208 }
1209
1210 /**
1211 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
1212 * aborted during a link down
1213 * @phba: pointer to lpfc hba data structure.
1214 * @cmdiocb: pointer to lpfc command iocb data structure.
1215 * @rspiocb: pointer to lpfc response iocb data structure.
1216 *
1217 */
1218 static void
1219 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1220 struct lpfc_iocbq *rspiocb)
1221 {
1222 IOCB_t *irsp;
1223 uint32_t *pcmd;
1224 uint32_t cmd;
1225
1226 pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
1227 cmd = *pcmd;
1228 irsp = &rspiocb->iocb;
1229
1230 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1231 "6445 ELS completes after LINK_DOWN: "
1232 " Status %x/%x cmd x%x flg x%x\n",
1233 irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
1234 cmdiocb->iocb_flag);
1235
1236 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
1237 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
1238 atomic_dec(&phba->fabric_iocb_count);
1239 }
1240 lpfc_els_free_iocb(phba, cmdiocb);
1241 }
1242
1243 /**
1244 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1245 * @vport: pointer to a host virtual N_Port data structure.
1246 * @ndlp: pointer to a node-list data structure.
1247 * @retry: number of retries to the command IOCB.
1248 *
1249 * This routine issues a Fabric Login (FLOGI) Request ELS command
1250 * for a @vport. The initiator service parameters are put into the payload
1251 * of the FLOGI Request IOCB and the top-level callback function pointer
1252 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1253 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1254 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1255 *
1256 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1257 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1258 * will be stored into the context1 field of the IOCB for the completion
1259 * callback function to the FLOGI ELS command.
1260 *
1261 * Return code
1262 * 0 - successfully issued flogi iocb for @vport
1263 * 1 - failed to issue flogi iocb for @vport
1264 **/
1265 static int
1266 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1267 uint8_t retry)
1268 {
1269 struct lpfc_hba *phba = vport->phba;
1270 struct serv_parm *sp;
1271 IOCB_t *icmd;
1272 struct lpfc_iocbq *elsiocb;
1273 struct lpfc_iocbq defer_flogi_acc;
1274 uint8_t *pcmd;
1275 uint16_t cmdsize;
1276 uint32_t tmo, did;
1277 int rc;
1278
1279 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1280 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1281 ndlp->nlp_DID, ELS_CMD_FLOGI);
1282
1283 if (!elsiocb)
1284 return 1;
1285
1286 icmd = &elsiocb->iocb;
1287 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1288
1289 /* For FLOGI request, remainder of payload is service parameters */
1290 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1291 pcmd += sizeof(uint32_t);
1292 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1293 sp = (struct serv_parm *) pcmd;
1294
1295 /* Setup CSPs accordingly for Fabric */
1296 sp->cmn.e_d_tov = 0;
1297 sp->cmn.w2.r_a_tov = 0;
1298 sp->cmn.virtual_fabric_support = 0;
1299 sp->cls1.classValid = 0;
1300 if (sp->cmn.fcphLow < FC_PH3)
1301 sp->cmn.fcphLow = FC_PH3;
1302 if (sp->cmn.fcphHigh < FC_PH3)
1303 sp->cmn.fcphHigh = FC_PH3;
1304
1305 if (phba->sli_rev == LPFC_SLI_REV4) {
1306 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1307 LPFC_SLI_INTF_IF_TYPE_0) {
1308 elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1309 elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1310 /* FLOGI needs to be 3 for WQE FCFI */
1311 /* Set the fcfi to the fcfi we registered with */
1312 elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1313 }
1314 /* Can't do SLI4 class2 without support sequence coalescing */
1315 sp->cls2.classValid = 0;
1316 sp->cls2.seqDelivery = 0;
1317 } else {
1318 /* Historical, setting sequential-delivery bit for SLI3 */
1319 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1320 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1321 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1322 sp->cmn.request_multiple_Nport = 1;
1323 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1324 icmd->ulpCt_h = 1;
1325 icmd->ulpCt_l = 0;
1326 } else
1327 sp->cmn.request_multiple_Nport = 0;
1328 }
1329
1330 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1331 icmd->un.elsreq64.myID = 0;
1332 icmd->un.elsreq64.fl = 1;
1333 }
1334
1335 tmo = phba->fc_ratov;
1336 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1337 lpfc_set_disctmo(vport);
1338 phba->fc_ratov = tmo;
1339
1340 phba->fc_stat.elsXmitFLOGI++;
1341 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1342
1343 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1344 "Issue FLOGI: opt:x%x",
1345 phba->sli3_options, 0, 0);
1346
1347 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1348
1349 phba->hba_flag |= HBA_FLOGI_ISSUED;
1350
1351 /* Check for a deferred FLOGI ACC condition */
1352 if (phba->defer_flogi_acc_flag) {
1353 did = vport->fc_myDID;
1354 vport->fc_myDID = Fabric_DID;
1355
1356 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
1357
1358 defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
1359 defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
1360 phba->defer_flogi_acc_ox_id;
1361
1362 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1363 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
1364 " ox_id: x%x, hba_flag x%x\n",
1365 phba->defer_flogi_acc_rx_id,
1366 phba->defer_flogi_acc_ox_id, phba->hba_flag);
1367
1368 /* Send deferred FLOGI ACC */
1369 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
1370 ndlp, NULL);
1371
1372 phba->defer_flogi_acc_flag = false;
1373
1374 vport->fc_myDID = did;
1375 }
1376
1377 if (rc == IOCB_ERROR) {
1378 lpfc_els_free_iocb(phba, elsiocb);
1379 return 1;
1380 }
1381 return 0;
1382 }
1383
1384 /**
1385 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1386 * @phba: pointer to lpfc hba data structure.
1387 *
1388 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1389 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1390 * list and issues an abort IOCB command on each outstanding IOCB that
1391 * contains an active Fabric_DID ndlp. Note that this function only issues
1392 * the abort IOCB command on all the outstanding IOCBs, thus when this
1393 * function returns, it does not guarantee all the IOCBs are actually aborted.
1394 *
1395 * Return code
1396 * 0 - Successfully issued abort iocb on all outstanding flogis; -EIO if the ELS ring is unavailable
1397 **/
1398 int
1399 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1400 {
1401 struct lpfc_sli_ring *pring;
1402 struct lpfc_iocbq *iocb, *next_iocb;
1403 struct lpfc_nodelist *ndlp;
1404 IOCB_t *icmd;
1405
1406 /* Abort outstanding I/O on NPort <nlp_DID> */
1407 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1408 "0201 Abort outstanding I/O on NPort x%x\n",
1409 Fabric_DID);
1410
1411 pring = lpfc_phba_elsring(phba);
1412 if (unlikely(!pring))
1413 return -EIO;
1414
1415 /*
1416 * Check the txcmplq for an iocb that matches the nport the driver is
1417 * searching for.
1418 */
1419 spin_lock_irq(&phba->hbalock);
1420 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1421 icmd = &iocb->iocb;
1422 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
1423 ndlp = (struct lpfc_nodelist *)(iocb->context1);
1424 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1425 (ndlp->nlp_DID == Fabric_DID))
1426 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1427 }
1428 }
1429 spin_unlock_irq(&phba->hbalock);
1430
1431 return 0;
1432 }
1433
1434 /**
1435 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1436 * @vport: pointer to a host virtual N_Port data structure.
1437 *
1438 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1439 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1440 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1441 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1442 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1443 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1444 * @vport.
1445 *
1446 * Return code
1447 * 0 - failed to issue initial flogi for @vport
1448 * 1 - successfully issued initial flogi for @vport
1449 **/
1450 int
1451 lpfc_initial_flogi(struct lpfc_vport *vport)
1452 {
1453 struct lpfc_nodelist *ndlp;
1454
1455 vport->port_state = LPFC_FLOGI;
1456 lpfc_set_disctmo(vport);
1457
1458 /* First look for the Fabric ndlp */
1459 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1460 if (!ndlp) {
1461 /* Cannot find existing Fabric ndlp, so allocate a new one */
1462 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1463 if (!ndlp)
1464 return 0;
1465 /* Set the node type */
1466 ndlp->nlp_type |= NLP_FABRIC;
1467 /* Put ndlp onto node list */
1468 lpfc_enqueue_node(vport, ndlp);
1469 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1470 /* re-setup ndlp without removing from node list */
1471 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1472 if (!ndlp)
1473 return 0;
1474 }
1475
1476 if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1477 /* This decrement of reference count to node shall kick off
1478 * the release of the node.
1479 */
1480 lpfc_nlp_put(ndlp);
1481 return 0;
1482 }
1483 return 1;
1484 }
1485
1486 /**
1487 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1488 * @vport: pointer to a host virtual N_Port data structure.
1489 *
1490 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1491 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1492 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1493 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1494 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1495 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1496 * @vport.
1497 *
1498 * Return code
1499 * 0 - failed to issue initial fdisc for @vport
1500 * 1 - successfully issued initial fdisc for @vport
1501 **/
1502 int
1503 lpfc_initial_fdisc(struct lpfc_vport *vport)
1504 {
1505 struct lpfc_nodelist *ndlp;
1506
1507 /* First look for the Fabric ndlp */
1508 ndlp = lpfc_findnode_did(vport, Fabric_DID);
1509 if (!ndlp) {
1510 /* Cannot find existing Fabric ndlp, so allocate a new one */
1511 ndlp = lpfc_nlp_init(vport, Fabric_DID);
1512 if (!ndlp)
1513 return 0;
1514 /* Put ndlp onto node list */
1515 lpfc_enqueue_node(vport, ndlp);
1516 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
1517 /* re-setup ndlp without removing from node list */
1518 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1519 if (!ndlp)
1520 return 0;
1521 }
1522
1523 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1524 /* decrement node reference count to trigger the release of
1525 * the node.
1526 */
1527 lpfc_nlp_put(ndlp);
1528 return 0;
1529 }
1530 return 1;
1531 }
1532
1533 /**
1534 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1535 * @vport: pointer to a host virtual N_Port data structure.
1536 *
1537 * This routine checks whether there are more remaining Port Logins
1538 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1539 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1540 * to issue ELS PLOGIs up to the configured discover threads with the
1541 * @vport (@vport->cfg_discovery_threads). The function also decrements
1542 * the @vport's num_disc_nodes by 1 if it is not already 0.
1543 **/
1544 void
1545 lpfc_more_plogi(struct lpfc_vport *vport)
1546 {
1547 if (vport->num_disc_nodes)
1548 vport->num_disc_nodes--;
1549
1550 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
1551 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1552 "0232 Continue discovery with %d PLOGIs to go "
1553 "Data: x%x x%x x%x\n",
1554 vport->num_disc_nodes, vport->fc_plogi_cnt,
1555 vport->fc_flag, vport->port_state);
1556 /* Check to see if there are more PLOGIs to be sent */
1557 if (vport->fc_flag & FC_NLP_MORE)
1558 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
1559 lpfc_els_disc_plogi(vport);
1560
1561 return;
1562 }
1563
1564 /**
1565 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1566 * @phba: pointer to lpfc hba data structure.
1567 * @prsp: pointer to response IOCB payload.
1568 * @ndlp: pointer to a node-list data structure.
1569 *
1570 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1571 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1572 * The following cases are considered N_Port confirmed:
1573 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1574 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1575 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1576 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1577 * 1) if there is a node on vport list other than the @ndlp with the same
1578 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1579 * on that node to release the RPI associated with the node; 2) if there is
1580 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1581 * into, a new node shall be allocated (or activated). In either case, the
1582 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1583 * be released and the new_ndlp shall be put on to the vport node list and
1584 * its pointer returned as the confirmed node.
1585 *
1586 * Note that before the @ndlp got "released", the keepDID from not-matching
1587 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1588 * of the @ndlp. This is because the release of @ndlp is actually to put it
1589 * into an inactive state on the vport node list and the vport node list
1590 * management algorithm does not allow two nodes with the same DID.
1591 *
1592 * Return code
1593 * pointer to the PLOGI N_Port @ndlp
1594 **/
1595 static struct lpfc_nodelist *
1596 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1597 struct lpfc_nodelist *ndlp)
1598 {
1599 struct lpfc_vport *vport = ndlp->vport;
1600 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1601 struct lpfc_nodelist *new_ndlp;
1602 struct lpfc_rport_data *rdata;
1603 struct fc_rport *rport;
1604 struct serv_parm *sp;
1605 uint8_t name[sizeof(struct lpfc_name)];
1606 uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
1607 uint32_t keep_new_nlp_flag = 0;
1608 uint16_t keep_nlp_state;
1609 u32 keep_nlp_fc4_type = 0;
1610 struct lpfc_nvme_rport *keep_nrport = NULL;
1611 int put_node;
1612 int put_rport;
1613 unsigned long *active_rrqs_xri_bitmap = NULL;
1614
1615 /* Fabric nodes can have the same WWPN so we don't bother searching
1616 * by WWPN. Just return the ndlp that was given to us.
1617 */
1618 if (ndlp->nlp_type & NLP_FABRIC)
1619 return ndlp;
1620
1621 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1622 memset(name, 0, sizeof(struct lpfc_name));
1623
1624 /* Now we find out if the NPort we are logging into, matches the WWPN
1625 * we have for that ndlp. If not, we have some work to do.
1626 */
1627 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1628
1629 /* return immediately if the WWPN matches ndlp */
1630 if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1631 return ndlp;
1632
1633 if (phba->sli_rev == LPFC_SLI_REV4) {
1634 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1635 GFP_KERNEL);
1636 if (active_rrqs_xri_bitmap)
1637 memset(active_rrqs_xri_bitmap, 0,
1638 phba->cfg_rrq_xri_bitmap_sz);
1639 }
1640
1641 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1642 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
1643 "new_ndlp x%x x%x x%x\n",
1644 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
1645 (new_ndlp ? new_ndlp->nlp_DID : 0),
1646 (new_ndlp ? new_ndlp->nlp_flag : 0),
1647 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
1648
1649 if (!new_ndlp) {
1650 rc = memcmp(&ndlp->nlp_portname, name,
1651 sizeof(struct lpfc_name));
1652 if (!rc) {
1653 if (active_rrqs_xri_bitmap)
1654 mempool_free(active_rrqs_xri_bitmap,
1655 phba->active_rrq_pool);
1656 return ndlp;
1657 }
1658 new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
1659 if (!new_ndlp) {
1660 if (active_rrqs_xri_bitmap)
1661 mempool_free(active_rrqs_xri_bitmap,
1662 phba->active_rrq_pool);
1663 return ndlp;
1664 }
1665 } else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1666 rc = memcmp(&ndlp->nlp_portname, name,
1667 sizeof(struct lpfc_name));
1668 if (!rc) {
1669 if (active_rrqs_xri_bitmap)
1670 mempool_free(active_rrqs_xri_bitmap,
1671 phba->active_rrq_pool);
1672 return ndlp;
1673 }
1674 new_ndlp = lpfc_enable_node(vport, new_ndlp,
1675 NLP_STE_UNUSED_NODE);
1676 if (!new_ndlp) {
1677 if (active_rrqs_xri_bitmap)
1678 mempool_free(active_rrqs_xri_bitmap,
1679 phba->active_rrq_pool);
1680 return ndlp;
1681 }
1682 keepDID = new_ndlp->nlp_DID;
1683 if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
1684 memcpy(active_rrqs_xri_bitmap,
1685 new_ndlp->active_rrqs_xri_bitmap,
1686 phba->cfg_rrq_xri_bitmap_sz);
1687 } else {
1688 keepDID = new_ndlp->nlp_DID;
1689 if (phba->sli_rev == LPFC_SLI_REV4 &&
1690 active_rrqs_xri_bitmap)
1691 memcpy(active_rrqs_xri_bitmap,
1692 new_ndlp->active_rrqs_xri_bitmap,
1693 phba->cfg_rrq_xri_bitmap_sz);
1694 }
1695
1696 /* At this point in this routine, we know new_ndlp will be
1697 * returned. However, any previous GID_FTs that were done
1698 * would have updated nlp_fc4_type in ndlp, so we must ensure
1699 * new_ndlp has the right value.
1700 */
1701 if (vport->fc_flag & FC_FABRIC) {
1702 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
1703 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
1704 }
1705
1706 lpfc_unreg_rpi(vport, new_ndlp);
1707 new_ndlp->nlp_DID = ndlp->nlp_DID;
1708 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1709 if (phba->sli_rev == LPFC_SLI_REV4)
1710 memcpy(new_ndlp->active_rrqs_xri_bitmap,
1711 ndlp->active_rrqs_xri_bitmap,
1712 phba->cfg_rrq_xri_bitmap_sz);
1713
1714 spin_lock_irq(shost->host_lock);
1715 keep_new_nlp_flag = new_ndlp->nlp_flag;
1716 keep_nlp_flag = ndlp->nlp_flag;
1717 new_ndlp->nlp_flag = ndlp->nlp_flag;
1718
1719 /* if new_ndlp had NLP_UNREG_INP set, keep it */
1720 if (keep_new_nlp_flag & NLP_UNREG_INP)
1721 new_ndlp->nlp_flag |= NLP_UNREG_INP;
1722 else
1723 new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
1724
1725 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
1726 if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
1727 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1728 else
1729 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1730
1731 ndlp->nlp_flag = keep_new_nlp_flag;
1732
1733 /* if ndlp had NLP_UNREG_INP set, keep it */
1734 if (keep_nlp_flag & NLP_UNREG_INP)
1735 ndlp->nlp_flag |= NLP_UNREG_INP;
1736 else
1737 ndlp->nlp_flag &= ~NLP_UNREG_INP;
1738
1739 /* if ndlp had NLP_RPI_REGISTERED set, keep it */
1740 if (keep_nlp_flag & NLP_RPI_REGISTERED)
1741 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1742 else
1743 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1744
1745 spin_unlock_irq(shost->host_lock);
1746
1747 /* Set nlp_states accordingly */
1748 keep_nlp_state = new_ndlp->nlp_state;
1749 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1750
1751 /* interchange the nvme remoteport structs */
1752 keep_nrport = new_ndlp->nrport;
1753 new_ndlp->nrport = ndlp->nrport;
1754
1755 /* Move this back to NPR state */
1756 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1757 /* The new_ndlp is replacing ndlp totally, so we need
1758 * to put ndlp on UNUSED list and try to free it.
1759 */
1760 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1761 "3179 PLOGI confirm NEW: %x %x\n",
1762 new_ndlp->nlp_DID, keepDID);
1763
1764 /* Fix up the rport accordingly */
1765 rport = ndlp->rport;
1766 if (rport) {
1767 rdata = rport->dd_data;
1768 if (rdata->pnode == ndlp) {
1769 /* break the link before dropping the ref */
1770 ndlp->rport = NULL;
1771 lpfc_nlp_put(ndlp);
1772 rdata->pnode = lpfc_nlp_get(new_ndlp);
1773 new_ndlp->rport = rport;
1774 }
1775 new_ndlp->nlp_type = ndlp->nlp_type;
1776 }
1777
1778 /* Fix up the nvme rport */
1779 if (ndlp->nrport) {
1780 ndlp->nrport = NULL;
1781 lpfc_nlp_put(ndlp);
1782 }
1783
1784 /* Request release of the ndlp once both its nlp_DID and
1785 * nlp_portname fields equal 0, so an ndlp that can never be
1786 * used again does not linger on the nodelist.
1787 */
1788 if (ndlp->nlp_DID == 0) {
1789 spin_lock_irq(&phba->ndlp_lock);
1790 NLP_SET_FREE_REQ(ndlp);
1791 spin_unlock_irq(&phba->ndlp_lock);
1792 }
1793
1794 /* Two ndlps cannot have the same did on the nodelist.
1795 * Note: for this case, ndlp has a NULL WWPN so setting
1796 * the nlp_fc4_type isn't required.
1797 */
1798 ndlp->nlp_DID = keepDID;
1799 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1800 if (phba->sli_rev == LPFC_SLI_REV4 &&
1801 active_rrqs_xri_bitmap)
1802 memcpy(ndlp->active_rrqs_xri_bitmap,
1803 active_rrqs_xri_bitmap,
1804 phba->cfg_rrq_xri_bitmap_sz);
1805
1806 if (!NLP_CHK_NODE_ACT(ndlp))
1807 lpfc_drop_node(vport, ndlp);
1808 }
1809 else {
1810 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1811 "3180 PLOGI confirm SWAP: %x %x\n",
1812 new_ndlp->nlp_DID, keepDID);
1813
1814 lpfc_unreg_rpi(vport, ndlp);
1815
1816 /* Two ndlps cannot have the same did and the fc4
1817 * type must be transferred because the ndlp is in
1818 * flight.
1819 */
1820 ndlp->nlp_DID = keepDID;
1821 ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1822
1823 if (phba->sli_rev == LPFC_SLI_REV4 &&
1824 active_rrqs_xri_bitmap)
1825 memcpy(ndlp->active_rrqs_xri_bitmap,
1826 active_rrqs_xri_bitmap,
1827 phba->cfg_rrq_xri_bitmap_sz);
1828
1829 /* Since we are switching over to the new_ndlp,
1830 * reset the old ndlp state
1831 */
1832 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1833 (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1834 keep_nlp_state = NLP_STE_NPR_NODE;
1835 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1836
1837 /* Previous ndlp no longer active with nvme host transport.
1838 * Remove reference from earlier registration unless the
1839 * nvme host took care of it.
1840 */
1841 if (ndlp->nrport)
1842 lpfc_nlp_put(ndlp);
1843 ndlp->nrport = keep_nrport;
1844
1845 /* Fix up the rport accordingly */
1846 rport = ndlp->rport;
1847 if (rport) {
1848 rdata = rport->dd_data;
1849 put_node = rdata->pnode != NULL;
1850 put_rport = ndlp->rport != NULL;
1851 rdata->pnode = NULL;
1852 ndlp->rport = NULL;
1853 if (put_node)
1854 lpfc_nlp_put(ndlp);
1855 if (put_rport)
1856 put_device(&rport->dev);
1857 }
1858 }
1859 if (phba->sli_rev == LPFC_SLI_REV4 &&
1860 active_rrqs_xri_bitmap)
1861 mempool_free(active_rrqs_xri_bitmap,
1862 phba->active_rrq_pool);
1863
1864 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1865 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
1866 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
1867 new_ndlp->nlp_fc4_type);
1868
1869 return new_ndlp;
1870 }
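/* Illustrative sketch (not part of the driver): the flag-swap rule used
 * in lpfc_plogi_confirm_nport() above.  When the two ndlps exchange
 * nlp_flag values, NLP_UNREG_INP and NLP_RPI_REGISTERED must remain with
 * the node they were originally set on, because they describe that
 * node's RPI/mailbox state rather than the login being confirmed.
 * NLP_STICKY_FLAGS below is a hypothetical name for that pair; the
 * driver open-codes the same rule with if/else above.
 */
#if 0	/* example fragment only */
#define NLP_STICKY_FLAGS	(NLP_UNREG_INP | NLP_RPI_REGISTERED)

	new_ndlp->nlp_flag = (keep_nlp_flag & ~NLP_STICKY_FLAGS) |
			     (keep_new_nlp_flag & NLP_STICKY_FLAGS);
	ndlp->nlp_flag = (keep_new_nlp_flag & ~NLP_STICKY_FLAGS) |
			 (keep_nlp_flag & NLP_STICKY_FLAGS);
#endif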
1871
1872 /**
1873 * lpfc_end_rscn - Check and handle more rscn for a vport
1874 * @vport: pointer to a host virtual N_Port data structure.
1875 *
1876 * This routine checks whether more Registration State Change
1877 * Notifications (RSCNs) came in while the discovery state machine was in
1878 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1879 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1880 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1881 * handling the RSCNs.
1882 **/
1883 void
1884 lpfc_end_rscn(struct lpfc_vport *vport)
1885 {
1886 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1887
1888 if (vport->fc_flag & FC_RSCN_MODE) {
1889 /*
1890 * Check to see if more RSCNs came in while we were
1891 * processing this one.
1892 */
1893 if (vport->fc_rscn_id_cnt ||
1894 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1895 lpfc_els_handle_rscn(vport);
1896 else {
1897 spin_lock_irq(shost->host_lock);
1898 vport->fc_flag &= ~FC_RSCN_MODE;
1899 spin_unlock_irq(shost->host_lock);
1900 }
1901 }
1902 }
1903
1904 /**
1905 * lpfc_cmpl_els_rrq - Completion handler for ELS RRQs.
1906 * @phba: pointer to lpfc hba data structure.
1907 * @cmdiocb: pointer to lpfc command iocb data structure.
1908 * @rspiocb: pointer to lpfc response iocb data structure.
1909 *
1910 * This routine will call the clear rrq function to free the rrq and
1911 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1912 * exist then the clear_rrq is still called because the rrq needs to
1913 * be freed.
1914 **/
1915
1916 static void
1917 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1918 struct lpfc_iocbq *rspiocb)
1919 {
1920 struct lpfc_vport *vport = cmdiocb->vport;
1921 IOCB_t *irsp;
1922 struct lpfc_nodelist *ndlp;
1923 struct lpfc_node_rrq *rrq;
1924
1925 /* we pass cmdiocb to state machine which needs rspiocb as well */
1926 rrq = cmdiocb->context_un.rrq;
1927 cmdiocb->context_un.rsp_iocb = rspiocb;
1928
1929 irsp = &rspiocb->iocb;
1930 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1931 "RRQ cmpl: status:x%x/x%x did:x%x",
1932 irsp->ulpStatus, irsp->un.ulpWord[4],
1933 irsp->un.elsreq64.remoteID);
1934
1935 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1936 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1937 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1938 "2882 RRQ completes to NPort x%x "
1939 "with no ndlp. Data: x%x x%x x%x\n",
1940 irsp->un.elsreq64.remoteID,
1941 irsp->ulpStatus, irsp->un.ulpWord[4],
1942 irsp->ulpIoTag);
1943 goto out;
1944 }
1945
1946 /* rrq completes to NPort <nlp_DID> */
1947 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1948 "2880 RRQ completes to NPort x%x "
1949 "Data: x%x x%x x%x x%x x%x\n",
1950 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1951 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1952
1953 if (irsp->ulpStatus) {
1954 /* Check for retry */
1955 /* RRQ failed Don't print the vport to vport rjts */
1956 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1957 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1958 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1959 (phba)->pport->cfg_log_verbose & LOG_ELS)
1960 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1961 "2881 RRQ failure DID:%06X Status:"
1962 "x%x/x%x\n",
1963 ndlp->nlp_DID, irsp->ulpStatus,
1964 irsp->un.ulpWord[4]);
1965 }
1966 out:
1967 if (rrq)
1968 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1969 lpfc_els_free_iocb(phba, cmdiocb);
1970 return;
1971 }
1972 /**
1973 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1974 * @phba: pointer to lpfc hba data structure.
1975 * @cmdiocb: pointer to lpfc command iocb data structure.
1976 * @rspiocb: pointer to lpfc response iocb data structure.
1977 *
1978 * This routine is the completion callback function for issuing the Port
1979 * Login (PLOGI) command. For PLOGI completion, there must be an active
1980 * ndlp on the vport node list that matches the remote node ID from the
1981 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
1982 * ignored and command IOCB released. The PLOGI response IOCB status is
1983 * checked for error conditions. If an error status is reported, PLOGI
1984 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1985 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1986 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1987 * (DSM) is set for this PLOGI completion. Finally, it checks whether
1988 * there are additional N_Port nodes with the vport that need to perform
1989 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1990 * PLOGIs.
1991 **/
1992 static void
1993 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1994 struct lpfc_iocbq *rspiocb)
1995 {
1996 struct lpfc_vport *vport = cmdiocb->vport;
1997 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1998 IOCB_t *irsp;
1999 struct lpfc_nodelist *ndlp;
2000 struct lpfc_dmabuf *prsp;
2001 int disc;
2002
2003 /* we pass cmdiocb to state machine which needs rspiocb as well */
2004 cmdiocb->context_un.rsp_iocb = rspiocb;
2005
2006 irsp = &rspiocb->iocb;
2007 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2008 "PLOGI cmpl: status:x%x/x%x did:x%x",
2009 irsp->ulpStatus, irsp->un.ulpWord[4],
2010 irsp->un.elsreq64.remoteID);
2011
2012 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
2013 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2014 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2015 "0136 PLOGI completes to NPort x%x "
2016 "with no ndlp. Data: x%x x%x x%x\n",
2017 irsp->un.elsreq64.remoteID,
2018 irsp->ulpStatus, irsp->un.ulpWord[4],
2019 irsp->ulpIoTag);
2020 goto out;
2021 }
2022
2023 /* Since ndlp can be freed in the disc state machine, note if this node
2024 * is being used during discovery.
2025 */
2026 spin_lock_irq(shost->host_lock);
2027 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2028 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2029 spin_unlock_irq(shost->host_lock);
2030
2031 /* PLOGI completes to NPort <nlp_DID> */
2032 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2033 "0102 PLOGI completes to NPort x%06x "
2034 "Data: x%x x%x x%x x%x x%x\n",
2035 ndlp->nlp_DID, ndlp->nlp_fc4_type,
2036 irsp->ulpStatus, irsp->un.ulpWord[4],
2037 disc, vport->num_disc_nodes);
2038
2039 /* Check to see if link went down during discovery */
2040 if (lpfc_els_chk_latt(vport)) {
2041 spin_lock_irq(shost->host_lock);
2042 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2043 spin_unlock_irq(shost->host_lock);
2044 goto out;
2045 }
2046
2047 if (irsp->ulpStatus) {
2048 /* Check for retry */
2049 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2050 /* ELS command is being retried */
2051 if (disc) {
2052 spin_lock_irq(shost->host_lock);
2053 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2054 spin_unlock_irq(shost->host_lock);
2055 }
2056 goto out;
2057 }
2058 /* PLOGI failed Don't print the vport to vport rjts */
2059 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
2060 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
2061 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
2062 (phba)->pport->cfg_log_verbose & LOG_ELS)
2063 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2064 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
2065 ndlp->nlp_DID, irsp->ulpStatus,
2066 irsp->un.ulpWord[4]);
2067 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2068 if (!lpfc_error_lost_link(irsp))
2069 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2070 NLP_EVT_CMPL_PLOGI);
2071 } else {
2072 /* Good status, call state machine */
2073 prsp = list_entry(((struct lpfc_dmabuf *)
2074 cmdiocb->context2)->list.next,
2075 struct lpfc_dmabuf, list);
2076 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2077 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2078 NLP_EVT_CMPL_PLOGI);
2079 }
2080
2081 if (disc && vport->num_disc_nodes) {
2082 /* Check to see if there are more PLOGIs to be sent */
2083 lpfc_more_plogi(vport);
2084
2085 if (vport->num_disc_nodes == 0) {
2086 spin_lock_irq(shost->host_lock);
2087 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2088 spin_unlock_irq(shost->host_lock);
2089
2090 lpfc_can_disctmo(vport);
2091 lpfc_end_rscn(vport);
2092 }
2093 }
2094
2095 out:
2096 lpfc_els_free_iocb(phba, cmdiocb);
2097 return;
2098 }
2099
2100 /**
2101 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
2102 * @vport: pointer to a host virtual N_Port data structure.
2103 * @did: destination port identifier.
2104 * @retry: number of retries to the command IOCB.
2105 *
2106 * This routine issues a Port Login (PLOGI) command to a remote N_Port
2107 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
2108 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
2109 * This routine constructs the proper fields of the PLOGI IOCB and invokes
2110 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
2111 *
2112 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2113 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2114 * will be stored into the context1 field of the IOCB for the completion
2115 * callback function to the PLOGI ELS command.
2116 *
2117 * Return code
2118 * 0 - Successfully issued a plogi for @vport
2119 * 1 - failed to issue a plogi for @vport
2120 **/
2121 int
2122 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2123 {
2124 struct lpfc_hba *phba = vport->phba;
2125 struct Scsi_Host *shost;
2126 struct serv_parm *sp;
2127 struct lpfc_nodelist *ndlp;
2128 struct lpfc_iocbq *elsiocb;
2129 uint8_t *pcmd;
2130 uint16_t cmdsize;
2131 int ret;
2132
2133 ndlp = lpfc_findnode_did(vport, did);
2134
2135 if (ndlp) {
2136 /* Defer the processing of the issue PLOGI until after the
2137 * outstanding UNREG_RPI mbox command completes, unless we
2138 * are going offline. This logic does not apply for Fabric DIDs
2139 */
2140 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2141 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
2142 !(vport->fc_flag & FC_OFFLINE_MODE)) {
2143 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2144 "4110 Issue PLOGI x%x deferred "
2145 "on NPort x%x rpi x%x Data: x%px\n",
2146 ndlp->nlp_defer_did, ndlp->nlp_DID,
2147 ndlp->nlp_rpi, ndlp);
2148
2149 /* We can only defer 1st PLOGI */
2150 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
2151 ndlp->nlp_defer_did = did;
2152 return 0;
2153 }
2154 if (!NLP_CHK_NODE_ACT(ndlp))
2155 ndlp = NULL;
2156 }
2157
2158 /* If ndlp is not NULL, we will bump the reference count on it */
2159 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
2160 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2161 ELS_CMD_PLOGI);
2162 if (!elsiocb)
2163 return 1;
2164
2165 shost = lpfc_shost_from_vport(vport);
2166 spin_lock_irq(shost->host_lock);
2167 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
2168 spin_unlock_irq(shost->host_lock);
2169
2170 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2171
2172 /* For PLOGI request, remainder of payload is service parameters */
2173 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
2174 pcmd += sizeof(uint32_t);
2175 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2176 sp = (struct serv_parm *) pcmd;
2177
2178 /*
2179 * If we are an N_Port connected to a Fabric, fix up parameters so logins
2180 * to devices on remote loops work.
2181 */
2182 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
2183 sp->cmn.altBbCredit = 1;
2184
2185 if (sp->cmn.fcphLow < FC_PH_4_3)
2186 sp->cmn.fcphLow = FC_PH_4_3;
2187
2188 if (sp->cmn.fcphHigh < FC_PH3)
2189 sp->cmn.fcphHigh = FC_PH3;
2190
2191 sp->cmn.valid_vendor_ver_level = 0;
2192 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
2193 sp->cmn.bbRcvSizeMsb &= 0xF;
2194
2195 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2196 "Issue PLOGI: did:x%x",
2197 did, 0, 0);
2198
2199 /* If our firmware supports this feature, convey that
2200 * information to the target using the vendor specific field.
2201 */
2202 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
2203 sp->cmn.valid_vendor_ver_level = 1;
2204 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
2205 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
2206 }
2207
2208 phba->fc_stat.elsXmitPLOGI++;
2209 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
2210 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2211
2212 if (ret == IOCB_ERROR) {
2213 lpfc_els_free_iocb(phba, elsiocb);
2214 return 1;
2215 }
2216 return 0;
2217 }
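/* Illustrative sketch (not part of the driver): a typical discovery-side
 * caller of lpfc_issue_els_plogi().  The DID would come from a name
 * server or RSCN lookup; a non-zero return means no IOCB went out and
 * the caller handles any node-state recovery itself.  The helper name
 * and the "xxxx" message number are hypothetical.
 */
#if 0	/* example only */
static void example_start_plogi(struct lpfc_vport *vport, uint32_t did)
{
	/* lpfc_prep_els_iocb() takes the ndlp reference for the command */
	if (lpfc_issue_els_plogi(vport, did, 0))
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "xxxx PLOGI to x%x not issued\n", did);
}
#endif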
2218
2219 /**
2220 * lpfc_cmpl_els_prli - Completion callback function for prli
2221 * @phba: pointer to lpfc hba data structure.
2222 * @cmdiocb: pointer to lpfc command iocb data structure.
2223 * @rspiocb: pointer to lpfc response iocb data structure.
2224 *
2225 * This routine is the completion callback function for a Process Login
2226 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2227 * status. If there is error status reported, PRLI retry shall be attempted
2228 * by invoking the lpfc_els_retry() routine. Otherwise, the state
2229 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2230 * ndlp to mark the PRLI completion.
2231 **/
2232 static void
2233 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2234 struct lpfc_iocbq *rspiocb)
2235 {
2236 struct lpfc_vport *vport = cmdiocb->vport;
2237 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2238 IOCB_t *irsp;
2239 struct lpfc_nodelist *ndlp;
2240 char *mode;
2241 u32 loglevel;
2242
2243 /* we pass cmdiocb to state machine which needs rspiocb as well */
2244 cmdiocb->context_un.rsp_iocb = rspiocb;
2245
2246 irsp = &(rspiocb->iocb);
2247 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2248 spin_lock_irq(shost->host_lock);
2249 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2250
2251 /* Driver supports multiple FC4 types. Counters matter. */
2252 vport->fc_prli_sent--;
2253 ndlp->fc4_prli_sent--;
2254 spin_unlock_irq(shost->host_lock);
2255
2256 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2257 "PRLI cmpl: status:x%x/x%x did:x%x",
2258 irsp->ulpStatus, irsp->un.ulpWord[4],
2259 ndlp->nlp_DID);
2260
2261 /* PRLI completes to NPort <nlp_DID> */
2262 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2263 "0103 PRLI completes to NPort x%06x "
2264 "Data: x%x x%x x%x x%x\n",
2265 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2266 vport->num_disc_nodes, ndlp->fc4_prli_sent);
2267
2268 /* Check to see if link went down during discovery */
2269 if (lpfc_els_chk_latt(vport))
2270 goto out;
2271
2272 if (irsp->ulpStatus) {
2273 /* Check for retry */
2274 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2275 /* ELS command is being retried */
2276 goto out;
2277 }
2278
2279 /* If we don't send GFT_ID to Fabric, a PRLI error
2280 * could be expected.
2281 */
2282 if ((vport->fc_flag & FC_FABRIC) ||
2283 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) {
2284 mode = KERN_ERR;
2285 loglevel = LOG_TRACE_EVENT;
2286 } else {
2287 mode = KERN_INFO;
2288 loglevel = LOG_ELS;
2289 }
2290
2291 /* PRLI failed */
2292 lpfc_printf_vlog(vport, mode, loglevel,
2293 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2294 "data: x%x\n",
2295 ndlp->nlp_DID, irsp->ulpStatus,
2296 irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
2297
2298 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2299 if (lpfc_error_lost_link(irsp))
2300 goto out;
2301 else
2302 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2303 NLP_EVT_CMPL_PRLI);
2304 } else {
2305 /* Good status, call state machine. However, if another
2306 * PRLI is outstanding, don't call the state machine
2307 * because final disposition to Mapped or Unmapped is
2308 * completed there.
2309 */
2310 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2311 NLP_EVT_CMPL_PRLI);
2312 }
2313
2314 out:
2315 lpfc_els_free_iocb(phba, cmdiocb);
2316 return;
2317 }
2318
2319 /**
2320 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2321 * @vport: pointer to a host virtual N_Port data structure.
2322 * @ndlp: pointer to a node-list data structure.
2323 * @retry: number of retries to the command IOCB.
2324 *
2325 * This routine issues a Process Login (PRLI) ELS command for the
2326 * @vport. The PRLI service parameters are set up in the payload of the
2327 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2328 * is put to the IOCB completion callback func field before invoking the
2329 * routine lpfc_sli_issue_iocb() to send out PRLI command.
2330 *
2331 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2332 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2333 * will be stored into the context1 field of the IOCB for the completion
2334 * callback function to the PRLI ELS command.
2335 *
2336 * Return code
2337 * 0 - successfully issued prli iocb command for @vport
2338 * 1 - failed to issue prli iocb command for @vport
2339 **/
2340 int
2341 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2342 uint8_t retry)
2343 {
2344 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2345 struct lpfc_hba *phba = vport->phba;
2346 PRLI *npr;
2347 struct lpfc_nvme_prli *npr_nvme;
2348 struct lpfc_iocbq *elsiocb;
2349 uint8_t *pcmd;
2350 uint16_t cmdsize;
2351 u32 local_nlp_type, elscmd;
2352
2353 /*
2354 * If we are in RSCN mode, the FC4 types supported from a
2355 * previous GFT_ID command may not be accurate. So, if we
2356 * are a NVME Initiator, always look for the possibility of
2357 * the remote NPort being an NVME Target.
2358 */
2359 if (phba->sli_rev == LPFC_SLI_REV4 &&
2360 vport->fc_flag & FC_RSCN_MODE &&
2361 vport->nvmei_support)
2362 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
2363 local_nlp_type = ndlp->nlp_fc4_type;
2364
2365 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2366 * fields here before any of them can complete.
2367 */
2368 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2369 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2370 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2371 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
2372 ndlp->nvme_fb_size = 0;
2373
2374 send_next_prli:
2375 if (local_nlp_type & NLP_FC4_FCP) {
2376 /* Payload is 4 + 16 = 20 (0x14) bytes. */
2377 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2378 elscmd = ELS_CMD_PRLI;
2379 } else if (local_nlp_type & NLP_FC4_NVME) {
2380 /* Payload is 4 + 20 = 24 (0x18) bytes. */
2381 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
2382 elscmd = ELS_CMD_NVMEPRLI;
2383 } else {
2384 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2385 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2386 ndlp->nlp_fc4_type, ndlp->nlp_DID);
2387 return 1;
2388 }
2389
2390 /* SLI3 ports don't support NVME. If this rport is a strict NVME
2391 * FC4 type, implicitly LOGO.
2392 */
2393 if (phba->sli_rev == LPFC_SLI_REV3 &&
2394 ndlp->nlp_fc4_type == NLP_FC4_NVME) {
2395 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2396 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2397 ndlp->nlp_type);
2398 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
2399 return 1;
2400 }
2401
2402 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2403 ndlp->nlp_DID, elscmd);
2404 if (!elsiocb)
2405 return 1;
2406
2407 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2408
2409 /* For PRLI request, remainder of payload is service parameters */
2410 memset(pcmd, 0, cmdsize);
2411
2412 if (local_nlp_type & NLP_FC4_FCP) {
2413 /* Remainder of payload is FCP PRLI parameter page.
2414 * Note: this data structure is defined as
2415 * BE/LE in the structure definition so no
2416 * byte swap call is made.
2417 */
2418 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
2419 pcmd += sizeof(uint32_t);
2420 npr = (PRLI *)pcmd;
2421
2422 /*
2423 * If our firmware version is 3.20 or later,
2424 * set the following bits for FC-TAPE support.
2425 */
2426 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2427 npr->ConfmComplAllowed = 1;
2428 npr->Retry = 1;
2429 npr->TaskRetryIdReq = 1;
2430 }
2431 npr->estabImagePair = 1;
2432 npr->readXferRdyDis = 1;
2433 if (vport->cfg_first_burst_size)
2434 npr->writeXferRdyDis = 1;
2435
2436 /* For FCP support */
2437 npr->prliType = PRLI_FCP_TYPE;
2438 npr->initiatorFunc = 1;
2439 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
2440
2441 /* Remove FCP type - processed. */
2442 local_nlp_type &= ~NLP_FC4_FCP;
2443 } else if (local_nlp_type & NLP_FC4_NVME) {
2444 /* Remainder of payload is NVME PRLI parameter page.
2445 * This data structure is the newer definition that
2446 * uses bf macros so a byte swap is required.
2447 */
2448 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
2449 pcmd += sizeof(uint32_t);
2450 npr_nvme = (struct lpfc_nvme_prli *)pcmd;
2451 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
2452 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
2453 if (phba->nsler) {
2454 bf_set(prli_nsler, npr_nvme, 1);
2455 bf_set(prli_conf, npr_nvme, 1);
2456 }
2457
2458 /* Only initiators request first burst. */
2459 if ((phba->cfg_nvme_enable_fb) &&
2460 !phba->nvmet_support)
2461 bf_set(prli_fba, npr_nvme, 1);
2462
2463 if (phba->nvmet_support) {
2464 bf_set(prli_tgt, npr_nvme, 1);
2465 bf_set(prli_disc, npr_nvme, 1);
2466 } else {
2467 bf_set(prli_init, npr_nvme, 1);
2468 bf_set(prli_conf, npr_nvme, 1);
2469 }
2470
2471 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
2472 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
2473 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
2474
2475 /* Remove NVME type - processed. */
2476 local_nlp_type &= ~NLP_FC4_NVME;
2477 }
2478
2479 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2480 "Issue PRLI: did:x%x",
2481 ndlp->nlp_DID, 0, 0);
2482
2483 phba->fc_stat.elsXmitPRLI++;
2484 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2485 spin_lock_irq(shost->host_lock);
2486 ndlp->nlp_flag |= NLP_PRLI_SND;
2487
2488 /* The vport counters are used for lpfc_scan_finished, but
2489 * the ndlp is used to track outstanding PRLIs for different
2490 * FC4 types.
2491 */
2492 vport->fc_prli_sent++;
2493 ndlp->fc4_prli_sent++;
2494 spin_unlock_irq(shost->host_lock);
2495 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2496 IOCB_ERROR) {
2497 spin_lock_irq(shost->host_lock);
2498 ndlp->nlp_flag &= ~NLP_PRLI_SND;
2499 spin_unlock_irq(shost->host_lock);
2500 lpfc_els_free_iocb(phba, elsiocb);
2501 return 1;
2502 }
2503
2504
2505 /* The driver supports 2 FC4 types. Make sure
2506 * a PRLI is issued for all types before exiting.
2507 */
2508 if (phba->sli_rev == LPFC_SLI_REV4 &&
2509 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
2510 goto send_next_prli;
2511
2512 return 0;
2513 }
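/* Illustrative sketch (not part of the driver): why only the NVME PRLI
 * page is byte-swapped above.  The FCP PRLI structure is declared with
 * per-endianness bitfields and is written directly, while the NVME page
 * is built with bf_set() in CPU order and must be converted to
 * big-endian wire order before transmit.  The fragment repeats just that
 * step from lpfc_issue_els_prli().
 */
#if 0	/* example fragment only */
	bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
	bf_set(prli_init, npr_nvme, 1);
	/* convert the bf_set()-built words to wire (big-endian) order */
	npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
	npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
#endif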
2514
2515 /**
2516 * lpfc_rscn_disc - Perform rscn discovery for a vport
2517 * @vport: pointer to a host virtual N_Port data structure.
2518 *
2519 * This routine performs Registration State Change Notification (RSCN)
2520 * discovery for a @vport. If the @vport's node port recovery count is not
2521 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2522 * the nodes that need recovery. If none of the PLOGI were needed through
2523 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2524 * invoked to check and handle possible more RSCN came in during the period
2525 * of processing the current ones.
2526 **/
2527 static void
2528 lpfc_rscn_disc(struct lpfc_vport *vport)
2529 {
2530 lpfc_can_disctmo(vport);
2531
2532 /* RSCN discovery */
2533 /* go thru NPR nodes and issue ELS PLOGIs */
2534 if (vport->fc_npr_cnt)
2535 if (lpfc_els_disc_plogi(vport))
2536 return;
2537
2538 lpfc_end_rscn(vport);
2539 }
2540
2541 /**
2542 * lpfc_adisc_done - Complete the adisc phase of discovery
2543 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2544 *
2545 * This function is called when the final ADISC is completed during discovery.
2546 * This function handles clearing link attention or issuing reg_vpi depending
2547 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2548 * discovery.
2549 * This function is called with no locks held.
2550 **/
2551 static void
2552 lpfc_adisc_done(struct lpfc_vport *vport)
2553 {
2554 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2555 struct lpfc_hba *phba = vport->phba;
2556
2557 /*
2558 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2559 * and continue discovery.
2560 */
2561 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2562 !(vport->fc_flag & FC_RSCN_MODE) &&
2563 (phba->sli_rev < LPFC_SLI_REV4)) {
2564 /* The ADISCs are complete. Doesn't matter if they
2565 * succeeded or failed because the ADISC completion
2566 * routine guarantees to call the state machine and
2567 * the RPI is either unregistered (failed ADISC response)
2568 * or the RPI is still valid and the node is marked
2569 * mapped for a target. The exchanges should be in the
2570 * correct state. This code is specific to SLI3.
2571 */
2572 lpfc_issue_clear_la(phba, vport);
2573 lpfc_issue_reg_vpi(phba, vport);
2574 return;
2575 }
2576 /*
2577 * For SLI2, we need to set port_state to READY
2578 * and continue discovery.
2579 */
2580 if (vport->port_state < LPFC_VPORT_READY) {
2581 /* If we get here, there is nothing to ADISC */
2582 lpfc_issue_clear_la(phba, vport);
2583 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2584 vport->num_disc_nodes = 0;
2585 /* go thru NPR list, issue ELS PLOGIs */
2586 if (vport->fc_npr_cnt)
2587 lpfc_els_disc_plogi(vport);
2588 if (!vport->num_disc_nodes) {
2589 spin_lock_irq(shost->host_lock);
2590 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2591 spin_unlock_irq(shost->host_lock);
2592 lpfc_can_disctmo(vport);
2593 lpfc_end_rscn(vport);
2594 }
2595 }
2596 vport->port_state = LPFC_VPORT_READY;
2597 } else
2598 lpfc_rscn_disc(vport);
2599 }
2600
2601 /**
2602 * lpfc_more_adisc - Issue more adisc as needed
2603 * @vport: pointer to a host virtual N_Port data structure.
2604 *
2605 * This routine determines whether there are more ndlps on a @vport
2606 * node list that need to have Address Discover (ADISC) issued. If so, it will
2607 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2608 * remaining nodes which need to have ADISC sent.
2609 **/
2610 void
2611 lpfc_more_adisc(struct lpfc_vport *vport)
2612 {
2613 if (vport->num_disc_nodes)
2614 vport->num_disc_nodes--;
2615 /* Continue discovery with <num_disc_nodes> ADISCs to go */
2616 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2617 "0210 Continue discovery with %d ADISCs to go "
2618 "Data: x%x x%x x%x\n",
2619 vport->num_disc_nodes, vport->fc_adisc_cnt,
2620 vport->fc_flag, vport->port_state);
2621 /* Check to see if there are more ADISCs to be sent */
2622 if (vport->fc_flag & FC_NLP_MORE) {
2623 lpfc_set_disctmo(vport);
2624 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2625 lpfc_els_disc_adisc(vport);
2626 }
2627 if (!vport->num_disc_nodes)
2628 lpfc_adisc_done(vport);
2629 return;
2630 }
2631
2632 /**
2633 * lpfc_cmpl_els_adisc - Completion callback function for adisc
2634 * @phba: pointer to lpfc hba data structure.
2635 * @cmdiocb: pointer to lpfc command iocb data structure.
2636 * @rspiocb: pointer to lpfc response iocb data structure.
2637 *
2638 * This routine is the completion function for issuing the Address Discover
2639 * (ADISC) command. It first checks to see whether link went down during
2640 * the discovery process. If so, the node will be marked as node port
2641 * recovery for issuing discover IOCB by the link attention handler and
2642 * exit. Otherwise, the response status is checked. If error was reported
2643 * in the response status, the ADISC command shall be retried by invoking
2644 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2645 * the response status, the state machine is invoked to set transition
2646 * with respect to NLP_EVT_CMPL_ADISC event.
2647 **/
2648 static void
2649 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2650 struct lpfc_iocbq *rspiocb)
2651 {
2652 struct lpfc_vport *vport = cmdiocb->vport;
2653 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2654 IOCB_t *irsp;
2655 struct lpfc_nodelist *ndlp;
2656 int disc;
2657
2658 /* we pass cmdiocb to state machine which needs rspiocb as well */
2659 cmdiocb->context_un.rsp_iocb = rspiocb;
2660
2661 irsp = &(rspiocb->iocb);
2662 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2663
2664 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2665 "ADISC cmpl: status:x%x/x%x did:x%x",
2666 irsp->ulpStatus, irsp->un.ulpWord[4],
2667 ndlp->nlp_DID);
2668
2669 /* Since ndlp can be freed in the disc state machine, note if this node
2670 * is being used during discovery.
2671 */
2672 spin_lock_irq(shost->host_lock);
2673 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2674 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2675 spin_unlock_irq(shost->host_lock);
2676 /* ADISC completes to NPort <nlp_DID> */
2677 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2678 "0104 ADISC completes to NPort x%x "
2679 "Data: x%x x%x x%x x%x x%x\n",
2680 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2681 irsp->ulpTimeout, disc, vport->num_disc_nodes);
2682 /* Check to see if link went down during discovery */
2683 if (lpfc_els_chk_latt(vport)) {
2684 spin_lock_irq(shost->host_lock);
2685 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2686 spin_unlock_irq(shost->host_lock);
2687 goto out;
2688 }
2689
2690 if (irsp->ulpStatus) {
2691 /* Check for retry */
2692 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2693 /* ELS command is being retried */
2694 if (disc) {
2695 spin_lock_irq(shost->host_lock);
2696 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2697 spin_unlock_irq(shost->host_lock);
2698 lpfc_set_disctmo(vport);
2699 }
2700 goto out;
2701 }
2702 /* ADISC failed */
2703 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2704 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2705 ndlp->nlp_DID, irsp->ulpStatus,
2706 irsp->un.ulpWord[4]);
2707 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2708 if (!lpfc_error_lost_link(irsp))
2709 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2710 NLP_EVT_CMPL_ADISC);
2711 } else
2712 /* Good status, call state machine */
2713 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2714 NLP_EVT_CMPL_ADISC);
2715
2716 /* Check to see if there are more ADISCs to be sent */
2717 if (disc && vport->num_disc_nodes)
2718 lpfc_more_adisc(vport);
2719 out:
2720 lpfc_els_free_iocb(phba, cmdiocb);
2721 return;
2722 }
2723
2724 /**
2725 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
2726 * @vport: pointer to a virtual N_Port data structure.
2727 * @ndlp: pointer to a node-list data structure.
2728 * @retry: number of retries to the command IOCB.
2729 *
2730 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2731 * @vport. It prepares the payload of the ADISC ELS command, updates the
2732 * state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2733 * to issue the ADISC ELS command.
2734 *
2735 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2736 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2737 * will be stored into the context1 field of the IOCB for the completion
2738 * callback function to the ADISC ELS command.
2739 *
2740 * Return code
2741 * 0 - successfully issued adisc
2742 * 1 - failed to issue adisc
2743 **/
2744 int
2745 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2746 uint8_t retry)
2747 {
2748 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2749 struct lpfc_hba *phba = vport->phba;
2750 ADISC *ap;
2751 struct lpfc_iocbq *elsiocb;
2752 uint8_t *pcmd;
2753 uint16_t cmdsize;
2754
2755 cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2756 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2757 ndlp->nlp_DID, ELS_CMD_ADISC);
2758 if (!elsiocb)
2759 return 1;
2760
2761 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2762
2763 /* For ADISC request, remainder of payload is service parameters */
2764 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2765 pcmd += sizeof(uint32_t);
2766
2767 /* Fill in ADISC payload */
2768 ap = (ADISC *) pcmd;
2769 ap->hardAL_PA = phba->fc_pref_ALPA;
2770 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2771 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2772 ap->DID = be32_to_cpu(vport->fc_myDID);
2773
2774 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2775 "Issue ADISC: did:x%x",
2776 ndlp->nlp_DID, 0, 0);
2777
2778 phba->fc_stat.elsXmitADISC++;
2779 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2780 spin_lock_irq(shost->host_lock);
2781 ndlp->nlp_flag |= NLP_ADISC_SND;
2782 spin_unlock_irq(shost->host_lock);
2783 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2784 IOCB_ERROR) {
2785 spin_lock_irq(shost->host_lock);
2786 ndlp->nlp_flag &= ~NLP_ADISC_SND;
2787 spin_unlock_irq(shost->host_lock);
2788 lpfc_els_free_iocb(phba, elsiocb);
2789 return 1;
2790 }
2791 return 0;
2792 }
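/* Illustrative sketch (not part of the driver): ADISC is preferred over
 * PLOGI when a node's login is still registered after a link event, so
 * only address validation is needed.  A hypothetical caller choosing
 * between the two might look like this (NLP_NPR_ADISC marks nodes
 * eligible for ADISC-based recovery).
 */
#if 0	/* example fragment only */
	if (ndlp->nlp_flag & NLP_NPR_ADISC)
		rc = lpfc_issue_els_adisc(vport, ndlp, 0);
	else
		rc = lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
#endif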
2793
2794 /**
2795 * lpfc_cmpl_els_logo - Completion callback function for logo
2796 * @phba: pointer to lpfc hba data structure.
2797 * @cmdiocb: pointer to lpfc command iocb data structure.
2798 * @rspiocb: pointer to lpfc response iocb data structure.
2799 *
2800 * This routine is the completion function for issuing the ELS Logout (LOGO)
2801 * command. If no error status was reported from the LOGO response, the
2802 * state machine of the associated ndlp shall be invoked for transition with
2803 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2804 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2805 **/
2806 static void
2807 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2808 struct lpfc_iocbq *rspiocb)
2809 {
2810 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2811 struct lpfc_vport *vport = ndlp->vport;
2812 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2813 IOCB_t *irsp;
2814 struct lpfcMboxq *mbox;
2815 unsigned long flags;
2816 uint32_t skip_recovery = 0;
2817
2818 /* we pass cmdiocb to state machine which needs rspiocb as well */
2819 cmdiocb->context_un.rsp_iocb = rspiocb;
2820
2821 irsp = &(rspiocb->iocb);
2822 spin_lock_irq(shost->host_lock);
2823 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2824 spin_unlock_irq(shost->host_lock);
2825
2826 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2827 "LOGO cmpl: status:x%x/x%x did:x%x",
2828 irsp->ulpStatus, irsp->un.ulpWord[4],
2829 ndlp->nlp_DID);
2830
2831 /* LOGO completes to NPort <nlp_DID> */
2832 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2833 "0105 LOGO completes to NPort x%x "
2834 "Data: x%x x%x x%x x%x\n",
2835 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2836 irsp->ulpTimeout, vport->num_disc_nodes);
2837
2838 if (lpfc_els_chk_latt(vport)) {
2839 skip_recovery = 1;
2840 goto out;
2841 }
2842
2843 /* Check to see if link went down during discovery */
2844 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2845 /* NLP_EVT_DEVICE_RM should unregister the RPI
2846 * which should abort all outstanding IOs.
2847 */
2848 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2849 NLP_EVT_DEVICE_RM);
2850 skip_recovery = 1;
2851 goto out;
2852 }
2853
2854 /* The LOGO will not be retried on failure. A LOGO was
2855 * issued to the remote rport and an ACC, an RJT, or no answer are
2856 * all acceptable. Note the failure and move forward with
2857 * discovery. The PLOGI will retry.
2858 */
2859 if (irsp->ulpStatus) {
2860 /* LOGO failed */
2861 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2862 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
2863 ndlp->nlp_DID, irsp->ulpStatus,
2864 irsp->un.ulpWord[4]);
2865 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2866 if (lpfc_error_lost_link(irsp)) {
2867 skip_recovery = 1;
2868 goto out;
2869 }
2870 }
2871
2872 /* Call state machine. This will unregister the rpi if needed. */
2873 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2874
2875 out:
2876 lpfc_els_free_iocb(phba, cmdiocb);
2877 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2878 if ((vport->fc_flag & FC_PT2PT) &&
2879 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
2880 phba->pport->fc_myDID = 0;
2881
2882 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
2883 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
2884 if (phba->nvmet_support)
2885 lpfc_nvmet_update_targetport(phba);
2886 else
2887 lpfc_nvme_update_localport(phba->pport);
2888 }
2889
2890 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2891 if (mbox) {
2892 lpfc_config_link(phba, mbox);
2893 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2894 mbox->vport = vport;
2895 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2896 MBX_NOT_FINISHED) {
2897 mempool_free(mbox, phba->mbox_mem_pool);
2898 skip_recovery = 1;
2899 }
2900 }
2901 }
2902
2903 /*
2904 * If the node is a target, the handling attempts to recover the port.
2905 * For any other port type, the rpi is unregistered as an implicit
2906 * LOGO.
2907 */
2908 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
2909 skip_recovery == 0) {
2910 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2911 spin_lock_irqsave(shost->host_lock, flags);
2912 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2913 spin_unlock_irqrestore(shost->host_lock, flags);
2914
2915 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2916 "3187 LOGO completes to NPort x%x: Start "
2917 "Recovery Data: x%x x%x x%x x%x\n",
2918 ndlp->nlp_DID, irsp->ulpStatus,
2919 irsp->un.ulpWord[4], irsp->ulpTimeout,
2920 vport->num_disc_nodes);
2921 lpfc_disc_start(vport);
2922 }
2923 return;
2924 }
2925
2926 /**
2927 * lpfc_issue_els_logo - Issue a logo to a node on a vport
2928 * @vport: pointer to a virtual N_Port data structure.
2929 * @ndlp: pointer to a node-list data structure.
2930 * @retry: number of retries to the command IOCB.
2931 *
2932 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2933 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2934 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2935 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2936 *
2937 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2938 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2939 * will be stored into the context1 field of the IOCB for the completion
2940 * callback function to the LOGO ELS command.
2941 *
2942 * Callers of this routine are expected to unregister the RPI first.
2943 *
2944 * Return code
2945 * 0 - successfully issued logo
2946 * 1 - failed to issue logo
2947 **/
2948 int
2949 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2950 uint8_t retry)
2951 {
2952 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2953 struct lpfc_hba *phba = vport->phba;
2954 struct lpfc_iocbq *elsiocb;
2955 uint8_t *pcmd;
2956 uint16_t cmdsize;
2957 int rc;
2958
2959 spin_lock_irq(shost->host_lock);
2960 if (ndlp->nlp_flag & NLP_LOGO_SND) {
2961 spin_unlock_irq(shost->host_lock);
2962 return 0;
2963 }
2964 spin_unlock_irq(shost->host_lock);
2965
2966 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2967 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2968 ndlp->nlp_DID, ELS_CMD_LOGO);
2969 if (!elsiocb)
2970 return 1;
2971
2972 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2973 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
2974 pcmd += sizeof(uint32_t);
2975
2976 /* Fill in LOGO payload */
2977 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
2978 pcmd += sizeof(uint32_t);
2979 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
2980
2981 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2982 "Issue LOGO: did:x%x",
2983 ndlp->nlp_DID, 0, 0);
2984
2985 phba->fc_stat.elsXmitLOGO++;
2986 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2987 spin_lock_irq(shost->host_lock);
2988 ndlp->nlp_flag |= NLP_LOGO_SND;
2989 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2990 spin_unlock_irq(shost->host_lock);
2991 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2992 if (rc == IOCB_ERROR) {
2993 spin_lock_irq(shost->host_lock);
2994 ndlp->nlp_flag &= ~NLP_LOGO_SND;
2995 spin_unlock_irq(shost->host_lock);
2996 lpfc_els_free_iocb(phba, elsiocb);
2997 return 1;
2998 }
2999
3000 spin_lock_irq(shost->host_lock);
3001 ndlp->nlp_prev_state = ndlp->nlp_state;
3002 spin_unlock_irq(shost->host_lock);
3003 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3004 return 0;
3005 }
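/* Illustrative sketch (not part of the driver): as the header comment
 * notes, callers are expected to unregister the RPI before sending the
 * LOGO so outstanding exchanges with the remote port are cleaned up
 * first.  The ordering, with a hypothetical "xxxx" message number, is
 * simply:
 */
#if 0	/* example fragment only */
	lpfc_unreg_rpi(vport, ndlp);	/* implicit logout of the RPI */
	if (lpfc_issue_els_logo(vport, ndlp, 0))
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "xxxx LOGO to x%x not issued\n",
				 ndlp->nlp_DID);
#endif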
3006
3007 /**
3008 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
3009 * @phba: pointer to lpfc hba data structure.
3010 * @cmdiocb: pointer to lpfc command iocb data structure.
3011 * @rspiocb: pointer to lpfc response iocb data structure.
3012 *
3013 * This routine is a generic completion callback function for ELS commands.
3014 * Specifically, it is the callback function which does not need to perform
3015 * any command specific operations. It is currently used by the ELS command
3016 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
3017 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
3018 * Other than certain debug loggings, this callback function simply invokes the
3019 * lpfc_els_chk_latt() routine to check whether link went down during the
3020 * discovery process.
3021 **/
3022 static void
3023 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3024 struct lpfc_iocbq *rspiocb)
3025 {
3026 struct lpfc_vport *vport = cmdiocb->vport;
3027 IOCB_t *irsp;
3028
3029 irsp = &rspiocb->iocb;
3030
3031 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3032 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3033 irsp->ulpStatus, irsp->un.ulpWord[4],
3034 irsp->un.elsreq64.remoteID);
3035
3036 /* ELS cmd tag <ulpIoTag> completes */
3037 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3038 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
3039 irsp->ulpIoTag, irsp->ulpStatus,
3040 irsp->un.ulpWord[4], irsp->ulpTimeout);
3041
3042 /* Check to see if link went down during discovery */
3043 lpfc_els_chk_latt(vport);
3044 lpfc_els_free_iocb(phba, cmdiocb);
3045 }
3046
3047 /**
3048 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
3049 * @phba: pointer to lpfc hba data structure.
3050 * @cmdiocb: pointer to lpfc command iocb data structure.
3051 * @rspiocb: pointer to lpfc response iocb data structure.
3052 *
3053 * This routine is a generic completion callback function for Discovery ELS cmd.
3054 * Currently used by the ELS command issuing routines for the ELS State Change
3055 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
3056 * These commands will be retried once only for ELS timeout errors.
3057 **/
3058 static void
3059 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3060 struct lpfc_iocbq *rspiocb)
3061 {
3062 struct lpfc_vport *vport = cmdiocb->vport;
3063 IOCB_t *irsp;
3064 struct lpfc_els_rdf_rsp *prdf;
3065 struct lpfc_dmabuf *pcmd, *prsp;
3066 u32 *pdata;
3067 u32 cmd;
3068
3069 irsp = &rspiocb->iocb;
3070
3071 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3072 "ELS cmd cmpl: status:x%x/x%x did:x%x",
3073 irsp->ulpStatus, irsp->un.ulpWord[4],
3074 irsp->un.elsreq64.remoteID);
3075 /* ELS cmd tag <ulpIoTag> completes */
3076 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3077 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
3078 "x%x\n",
3079 irsp->ulpIoTag, irsp->ulpStatus,
3080 irsp->un.ulpWord[4], irsp->ulpTimeout,
3081 cmdiocb->retry);
3082
3083 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
3084 if (!pcmd)
3085 goto out;
3086
3087 pdata = (u32 *)pcmd->virt;
3088 if (!pdata)
3089 goto out;
3090 cmd = *pdata;
3091
3092 /* Only 1 retry for ELS Timeout only */
3093 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
3094 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3095 IOERR_SEQUENCE_TIMEOUT)) {
3096 cmdiocb->retry++;
3097 if (cmdiocb->retry <= 1) {
3098 switch (cmd) {
3099 case ELS_CMD_SCR:
3100 lpfc_issue_els_scr(vport, cmdiocb->retry);
3101 break;
3102 case ELS_CMD_RDF:
3103 cmdiocb->context1 = NULL; /* save ndlp refcnt */
3104 lpfc_issue_els_rdf(vport, cmdiocb->retry);
3105 break;
3106 }
3107 goto out;
3108 }
3109 phba->fc_stat.elsRetryExceeded++;
3110 }
3111 if (irsp->ulpStatus) {
3112 /* ELS discovery cmd completes with error */
3113 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
3114 "4203 ELS cmd x%x error: x%x x%X\n", cmd,
3115 irsp->ulpStatus, irsp->un.ulpWord[4]);
3116 goto out;
3117 }
3118
3119 /* The RDF response doesn't have any impact on the running driver
3120 * but the notification descriptors are dumped here for support.
3121 */
3122 if (cmd == ELS_CMD_RDF) {
3123 int i;
3124
3125 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3126 if (!prsp)
3127 goto out;
3128
3129 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
3130 if (!prdf)
3131 goto out;
3132
3133 for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
3134 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
3135 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3136 "4677 Fabric RDF Notification Grant Data: "
3137 "0x%08x\n",
3138 be32_to_cpu(
3139 prdf->reg_d1.desc_tags[i]));
3140 }
3141
3142 out:
3143 /* Check to see if link went down during discovery */
3144 lpfc_els_chk_latt(vport);
3145 lpfc_els_free_iocb(phba, cmdiocb);
3146 return;
3147 }
3148
3149 /**
3150 * lpfc_issue_els_scr - Issue a scr to a node on a vport
3151 * @vport: pointer to a host virtual N_Port data structure.
3152 * @retry: retry counter for the command IOCB.
3153 *
3154 * This routine issues a State Change Request (SCR) to a fabric node
3155 * on a @vport. The remote node is Fabric Controller (0xfffffd). It
3156 * first searches the @vport node list to find the matching ndlp. If no such
3157 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
3158 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
3159 * routine is invoked to send the SCR IOCB.
3160 *
3161 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3162 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3163 * will be stored into the context1 field of the IOCB for the completion
3164 * callback function to the SCR ELS command.
3165 *
3166 * Return code
3167 * 0 - Successfully issued scr command
3168 * 1 - Failed to issue scr command
3169 **/
3170 int
3171 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
3172 {
3173 struct lpfc_hba *phba = vport->phba;
3174 struct lpfc_iocbq *elsiocb;
3175 uint8_t *pcmd;
3176 uint16_t cmdsize;
3177 struct lpfc_nodelist *ndlp;
3178
3179 cmdsize = (sizeof(uint32_t) + sizeof(SCR));
3180
3181 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3182 if (!ndlp) {
3183 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3184 if (!ndlp)
3185 return 1;
3186 lpfc_enqueue_node(vport, ndlp);
3187 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3188 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3189 if (!ndlp)
3190 return 1;
3191 }
3192
3193 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3194 ndlp->nlp_DID, ELS_CMD_SCR);
3195
3196 if (!elsiocb) {
3197 /* This will trigger the release of the node just
3198 * allocated
3199 */
3200 lpfc_nlp_put(ndlp);
3201 return 1;
3202 }
3203
3204 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3205
3206 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
3207 pcmd += sizeof(uint32_t);
3208
3209 /* For SCR, remainder of payload is SCR parameter page */
3210 memset(pcmd, 0, sizeof(SCR));
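/* SCR_FUNC_FULL requests full registration: per FC-LS the Fabric
 * Controller will then forward RSCNs for both fabric-detected and
 * N_Port-detected events to this port.
 */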
3211 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
3212
3213 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3214 "Issue SCR: did:x%x",
3215 ndlp->nlp_DID, 0, 0);
3216
3217 phba->fc_stat.elsXmitSCR++;
3218 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
3219 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
3220 IOCB_ERROR) {
3221 /* The additional lpfc_nlp_put will cause the following
3222 * lpfc_els_free_iocb routine to trigger the release of
3223 * the node.
3224 */
3225 lpfc_nlp_put(ndlp);
3226 lpfc_els_free_iocb(phba, elsiocb);
3227 return 1;
3228 }
3229 /* This will cause the callback-function lpfc_cmpl_els_disc_cmd to
3230 * trigger the release of the node.
3231 */
3232 if (!(vport->fc_flag & FC_PT2PT))
3233 lpfc_nlp_put(ndlp);
3234 return 0;
3235 }
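/* Illustrative usage (assumed caller context, not lifted from this
 * file): discovery code registers for state change notification once
 * fabric login has completed, e.g.
 *
 *	rc = lpfc_issue_els_scr(vport, 0);
 *
 * where a zero retry count indicates a first (non-retried) attempt and
 * a nonzero return means the SCR could not be issued.
 */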
3236
3237 /**
3238 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
3239 * or the other nport (pt2pt).
3240 * @vport: pointer to a host virtual N_Port data structure.
3241 * @retry: number of retries to the command IOCB.
3242 *
3243 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD)
3244 * when connected to a fabric, or to the remote port when connected
3245 * in point-to-point mode. When sent to the Fabric Controller, it will
3246 * replay the RSCN to registered recipients.
3247 *
3248 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3249 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3250 * will be stored into the context1 field of the IOCB for the completion
3251 * callback function to the RSCN ELS command.
3252 *
3253 * Return code
3254 * 0 - Successfully issued RSCN command
3255 * 1 - Failed to issue RSCN command
3256 **/
3257 int
3258 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
3259 {
3260 struct lpfc_hba *phba = vport->phba;
3261 struct lpfc_iocbq *elsiocb;
3262 struct lpfc_nodelist *ndlp;
3263 struct {
3264 struct fc_els_rscn rscn;
3265 struct fc_els_rscn_page portid;
3266 } *event;
3267 uint32_t nportid;
3268 uint16_t cmdsize = sizeof(*event);
3269
3270 /* Not supported for private loop */
3271 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
3272 !(vport->fc_flag & FC_PUBLIC_LOOP))
3273 return 1;
3274
3275 if (vport->fc_flag & FC_PT2PT) {
3276 /* find any mapped nport - that would be the other nport */
3277 ndlp = lpfc_findnode_mapped(vport);
3278 if (!ndlp)
3279 return 1;
3280 } else {
3281 nportid = FC_FID_FCTRL;
3282 /* find the fabric controller node */
3283 ndlp = lpfc_findnode_did(vport, nportid);
3284 if (!ndlp) {
3285 /* if one didn't exist, make one */
3286 ndlp = lpfc_nlp_init(vport, nportid);
3287 if (!ndlp)
3288 return 1;
3289 lpfc_enqueue_node(vport, ndlp);
3290 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3291 ndlp = lpfc_enable_node(vport, ndlp,
3292 NLP_STE_UNUSED_NODE);
3293 if (!ndlp)
3294 return 1;
3295 }
3296 }
3297
3298 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3299 ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
3300
3301 if (!elsiocb) {
3302 /* This will trigger the release of the node just
3303 * allocated
3304 */
3305 lpfc_nlp_put(ndlp);
3306 return 1;
3307 }
3308
3309 event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
3310
3311 event->rscn.rscn_cmd = ELS_RSCN;
3312 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
3313 event->rscn.rscn_plen = cpu_to_be16(cmdsize);
3314
3315 nportid = vport->fc_myDID;
3316 /* appears that page flags must be 0 for fabric to broadcast RSCN */
3317 event->portid.rscn_page_flags = 0;
3318 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
3319 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
3320 event->portid.rscn_fid[2] = nportid & 0x000000FF;
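/* Example: for fc_myDID 0x012345 the page carries
 * rscn_fid[] = { 0x01, 0x23, 0x45 }, i.e. the 24-bit port ID in
 * big-endian byte order.
 */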
3321
3322 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3323 "Issue RSCN: did:x%x",
3324 ndlp->nlp_DID, 0, 0);
3325
3326 phba->fc_stat.elsXmitRSCN++;
3327 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3328 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
3329 IOCB_ERROR) {
3330 /* The additional lpfc_nlp_put will cause the following
3331 * lpfc_els_free_iocb routine to trigger the release of
3332 * the node.
3333 */
3334 lpfc_nlp_put(ndlp);
3335 lpfc_els_free_iocb(phba, elsiocb);
3336 return 1;
3337 }
3338 /* This will cause the callback-function lpfc_cmpl_els_cmd to
3339 * trigger the release of node.
3340 */
3341 if (!(vport->fc_flag & FC_PT2PT))
3342 lpfc_nlp_put(ndlp);
3343
3344 return 0;
3345 }
3346
3347 /**
3348 * lpfc_issue_els_farpr - Issue a FARPR to a node on a vport
3349 * @vport: pointer to a host virtual N_Port data structure.
3350 * @nportid: N_Port identifier to the remote node.
3351 * @retry: number of retries to the command IOCB.
3352 *
3353 * This routine issues a Fibre Channel Address Resolution Response
3354 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
3355 * is passed into the function. It first searches the @vport node list to find
3356 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
3357 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
3358 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
3359 *
3360 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3361 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3362 * will be stored into the context1 field of the IOCB for the completion
3363 * callback function to the FARPR ELS command.
3364 *
3365 * Return code
3366 * 0 - Successfully issued farpr command
3367 * 1 - Failed to issue farpr command
3368 **/
3369 static int
3370 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3371 {
3372 struct lpfc_hba *phba = vport->phba;
3373 struct lpfc_iocbq *elsiocb;
3374 FARP *fp;
3375 uint8_t *pcmd;
3376 uint32_t *lp;
3377 uint16_t cmdsize;
3378 struct lpfc_nodelist *ondlp;
3379 struct lpfc_nodelist *ndlp;
3380
3381 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
3382
3383 ndlp = lpfc_findnode_did(vport, nportid);
3384 if (!ndlp) {
3385 ndlp = lpfc_nlp_init(vport, nportid);
3386 if (!ndlp)
3387 return 1;
3388 lpfc_enqueue_node(vport, ndlp);
3389 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3390 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3391 if (!ndlp)
3392 return 1;
3393 }
3394
3395 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3396 ndlp->nlp_DID, ELS_CMD_RNID);
3397 if (!elsiocb) {
3398 /* This will trigger the release of the node just
3399 * allocated
3400 */
3401 lpfc_nlp_put(ndlp);
3402 return 1;
3403 }
3404
3405 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3406
3407 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
3408 pcmd += sizeof(uint32_t);
3409
3410 /* Fill in FARPR payload */
3411 fp = (FARP *) (pcmd);
3412 memset(fp, 0, sizeof(FARP));
3413 lp = (uint32_t *) pcmd;
3414 *lp++ = be32_to_cpu(nportid);
3415 *lp++ = be32_to_cpu(vport->fc_myDID);
3416 fp->Rflags = 0;
3417 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
3418
3419 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
3420 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3421 ondlp = lpfc_findnode_did(vport, nportid);
3422 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
3423 memcpy(&fp->OportName, &ondlp->nlp_portname,
3424 sizeof(struct lpfc_name));
3425 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3426 sizeof(struct lpfc_name));
3427 }
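/* If the originator's node is unknown here, OportName and OnodeName
 * remain zero from the memset above and are sent as zeros.
 */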
3428
3429 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3430 "Issue FARPR: did:x%x",
3431 ndlp->nlp_DID, 0, 0);
3432
3433 phba->fc_stat.elsXmitFARPR++;
3434 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3435 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
3436 IOCB_ERROR) {
3437 /* The additional lpfc_nlp_put will cause the following
3438 * lpfc_els_free_iocb routine to trigger the release of
3439 * the node.
3440 */
3441 lpfc_nlp_put(ndlp);
3442 lpfc_els_free_iocb(phba, elsiocb);
3443 return 1;
3444 }
3445 /* This will cause the callback-function lpfc_cmpl_els_cmd to
3446 * trigger the release of the node.
3447 */
3448 /* Don't release reference count as RDF is likely outstanding */
3449 return 0;
3450 }
3451
3452 /**
3453 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
3454 * @vport: pointer to a host virtual N_Port data structure.
3455 * @retry: retry counter for the command IOCB.
3456 *
3457 * This routine issues an ELS RDF to the Fabric Controller to register
3458 * for diagnostic functions.
3459 *
3460 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3461 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3462 * will be stored into the context1 field of the IOCB for the completion
3463 * callback function to the RDF ELS command.
3464 *
3465 * Return code
3466 * 0 - Successfully issued rdf command
3467 * negative errno - Failed to issue rdf command (-ENODEV, -EACCES, -ENOMEM or -EIO)
3468 **/
3469 int
3470 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
3471 {
3472 struct lpfc_hba *phba = vport->phba;
3473 struct lpfc_iocbq *elsiocb;
3474 struct lpfc_els_rdf_req *prdf;
3475 struct lpfc_nodelist *ndlp;
3476 uint16_t cmdsize;
3477
3478 cmdsize = sizeof(*prdf);
3479
3480 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3481 if (!ndlp) {
3482 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3483 if (!ndlp)
3484 return -ENODEV;
3485 lpfc_enqueue_node(vport, ndlp);
3486 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
3487 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3488 if (!ndlp)
3489 return -ENODEV;
3490 }
3491
3492 /* RDF ELS is not required on an NPIV VN_Port. */
3493 if (vport->port_type == LPFC_NPIV_PORT) {
3494 lpfc_nlp_put(ndlp);
3495 return -EACCES;
3496 }
3497
3498 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3499 ndlp->nlp_DID, ELS_CMD_RDF);
3500 if (!elsiocb) {
3501 /* This will trigger the release of the node just
3502 * allocated
3503 */
3504 lpfc_nlp_put(ndlp);
3505 return -ENOMEM;
3506 }
3507
3508 /* Configure the payload for the supported FPIN events. */
3509 prdf = (struct lpfc_els_rdf_req *)
3510 (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
3511 memset(prdf, 0, cmdsize);
3512 prdf->rdf.fpin_cmd = ELS_RDF;
3513 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
3514 sizeof(struct fc_els_rdf));
3515 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
3516 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
3517 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
3518 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
3519 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
3520 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
3521 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
3522 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
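/* Resulting RDF payload, as built above:
 *	rdf.fpin_cmd            - ELS_RDF command code
 *	rdf.desc_len            - length of the descriptor list that follows
 *	reg_d1.reg_desc         - ELS_DTAG_FPIN_REGISTER descriptor, count = 4
 *	reg_d1.desc_tags[0..3]  - link integrity, delivery, peer congestion
 *	                          and congestion notification tags
 */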
3523
3524 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3525 "Issue RDF: did:x%x",
3526 ndlp->nlp_DID, 0, 0);
3527
3528 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3529 "6444 Xmit RDF to remote NPORT x%x\n",
3530 ndlp->nlp_DID);
3531
3532 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
3533 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
3534 IOCB_ERROR) {
3535 /* The additional lpfc_nlp_put will cause the following
3536 * lpfc_els_free_iocb routine to trigger the release of
3537 * the node.
3538 */
3539 lpfc_nlp_put(ndlp);
3540 lpfc_els_free_iocb(phba, elsiocb);
3541 return -EIO;
3542 }
3543
3544 /* An RDF was issued - this put ensures the ndlp is cleaned up
3545 * when the RDF completes.
3546 */
3547 lpfc_nlp_put(ndlp);
3548 return 0;
3549 }
3550
3551 /**
3552 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
3553 * @vport: pointer to a host virtual N_Port data structure.
3554 * @nlp: pointer to a node-list data structure.
3555 *
3556 * This routine cancels the timer with a delayed IOCB-command retry for
3557 * a @vport's @ndlp. It stops the timer for the delayed function retry and
3558 * removes the ELS retry event if one is pending. In addition, if the
3559 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
3560 * commands are sent for the @vport's nodes that require issuing discovery
3561 * ADISC.
3562 **/
3563 void
3564 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
3565 {
3566 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3567 struct lpfc_work_evt *evtp;
3568
3569 if (!(nlp->nlp_flag & NLP_DELAY_TMO))
3570 return;
3571 spin_lock_irq(shost->host_lock);
3572 nlp->nlp_flag &= ~NLP_DELAY_TMO;
3573 spin_unlock_irq(shost->host_lock);
3574 del_timer_sync(&nlp->nlp_delayfunc);
3575 nlp->nlp_last_elscmd = 0;
3576 if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
3577 list_del_init(&nlp->els_retry_evt.evt_listp);
3578 /* Decrement nlp reference count held for the delayed retry */
3579 evtp = &nlp->els_retry_evt;
3580 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
3581 }
3582 if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
3583 spin_lock_irq(shost->host_lock);
3584 nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3585 spin_unlock_irq(shost->host_lock);
3586 if (vport->num_disc_nodes) {
3587 if (vport->port_state < LPFC_VPORT_READY) {
3588 /* Check if there are more ADISCs to be sent */
3589 lpfc_more_adisc(vport);
3590 } else {
3591 /* Check if there are more PLOGIs to be sent */
3592 lpfc_more_plogi(vport);
3593 if (vport->num_disc_nodes == 0) {
3594 spin_lock_irq(shost->host_lock);
3595 vport->fc_flag &= ~FC_NDISC_ACTIVE;
3596 spin_unlock_irq(shost->host_lock);
3597 lpfc_can_disctmo(vport);
3598 lpfc_end_rscn(vport);
3599 }
3600 }
3601 }
3602 }
3603 return;
3604 }
3605
3606 /**
3607 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
3608 * @t: pointer to the timer function associated data (ndlp).
3609 *
3610 * This routine is invoked by the ndlp delayed-function timer to check
3611 * whether there is any pending ELS retry event(s) with the node. If not, it
3612 * simply returns. Otherwise, if there is at least one ELS delayed event, it
3613 * adds the delayed events to the HBA work list and invokes the
3614 * lpfc_worker_wake_up() routine to wake up worker thread to process the
3615 * event. Note that lpfc_nlp_get() is called before posting the event to
3616 * the work list to hold reference count of ndlp so that it guarantees the
3617 * reference to ndlp will still be available when the worker thread gets
3618 * to the event associated with the ndlp.
3619 **/
3620 void
3621 lpfc_els_retry_delay(struct timer_list *t)
3622 {
3623 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
3624 struct lpfc_vport *vport = ndlp->vport;
3625 struct lpfc_hba *phba = vport->phba;
3626 unsigned long flags;
3627 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
3628
3629 spin_lock_irqsave(&phba->hbalock, flags);
3630 if (!list_empty(&evtp->evt_listp)) {
3631 spin_unlock_irqrestore(&phba->hbalock, flags);
3632 return;
3633 }
3634
3635 /* We need to hold the node by incrementing the reference
3636 * count until the queued work is done
3637 */
3638 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
3639 if (evtp->evt_arg1) {
3640 evtp->evt = LPFC_EVT_ELS_RETRY;
3641 list_add_tail(&evtp->evt_listp, &phba->work_list);
3642 lpfc_worker_wake_up(phba);
3643 }
3644 spin_unlock_irqrestore(&phba->hbalock, flags);
3645 return;
3646 }
3647
3648 /**
3649 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
3650 * @ndlp: pointer to a node-list data structure.
3651 *
3652 * This routine is the worker-thread handler for processing the @ndlp delayed
3653 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
3654 * the last ELS command from the associated ndlp and invokes the proper ELS
3655 * function according to the delayed ELS command to retry the command.
3656 **/
3657 void
3658 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
3659 {
3660 struct lpfc_vport *vport = ndlp->vport;
3661 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3662 uint32_t cmd, retry;
3663
3664 spin_lock_irq(shost->host_lock);
3665 cmd = ndlp->nlp_last_elscmd;
3666 ndlp->nlp_last_elscmd = 0;
3667
3668 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
3669 spin_unlock_irq(shost->host_lock);
3670 return;
3671 }
3672
3673 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
3674 spin_unlock_irq(shost->host_lock);
3675 /*
3676 * If a discovery event readded nlp_delayfunc after timer
3677 * firing and before processing the timer, cancel the
3678 * nlp_delayfunc.
3679 */
3680 del_timer_sync(&ndlp->nlp_delayfunc);
3681 retry = ndlp->nlp_retry;
3682 ndlp->nlp_retry = 0;
3683
3684 switch (cmd) {
3685 case ELS_CMD_FLOGI:
3686 lpfc_issue_els_flogi(vport, ndlp, retry);
3687 break;
3688 case ELS_CMD_PLOGI:
3689 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
3690 ndlp->nlp_prev_state = ndlp->nlp_state;
3691 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3692 }
3693 break;
3694 case ELS_CMD_ADISC:
3695 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
3696 ndlp->nlp_prev_state = ndlp->nlp_state;
3697 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3698 }
3699 break;
3700 case ELS_CMD_PRLI:
3701 case ELS_CMD_NVMEPRLI:
3702 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
3703 ndlp->nlp_prev_state = ndlp->nlp_state;
3704 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3705 }
3706 break;
3707 case ELS_CMD_LOGO:
3708 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
3709 ndlp->nlp_prev_state = ndlp->nlp_state;
3710 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3711 }
3712 break;
3713 case ELS_CMD_FDISC:
3714 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
3715 lpfc_issue_els_fdisc(vport, ndlp, retry);
3716 break;
3717 }
3718 return;
3719 }
3720
3721 /**
3722 * lpfc_link_reset - Issue link reset
3723 * @vport: pointer to a virtual N_Port data structure.
3724 *
3725 * This routine performs link reset by sending INIT_LINK mailbox command.
3726 * For SLI-3 adapter, link attention interrupt is enabled before issuing
3727 * INIT_LINK mailbox command.
3728 *
3729 * Return code
3730 * 0 - Link reset initiated successfully
3731 * 1 - Failed to initiate link reset
3732 **/
3733 int
3734 lpfc_link_reset(struct lpfc_vport *vport)
3735 {
3736 struct lpfc_hba *phba = vport->phba;
3737 LPFC_MBOXQ_t *mbox;
3738 uint32_t control;
3739 int rc;
3740
3741 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3742 "2851 Attempt link reset\n");
3743 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3744 if (!mbox) {
3745 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3746 "2852 Failed to allocate mbox memory");
3747 return 1;
3748 }
3749
3750 /* Enable Link attention interrupts */
3751 if (phba->sli_rev <= LPFC_SLI_REV3) {
3752 spin_lock_irq(&phba->hbalock);
3753 phba->sli.sli_flag |= LPFC_PROCESS_LA;
3754 control = readl(phba->HCregaddr);
3755 control |= HC_LAINT_ENA;
3756 writel(control, phba->HCregaddr);
3757 readl(phba->HCregaddr); /* flush */
3758 spin_unlock_irq(&phba->hbalock);
3759 }
3760
3761 lpfc_init_link(phba, mbox, phba->cfg_topology,
3762 phba->cfg_link_speed);
3763 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3764 mbox->vport = vport;
3765 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3766 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3767 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3768 "2853 Failed to issue INIT_LINK "
3769 "mbox command, rc:x%x\n", rc);
3770 mempool_free(mbox, phba->mbox_mem_pool);
3771 return 1;
3772 }
3773
3774 return 0;
3775 }
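/* Within this file lpfc_link_reset() is driven from lpfc_els_retry():
 * when a PLOGI to the NameServer hits IOERR_SEQUENCE_TIMEOUT on its
 * final allowed retry, the link is reset instead of retrying again.
 */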
3776
3777 /**
3778 * lpfc_els_retry - Make retry decision on an els command iocb
3779 * @phba: pointer to lpfc hba data structure.
3780 * @cmdiocb: pointer to lpfc command iocb data structure.
3781 * @rspiocb: pointer to lpfc response iocb data structure.
3782 *
3783 * This routine makes a retry decision on an ELS command IOCB, which has
3784 * failed. The following ELS IOCBs use this function for retrying the command
3785 * when previously issued command responsed with error status: FLOGI, PLOGI,
3786 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
3787 * returned error status, it makes the decision whether a retry shall be
3788 * issued for the command, and whether a retry shall be made immediately or
3789 * delayed. In the former case, the corresponding ELS command issuing-function
3790 * is called to retry the command. In the latter case, the ELS command shall
3791 * be posted to the ndlp delayed event and delayed function timer set to the
3792 * ndlp for the delayed command issuing.
3793 *
3794 * Return code
3795 * 0 - No retry of els command is made
3796 * 1 - Immediate or delayed retry of els command is made
3797 **/
3798 static int
3799 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3800 struct lpfc_iocbq *rspiocb)
3801 {
3802 struct lpfc_vport *vport = cmdiocb->vport;
3803 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3804 IOCB_t *irsp = &rspiocb->iocb;
3805 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3806 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3807 uint32_t *elscmd;
3808 struct ls_rjt stat;
3809 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
3810 int logerr = 0;
3811 uint32_t cmd = 0;
3812 uint32_t did;
3813 int link_reset = 0, rc;
3814
3815
3816 /* Note: context2 may be 0 for internal driver abort
3817 * of a delayed ELS command.
3818 */
3819
3820 if (pcmd && pcmd->virt) {
3821 elscmd = (uint32_t *) (pcmd->virt);
3822 cmd = *elscmd++;
3823 }
3824
3825 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3826 did = ndlp->nlp_DID;
3827 else {
3828 /* We should only hit this case for retrying PLOGI */
3829 did = irsp->un.elsreq64.remoteID;
3830 ndlp = lpfc_findnode_did(vport, did);
3831 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
3832 && (cmd != ELS_CMD_PLOGI))
3833 return 1;
3834 }
3835
3836 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3837 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
3838 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
3839
3840 switch (irsp->ulpStatus) {
3841 case IOSTAT_FCP_RSP_ERROR:
3842 break;
3843 case IOSTAT_REMOTE_STOP:
3844 if (phba->sli_rev == LPFC_SLI_REV4) {
3845 /* This IO was aborted by the target; we don't
3846 * know the rxid, and because we did not send the
3847 * ABTS we cannot generate an RRQ.
3848 */
3849 lpfc_set_rrq_active(phba, ndlp,
3850 cmdiocb->sli4_lxritag, 0, 0);
3851 }
3852 break;
3853 case IOSTAT_LOCAL_REJECT:
3854 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
3855 case IOERR_LOOP_OPEN_FAILURE:
3856 if (cmd == ELS_CMD_FLOGI) {
3857 if (PCI_DEVICE_ID_HORNET ==
3858 phba->pcidev->device) {
3859 phba->fc_topology = LPFC_TOPOLOGY_LOOP;
3860 phba->pport->fc_myDID = 0;
3861 phba->alpa_map[0] = 0;
3862 phba->alpa_map[1] = 0;
3863 }
3864 }
3865 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
3866 delay = 1000;
3867 retry = 1;
3868 break;
3869
3870 case IOERR_ILLEGAL_COMMAND:
3871 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3872 "0124 Retry illegal cmd x%x "
3873 "retry:x%x delay:x%x\n",
3874 cmd, cmdiocb->retry, delay);
3875 retry = 1;
3876 /* All command's retry policy */
3877 maxretry = 8;
3878 if (cmdiocb->retry > 2)
3879 delay = 1000;
3880 break;
3881
3882 case IOERR_NO_RESOURCES:
3883 logerr = 1; /* HBA out of resources */
3884 retry = 1;
3885 if (cmdiocb->retry > 100)
3886 delay = 100;
3887 maxretry = 250;
3888 break;
3889
3890 case IOERR_ILLEGAL_FRAME:
3891 delay = 100;
3892 retry = 1;
3893 break;
3894
3895 case IOERR_INVALID_RPI:
3896 if (cmd == ELS_CMD_PLOGI &&
3897 did == NameServer_DID) {
3898 /* Continue forever if plogi to */
3899 /* the nameserver fails */
3900 maxretry = 0;
3901 delay = 100;
3902 }
3903 retry = 1;
3904 break;
3905
3906 case IOERR_SEQUENCE_TIMEOUT:
3907 if (cmd == ELS_CMD_PLOGI &&
3908 did == NameServer_DID &&
3909 (cmdiocb->retry + 1) == maxretry) {
3910 /* Reset the Link */
3911 link_reset = 1;
3912 break;
3913 }
3914 retry = 1;
3915 delay = 100;
3916 break;
3917 }
3918 break;
3919
3920 case IOSTAT_NPORT_RJT:
3921 case IOSTAT_FABRIC_RJT:
3922 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
3923 retry = 1;
3924 break;
3925 }
3926 break;
3927
3928 case IOSTAT_NPORT_BSY:
3929 case IOSTAT_FABRIC_BSY:
3930 logerr = 1; /* Fabric / Remote NPort out of resources */
3931 retry = 1;
3932 break;
3933
3934 case IOSTAT_LS_RJT:
3935 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
3936 /* Added for Vendor specific support
3937 * Just keep retrying for these Rsn / Exp codes
3938 */
3939 switch (stat.un.b.lsRjtRsnCode) {
3940 case LSRJT_UNABLE_TPC:
3941 /* The driver has a VALID PLOGI but the rport has
3942 * rejected the PRLI - can't do it now. Delay
3943 * for 1 second and try again.
3944 *
3945 * However, if explanation is REQ_UNSUPPORTED there's
3946 * no point to retry PRLI.
3947 */
3948 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) &&
3949 stat.un.b.lsRjtRsnCodeExp !=
3950 LSEXP_REQ_UNSUPPORTED) {
3951 delay = 1000;
3952 maxretry = lpfc_max_els_tries + 1;
3953 retry = 1;
3954 break;
3955 }
3956
3957 /* Legacy bug fix code for targets with PLOGI delays. */
3958 if (stat.un.b.lsRjtRsnCodeExp ==
3959 LSEXP_CMD_IN_PROGRESS) {
3960 if (cmd == ELS_CMD_PLOGI) {
3961 delay = 1000;
3962 maxretry = 48;
3963 }
3964 retry = 1;
3965 break;
3966 }
3967 if (stat.un.b.lsRjtRsnCodeExp ==
3968 LSEXP_CANT_GIVE_DATA) {
3969 if (cmd == ELS_CMD_PLOGI) {
3970 delay = 1000;
3971 maxretry = 48;
3972 }
3973 retry = 1;
3974 break;
3975 }
3976 if (cmd == ELS_CMD_PLOGI) {
3977 delay = 1000;
3978 maxretry = lpfc_max_els_tries + 1;
3979 retry = 1;
3980 break;
3981 }
3982 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3983 (cmd == ELS_CMD_FDISC) &&
3984 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
3985 lpfc_printf_vlog(vport, KERN_ERR,
3986 LOG_TRACE_EVENT,
3987 "0125 FDISC Failed (x%x). "
3988 "Fabric out of resources\n",
3989 stat.un.lsRjtError);
3990 lpfc_vport_set_state(vport,
3991 FC_VPORT_NO_FABRIC_RSCS);
3992 }
3993 break;
3994
3995 case LSRJT_LOGICAL_BSY:
3996 if ((cmd == ELS_CMD_PLOGI) ||
3997 (cmd == ELS_CMD_PRLI) ||
3998 (cmd == ELS_CMD_NVMEPRLI)) {
3999 delay = 1000;
4000 maxretry = 48;
4001 } else if (cmd == ELS_CMD_FDISC) {
4002 /* FDISC retry policy */
4003 maxretry = 48;
4004 if (cmdiocb->retry >= 32)
4005 delay = 1000;
4006 }
4007 retry = 1;
4008 break;
4009
4010 case LSRJT_LOGICAL_ERR:
4011 /* There are some cases where switches return this
4012 * error when they are not ready and should be returning
4013 * Logical Busy. We should delay every time.
4014 */
4015 if (cmd == ELS_CMD_FDISC &&
4016 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
4017 maxretry = 3;
4018 delay = 1000;
4019 retry = 1;
4020 } else if (cmd == ELS_CMD_FLOGI &&
4021 stat.un.b.lsRjtRsnCodeExp ==
4022 LSEXP_NOTHING_MORE) {
4023 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
4024 retry = 1;
4025 lpfc_printf_vlog(vport, KERN_ERR,
4026 LOG_TRACE_EVENT,
4027 "0820 FLOGI Failed (x%x). "
4028 "BBCredit Not Supported\n",
4029 stat.un.lsRjtError);
4030 }
4031 break;
4032
4033 case LSRJT_PROTOCOL_ERR:
4034 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4035 (cmd == ELS_CMD_FDISC) &&
4036 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
4037 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
4038 ) {
4039 lpfc_printf_vlog(vport, KERN_ERR,
4040 LOG_TRACE_EVENT,
4041 "0122 FDISC Failed (x%x). "
4042 "Fabric Detected Bad WWN\n",
4043 stat.un.lsRjtError);
4044 lpfc_vport_set_state(vport,
4045 FC_VPORT_FABRIC_REJ_WWN);
4046 }
4047 break;
4048 case LSRJT_VENDOR_UNIQUE:
4049 if ((stat.un.b.vendorUnique == 0x45) &&
4050 (cmd == ELS_CMD_FLOGI)) {
4051 goto out_retry;
4052 }
4053 break;
4054 case LSRJT_CMD_UNSUPPORTED:
4055 /* lpfc nvmet returns this type of LS_RJT when it
4056 * receives an FCP PRLI because lpfc nvmet only
4057 * supports NVME. The ELS request is terminated for FCP4
4058 * on this rport.
4059 */
4060 if (stat.un.b.lsRjtRsnCodeExp ==
4061 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
4062 spin_lock_irq(shost->host_lock);
4063 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
4064 spin_unlock_irq(shost->host_lock);
4065 retry = 0;
4066 goto out_retry;
4067 }
4068 break;
4069 }
4070 break;
4071
4072 case IOSTAT_INTERMED_RSP:
4073 case IOSTAT_BA_RJT:
4074 break;
4075
4076 default:
4077 break;
4078 }
4079
4080 if (link_reset) {
4081 rc = lpfc_link_reset(vport);
4082 if (rc) {
4083 /* Do not give up. Retry PLOGI one more time and attempt
4084 * link reset if PLOGI fails again.
4085 */
4086 retry = 1;
4087 delay = 100;
4088 goto out_retry;
4089 }
4090 return 1;
4091 }
4092
4093 if (did == FDMI_DID)
4094 retry = 1;
4095
4096 if ((cmd == ELS_CMD_FLOGI) &&
4097 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
4098 !lpfc_error_lost_link(irsp)) {
4099 /* FLOGI retry policy */
4100 retry = 1;
4101 /* retry FLOGI forever */
4102 if (phba->link_flag != LS_LOOPBACK_MODE)
4103 maxretry = 0;
4104 else
4105 maxretry = 2;
4106
4107 if (cmdiocb->retry >= 100)
4108 delay = 5000;
4109 else if (cmdiocb->retry >= 32)
4110 delay = 1000;
4111 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
4112 /* retry FDISCs every second up to devloss */
4113 retry = 1;
4114 maxretry = vport->cfg_devloss_tmo;
4115 delay = 1000;
4116 }
4117
4118 cmdiocb->retry++;
4119 if (maxretry && (cmdiocb->retry >= maxretry)) {
4120 phba->fc_stat.elsRetryExceeded++;
4121 retry = 0;
4122 }
4123
4124 if ((vport->load_flag & FC_UNLOADING) != 0)
4125 retry = 0;
4126
4127 out_retry:
4128 if (retry) {
4129 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
4130 /* Stop retrying PLOGI and FDISC if in FCF discovery */
4131 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4132 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4133 "2849 Stop retry ELS command "
4134 "x%x to remote NPORT x%x, "
4135 "Data: x%x x%x\n", cmd, did,
4136 cmdiocb->retry, delay);
4137 return 0;
4138 }
4139 }
4140
4141 /* Retry ELS command <elsCmd> to remote NPORT <did> */
4142 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4143 "0107 Retry ELS command x%x to remote "
4144 "NPORT x%x Data: x%x x%x\n",
4145 cmd, did, cmdiocb->retry, delay);
4146
4147 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
4148 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
4149 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
4150 IOERR_NO_RESOURCES))) {
4151 /* Don't reset timer for no resources */
4152
4153 /* If discovery / RSCN timer is running, reset it */
4154 if (timer_pending(&vport->fc_disctmo) ||
4155 (vport->fc_flag & FC_RSCN_MODE))
4156 lpfc_set_disctmo(vport);
4157 }
4158
4159 phba->fc_stat.elsXmitRetry++;
4160 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
4161 phba->fc_stat.elsDelayRetry++;
4162 ndlp->nlp_retry = cmdiocb->retry;
4163
4164 /* delay is specified in milliseconds */
4165 mod_timer(&ndlp->nlp_delayfunc,
4166 jiffies + msecs_to_jiffies(delay));
4167 spin_lock_irq(shost->host_lock);
4168 ndlp->nlp_flag |= NLP_DELAY_TMO;
4169 spin_unlock_irq(shost->host_lock);
4170
4171 ndlp->nlp_prev_state = ndlp->nlp_state;
4172 if ((cmd == ELS_CMD_PRLI) ||
4173 (cmd == ELS_CMD_NVMEPRLI))
4174 lpfc_nlp_set_state(vport, ndlp,
4175 NLP_STE_PRLI_ISSUE);
4176 else
4177 lpfc_nlp_set_state(vport, ndlp,
4178 NLP_STE_NPR_NODE);
4179 ndlp->nlp_last_elscmd = cmd;
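/* The saved command and retry count are picked up by
 * lpfc_els_retry_delay() / lpfc_els_retry_delay_handler() when
 * nlp_delayfunc fires, which re-issues the ELS command.
 */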
4180
4181 return 1;
4182 }
4183 switch (cmd) {
4184 case ELS_CMD_FLOGI:
4185 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
4186 return 1;
4187 case ELS_CMD_FDISC:
4188 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
4189 return 1;
4190 case ELS_CMD_PLOGI:
4191 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
4192 ndlp->nlp_prev_state = ndlp->nlp_state;
4193 lpfc_nlp_set_state(vport, ndlp,
4194 NLP_STE_PLOGI_ISSUE);
4195 }
4196 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
4197 return 1;
4198 case ELS_CMD_ADISC:
4199 ndlp->nlp_prev_state = ndlp->nlp_state;
4200 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4201 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
4202 return 1;
4203 case ELS_CMD_PRLI:
4204 case ELS_CMD_NVMEPRLI:
4205 ndlp->nlp_prev_state = ndlp->nlp_state;
4206 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
4207 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
4208 return 1;
4209 case ELS_CMD_LOGO:
4210 ndlp->nlp_prev_state = ndlp->nlp_state;
4211 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
4212 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
4213 return 1;
4214 }
4215 }
4216 /* No retry ELS command <elsCmd> to remote NPORT <did> */
4217 if (logerr) {
4218 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4219 "0137 No retry ELS command x%x to remote "
4220 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
4221 cmd, did, irsp->ulpStatus,
4222 irsp->un.ulpWord[4]);
4223 }
4224 else {
4225 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4226 "0108 No retry ELS command x%x to remote "
4227 "NPORT x%x Retried:%d Error:x%x/%x\n",
4228 cmd, did, cmdiocb->retry, irsp->ulpStatus,
4229 irsp->un.ulpWord[4]);
4230 }
4231 return 0;
4232 }
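/* Illustrative call pattern (a sketch, not lifted from a specific
 * completion handler in this file): ELS completion routines consult
 * lpfc_els_retry() before treating a failure as final, e.g.
 *
 *	if (irsp->ulpStatus) {
 *		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *			goto out;	// a retry was scheduled
 *		// otherwise handle the terminal failure
 *	}
 */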
4233
4234 /**
4235 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
4236 * @phba: pointer to lpfc hba data structure.
4237 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
4238 *
4239 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
4240 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
4241 * checks to see whether there is a lpfc DMA buffer associated with the
4242 * response of the command IOCB. If so, it will be released before releasing
4243 * the lpfc DMA buffer associated with the IOCB itself.
4244 *
4245 * Return code
4246 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
4247 **/
4248 static int
4249 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
4250 {
4251 struct lpfc_dmabuf *buf_ptr;
4252
4253 /* Free the response before processing the command. */
4254 if (!list_empty(&buf_ptr1->list)) {
4255 list_remove_head(&buf_ptr1->list, buf_ptr,
4256 struct lpfc_dmabuf,
4257 list);
4258 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
4259 kfree(buf_ptr);
4260 }
4261 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
4262 kfree(buf_ptr1);
4263 return 0;
4264 }
4265
4266 /**
4267 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
4268 * @phba: pointer to lpfc hba data structure.
4269 * @buf_ptr: pointer to the lpfc dma buffer data structure.
4270 *
4271 * This routine releases the lpfc Direct Memory Access (DMA) buffer
4272 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
4273 * pool.
4274 *
4275 * Return code
4276 * 0 - Successfully released lpfc DMA buffer (currently, always return 0)
4277 **/
4278 static int
4279 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
4280 {
4281 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
4282 kfree(buf_ptr);
4283 return 0;
4284 }
4285
4286 /**
4287 * lpfc_els_free_iocb - Free a command iocb and its associated resources
4288 * @phba: pointer to lpfc hba data structure.
4289 * @elsiocb: pointer to lpfc els command iocb data structure.
4290 *
4291 * This routine frees a command IOCB and its associated resources. The
4292 * command IOCB data structure contains the reference to various associated
4293 * resources; these fields must be set to NULL if the associated reference
4294 * is not present:
4295 * context1 - reference to ndlp
4296 * context2 - reference to cmd
4297 * context2->next - reference to rsp
4298 * context3 - reference to bpl
4299 *
4300 * It first properly decrements the reference count held on ndlp for the
4301 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
4302 * set, it invokes the lpfc_els_free_data() routine to release the Direct
4303 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
4304 * adds the DMA buffer to the @phba data structure for the delayed release.
4305 * If reference to the Buffer Pointer List (BPL) is present, the
4306 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
4307 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
4308 * invoked to release the IOCB data structure back to @phba IOCBQ list.
4309 *
4310 * Return code
4311 * 0 - Success (currently, always return 0)
4312 **/
4313 int
4314 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
4315 {
4316 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
4317 struct lpfc_nodelist *ndlp;
4318
4319 ndlp = (struct lpfc_nodelist *)elsiocb->context1;
4320 if (ndlp) {
4321 if (ndlp->nlp_flag & NLP_DEFER_RM) {
4322 lpfc_nlp_put(ndlp);
4323
4324 /* If the ndlp is not being used by another discovery
4325 * thread, free it.
4326 */
4327 if (!lpfc_nlp_not_used(ndlp)) {
4328 /* If ndlp is being used by another discovery
4329 * thread, just clear NLP_DEFER_RM
4330 */
4331 ndlp->nlp_flag &= ~NLP_DEFER_RM;
4332 }
4333 }
4334 else
4335 lpfc_nlp_put(ndlp);
4336 elsiocb->context1 = NULL;
4337 }
4338 /* context2 = cmd, context2->next = rsp, context3 = bpl */
4339 if (elsiocb->context2) {
4340 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
4341 /* Firmware could still be in progress of DMAing
4342 * payload, so don't free data buffer till after
4343 * a hbeat.
4344 */
4345 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
4346 buf_ptr = elsiocb->context2;
4347 elsiocb->context2 = NULL;
4348 if (buf_ptr) {
4349 buf_ptr1 = NULL;
4350 spin_lock_irq(&phba->hbalock);
4351 if (!list_empty(&buf_ptr->list)) {
4352 list_remove_head(&buf_ptr->list,
4353 buf_ptr1, struct lpfc_dmabuf,
4354 list);
4355 INIT_LIST_HEAD(&buf_ptr1->list);
4356 list_add_tail(&buf_ptr1->list,
4357 &phba->elsbuf);
4358 phba->elsbuf_cnt++;
4359 }
4360 INIT_LIST_HEAD(&buf_ptr->list);
4361 list_add_tail(&buf_ptr->list, &phba->elsbuf);
4362 phba->elsbuf_cnt++;
4363 spin_unlock_irq(&phba->hbalock);
4364 }
4365 } else {
4366 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
4367 lpfc_els_free_data(phba, buf_ptr1);
4368 elsiocb->context2 = NULL;
4369 }
4370 }
4371
4372 if (elsiocb->context3) {
4373 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
4374 lpfc_els_free_bpl(phba, buf_ptr);
4375 elsiocb->context3 = NULL;
4376 }
4377 lpfc_sli_release_iocbq(phba, elsiocb);
4378 return 0;
4379 }
4380
4381 /**
4382 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
4383 * @phba: pointer to lpfc hba data structure.
4384 * @cmdiocb: pointer to lpfc command iocb data structure.
4385 * @rspiocb: pointer to lpfc response iocb data structure.
4386 *
4387 * This routine is the completion callback function to the Logout (LOGO)
4388 * Accept (ACC) Response ELS command. This routine is invoked to indicate
4389 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
4390 * release the ndlp if it has the last reference remaining (reference count
4391 * is 1). If that succeeds (meaning the ndlp was released), it sets the IOCB context1
4392 * field to NULL to inform the following lpfc_els_free_iocb() routine no
4393 * ndlp reference count needs to be decremented. Otherwise, the ndlp
4394 * reference use-count shall be decremented by the lpfc_els_free_iocb()
4395 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
4396 * IOCB data structure.
4397 **/
4398 static void
4399 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4400 struct lpfc_iocbq *rspiocb)
4401 {
4402 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4403 struct lpfc_vport *vport = cmdiocb->vport;
4404 IOCB_t *irsp;
4405
4406 irsp = &rspiocb->iocb;
4407 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4408 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
4409 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
4410 /* ACC to LOGO completes to NPort <nlp_DID> */
4411 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4412 "0109 ACC to LOGO completes to NPort x%x "
4413 "Data: x%x x%x x%x\n",
4414 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4415 ndlp->nlp_rpi);
4416
4417 if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
4418 /* NPort Recovery mode or node is just allocated */
4419 if (!lpfc_nlp_not_used(ndlp)) {
4420 /* If the ndlp is being used by another discovery
4421 * thread, just unregister the RPI.
4422 */
4423 lpfc_unreg_rpi(vport, ndlp);
4424 } else {
4425 /* Indicate the node has already been released; do
4426 * not reference it from within lpfc_els_free_iocb.
4427 */
4428 cmdiocb->context1 = NULL;
4429 }
4430 }
4431
4432 /*
4433 * The driver received a LOGO from the rport and has ACK'd it.
4434 * At this point, the driver is done so release the IOCB
4435 */
4436 lpfc_els_free_iocb(phba, cmdiocb);
4437 }
4438
4439 /**
4440 * lpfc_mbx_cmpl_dflt_rpi - Completion callback for unreg default RPI mbox cmd
4441 * @phba: pointer to lpfc hba data structure.
4442 * @pmb: pointer to the driver internal queue element for mailbox command.
4443 *
4444 * This routine is the completion callback function for unregister default
4445 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
4446 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
4447 * decrements the ndlp reference count held for this completion callback
4448 * function. After that, it invokes the lpfc_nlp_not_used() to check
4449 * whether there is only one reference left on the ndlp. If so, it will
4450 * perform one more decrement and trigger the release of the ndlp.
4451 **/
4452 void
4453 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4454 {
4455 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
4456 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4457
4458 pmb->ctx_buf = NULL;
4459 pmb->ctx_ndlp = NULL;
4460
4461 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4462 kfree(mp);
4463 mempool_free(pmb, phba->mbox_mem_pool);
4464 if (ndlp) {
4465 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4466 "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
4467 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4468 kref_read(&ndlp->kref),
4469 ndlp->nlp_usg_map, ndlp);
4470 if (NLP_CHK_NODE_ACT(ndlp)) {
4471 lpfc_nlp_put(ndlp);
4472 /* This is the end of the default RPI cleanup logic for
4473 * this ndlp. If no other discovery threads are using
4474 * this ndlp, free all resources associated with it.
4475 */
4476 lpfc_nlp_not_used(ndlp);
4477 } else {
4478 lpfc_drop_node(ndlp->vport, ndlp);
4479 }
4480 }
4481
4482 return;
4483 }
4484
4485 /**
4486 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
4487 * @phba: pointer to lpfc hba data structure.
4488 * @cmdiocb: pointer to lpfc command iocb data structure.
4489 * @rspiocb: pointer to lpfc response iocb data structure.
4490 *
4491 * This routine is the completion callback function for ELS Response IOCB
4492 * command. In the normal case, this callback function just properly sets the
4493 * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
4494 * field in the command IOCB is not NULL, the referred mailbox command will
4495 * be sent out, and then invokes the lpfc_els_free_iocb() routine to release
4496 * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
4497 * link down event occurred during the discovery, the lpfc_nlp_not_used()
4498 * routine shall be invoked trying to release the ndlp if no other threads
4499 * are currently referring to it.
4500 **/
4501 static void
4502 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4503 struct lpfc_iocbq *rspiocb)
4504 {
4505 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4506 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
4507 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
4508 IOCB_t *irsp;
4509 uint8_t *pcmd;
4510 LPFC_MBOXQ_t *mbox = NULL;
4511 struct lpfc_dmabuf *mp = NULL;
4512 uint32_t ls_rjt = 0;
4513
4514 irsp = &rspiocb->iocb;
4515
4516 if (!vport) {
4517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4518 "3177 ELS response failed\n");
4519 goto out;
4520 }
4521 if (cmdiocb->context_un.mbox)
4522 mbox = cmdiocb->context_un.mbox;
4523
4524 /* First determine if this is a LS_RJT cmpl. Note, this callback
4525 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
4526 */
4527 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
4528 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
4529 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
4530 /* A LS_RJT associated with Default RPI cleanup has its own
4531 * separate code path.
4532 */
4533 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
4534 ls_rjt = 1;
4535 }
4536
4537 /* Check to see if link went down during discovery */
4538 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
4539 if (mbox) {
4540 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
4541 if (mp) {
4542 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4543 kfree(mp);
4544 }
4545 mempool_free(mbox, phba->mbox_mem_pool);
4546 }
4547 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
4548 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
4549 if (lpfc_nlp_not_used(ndlp)) {
4550 ndlp = NULL;
4551 /* Indicate the node has already been released;
4552 * do not reference it from within
4553 * the routine lpfc_els_free_iocb.
4554 */
4555 cmdiocb->context1 = NULL;
4556 }
4557 goto out;
4558 }
4559
4560 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4561 "ELS rsp cmpl: status:x%x/x%x did:x%x",
4562 irsp->ulpStatus, irsp->un.ulpWord[4],
4563 cmdiocb->iocb.un.elsreq64.remoteID);
4564 /* ELS response tag <ulpIoTag> completes */
4565 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4566 "0110 ELS response tag x%x completes "
4567 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
4568 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
4569 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
4570 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4571 ndlp->nlp_rpi);
4572 if (mbox) {
4573 if ((rspiocb->iocb.ulpStatus == 0)
4574 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
4575 if (!lpfc_unreg_rpi(vport, ndlp) &&
4576 (!(vport->fc_flag & FC_PT2PT)) &&
4577 (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
4578 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
4579 lpfc_printf_vlog(vport, KERN_INFO,
4580 LOG_DISCOVERY,
4581 "0314 PLOGI recov DID x%x "
4582 "Data: x%x x%x x%x\n",
4583 ndlp->nlp_DID, ndlp->nlp_state,
4584 ndlp->nlp_rpi, ndlp->nlp_flag);
4585 mp = mbox->ctx_buf;
4586 if (mp) {
4587 lpfc_mbuf_free(phba, mp->virt,
4588 mp->phys);
4589 kfree(mp);
4590 }
4591 mempool_free(mbox, phba->mbox_mem_pool);
4592 goto out;
4593 }
4594
4595 /* Increment reference count to ndlp to hold the
4596 * reference to ndlp for the callback function.
4597 */
4598 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
4599 mbox->vport = vport;
4600 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
4601 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
4602 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
4603 }
4604 else {
4605 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
4606 ndlp->nlp_prev_state = ndlp->nlp_state;
4607 lpfc_nlp_set_state(vport, ndlp,
4608 NLP_STE_REG_LOGIN_ISSUE);
4609 }
4610
4611 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
4612 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4613 != MBX_NOT_FINISHED)
4614 goto out;
4615
4616 /* Decrement the ndlp reference count we
4617 * set for this failed mailbox command.
4618 */
4619 lpfc_nlp_put(ndlp);
4620 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
4621
4622 /* ELS rsp: Cannot issue reg_login for <NPortid> */
4623 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4624 "0138 ELS rsp: Cannot issue reg_login for x%x "
4625 "Data: x%x x%x x%x\n",
4626 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4627 ndlp->nlp_rpi);
4628
4629 if (lpfc_nlp_not_used(ndlp)) {
4630 ndlp = NULL;
4631 /* Indicate the node has already been released;
4632 * do not reference it from within
4633 * the routine lpfc_els_free_iocb.
4634 */
4635 cmdiocb->context1 = NULL;
4636 }
4637 } else {
4638 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
4639 if (!lpfc_error_lost_link(irsp) &&
4640 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
4641 if (lpfc_nlp_not_used(ndlp)) {
4642 ndlp = NULL;
4643 /* Indicate the node has already been
4644 * released; do not reference
4645 * it from within the routine
4646 * lpfc_els_free_iocb.
4647 */
4648 cmdiocb->context1 = NULL;
4649 }
4650 }
4651 }
4652 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
4653 if (mp) {
4654 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4655 kfree(mp);
4656 }
4657 mempool_free(mbox, phba->mbox_mem_pool);
4658 }
4659 out:
4660 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
4661 spin_lock_irq(shost->host_lock);
4662 if (mbox)
4663 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
4664 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
4665 spin_unlock_irq(shost->host_lock);
4666
4667 /* If the node is not being used by another discovery thread,
4668 * and we are sending a reject, we are done with it.
4669 * Release driver reference count here and free associated
4670 * resources.
4671 */
4672 if (ls_rjt)
4673 if (lpfc_nlp_not_used(ndlp))
4674 /* Indicate the node has already been released;
4675 * do not reference it from within
4676 * the routine lpfc_els_free_iocb.
4677 */
4678 cmdiocb->context1 = NULL;
4679
4680 }
4681
4682 lpfc_els_free_iocb(phba, cmdiocb);
4683 return;
4684 }
4685
4686 /**
4687 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
4688 * @vport: pointer to a host virtual N_Port data structure.
4689 * @flag: the els command code to be accepted.
4690 * @oldiocb: pointer to the original lpfc command iocb data structure.
4691 * @ndlp: pointer to a node-list data structure.
4692 * @mbox: pointer to the driver internal queue element for mailbox command.
4693 *
4694 * This routine prepares and issues an Accept (ACC) response IOCB
4695 * command. It uses the @flag to properly set up the IOCB field for the
4696 * specific ACC response command to be issued and invokes the
4697 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
4698 * @mbox pointer is passed in, it will be put into the context_un.mbox
4699 * field of the IOCB for the completion callback function to issue the
4700 * mailbox command to the HBA later when callback is invoked.
4701 *
4702 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4703 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4704 * will be stored into the context1 field of the IOCB for the completion
4705 * callback function to the corresponding response ELS IOCB command.
4706 *
4707 * Return code
4708 * 0 - Successfully issued acc response
4709 * 1 - Failed to issue acc response
4710 **/
4711 int
4712 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
4713 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4714 LPFC_MBOXQ_t *mbox)
4715 {
4716 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4717 struct lpfc_hba *phba = vport->phba;
4718 IOCB_t *icmd;
4719 IOCB_t *oldcmd;
4720 struct lpfc_iocbq *elsiocb;
4721 uint8_t *pcmd;
4722 struct serv_parm *sp;
4723 uint16_t cmdsize;
4724 int rc;
4725 ELS_PKT *els_pkt_ptr;
4726
4727 oldcmd = &oldiocb->iocb;
4728
4729 switch (flag) {
4730 case ELS_CMD_ACC:
4731 cmdsize = sizeof(uint32_t);
4732 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4733 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
4734 if (!elsiocb) {
4735 spin_lock_irq(shost->host_lock);
4736 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4737 spin_unlock_irq(shost->host_lock);
4738 return 1;
4739 }
4740
4741 icmd = &elsiocb->iocb;
4742 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4743 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4744 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4745 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4746 pcmd += sizeof(uint32_t);
4747
4748 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4749 "Issue ACC: did:x%x flg:x%x",
4750 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4751 break;
4752 case ELS_CMD_FLOGI:
4753 case ELS_CMD_PLOGI:
4754 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
4755 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4756 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
4757 if (!elsiocb)
4758 return 1;
4759
4760 icmd = &elsiocb->iocb;
4761 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4762 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4763 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4764
4765 if (mbox)
4766 elsiocb->context_un.mbox = mbox;
4767
4768 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4769 pcmd += sizeof(uint32_t);
4770 sp = (struct serv_parm *)pcmd;
4771
4772 if (flag == ELS_CMD_FLOGI) {
4773 /* Copy the received service parameters back */
4774 memcpy(sp, &phba->fc_fabparam,
4775 sizeof(struct serv_parm));
4776
4777 /* Clear the F_Port bit */
4778 sp->cmn.fPort = 0;
4779
4780 /* Mark all class service parameters as invalid */
4781 sp->cls1.classValid = 0;
4782 sp->cls2.classValid = 0;
4783 sp->cls3.classValid = 0;
4784 sp->cls4.classValid = 0;
4785
4786 /* Copy our worldwide names */
4787 memcpy(&sp->portName, &vport->fc_sparam.portName,
4788 sizeof(struct lpfc_name));
4789 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
4790 sizeof(struct lpfc_name));
4791 } else {
4792 memcpy(pcmd, &vport->fc_sparam,
4793 sizeof(struct serv_parm));
4794
4795 sp->cmn.valid_vendor_ver_level = 0;
4796 memset(sp->un.vendorVersion, 0,
4797 sizeof(sp->un.vendorVersion));
4798 sp->cmn.bbRcvSizeMsb &= 0xF;
4799
4800 /* If our firmware supports this feature, convey that
4801 * info to the target using the vendor specific field.
4802 */
4803 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
4804 sp->cmn.valid_vendor_ver_level = 1;
4805 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
4806 sp->un.vv.flags =
4807 cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
4808 }
4809 }
4810
4811 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4812 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
4813 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4814 break;
4815 case ELS_CMD_PRLO:
4816 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
4817 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4818 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
4819 if (!elsiocb)
4820 return 1;
4821
4822 icmd = &elsiocb->iocb;
4823 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4824 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4825 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4826
4827 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
4828 sizeof(uint32_t) + sizeof(PRLO));
4829 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
4830 els_pkt_ptr = (ELS_PKT *) pcmd;
4831 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
4832
4833 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4834 "Issue ACC PRLO: did:x%x flg:x%x",
4835 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4836 break;
4837 default:
4838 return 1;
4839 }
4840 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
4841 spin_lock_irq(shost->host_lock);
4842 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4843 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
4844 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4845 spin_unlock_irq(shost->host_lock);
4846 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
4847 } else {
4848 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4849 }
4850
4851 phba->fc_stat.elsXmitACC++;
4852 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4853 if (rc == IOCB_ERROR) {
4854 lpfc_els_free_iocb(phba, elsiocb);
4855 return 1;
4856 }
4857 return 0;
4858 }
4859
4860 /**
4861 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
4862 * @vport: pointer to a virtual N_Port data structure.
4863 * @rejectError: reject response to issue
4864 * @oldiocb: pointer to the original lpfc command iocb data structure.
4865 * @ndlp: pointer to a node-list data structure.
4866 * @mbox: pointer to the driver internal queue element for mailbox command.
4867 *
4868 * This routine prepares and issues a Reject (RJT) response IOCB
4869 * command. If a @mbox pointer is passed in, it will be put into the
4870 * context_un.mbox field of the IOCB for the completion callback function
4871 * to issue to the HBA later.
4872 *
4873 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4874 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4875 * will be stored into the context1 field of the IOCB for the completion
4876 * callback function to the reject response ELS IOCB command.
4877 *
4878 * Return code
4879 * 0 - Successfully issued reject response
4880 * 1 - Failed to issue reject response
4881 **/
4882 int
4883 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
4884 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4885 LPFC_MBOXQ_t *mbox)
4886 {
4887 struct lpfc_hba *phba = vport->phba;
4888 IOCB_t *icmd;
4889 IOCB_t *oldcmd;
4890 struct lpfc_iocbq *elsiocb;
4891 uint8_t *pcmd;
4892 uint16_t cmdsize;
4893 int rc;
4894
4895 cmdsize = 2 * sizeof(uint32_t);
4896 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4897 ndlp->nlp_DID, ELS_CMD_LS_RJT);
4898 if (!elsiocb)
4899 return 1;
4900
4901 icmd = &elsiocb->iocb;
4902 oldcmd = &oldiocb->iocb;
4903 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4904 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4905 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4906
4907 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
4908 pcmd += sizeof(uint32_t);
4909 *((uint32_t *) (pcmd)) = rejectError;
4910
4911 if (mbox)
4912 elsiocb->context_un.mbox = mbox;
4913
4914 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
4915 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4916 "0129 Xmit ELS RJT x%x response tag x%x "
4917 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4918 "rpi x%x\n",
4919 rejectError, elsiocb->iotag,
4920 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
4921 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
4922 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4923 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
4924 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
4925
4926 phba->fc_stat.elsXmitLSRJT++;
4927 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4928 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4929
4930 if (rc == IOCB_ERROR) {
4931 lpfc_els_free_iocb(phba, elsiocb);
4932 return 1;
4933 }
4934 return 0;
4935 }
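/*
 * The LS_RJT payload built above is just two 32-bit words: the
 * ELS_CMD_LS_RJT command word followed by the reason/explanation word that
 * the caller passes in as @rejectError (normally taken from struct ls_rjt's
 * un.lsRjtError union member).  The stand-alone sketch below shows one
 * plausible way such a reason word could be packed, assuming the usual FC-LS
 * byte layout (reserved, reason code, reason explanation, vendor unique,
 * most significant byte first); the helper name and layout are illustrative
 * only, and the driver's struct ls_rjt remains the authoritative definition.
 */
#if 0	/* illustrative sketch, not built as part of the driver */
#include <stdint.h>

/*
 * Pack the fields into a host-order word whose big-endian representation
 * (e.g. after cpu_to_be32()) matches the wire order
 * reserved/reason/explanation/vendor.
 */
static uint32_t pack_ls_rjt_word(uint8_t reason, uint8_t explanation,
				 uint8_t vendor_unique)
{
	return ((uint32_t)reason << 16) |
	       ((uint32_t)explanation << 8) |
	       (uint32_t)vendor_unique;
}
#endif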
4936
4937 /**
4938 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
4939 * @vport: pointer to a virtual N_Port data structure.
4940 * @oldiocb: pointer to the original lpfc command iocb data structure.
4941 * @ndlp: pointer to a node-list data structure.
4942 *
4943 * This routine prepares and issues an Accept (ACC) response to an Address
4944 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
4945 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4946 *
4947 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4948 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4949 * will be stored into the context1 field of the IOCB for the completion
4950 * callback function to the ADISC Accept response ELS IOCB command.
4951 *
4952 * Return code
4953 * 0 - Successfully issued acc adisc response
4954 * 1 - Failed to issue adisc acc response
4955 **/
4956 int
4957 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4958 struct lpfc_nodelist *ndlp)
4959 {
4960 struct lpfc_hba *phba = vport->phba;
4961 ADISC *ap;
4962 IOCB_t *icmd, *oldcmd;
4963 struct lpfc_iocbq *elsiocb;
4964 uint8_t *pcmd;
4965 uint16_t cmdsize;
4966 int rc;
4967
4968 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
4969 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4970 ndlp->nlp_DID, ELS_CMD_ACC);
4971 if (!elsiocb)
4972 return 1;
4973
4974 icmd = &elsiocb->iocb;
4975 oldcmd = &oldiocb->iocb;
4976 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4977 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4978
4979 /* Xmit ADISC ACC response tag <ulpIoTag> */
4980 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4981 "0130 Xmit ADISC ACC response iotag x%x xri: "
4982 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
4983 elsiocb->iotag, elsiocb->iocb.ulpContext,
4984 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4985 ndlp->nlp_rpi);
4986 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4987
4988 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4989 pcmd += sizeof(uint32_t);
4990
4991 ap = (ADISC *) (pcmd);
4992 ap->hardAL_PA = phba->fc_pref_ALPA;
4993 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4994 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
4995 ap->DID = be32_to_cpu(vport->fc_myDID);
4996
4997 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4998 "Issue ACC ADISC: did:x%x flg:x%x",
4999 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5000
5001 phba->fc_stat.elsXmitACC++;
5002 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5003 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5004 if (rc == IOCB_ERROR) {
5005 lpfc_els_free_iocb(phba, elsiocb);
5006 return 1;
5007 }
5008
5009 /* Xmit ELS ACC response tag <ulpIoTag> */
5010 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5011 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
5012 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5013 "RPI: x%x, fc_flag x%x\n",
5014 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5015 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5016 ndlp->nlp_rpi, vport->fc_flag);
5017 return 0;
5018 }
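/*
 * The ADISC accept assembled above has a fixed shape: the ELS_CMD_ACC
 * command word followed by the ADISC parameter block (hard address, port
 * name, node name, N_Port ID).  Below is a stand-alone sketch of an
 * equivalent layout using plain integer types; the struct and field names
 * are hypothetical stand-ins for the driver's ADISC and struct lpfc_name
 * definitions, and byte-order handling is intentionally simplified.
 */
#if 0	/* illustrative sketch, not built as part of the driver */
#include <stdint.h>
#include <string.h>

struct adisc_acc_sketch {
	uint32_t els_cmd;	/* ELS_CMD_ACC command word */
	uint32_t hard_addr;	/* hard address (AL_PA) of the responder */
	uint8_t  port_name[8];	/* WWPN */
	uint8_t  node_name[8];	/* WWNN */
	uint32_t nport_id;	/* responder's DID, big-endian on the wire */
};

static void fill_adisc_acc_sketch(struct adisc_acc_sketch *p, uint32_t acc,
				  const uint8_t wwpn[8], const uint8_t wwnn[8],
				  uint32_t did_be, uint32_t hard_addr)
{
	memset(p, 0, sizeof(*p));
	p->els_cmd = acc;
	p->hard_addr = hard_addr;
	memcpy(p->port_name, wwpn, sizeof(p->port_name));
	memcpy(p->node_name, wwnn, sizeof(p->node_name));
	p->nport_id = did_be;
}
#endif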
5019
5020 /**
5021 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
5022 * @vport: pointer to a virtual N_Port data structure.
5023 * @oldiocb: pointer to the original lpfc command iocb data structure.
5024 * @ndlp: pointer to a node-list data structure.
5025 *
5026 * This routine prepares and issues an Accept (ACC) response to a Process
5027 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
5028 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
5029 *
5030 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5031 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5032 * will be stored into the context1 field of the IOCB for the completion
5033 * callback function to the PRLI Accept response ELS IOCB command.
5034 *
5035 * Return code
5036 * 0 - Successfully issued acc prli response
5037 * 1 - Failed to issue acc prli response
5038 **/
5039 int
5040 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5041 struct lpfc_nodelist *ndlp)
5042 {
5043 struct lpfc_hba *phba = vport->phba;
5044 PRLI *npr;
5045 struct lpfc_nvme_prli *npr_nvme;
5046 lpfc_vpd_t *vpd;
5047 IOCB_t *icmd;
5048 IOCB_t *oldcmd;
5049 struct lpfc_iocbq *elsiocb;
5050 uint8_t *pcmd;
5051 uint16_t cmdsize;
5052 uint32_t prli_fc4_req, *req_payload;
5053 struct lpfc_dmabuf *req_buf;
5054 int rc;
5055 u32 elsrspcmd;
5056
5057 /* Need the incoming PRLI payload to determine if the ACC is for an
5058 * FC4 or NVME PRLI type. The PRLI type is at word 1.
5059 */
5060 req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
5061 req_payload = (((uint32_t *)req_buf->virt) + 1);
5062
5063 /* PRLI type payload is at byte 3 for FCP or NVME. */
5064 prli_fc4_req = be32_to_cpu(*req_payload);
5065 prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
5066 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5067 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n",
5068 prli_fc4_req, *((uint32_t *)req_payload));
5069
5070 if (prli_fc4_req == PRLI_FCP_TYPE) {
5071 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
5072 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
5073 } else if (prli_fc4_req & PRLI_NVME_TYPE) {
5074 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
5075 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
5076 } else {
5077 return 1;
5078 }
5079
5080 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5081 ndlp->nlp_DID, elsrspcmd);
5082 if (!elsiocb)
5083 return 1;
5084
5085 icmd = &elsiocb->iocb;
5086 oldcmd = &oldiocb->iocb;
5087 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5088 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5089
5090 /* Xmit PRLI ACC response tag <ulpIoTag> */
5091 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5092 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
5093 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5094 elsiocb->iotag, elsiocb->iocb.ulpContext,
5095 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5096 ndlp->nlp_rpi);
5097 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5098 memset(pcmd, 0, cmdsize);
5099
5100 *((uint32_t *)(pcmd)) = elsrspcmd;
5101 pcmd += sizeof(uint32_t);
5102
5103 /* For PRLI, remainder of payload is PRLI parameter page */
5104 vpd = &phba->vpd;
5105
5106 if (prli_fc4_req == PRLI_FCP_TYPE) {
5107 /*
5108 * If the remote port is a target and our firmware version
5109 * is 3.20 or later, set the following bits for FC-TAPE
5110 * support.
5111 */
5112 npr = (PRLI *) pcmd;
5113 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
5114 (vpd->rev.feaLevelHigh >= 0x02)) {
5115 npr->ConfmComplAllowed = 1;
5116 npr->Retry = 1;
5117 npr->TaskRetryIdReq = 1;
5118 }
5119 npr->acceptRspCode = PRLI_REQ_EXECUTED;
5120 npr->estabImagePair = 1;
5121 npr->readXferRdyDis = 1;
5122 npr->ConfmComplAllowed = 1;
5123 npr->prliType = PRLI_FCP_TYPE;
5124 npr->initiatorFunc = 1;
5125 } else if (prli_fc4_req & PRLI_NVME_TYPE) {
5126 /* Respond with an NVME PRLI Type */
5127 npr_nvme = (struct lpfc_nvme_prli *) pcmd;
5128 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
5129 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
5130 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
5131 if (phba->nvmet_support) {
5132 bf_set(prli_tgt, npr_nvme, 1);
5133 bf_set(prli_disc, npr_nvme, 1);
5134 if (phba->cfg_nvme_enable_fb) {
5135 bf_set(prli_fba, npr_nvme, 1);
5136
5137 /* TBD. Target mode needs to post buffers
5138 * that support the configured first burst
5139 * byte size.
5140 */
5141 bf_set(prli_fb_sz, npr_nvme,
5142 phba->cfg_nvmet_fb_size);
5143 }
5144 } else {
5145 bf_set(prli_init, npr_nvme, 1);
5146 }
5147
5148 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
5149 "6015 NVME issue PRLI ACC word1 x%08x "
5150 "word4 x%08x word5 x%08x flag x%x, "
5151 "fcp_info x%x nlp_type x%x\n",
5152 npr_nvme->word1, npr_nvme->word4,
5153 npr_nvme->word5, ndlp->nlp_flag,
5154 ndlp->nlp_fcp_info, ndlp->nlp_type);
5155 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
5156 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
5157 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
5158 } else
5159 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5160 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
5161 prli_fc4_req, ndlp->nlp_fc4_type,
5162 ndlp->nlp_DID);
5163
5164 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5165 "Issue ACC PRLI: did:x%x flg:x%x",
5166 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5167
5168 phba->fc_stat.elsXmitACC++;
5169 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5170
5171 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5172 if (rc == IOCB_ERROR) {
5173 lpfc_els_free_iocb(phba, elsiocb);
5174 return 1;
5175 }
5176 return 0;
5177 }
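/*
 * The FCP-versus-NVMe decision above keys off the FC-4 TYPE field carried in
 * the most significant byte of PRLI payload word 1, which arrives in
 * big-endian wire order.  A minimal stand-alone sketch of that extraction is
 * shown below; ntohl() stands in for be32_to_cpu(), and the TYPE constants
 * mirror the driver's PRLI_FCP_TYPE/PRLI_NVME_TYPE values only by
 * assumption.
 */
#if 0	/* illustrative sketch, not built as part of the driver */
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl(): user-space stand-in for be32_to_cpu() */

#define EX_PRLI_FCP_TYPE	0x08	/* assumed FC-4 TYPE code for FCP */
#define EX_PRLI_NVME_TYPE	0x28	/* assumed FC-4 TYPE code for NVMe */

/* word1_be is PRLI payload word 1 exactly as received from the wire */
static unsigned int prli_fc4_type(uint32_t word1_be)
{
	return (ntohl(word1_be) >> 24) & 0xff;
}

static int prli_requests_nvme(uint32_t word1_be)
{
	/* same test shape as the routine above: any NVMe type bit set */
	return (prli_fc4_type(word1_be) & EX_PRLI_NVME_TYPE) != 0;
}
#endif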
5178
5179 /**
5180 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
5181 * @vport: pointer to a virtual N_Port data structure.
5182 * @format: rnid command format.
5183 * @oldiocb: pointer to the original lpfc command iocb data structure.
5184 * @ndlp: pointer to a node-list data structure.
5185 *
5186 * This routine issues a Request Node Identification Data (RNID) Accept
5187 * (ACC) response. It constructs the RNID ACC response command according to
5188 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
5189 * issue the response. Note that this command does not need to hold the ndlp
5190 * reference count for the callback. So, the ndlp reference count taken by
5191 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
5192 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
5193 * there is no ndlp reference available.
5194 *
5195 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
5196 * will be incremented by 1 for holding the ndlp and the reference to ndlp
5197 * will be stored into the context1 field of the IOCB for the completion
5198 * callback function. However, for the RNID Accept Response ELS command,
5199 * this is undone later by this routine after the IOCB is allocated.
5200 *
5201 * Return code
5202 * 0 - Successfully issued acc rnid response
5203 * 1 - Failed to issue acc rnid response
5204 **/
5205 static int
5206 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
5207 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
5208 {
5209 struct lpfc_hba *phba = vport->phba;
5210 RNID *rn;
5211 IOCB_t *icmd, *oldcmd;
5212 struct lpfc_iocbq *elsiocb;
5213 uint8_t *pcmd;
5214 uint16_t cmdsize;
5215 int rc;
5216
5217 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
5218 + (2 * sizeof(struct lpfc_name));
5219 if (format)
5220 cmdsize += sizeof(RNID_TOP_DISC);
5221
5222 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5223 ndlp->nlp_DID, ELS_CMD_ACC);
5224 if (!elsiocb)
5225 return 1;
5226
5227 icmd = &elsiocb->iocb;
5228 oldcmd = &oldiocb->iocb;
5229 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
5230 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5231
5232 /* Xmit RNID ACC response tag <ulpIoTag> */
5233 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5234 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
5235 elsiocb->iotag, elsiocb->iocb.ulpContext);
5236 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5237 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5238 pcmd += sizeof(uint32_t);
5239
5240 memset(pcmd, 0, sizeof(RNID));
5241 rn = (RNID *) (pcmd);
5242 rn->Format = format;
5243 rn->CommonLen = (2 * sizeof(struct lpfc_name));
5244 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
5245 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
5246 switch (format) {
5247 case 0:
5248 rn->SpecificLen = 0;
5249 break;
5250 case RNID_TOPOLOGY_DISC:
5251 rn->SpecificLen = sizeof(RNID_TOP_DISC);
5252 memcpy(&rn->un.topologyDisc.portName,
5253 &vport->fc_portname, sizeof(struct lpfc_name));
5254 rn->un.topologyDisc.unitType = RNID_HBA;
5255 rn->un.topologyDisc.physPort = 0;
5256 rn->un.topologyDisc.attachedNodes = 0;
5257 break;
5258 default:
5259 rn->CommonLen = 0;
5260 rn->SpecificLen = 0;
5261 break;
5262 }
5263
5264 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5265 "Issue ACC RNID: did:x%x flg:x%x",
5266 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5267
5268 phba->fc_stat.elsXmitACC++;
5269 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5270
5271 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5272 if (rc == IOCB_ERROR) {
5273 lpfc_els_free_iocb(phba, elsiocb);
5274 return 1;
5275 }
5276 return 0;
5277 }
5278
5279 /**
5280 * lpfc_els_clear_rrq - Clear the exchange that this RRQ describes.
5281 * @vport: pointer to a virtual N_Port data structure.
5282 * @iocb: pointer to the lpfc command iocb data structure.
5283 * @ndlp: pointer to a node-list data structure.
5284 *
5285 * Return: none
5286 **/
5287 static void
5288 lpfc_els_clear_rrq(struct lpfc_vport *vport,
5289 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
5290 {
5291 struct lpfc_hba *phba = vport->phba;
5292 uint8_t *pcmd;
5293 struct RRQ *rrq;
5294 uint16_t rxid;
5295 uint16_t xri;
5296 struct lpfc_node_rrq *prrq;
5297
5298
5299 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
5300 pcmd += sizeof(uint32_t);
5301 rrq = (struct RRQ *)pcmd;
5302 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
5303 rxid = bf_get(rrq_rxid, rrq);
5304
5305 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5306 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
5307 " x%x x%x\n",
5308 be32_to_cpu(bf_get(rrq_did, rrq)),
5309 bf_get(rrq_oxid, rrq),
5310 rxid,
5311 iocb->iotag, iocb->iocb.ulpContext);
5312
5313 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5314 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
5315 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
5316 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
5317 xri = bf_get(rrq_oxid, rrq);
5318 else
5319 xri = rxid;
5320 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
5321 if (prrq)
5322 lpfc_clr_rrq_active(phba, xri, prrq);
5323 return;
5324 }
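/*
 * The routine above has to decide which exchange identifier in the RRQ
 * payload is locally significant before it can look up the active RRQ: if
 * the local port's DID matches the DID carried in the RRQ, the local port
 * originated the exchange and the OX_ID is its XRI, otherwise the RX_ID is.
 * A tiny stand-alone sketch of that selection follows; the helper name and
 * types are hypothetical.
 */
#if 0	/* illustrative sketch, not built as part of the driver */
#include <stdint.h>

/* mirrors the OX_ID/RX_ID choice made in lpfc_els_clear_rrq() */
static uint16_t rrq_local_xri(uint32_t my_did, uint32_t rrq_did,
			      uint16_t oxid, uint16_t rxid)
{
	return (my_did == rrq_did) ? oxid : rxid;
}
#endif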
5325
5326 /**
5327 * lpfc_els_rsp_echo_acc - Issue echo acc response
5328 * @vport: pointer to a virtual N_Port data structure.
5329 * @data: pointer to echo data to return in the accept.
5330 * @oldiocb: pointer to the original lpfc command iocb data structure.
5331 * @ndlp: pointer to a node-list data structure.
5332 *
5333 * Return code
5334 * 0 - Successfully issued acc echo response
5335 * 1 - Failed to issue acc echo response
5336 **/
5337 static int
5338 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
5339 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
5340 {
5341 struct lpfc_hba *phba = vport->phba;
5342 struct lpfc_iocbq *elsiocb;
5343 uint8_t *pcmd;
5344 uint16_t cmdsize;
5345 int rc;
5346
5347 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
5348
5349 /* The accumulated length can exceed the BPL_SIZE. For
5350 * now, use this as the limit
5351 */
5352 if (cmdsize > LPFC_BPL_SIZE)
5353 cmdsize = LPFC_BPL_SIZE;
5354 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5355 ndlp->nlp_DID, ELS_CMD_ACC);
5356 if (!elsiocb)
5357 return 1;
5358
5359 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
5360 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
5361
5362 /* Xmit ECHO ACC response tag <ulpIoTag> */
5363 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5364 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
5365 elsiocb->iotag, elsiocb->iocb.ulpContext);
5366 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5367 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5368 pcmd += sizeof(uint32_t);
5369 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
5370
5371 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5372 "Issue ACC ECHO: did:x%x flg:x%x",
5373 ndlp->nlp_DID, ndlp->nlp_flag, 0);
5374
5375 phba->fc_stat.elsXmitACC++;
5376 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5377
5378 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5379 if (rc == IOCB_ERROR) {
5380 lpfc_els_free_iocb(phba, elsiocb);
5381 return 1;
5382 }
5383 return 0;
5384 }
5385
5386 /**
5387 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
5388 * @vport: pointer to a host virtual N_Port data structure.
5389 *
5390 * This routine issues Address Discover (ADISC) ELS commands to the N_Ports
5391 * of the @vport that are in node port recovery state and for which an ADISC
5392 * has not yet been issued. Each time an ELS ADISC IOCB is issued by invoking
5393 * the lpfc_issue_els_adisc() routine, the per-@vport discovery count
5394 * (num_disc_nodes) is incremented. If num_disc_nodes reaches the
5395 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
5396 * set in the @vport fc_flag and the issuing of the remaining ADISC IOCBs
5397 * stops, to be picked up on a later pass. Otherwise, after walking through
5398 * all the ndlps of the @vport without issuing any ADISC IOCB, the FC_NLP_MORE
5399 * bit is cleared from the @vport fc_flag to indicate that no more ADISCs
5400 * need to be sent.
5401 *
5402 * Return code
5403 * The number of N_Ports with adisc issued.
5404 **/
5405 int
5406 lpfc_els_disc_adisc(struct lpfc_vport *vport)
5407 {
5408 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5409 struct lpfc_nodelist *ndlp, *next_ndlp;
5410 int sentadisc = 0;
5411
5412 /* go thru NPR nodes and issue any remaining ELS ADISCs */
5413 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5414 if (!NLP_CHK_NODE_ACT(ndlp))
5415 continue;
5416 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5417 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5418 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
5419 spin_lock_irq(shost->host_lock);
5420 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5421 spin_unlock_irq(shost->host_lock);
5422 ndlp->nlp_prev_state = ndlp->nlp_state;
5423 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
5424 lpfc_issue_els_adisc(vport, ndlp, 0);
5425 sentadisc++;
5426 vport->num_disc_nodes++;
5427 if (vport->num_disc_nodes >=
5428 vport->cfg_discovery_threads) {
5429 spin_lock_irq(shost->host_lock);
5430 vport->fc_flag |= FC_NLP_MORE;
5431 spin_unlock_irq(shost->host_lock);
5432 break;
5433 }
5434 }
5435 }
5436 if (sentadisc == 0) {
5437 spin_lock_irq(shost->host_lock);
5438 vport->fc_flag &= ~FC_NLP_MORE;
5439 spin_unlock_irq(shost->host_lock);
5440 }
5441 return sentadisc;
5442 }
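/*
 * This routine and lpfc_els_disc_plogi() below share the same throttling
 * pattern: walk the node list, issue at most cfg_discovery_threads commands
 * per pass, and leave a "more work pending" flag set so a later pass can
 * resume where this one stopped.  The generic, stand-alone sketch below
 * captures just that pattern; it uses an array instead of the driver's
 * linked list and entirely hypothetical names.
 */
#if 0	/* illustrative sketch, not built as part of the driver */
#include <stdbool.h>
#include <stddef.h>

struct ex_node {
	bool needs_disc;	/* analogous to NLP_NPR_2B_DISC */
};

/* returns how many commands were issued; sets *more if the cap was hit */
static int issue_disc_batch(struct ex_node *nodes, size_t n, int threshold,
			    int *num_disc, bool *more,
			    void (*issue)(struct ex_node *node))
{
	int sent = 0;
	size_t i;

	*more = false;
	for (i = 0; i < n; i++) {
		if (!nodes[i].needs_disc)
			continue;
		issue(&nodes[i]);
		nodes[i].needs_disc = false;
		sent++;
		if (++(*num_disc) >= threshold) {
			*more = true;	/* resume on a later pass */
			break;
		}
	}
	return sent;
}
#endif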
5443
5444 /**
5445 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
5446 * @vport: pointer to a host virtual N_Port data structure.
5447 *
5448 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports of
5449 * the @vport that are in node port recovery state. Each time an ELS PLOGI
5450 * IOCB is issued by invoking the lpfc_issue_els_plogi() routine, the
5451 * per-@vport discovery count (num_disc_nodes) is incremented. If
5452 * num_disc_nodes reaches the pre-configured threshold
5453 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport fc_flag
5454 * and the issuing of the remaining PLOGI IOCBs stops, to be picked up on a
5455 * later pass. Otherwise, after walking through all the ndlps of the @vport
5456 * without issuing any PLOGI IOCB, the FC_NLP_MORE bit is cleared from the
5457 * @vport fc_flag to indicate that there are no more PLOGIs that need to
5458 * be sent.
5459 *
5460 * Return code
5461 * The number of N_Ports with plogi issued.
5462 **/
5463 int
5464 lpfc_els_disc_plogi(struct lpfc_vport *vport)
5465 {
5466 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5467 struct lpfc_nodelist *ndlp, *next_ndlp;
5468 int sentplogi = 0;
5469
5470 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
5471 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5472 if (!NLP_CHK_NODE_ACT(ndlp))
5473 continue;
5474 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5475 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5476 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
5477 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
5478 ndlp->nlp_prev_state = ndlp->nlp_state;
5479 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
5480 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5481 sentplogi++;
5482 vport->num_disc_nodes++;
5483 if (vport->num_disc_nodes >=
5484 vport->cfg_discovery_threads) {
5485 spin_lock_irq(shost->host_lock);
5486 vport->fc_flag |= FC_NLP_MORE;
5487 spin_unlock_irq(shost->host_lock);
5488 break;
5489 }
5490 }
5491 }
5492
5493 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5494 "6452 Discover PLOGI %d flag x%x\n",
5495 sentplogi, vport->fc_flag);
5496
5497 if (sentplogi) {
5498 lpfc_set_disctmo(vport);
5499 }
5500 else {
5501 spin_lock_irq(shost->host_lock);
5502 vport->fc_flag &= ~FC_NLP_MORE;
5503 spin_unlock_irq(shost->host_lock);
5504 }
5505 return sentplogi;
5506 }
5507
5508 static uint32_t
5509 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
5510 uint32_t word0)
5511 {
5512
5513 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
5514 desc->payload.els_req = word0;
5515 desc->length = cpu_to_be32(sizeof(desc->payload));
5516
5517 return sizeof(struct fc_rdp_link_service_desc);
5518 }
5519
5520 static uint32_t
5521 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
5522 uint8_t *page_a0, uint8_t *page_a2)
5523 {
5524 uint16_t wavelength;
5525 uint16_t temperature;
5526 uint16_t rx_power;
5527 uint16_t tx_bias;
5528 uint16_t tx_power;
5529 uint16_t vcc;
5530 uint16_t flag = 0;
5531 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
5532 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
5533
5534 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
5535
5536 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
5537 &page_a0[SSF_TRANSCEIVER_CODE_B4];
5538 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
5539 &page_a0[SSF_TRANSCEIVER_CODE_B5];
5540
5541 if ((trasn_code_byte4->fc_sw_laser) ||
5542 (trasn_code_byte5->fc_sw_laser_sl) ||
5543 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if it's short WL */
5544 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
5545 } else if (trasn_code_byte4->fc_lw_laser) {
5546 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
5547 page_a0[SSF_WAVELENGTH_B0];
5548 if (wavelength == SFP_WAVELENGTH_LC1310)
5549 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
5550 if (wavelength == SFP_WAVELENGTH_LL1550)
5551 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
5552 }
5553 /* check if it's SFP+ */
5554 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
5555 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
5556 << SFP_FLAG_CT_SHIFT;
5557
5558 /* check if it's OPTICAL */
5559 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
5560 SFP_FLAG_IS_OPTICAL_PORT : 0)
5561 << SFP_FLAG_IS_OPTICAL_SHIFT;
5562
5563 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
5564 page_a2[SFF_TEMPERATURE_B0]);
5565 vcc = (page_a2[SFF_VCC_B1] << 8 |
5566 page_a2[SFF_VCC_B0]);
5567 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
5568 page_a2[SFF_TXPOWER_B0]);
5569 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
5570 page_a2[SFF_TX_BIAS_CURRENT_B0]);
5571 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
5572 page_a2[SFF_RXPOWER_B0]);
5573 desc->sfp_info.temperature = cpu_to_be16(temperature);
5574 desc->sfp_info.rx_power = cpu_to_be16(rx_power);
5575 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
5576 desc->sfp_info.tx_power = cpu_to_be16(tx_power);
5577 desc->sfp_info.vcc = cpu_to_be16(vcc);
5578
5579 desc->sfp_info.flags = cpu_to_be16(flag);
5580 desc->length = cpu_to_be32(sizeof(desc->sfp_info));
5581
5582 return sizeof(struct fc_rdp_sfp_desc);
5583 }
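/*
 * Each diagnostic value gathered above (temperature, Vcc, TX bias, TX power,
 * RX power) is a 16-bit quantity stored high byte first at consecutive
 * offsets in the SFP's A2 page and then converted to big-endian for the RDP
 * descriptor.  The stand-alone sketch below shows that two-byte assembly;
 * htons() stands in for cpu_to_be16(), and the assumption that the MSB sits
 * at the lower offset mirrors the _B1/_B0 usage above.
 */
#if 0	/* illustrative sketch, not built as part of the driver */
#include <stdint.h>
#include <arpa/inet.h>	/* htons(): user-space stand-in for cpu_to_be16() */

/* read one 16-bit diagnostic value from page A2, MSB at msb_off */
static uint16_t sfp_a2_word_be(const uint8_t *page_a2, unsigned int msb_off)
{
	uint16_t val = ((uint16_t)page_a2[msb_off] << 8) |
		       page_a2[msb_off + 1];

	return htons(val);	/* descriptor fields are big-endian */
}
#endif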
5584
5585 static uint32_t
5586 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
5587 READ_LNK_VAR *stat)
5588 {
5589 uint32_t type;
5590
5591 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
5592
5593 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
5594
5595 desc->info.port_type = cpu_to_be32(type);
5596
5597 desc->info.link_status.link_failure_cnt =
5598 cpu_to_be32(stat->linkFailureCnt);
5599 desc->info.link_status.loss_of_synch_cnt =
5600 cpu_to_be32(stat->lossSyncCnt);
5601 desc->info.link_status.loss_of_signal_cnt =
5602 cpu_to_be32(stat->lossSignalCnt);
5603 desc->info.link_status.primitive_seq_proto_err =
5604 cpu_to_be32(stat->primSeqErrCnt);
5605 desc->info.link_status.invalid_trans_word =
5606 cpu_to_be32(stat->invalidXmitWord);
5607 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
5608
5609 desc->length = cpu_to_be32(sizeof(desc->info));
5610
5611 return sizeof(struct fc_rdp_link_error_status_desc);
5612 }
5613
5614 static uint32_t
5615 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
5616 struct lpfc_vport *vport)
5617 {
5618 uint32_t bbCredit;
5619
5620 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
5621
5622 bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
5623 (vport->fc_sparam.cmn.bbCreditMsb << 8);
5624 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
5625 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
5626 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
5627 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
5628 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
5629 } else {
5630 desc->bbc_info.attached_port_bbc = 0;
5631 }
5632
5633 desc->bbc_info.rtt = 0;
5634 desc->length = cpu_to_be32(sizeof(desc->bbc_info));
5635
5636 return sizeof(struct fc_rdp_bbc_desc);
5637 }
5638
5639 static uint32_t
5640 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
5641 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
5642 {
5643 uint32_t flags = 0;
5644
5645 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5646
5647 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
5648 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
5649 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
5650 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
5651
5652 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
5653 flags |= RDP_OET_HIGH_ALARM;
5654 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
5655 flags |= RDP_OET_LOW_ALARM;
5656 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
5657 flags |= RDP_OET_HIGH_WARNING;
5658 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
5659 flags |= RDP_OET_LOW_WARNING;
5660
5661 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
5662 desc->oed_info.function_flags = cpu_to_be32(flags);
5663 desc->length = cpu_to_be32(sizeof(desc->oed_info));
5664 return sizeof(struct fc_rdp_oed_sfp_desc);
5665 }
5666
5667 static uint32_t
5668 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
5669 struct fc_rdp_oed_sfp_desc *desc,
5670 uint8_t *page_a2)
5671 {
5672 uint32_t flags = 0;
5673
5674 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5675
5676 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
5677 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
5678 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
5679 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
5680
5681 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
5682 flags |= RDP_OET_HIGH_ALARM;
5683 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
5684 flags |= RDP_OET_LOW_ALARM;
5685 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
5686 flags |= RDP_OET_HIGH_WARNING;
5687 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
5688 flags |= RDP_OET_LOW_WARNING;
5689
5690 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
5691 desc->oed_info.function_flags = cpu_to_be32(flags);
5692 desc->length = cpu_to_be32(sizeof(desc->oed_info));
5693 return sizeof(struct fc_rdp_oed_sfp_desc);
5694 }
5695
5696 static uint32_t
5697 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
5698 struct fc_rdp_oed_sfp_desc *desc,
5699 uint8_t *page_a2)
5700 {
5701 uint32_t flags = 0;
5702
5703 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5704
5705 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
5706 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
5707 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
5708 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
5709
5710 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
5711 flags |= RDP_OET_HIGH_ALARM;
5712 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
5713 flags |= RDP_OET_LOW_ALARM;
5714 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
5715 flags |= RDP_OET_HIGH_WARNING;
5716 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
5717 flags |= RDP_OET_LOW_WARNING;
5718
5719 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
5720 desc->oed_info.function_flags = cpu_to_be32(flags);
5721 desc->length = cpu_to_be32(sizeof(desc->oed_info));
5722 return sizeof(struct fc_rdp_oed_sfp_desc);
5723 }
5724
5725 static uint32_t
5726 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
5727 struct fc_rdp_oed_sfp_desc *desc,
5728 uint8_t *page_a2)
5729 {
5730 uint32_t flags = 0;
5731
5732 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5733
5734 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
5735 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
5736 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
5737 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
5738
5739 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
5740 flags |= RDP_OET_HIGH_ALARM;
5741 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
5742 flags |= RDP_OET_LOW_ALARM;
5743 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
5744 flags |= RDP_OET_HIGH_WARNING;
5745 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
5746 flags |= RDP_OET_LOW_WARNING;
5747
5748 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
5749 desc->oed_info.function_flags = cpu_to_be32(flags);
5750 desc->length = cpu_to_be32(sizeof(desc->oed_info));
5751 return sizeof(struct fc_rdp_oed_sfp_desc);
5752 }
5753
5754
5755 static uint32_t
5756 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
5757 struct fc_rdp_oed_sfp_desc *desc,
5758 uint8_t *page_a2)
5759 {
5760 uint32_t flags = 0;
5761
5762 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5763
5764 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
5765 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
5766 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
5767 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
5768
5769 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
5770 flags |= RDP_OET_HIGH_ALARM;
5771 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
5772 flags |= RDP_OET_LOW_ALARM;
5773 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
5774 flags |= RDP_OET_HIGH_WARNING;
5775 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
5776 flags |= RDP_OET_LOW_WARNING;
5777
5778 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
5779 desc->oed_info.function_flags = cpu_to_be32(flags);
5780 desc->length = cpu_to_be32(sizeof(desc->oed_info));
5781 return sizeof(struct fc_rdp_oed_sfp_desc);
5782 }
5783
5784 static uint32_t
5785 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
5786 uint8_t *page_a0, struct lpfc_vport *vport)
5787 {
5788 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
5789 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
5790 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
5791 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
5792 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
5793 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
5794 desc->length = cpu_to_be32(sizeof(desc->opd_info));
5795 return sizeof(struct fc_rdp_opd_sfp_desc);
5796 }
5797
5798 static uint32_t
5799 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
5800 {
5801 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
5802 return 0;
5803 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
5804
5805 desc->info.CorrectedBlocks =
5806 cpu_to_be32(stat->fecCorrBlkCount);
5807 desc->info.UncorrectableBlocks =
5808 cpu_to_be32(stat->fecUncorrBlkCount);
5809
5810 desc->length = cpu_to_be32(sizeof(desc->info));
5811
5812 return sizeof(struct fc_fec_rdp_desc);
5813 }
5814
5815 static uint32_t
5816 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
5817 {
5818 uint16_t rdp_cap = 0;
5819 uint16_t rdp_speed;
5820
5821 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
5822
5823 switch (phba->fc_linkspeed) {
5824 case LPFC_LINK_SPEED_1GHZ:
5825 rdp_speed = RDP_PS_1GB;
5826 break;
5827 case LPFC_LINK_SPEED_2GHZ:
5828 rdp_speed = RDP_PS_2GB;
5829 break;
5830 case LPFC_LINK_SPEED_4GHZ:
5831 rdp_speed = RDP_PS_4GB;
5832 break;
5833 case LPFC_LINK_SPEED_8GHZ:
5834 rdp_speed = RDP_PS_8GB;
5835 break;
5836 case LPFC_LINK_SPEED_10GHZ:
5837 rdp_speed = RDP_PS_10GB;
5838 break;
5839 case LPFC_LINK_SPEED_16GHZ:
5840 rdp_speed = RDP_PS_16GB;
5841 break;
5842 case LPFC_LINK_SPEED_32GHZ:
5843 rdp_speed = RDP_PS_32GB;
5844 break;
5845 case LPFC_LINK_SPEED_64GHZ:
5846 rdp_speed = RDP_PS_64GB;
5847 break;
5848 default:
5849 rdp_speed = RDP_PS_UNKNOWN;
5850 break;
5851 }
5852
5853 desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
5854
5855 if (phba->lmt & LMT_128Gb)
5856 rdp_cap |= RDP_PS_128GB;
5857 if (phba->lmt & LMT_64Gb)
5858 rdp_cap |= RDP_PS_64GB;
5859 if (phba->lmt & LMT_32Gb)
5860 rdp_cap |= RDP_PS_32GB;
5861 if (phba->lmt & LMT_16Gb)
5862 rdp_cap |= RDP_PS_16GB;
5863 if (phba->lmt & LMT_10Gb)
5864 rdp_cap |= RDP_PS_10GB;
5865 if (phba->lmt & LMT_8Gb)
5866 rdp_cap |= RDP_PS_8GB;
5867 if (phba->lmt & LMT_4Gb)
5868 rdp_cap |= RDP_PS_4GB;
5869 if (phba->lmt & LMT_2Gb)
5870 rdp_cap |= RDP_PS_2GB;
5871 if (phba->lmt & LMT_1Gb)
5872 rdp_cap |= RDP_PS_1GB;
5873
5874 if (rdp_cap == 0)
5875 rdp_cap = RDP_CAP_UNKNOWN;
5876 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
5877 rdp_cap |= RDP_CAP_USER_CONFIGURED;
5878
5879 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
5880 desc->length = cpu_to_be32(sizeof(desc->info));
5881 return sizeof(struct fc_rdp_port_speed_desc);
5882 }
5883
5884 static uint32_t
5885 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
5886 struct lpfc_vport *vport)
5887 {
5888
5889 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5890
5891 memcpy(desc->port_names.wwnn, &vport->fc_nodename,
5892 sizeof(desc->port_names.wwnn));
5893
5894 memcpy(desc->port_names.wwpn, &vport->fc_portname,
5895 sizeof(desc->port_names.wwpn));
5896
5897 desc->length = cpu_to_be32(sizeof(desc->port_names));
5898 return sizeof(struct fc_rdp_port_name_desc);
5899 }
5900
5901 static uint32_t
5902 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
5903 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5904 {
5905
5906 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5907 if (vport->fc_flag & FC_FABRIC) {
5908 memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
5909 sizeof(desc->port_names.wwnn));
5910
5911 memcpy(desc->port_names.wwpn, &vport->fabric_portname,
5912 sizeof(desc->port_names.wwpn));
5913 } else { /* Point to Point */
5914 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
5915 sizeof(desc->port_names.wwnn));
5916
5917 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
5918 sizeof(desc->port_names.wwpn));
5919 }
5920
5921 desc->length = cpu_to_be32(sizeof(desc->port_names));
5922 return sizeof(struct fc_rdp_port_name_desc);
5923 }
5924
5925 static void
5926 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
5927 int status)
5928 {
5929 struct lpfc_nodelist *ndlp = rdp_context->ndlp;
5930 struct lpfc_vport *vport = ndlp->vport;
5931 struct lpfc_iocbq *elsiocb;
5932 struct ulp_bde64 *bpl;
5933 IOCB_t *icmd;
5934 uint8_t *pcmd;
5935 struct ls_rjt *stat;
5936 struct fc_rdp_res_frame *rdp_res;
5937 uint32_t cmdsize, len;
5938 uint16_t *flag_ptr;
5939 int rc;
5940
5941 if (status != SUCCESS)
5942 goto error;
5943
5944 /* This will change once we know the true size of the RDP payload */
5945 cmdsize = sizeof(struct fc_rdp_res_frame);
5946
5947 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
5948 lpfc_max_els_tries, rdp_context->ndlp,
5949 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
5950 lpfc_nlp_put(ndlp);
5951 if (!elsiocb)
5952 goto free_rdp_context;
5953
5954 icmd = &elsiocb->iocb;
5955 icmd->ulpContext = rdp_context->rx_id;
5956 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
5957
5958 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5959 "2171 Xmit RDP response tag x%x xri x%x, "
5960 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
5961 elsiocb->iotag, elsiocb->iocb.ulpContext,
5962 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5963 ndlp->nlp_rpi);
5964 rdp_res = (struct fc_rdp_res_frame *)
5965 (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5966 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5967 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
5968 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5969
5970 /* Update Alarm and Warning */
5971 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
5972 phba->sfp_alarm |= *flag_ptr;
5973 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
5974 phba->sfp_warning |= *flag_ptr;
5975
5976 /* For RDP payload */
5977 len = 8;
5978 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
5979 (len + pcmd), ELS_CMD_RDP);
5980
5981 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
5982 rdp_context->page_a0, rdp_context->page_a2);
5983 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
5984 phba);
5985 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
5986 (len + pcmd), &rdp_context->link_stat);
5987 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
5988 (len + pcmd), vport);
5989 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
5990 (len + pcmd), vport, ndlp);
5991 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
5992 &rdp_context->link_stat);
5993 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
5994 &rdp_context->link_stat, vport);
5995 len += lpfc_rdp_res_oed_temp_desc(phba,
5996 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5997 rdp_context->page_a2);
5998 len += lpfc_rdp_res_oed_voltage_desc(phba,
5999 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
6000 rdp_context->page_a2);
6001 len += lpfc_rdp_res_oed_txbias_desc(phba,
6002 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
6003 rdp_context->page_a2);
6004 len += lpfc_rdp_res_oed_txpower_desc(phba,
6005 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
6006 rdp_context->page_a2);
6007 len += lpfc_rdp_res_oed_rxpower_desc(phba,
6008 (struct fc_rdp_oed_sfp_desc *)(len + pcmd),
6009 rdp_context->page_a2);
6010 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
6011 rdp_context->page_a0, vport);
6012
6013 rdp_res->length = cpu_to_be32(len - 8);
6014 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6015
6016 /* Now that we know the true size of the payload, update the BPL */
6017 bpl = (struct ulp_bde64 *)
6018 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
6019 bpl->tus.f.bdeSize = len;
6020 bpl->tus.f.bdeFlags = 0;
6021 bpl->tus.w = le32_to_cpu(bpl->tus.w);
6022
6023 phba->fc_stat.elsXmitACC++;
6024 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6025 if (rc == IOCB_ERROR)
6026 lpfc_els_free_iocb(phba, elsiocb);
6027
6028 kfree(rdp_context);
6029
6030 return;
6031 error:
6032 cmdsize = 2 * sizeof(uint32_t);
6033 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
6034 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
6035 lpfc_nlp_put(ndlp);
6036 if (!elsiocb)
6037 goto free_rdp_context;
6038
6039 icmd = &elsiocb->iocb;
6040 icmd->ulpContext = rdp_context->rx_id;
6041 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
6042 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6043
6044 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
6045 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
6046 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6047
6048 phba->fc_stat.elsXmitLSRJT++;
6049 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6050 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6051
6052 if (rc == IOCB_ERROR)
6053 lpfc_els_free_iocb(phba, elsiocb);
6054 free_rdp_context:
6055 kfree(rdp_context);
6056 }
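/*
 * The response assembly above follows a simple accumulation pattern: the
 * payload starts with an 8-byte header (ACC command word plus descriptor
 * list length), every lpfc_rdp_res_*() helper writes one descriptor at
 * pcmd + len and returns the number of bytes it consumed, and the final
 * length minus the header is written back into the frame and into the BPL.
 * The stand-alone sketch below shows just that pattern with hypothetical
 * descriptor builders; endianness of the stored length is deliberately
 * glossed over.
 */
#if 0	/* illustrative sketch, not built as part of the driver */
#include <stdint.h>
#include <string.h>

/* hypothetical descriptor builder: writes one descriptor, returns its size */
typedef uint32_t (*desc_builder_t)(uint8_t *dst);

static uint32_t build_acc_with_descriptors(uint8_t *buf, uint32_t acc_cmd,
					   const desc_builder_t *builders,
					   unsigned int nbuilders)
{
	uint32_t len = 8;	/* command word + payload-length word */
	uint32_t desc_len;
	unsigned int i;

	memcpy(buf, &acc_cmd, sizeof(acc_cmd));
	for (i = 0; i < nbuilders; i++)
		len += builders[i](buf + len);

	desc_len = len - 8;	/* descriptor list excludes the header */
	memcpy(buf + 4, &desc_len, sizeof(desc_len));
	return len;		/* total bytes to report in the BDE/BPL */
}
#endif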
6057
6058 static int
6059 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
6060 {
6061 LPFC_MBOXQ_t *mbox = NULL;
6062 int rc;
6063
6064 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6065 if (!mbox) {
6066 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
6067 "7105 failed to allocate mailbox memory");
6068 return 1;
6069 }
6070
6071 if (lpfc_sli4_dump_page_a0(phba, mbox))
6072 goto prep_mbox_fail;
6073 mbox->vport = rdp_context->ndlp->vport;
6074 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
6075 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
6076 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6077 if (rc == MBX_NOT_FINISHED)
6078 goto issue_mbox_fail;
6079
6080 return 0;
6081
6082 prep_mbox_fail:
6083 issue_mbox_fail:
6084 mempool_free(mbox, phba->mbox_mem_pool);
6085 return 1;
6086 }
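/*
 * lpfc_get_rdp_info() above is an instance of the driver's usual
 * fire-and-forget mailbox pattern: allocate from the mailbox mempool, let a
 * prep helper fill in the command, attach a completion handler plus context,
 * issue with MBX_NOWAIT, and free the mailbox only on the failure paths (on
 * success the completion handler owns it).  The condensed sketch below shows
 * only that ownership flow; every ex_* name is a hypothetical stand-in, not
 * a real driver or kernel API.
 */
#if 0	/* illustrative sketch, not built as part of the driver */
struct ex_mbox;

extern struct ex_mbox *ex_mbox_alloc(void);
extern void ex_mbox_free(struct ex_mbox *mbox);
extern int ex_mbox_prep(struct ex_mbox *mbox);		/* 0 on success */
extern int ex_mbox_issue_nowait(struct ex_mbox *mbox);	/* 0 if queued */
extern void ex_mbox_set_cmpl(struct ex_mbox *mbox,
			     void (*cmpl)(struct ex_mbox *mbox), void *ctx);
extern void ex_info_cmpl(struct ex_mbox *mbox);

static int ex_get_info(void *ctx)
{
	struct ex_mbox *mbox = ex_mbox_alloc();

	if (!mbox)
		return 1;
	if (ex_mbox_prep(mbox))
		goto fail;		/* nothing queued yet: still ours */
	ex_mbox_set_cmpl(mbox, ex_info_cmpl, ctx);
	if (ex_mbox_issue_nowait(mbox))
		goto fail;		/* not queued: still ours to free */
	return 0;			/* completion handler now owns mbox */
fail:
	ex_mbox_free(mbox);
	return 1;
}
#endif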
6087
6088 /*
6089 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
6090 * @vport: pointer to a host virtual N_Port data structure.
6091 * @cmdiocb: pointer to lpfc command iocb data structure.
6092 * @ndlp: pointer to a node-list data structure.
6093 *
6094 * This routine processes an unsolicited RDP (Read Diagnostic Parameters)
6095 * IOCB. First, the payload of the unsolicited RDP is validated.
6096 * Then it will (1) send MBX_DUMP_MEMORY, embedded DMP_LMSD sub command TYPE-3
6097 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
6098 * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call
6099 * lpfc_els_rdp_cmpl() to gather all the data and send the RDP response.
6100 *
6101 * Return code
6102 * 0 - Sent the acc response
6103 * 1 - Sent the reject response.
6104 */
6105 static int
6106 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6107 struct lpfc_nodelist *ndlp)
6108 {
6109 struct lpfc_hba *phba = vport->phba;
6110 struct lpfc_dmabuf *pcmd;
6111 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
6112 struct fc_rdp_req_frame *rdp_req;
6113 struct lpfc_rdp_context *rdp_context;
6114 IOCB_t *cmd = NULL;
6115 struct ls_rjt stat;
6116
6117 if (phba->sli_rev < LPFC_SLI_REV4 ||
6118 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
6119 LPFC_SLI_INTF_IF_TYPE_2) {
6120 rjt_err = LSRJT_UNABLE_TPC;
6121 rjt_expl = LSEXP_REQ_UNSUPPORTED;
6122 goto error;
6123 }
6124
6125 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
6126 rjt_err = LSRJT_UNABLE_TPC;
6127 rjt_expl = LSEXP_REQ_UNSUPPORTED;
6128 goto error;
6129 }
6130
6131 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6132 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
6133
6134 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6135 "2422 ELS RDP Request "
6136 "dec len %d tag x%x port_id %d len %d\n",
6137 be32_to_cpu(rdp_req->rdp_des_length),
6138 be32_to_cpu(rdp_req->nport_id_desc.tag),
6139 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
6140 be32_to_cpu(rdp_req->nport_id_desc.length));
6141
6142 if (sizeof(struct fc_rdp_nport_desc) !=
6143 be32_to_cpu(rdp_req->rdp_des_length))
6144 goto rjt_logerr;
6145 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
6146 goto rjt_logerr;
6147 if (RDP_NPORT_ID_SIZE !=
6148 be32_to_cpu(rdp_req->nport_id_desc.length))
6149 goto rjt_logerr;
6150 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
6151 if (!rdp_context) {
6152 rjt_err = LSRJT_UNABLE_TPC;
6153 goto error;
6154 }
6155
6156 cmd = &cmdiocb->iocb;
6157 rdp_context->ndlp = lpfc_nlp_get(ndlp);
6158 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
6159 rdp_context->rx_id = cmd->ulpContext;
6160 rdp_context->cmpl = lpfc_els_rdp_cmpl;
6161 if (lpfc_get_rdp_info(phba, rdp_context)) {
6162 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
6163 "2423 Unable to send mailbox");
6164 kfree(rdp_context);
6165 rjt_err = LSRJT_UNABLE_TPC;
6166 lpfc_nlp_put(ndlp);
6167 goto error;
6168 }
6169
6170 return 0;
6171
6172 rjt_logerr:
6173 rjt_err = LSRJT_LOGICAL_ERR;
6174
6175 error:
6176 memset(&stat, 0, sizeof(stat));
6177 stat.un.b.lsRjtRsnCode = rjt_err;
6178 stat.un.b.lsRjtRsnCodeExp = rjt_expl;
6179 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6180 return 1;
6181 }
6182
6183
6184 static void
6185 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6186 {
6187 MAILBOX_t *mb;
6188 IOCB_t *icmd;
6189 uint8_t *pcmd;
6190 struct lpfc_iocbq *elsiocb;
6191 struct lpfc_nodelist *ndlp;
6192 struct ls_rjt *stat;
6193 union lpfc_sli4_cfg_shdr *shdr;
6194 struct lpfc_lcb_context *lcb_context;
6195 struct fc_lcb_res_frame *lcb_res;
6196 uint32_t cmdsize, shdr_status, shdr_add_status;
6197 int rc;
6198
6199 mb = &pmb->u.mb;
6200 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
6201 ndlp = lcb_context->ndlp;
6202 pmb->ctx_ndlp = NULL;
6203 pmb->ctx_buf = NULL;
6204
6205 shdr = (union lpfc_sli4_cfg_shdr *)
6206 &pmb->u.mqe.un.beacon_config.header.cfg_shdr;
6207 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6208 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6209
6210 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
6211 "0194 SET_BEACON_CONFIG mailbox "
6212 "completed with status x%x add_status x%x,"
6213 " mbx status x%x\n",
6214 shdr_status, shdr_add_status, mb->mbxStatus);
6215
6216 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
6217 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
6218 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
6219 mempool_free(pmb, phba->mbox_mem_pool);
6220 goto error;
6221 }
6222
6223 mempool_free(pmb, phba->mbox_mem_pool);
6224 cmdsize = sizeof(struct fc_lcb_res_frame);
6225 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6226 lpfc_max_els_tries, ndlp,
6227 ndlp->nlp_DID, ELS_CMD_ACC);
6228
6229 /* Decrement the ndlp reference count from previous mbox command */
6230 lpfc_nlp_put(ndlp);
6231
6232 if (!elsiocb)
6233 goto free_lcb_context;
6234
6235 lcb_res = (struct fc_lcb_res_frame *)
6236 (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
6237
6238 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
6239 icmd = &elsiocb->iocb;
6240 icmd->ulpContext = lcb_context->rx_id;
6241 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
6242
6243 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
6244 *((uint32_t *)(pcmd)) = ELS_CMD_ACC;
6245 lcb_res->lcb_sub_command = lcb_context->sub_command;
6246 lcb_res->lcb_type = lcb_context->type;
6247 lcb_res->capability = lcb_context->capability;
6248 lcb_res->lcb_frequency = lcb_context->frequency;
6249 lcb_res->lcb_duration = lcb_context->duration;
6250 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6251 phba->fc_stat.elsXmitACC++;
6252 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6253 if (rc == IOCB_ERROR)
6254 lpfc_els_free_iocb(phba, elsiocb);
6255
6256 kfree(lcb_context);
6257 return;
6258
6259 error:
6260 cmdsize = sizeof(struct fc_lcb_res_frame);
6261 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6262 lpfc_max_els_tries, ndlp,
6263 ndlp->nlp_DID, ELS_CMD_LS_RJT);
6264 lpfc_nlp_put(ndlp);
6265 if (!elsiocb)
6266 goto free_lcb_context;
6267
6268 icmd = &elsiocb->iocb;
6269 icmd->ulpContext = lcb_context->rx_id;
6270 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
6271 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
6272
6273 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
6274 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
6275 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6276
6277 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
6278 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
6279
6280 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6281 phba->fc_stat.elsXmitLSRJT++;
6282 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6283 if (rc == IOCB_ERROR)
6284 lpfc_els_free_iocb(phba, elsiocb);
6285 free_lcb_context:
6286 kfree(lcb_context);
6287 }
6288
6289 static int
6290 lpfc_sli4_set_beacon(struct lpfc_vport *vport,
6291 struct lpfc_lcb_context *lcb_context,
6292 uint32_t beacon_state)
6293 {
6294 struct lpfc_hba *phba = vport->phba;
6295 union lpfc_sli4_cfg_shdr *cfg_shdr;
6296 LPFC_MBOXQ_t *mbox = NULL;
6297 uint32_t len;
6298 int rc;
6299
6300 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6301 if (!mbox)
6302 return 1;
6303
6304 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
6305 len = sizeof(struct lpfc_mbx_set_beacon_config) -
6306 sizeof(struct lpfc_sli4_cfg_mhdr);
6307 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6308 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
6309 LPFC_SLI4_MBX_EMBED);
6310 mbox->ctx_ndlp = (void *)lcb_context;
6311 mbox->vport = phba->pport;
6312 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
6313 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
6314 phba->sli4_hba.physical_port);
6315 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
6316 beacon_state);
6317 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */
6318
6319 /*
6320 * Check bv1s bit before issuing the mailbox
6321 * if bv1s == 1, LCB V1 supported
6322 * else, LCB V0 supported
6323 */
6324
6325 if (phba->sli4_hba.pc_sli4_params.bv1s) {
6326 /* COMMON_SET_BEACON_CONFIG_V1 */
6327 cfg_shdr->request.word9 = BEACON_VERSION_V1;
6328 lcb_context->capability |= LCB_CAPABILITY_DURATION;
6329 bf_set(lpfc_mbx_set_beacon_port_type,
6330 &mbox->u.mqe.un.beacon_config, 0);
6331 bf_set(lpfc_mbx_set_beacon_duration_v1,
6332 &mbox->u.mqe.un.beacon_config,
6333 be16_to_cpu(lcb_context->duration));
6334 } else {
6335 /* COMMON_SET_BEACON_CONFIG_V0 */
6336 if (be16_to_cpu(lcb_context->duration) != 0) {
6337 mempool_free(mbox, phba->mbox_mem_pool);
6338 return 1;
6339 }
6340 cfg_shdr->request.word9 = BEACON_VERSION_V0;
6341 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
6342 bf_set(lpfc_mbx_set_beacon_state,
6343 &mbox->u.mqe.un.beacon_config, beacon_state);
6344 bf_set(lpfc_mbx_set_beacon_port_type,
6345 &mbox->u.mqe.un.beacon_config, 1);
6346 bf_set(lpfc_mbx_set_beacon_duration,
6347 &mbox->u.mqe.un.beacon_config,
6348 be16_to_cpu(lcb_context->duration));
6349 }
6350
6351 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6352 if (rc == MBX_NOT_FINISHED) {
6353 mempool_free(mbox, phba->mbox_mem_pool);
6354 return 1;
6355 }
6356
6357 return 0;
6358 }
6359
6360
6361 /**
6362 * lpfc_els_rcv_lcb - Process an unsolicited LCB
6363 * @vport: pointer to a host virtual N_Port data structure.
6364 * @cmdiocb: pointer to lpfc command iocb data structure.
6365 * @ndlp: pointer to a node-list data structure.
6366 *
6367 * This routine processes an unsolicited LCB (Link Cable Beacon) IOCB.
6368 * First, the payload of the unsolicited LCB is validated.
6369 * Then, based on the subcommand, the beacon is either turned on or off.
6370 *
6371 * Return code
6372 * 0 - Sent the acc response
6373 * 1 - Sent the reject response.
6374 **/
6375 static int
6376 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6377 struct lpfc_nodelist *ndlp)
6378 {
6379 struct lpfc_hba *phba = vport->phba;
6380 struct lpfc_dmabuf *pcmd;
6381 uint8_t *lp;
6382 struct fc_lcb_request_frame *beacon;
6383 struct lpfc_lcb_context *lcb_context;
6384 uint8_t state, rjt_err;
6385 struct ls_rjt stat;
6386
6387 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
6388 lp = (uint8_t *)pcmd->virt;
6389 beacon = (struct fc_lcb_request_frame *)pcmd->virt;
6390
6391 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6392 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
6393 "type x%x frequency %x duration x%x\n",
6394 lp[0], lp[1], lp[2],
6395 beacon->lcb_command,
6396 beacon->lcb_sub_command,
6397 beacon->lcb_type,
6398 beacon->lcb_frequency,
6399 be16_to_cpu(beacon->lcb_duration));
6400
6401 if (beacon->lcb_sub_command != LPFC_LCB_ON &&
6402 beacon->lcb_sub_command != LPFC_LCB_OFF) {
6403 rjt_err = LSRJT_CMD_UNSUPPORTED;
6404 goto rjt;
6405 }
6406
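	/* LCB is only supported on SLI-4 FC ports with if_type >= 2 */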
6407 if (phba->sli_rev < LPFC_SLI_REV4 ||
6408 phba->hba_flag & HBA_FCOE_MODE ||
6409 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
6410 LPFC_SLI_INTF_IF_TYPE_2)) {
6411 rjt_err = LSRJT_CMD_UNSUPPORTED;
6412 goto rjt;
6413 }
6414
6415 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
6416 if (!lcb_context) {
6417 rjt_err = LSRJT_UNABLE_TPC;
6418 goto rjt;
6419 }
6420
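	/* Save the LCB parameters and exchange IDs so the mailbox completion
	 * can build the LS_ACC or LS_RJT response for this exchange.
	 */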
6421 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
6422 lcb_context->sub_command = beacon->lcb_sub_command;
6423 lcb_context->capability = 0;
6424 lcb_context->type = beacon->lcb_type;
6425 lcb_context->frequency = beacon->lcb_frequency;
6426 lcb_context->duration = beacon->lcb_duration;
6427 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
6428 lcb_context->rx_id = cmdiocb->iocb.ulpContext;
6429 lcb_context->ndlp = lpfc_nlp_get(ndlp);
6430 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
6431 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
6432 "0193 failed to send mail box");
6433 kfree(lcb_context);
6434 lpfc_nlp_put(ndlp);
6435 rjt_err = LSRJT_UNABLE_TPC;
6436 goto rjt;
6437 }
6438 return 0;
6439 rjt:
6440 memset(&stat, 0, sizeof(stat));
6441 stat.un.b.lsRjtRsnCode = rjt_err;
6442 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6443 return 1;
6444 }
6445
6446
6447 /**
6448 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
6449 * @vport: pointer to a host virtual N_Port data structure.
6450 *
6451 * This routine cleans up any Registration State Change Notification
6452 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
6453 * @vport together with the host_lock is used to prevent multiple threads
6454 * from trying to access the RSCN array on the same @vport at the same time.
6455 **/
6456 void
6457 lpfc_els_flush_rscn(struct lpfc_vport *vport)
6458 {
6459 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6460 struct lpfc_hba *phba = vport->phba;
6461 int i;
6462
6463 spin_lock_irq(shost->host_lock);
6464 if (vport->fc_rscn_flush) {
6465 /* Another thread is walking fc_rscn_id_list on this vport */
6466 spin_unlock_irq(shost->host_lock);
6467 return;
6468 }
6469 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
6470 vport->fc_rscn_flush = 1;
6471 spin_unlock_irq(shost->host_lock);
6472
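	/* Free every buffered RSCN payload held on this vport */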
6473 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6474 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
6475 vport->fc_rscn_id_list[i] = NULL;
6476 }
6477 spin_lock_irq(shost->host_lock);
6478 vport->fc_rscn_id_cnt = 0;
6479 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
6480 spin_unlock_irq(shost->host_lock);
6481 lpfc_can_disctmo(vport);
6482 /* Indicate we are done walking this fc_rscn_id_list */
6483 vport->fc_rscn_flush = 0;
6484 }
6485
6486 /**
6487 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
6488 * @vport: pointer to a host virtual N_Port data structure.
6489 * @did: remote destination port identifier.
6490 *
6491 * This routine checks whether there is any pending Registration State
6492 * Change Notification (RSCN) to a @did on @vport.
6493 *
6494 * Return code
6495 * Non-zero - The @did matched with a pending rscn
6496 * 0 - not able to match @did with a pending rscn
6497 **/
6498 int
6499 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
6500 {
6501 D_ID ns_did;
6502 D_ID rscn_did;
6503 uint32_t *lp;
6504 uint32_t payload_len, i;
6505 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6506
6507 ns_did.un.word = did;
6508
6509 /* Never match fabric nodes for RSCNs */
6510 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6511 return 0;
6512
6513 /* If we are doing a FULL RSCN rediscovery, match everything */
6514 if (vport->fc_flag & FC_RSCN_DISCOVERY)
6515 return did;
6516
6517 spin_lock_irq(shost->host_lock);
6518 if (vport->fc_rscn_flush) {
6519 /* Another thread is walking fc_rscn_id_list on this vport */
6520 spin_unlock_irq(shost->host_lock);
6521 return 0;
6522 }
6523 /* Indicate we are walking fc_rscn_id_list on this vport */
6524 vport->fc_rscn_flush = 1;
6525 spin_unlock_irq(shost->host_lock);
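	/* Walk each saved RSCN payload; the address format of each entry
	 * (port, area, domain or fabric) decides how much of the DID must
	 * match.
	 */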
6526 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6527 lp = vport->fc_rscn_id_list[i]->virt;
6528 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
6529 payload_len -= sizeof(uint32_t); /* take off word 0 */
6530 while (payload_len) {
6531 rscn_did.un.word = be32_to_cpu(*lp++);
6532 payload_len -= sizeof(uint32_t);
6533 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
6534 case RSCN_ADDRESS_FORMAT_PORT:
6535 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6536 && (ns_did.un.b.area == rscn_did.un.b.area)
6537 && (ns_did.un.b.id == rscn_did.un.b.id))
6538 goto return_did_out;
6539 break;
6540 case RSCN_ADDRESS_FORMAT_AREA:
6541 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6542 && (ns_did.un.b.area == rscn_did.un.b.area))
6543 goto return_did_out;
6544 break;
6545 case RSCN_ADDRESS_FORMAT_DOMAIN:
6546 if (ns_did.un.b.domain == rscn_did.un.b.domain)
6547 goto return_did_out;
6548 break;
6549 case RSCN_ADDRESS_FORMAT_FABRIC:
6550 goto return_did_out;
6551 }
6552 }
6553 }
6554 /* Indicate we are done with walking fc_rscn_id_list on this vport */
6555 vport->fc_rscn_flush = 0;
6556 return 0;
6557 return_did_out:
6558 /* Indicate we are done with walking fc_rscn_id_list on this vport */
6559 vport->fc_rscn_flush = 0;
6560 return did;
6561 }
6562
6563 /**
6564 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
6565 * @vport: pointer to a host virtual N_Port data structure.
6566 *
6567 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
6568 * state machine for a @vport's nodes that are with pending RSCN (Registration
6569 * State Change Notification).
6570 *
6571 * Return code
6572 * 0 - Successful (currently always returns 0)
6573 **/
6574 static int
6575 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
6576 {
6577 struct lpfc_nodelist *ndlp = NULL;
6578
6579 /* Move all affected nodes by pending RSCNs to NPR state. */
6580 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6581 if (!NLP_CHK_NODE_ACT(ndlp) ||
6582 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
6583 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
6584 continue;
6585
6586 /* NVME Target mode does not do RSCN Recovery. */
6587 if (vport->phba->nvmet_support)
6588 continue;
6589
6590 /* If we are in the process of doing discovery on this
6591 * NPort, let it continue on its own.
6592 */
6593 switch (ndlp->nlp_state) {
6594 case NLP_STE_PLOGI_ISSUE:
6595 case NLP_STE_ADISC_ISSUE:
6596 case NLP_STE_REG_LOGIN_ISSUE:
6597 case NLP_STE_PRLI_ISSUE:
6598 case NLP_STE_LOGO_ISSUE:
6599 continue;
6600 }
6601
6602 /* Check to see if we need to NVME rescan this target
6603 * remoteport.
6604 */
6605 if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
6606 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
6607 lpfc_nvme_rescan_port(vport, ndlp);
6608
6609 lpfc_disc_state_machine(vport, ndlp, NULL,
6610 NLP_EVT_DEVICE_RECOVERY);
6611 lpfc_cancel_retry_delay_tmo(vport, ndlp);
6612 }
6613 return 0;
6614 }
6615
6616 /**
6617 * lpfc_send_rscn_event - Send an RSCN event to management application
6618 * @vport: pointer to a host virtual N_Port data structure.
6619 * @cmdiocb: pointer to lpfc command iocb data structure.
6620 *
6621 * lpfc_send_rscn_event sends an RSCN netlink event to management
6622 * applications.
6623 */
6624 static void
6625 lpfc_send_rscn_event(struct lpfc_vport *vport,
6626 struct lpfc_iocbq *cmdiocb)
6627 {
6628 struct lpfc_dmabuf *pcmd;
6629 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6630 uint32_t *payload_ptr;
6631 uint32_t payload_len;
6632 struct lpfc_rscn_event_header *rscn_event_data;
6633
6634 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6635 payload_ptr = (uint32_t *) pcmd->virt;
6636 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
6637
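	/* Allocate an event buffer large enough for the header plus the raw
	 * RSCN payload and post it as a vendor-unique FC transport event.
	 */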
6638 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
6639 payload_len, GFP_KERNEL);
6640 if (!rscn_event_data) {
6641 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6642 "0147 Failed to allocate memory for RSCN event\n");
6643 return;
6644 }
6645 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
6646 rscn_event_data->payload_length = payload_len;
6647 memcpy(rscn_event_data->rscn_payload, payload_ptr,
6648 payload_len);
6649
6650 fc_host_post_vendor_event(shost,
6651 fc_get_event_number(),
6652 sizeof(struct lpfc_rscn_event_header) + payload_len,
6653 (char *)rscn_event_data,
6654 LPFC_NL_VENDOR_ID);
6655
6656 kfree(rscn_event_data);
6657 }
6658
6659 /**
6660 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
6661 * @vport: pointer to a host virtual N_Port data structure.
6662 * @cmdiocb: pointer to lpfc command iocb data structure.
6663 * @ndlp: pointer to a node-list data structure.
6664 *
6665 * This routine processes an unsolicited RSCN (Registration State Change
6666 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
6667 * to invoke fc_host_post_event() routine to the FC transport layer. If the
6668 * discovery state machine is about to begin discovery, it just accepts the
6669 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
6670 * contains N_Port IDs for other vports on this HBA, it just accepts the
6671 * RSCN and ignores processing it. If the state machine is in the recovery
6672 * state, the fc_rscn_id_list of this @vport is walked and the
6673 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
6674 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
6675 * routine is invoked to handle the RSCN event.
6676 *
6677 * Return code
6678 * 0 - Just sent the acc response
6679 * 1 - Sent the acc response and waited for name server completion
6680 **/
6681 static int
6682 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6683 struct lpfc_nodelist *ndlp)
6684 {
6685 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6686 struct lpfc_hba *phba = vport->phba;
6687 struct lpfc_dmabuf *pcmd;
6688 uint32_t *lp, *datap;
6689 uint32_t payload_len, length, nportid, *cmd;
6690 int rscn_cnt;
6691 int rscn_id = 0, hba_id = 0;
6692 int i, tmo;
6693
6694 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6695 lp = (uint32_t *) pcmd->virt;
6696
6697 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
6698 payload_len -= sizeof(uint32_t); /* take off word 0 */
6699 /* RSCN received */
6700 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6701 "0214 RSCN received Data: x%x x%x x%x x%x\n",
6702 vport->fc_flag, payload_len, *lp,
6703 vport->fc_rscn_id_cnt);
6704
6705 /* Send an RSCN event to the management application */
6706 lpfc_send_rscn_event(vport, cmdiocb);
6707
6708 for (i = 0; i < payload_len/sizeof(uint32_t); i++)
6709 fc_host_post_event(shost, fc_get_event_number(),
6710 FCH_EVT_RSCN, lp[i]);
6711
6712 /* Check if RSCN is coming from a direct-connected remote NPort */
6713 if (vport->fc_flag & FC_PT2PT) {
6714 /* If so, just ACC it, no other action needed for now */
6715 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6716 "2024 pt2pt RSCN %08x Data: x%x x%x\n",
6717 *lp, vport->fc_flag, payload_len);
6718 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6719
6720 /* Check to see if we need to NVME rescan this target
6721 * remoteport.
6722 */
6723 if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
6724 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
6725 lpfc_nvme_rescan_port(vport, ndlp);
6726 return 0;
6727 }
6728
6729 /* If we are about to begin discovery, just ACC the RSCN.
6730 * Discovery processing will satisfy it.
6731 */
6732 if (vport->port_state <= LPFC_NS_QRY) {
6733 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6734 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
6735 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
6736
6737 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6738 return 0;
6739 }
6740
6741 /* If this RSCN just contains NPortIDs for other vports on this HBA,
6742 * just ACC and ignore it.
6743 */
6744 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6745 !(vport->cfg_peer_port_login)) {
6746 i = payload_len;
6747 datap = lp;
6748 while (i > 0) {
6749 nportid = *datap++;
6750 nportid = ((be32_to_cpu(nportid)) & Mask_DID);
6751 i -= sizeof(uint32_t);
6752 rscn_id++;
6753 if (lpfc_find_vport_by_did(phba, nportid))
6754 hba_id++;
6755 }
6756 if (rscn_id == hba_id) {
6757 /* ALL NPortIDs in RSCN are on HBA */
6758 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6759 "0219 Ignore RSCN "
6760 "Data: x%x x%x x%x x%x\n",
6761 vport->fc_flag, payload_len,
6762 *lp, vport->fc_rscn_id_cnt);
6763 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6764 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
6765 ndlp->nlp_DID, vport->port_state,
6766 ndlp->nlp_flag);
6767
6768 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
6769 ndlp, NULL);
6770 return 0;
6771 }
6772 }
6773
6774 spin_lock_irq(shost->host_lock);
6775 if (vport->fc_rscn_flush) {
6776 /* Another thread is walking fc_rscn_id_list on this vport */
6777 vport->fc_flag |= FC_RSCN_DISCOVERY;
6778 spin_unlock_irq(shost->host_lock);
6779 /* Send back ACC */
6780 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6781 return 0;
6782 }
6783 /* Indicate we are walking fc_rscn_id_list on this vport */
6784 vport->fc_rscn_flush = 1;
6785 spin_unlock_irq(shost->host_lock);
6786 /* Get the array count after successfully have the token */
6787 rscn_cnt = vport->fc_rscn_id_cnt;
6788 /* If we are already processing an RSCN, save the received
6789 * RSCN payload buffer, cmdiocb->context2 to process later.
6790 */
6791 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
6792 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6793 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
6794 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
6795
6796 spin_lock_irq(shost->host_lock);
6797 vport->fc_flag |= FC_RSCN_DEFERRED;
6798
6799 /* Restart disctmo if it's already running */
6800 if (vport->fc_flag & FC_DISC_TMO) {
6801 tmo = ((phba->fc_ratov * 3) + 3);
6802 mod_timer(&vport->fc_disctmo,
6803 jiffies + msecs_to_jiffies(1000 * tmo));
6804 }
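		/* While deferring, append this payload to the last saved RSCN
		 * buffer if it fits, or hold it as a new fc_rscn_id_list
		 * entry; once FC_MAX_HOLD_RSCN is reached, fall back to a
		 * full rediscovery instead.
		 */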
6805 if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
6806 !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
6807 vport->fc_flag |= FC_RSCN_MODE;
6808 spin_unlock_irq(shost->host_lock);
6809 if (rscn_cnt) {
6810 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
6811 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
6812 }
6813 if ((rscn_cnt) &&
6814 (payload_len + length <= LPFC_BPL_SIZE)) {
6815 *cmd &= ELS_CMD_MASK;
6816 *cmd |= cpu_to_be32(payload_len + length);
6817 memcpy(((uint8_t *)cmd) + length, lp,
6818 payload_len);
6819 } else {
6820 vport->fc_rscn_id_list[rscn_cnt] = pcmd;
6821 vport->fc_rscn_id_cnt++;
6822 /* If we zero cmdiocb->context2, the calling
6823 * routine will not try to free it.
6824 */
6825 cmdiocb->context2 = NULL;
6826 }
6827 /* Deferred RSCN */
6828 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6829 "0235 Deferred RSCN "
6830 "Data: x%x x%x x%x\n",
6831 vport->fc_rscn_id_cnt, vport->fc_flag,
6832 vport->port_state);
6833 } else {
6834 vport->fc_flag |= FC_RSCN_DISCOVERY;
6835 spin_unlock_irq(shost->host_lock);
6836 /* ReDiscovery RSCN */
6837 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6838 "0234 ReDiscovery RSCN "
6839 "Data: x%x x%x x%x\n",
6840 vport->fc_rscn_id_cnt, vport->fc_flag,
6841 vport->port_state);
6842 }
6843 /* Indicate we are done walking fc_rscn_id_list on this vport */
6844 vport->fc_rscn_flush = 0;
6845 /* Send back ACC */
6846 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6847 /* send RECOVERY event for ALL nodes that match RSCN payload */
6848 lpfc_rscn_recovery_check(vport);
6849 return 0;
6850 }
6851 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6852 "RCV RSCN: did:x%x/ste:x%x flg:x%x",
6853 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
6854
6855 spin_lock_irq(shost->host_lock);
6856 vport->fc_flag |= FC_RSCN_MODE;
6857 spin_unlock_irq(shost->host_lock);
6858 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
6859 /* Indicate we are done walking fc_rscn_id_list on this vport */
6860 vport->fc_rscn_flush = 0;
6861 /*
6862 * If we zero cmdiocb->context2, the calling routine will
6863 * not try to free it.
6864 */
6865 cmdiocb->context2 = NULL;
6866 lpfc_set_disctmo(vport);
6867 /* Send back ACC */
6868 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6869 /* send RECOVERY event for ALL nodes that match RSCN payload */
6870 lpfc_rscn_recovery_check(vport);
6871 return lpfc_els_handle_rscn(vport);
6872 }
6873
6874 /**
6875 * lpfc_els_handle_rscn - Handle rscn for a vport
6876 * @vport: pointer to a host virtual N_Port data structure.
6877 *
6878 * This routine handles the Registration State Change Notification
6879 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
6880 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
6881 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
6882 * NameServer shall be issued. If CT command to the NameServer fails to be
6883 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
6884 * RSCN activities with the @vport.
6885 *
6886 * Return code
6887 * 0 - Cleaned up rscn on the @vport
6888 * 1 - Wait for plogi to name server before proceeding
6889 **/
6890 int
6891 lpfc_els_handle_rscn(struct lpfc_vport *vport)
6892 {
6893 struct lpfc_nodelist *ndlp;
6894 struct lpfc_hba *phba = vport->phba;
6895
6896 /* Ignore RSCN if the port is being torn down. */
6897 if (vport->load_flag & FC_UNLOADING) {
6898 lpfc_els_flush_rscn(vport);
6899 return 0;
6900 }
6901
6902 /* Start timer for RSCN processing */
6903 lpfc_set_disctmo(vport);
6904
6905 /* RSCN processed */
6906 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6907 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
6908 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
6909 vport->port_state, vport->num_disc_nodes,
6910 vport->gidft_inp);
6911
6912 /* To process RSCN, first compare RSCN data with NameServer */
6913 vport->fc_ns_retry = 0;
6914 vport->num_disc_nodes = 0;
6915
6916 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6917 if (ndlp && NLP_CHK_NODE_ACT(ndlp)
6918 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
6919 /* Good ndlp, issue CT Request to NameServer. Need to
6920 * know how many gidfts were issued. If none, then just
6921 * flush the RSCN. Otherwise, the outstanding requests
6922 * need to complete.
6923 */
6924 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
6925 if (lpfc_issue_gidft(vport) > 0)
6926 return 1;
6927 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
6928 if (lpfc_issue_gidpt(vport) > 0)
6929 return 1;
6930 } else {
6931 return 1;
6932 }
6933 } else {
6934 /* Nameserver login in question. Revalidate. */
6935 if (ndlp) {
6936 ndlp = lpfc_enable_node(vport, ndlp,
6937 NLP_STE_PLOGI_ISSUE);
6938 if (!ndlp) {
6939 lpfc_els_flush_rscn(vport);
6940 return 0;
6941 }
6942 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
6943 } else {
6944 ndlp = lpfc_nlp_init(vport, NameServer_DID);
6945 if (!ndlp) {
6946 lpfc_els_flush_rscn(vport);
6947 return 0;
6948 }
6949 ndlp->nlp_prev_state = ndlp->nlp_state;
6950 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6951 }
6952 ndlp->nlp_type |= NLP_FABRIC;
6953 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
6954 /* Wait for NameServer login cmpl before we can
6955 * continue
6956 */
6957 return 1;
6958 }
6959
6960 lpfc_els_flush_rscn(vport);
6961 return 0;
6962 }
6963
6964 /**
6965 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
6966 * @vport: pointer to a host virtual N_Port data structure.
6967 * @cmdiocb: pointer to lpfc command iocb data structure.
6968 * @ndlp: pointer to a node-list data structure.
6969 *
6970 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
6971 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
6972 * point topology. As an unsolicited FLOGI should not be received in a loop
6973 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
6974 * lpfc_check_sparm() routine is invoked to check the parameters in the
6975 * unsolicited FLOGI. If parameters validation failed, the routine
6976 * lpfc_els_rsp_reject() shall be called with reject reason code set to
6977 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
6978 * FLOGI shall be compared with the Port WWN of the @vport to determine who
6979 * will initiate PLOGI. The higher lexicographical value party shall has
6980 * higher priority (as the winning port) and will initiate PLOGI and
6981 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
6982 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
6983 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
6984 *
6985 * Return code
6986 * 0 - Successfully processed the unsolicited flogi
6987 * 1 - Failed to process the unsolicited flogi
6988 **/
6989 static int
6990 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6991 struct lpfc_nodelist *ndlp)
6992 {
6993 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6994 struct lpfc_hba *phba = vport->phba;
6995 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6996 uint32_t *lp = (uint32_t *) pcmd->virt;
6997 IOCB_t *icmd = &cmdiocb->iocb;
6998 struct serv_parm *sp;
6999 LPFC_MBOXQ_t *mbox;
7000 uint32_t cmd, did;
7001 int rc;
7002 uint32_t fc_flag = 0;
7003 uint32_t port_state = 0;
7004
7005 cmd = *lp++;
7006 sp = (struct serv_parm *) lp;
7007
7008 /* FLOGI received */
7009
7010 lpfc_set_disctmo(vport);
7011
7012 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7013 /* We should never receive a FLOGI in loop mode, ignore it */
7014 did = icmd->un.elsreq64.remoteID;
7015
7016 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
7017 Loop Mode */
7018 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
7019 "0113 An FLOGI ELS command x%x was "
7020 "received from DID x%x in Loop Mode\n",
7021 cmd, did);
7022 return 1;
7023 }
7024
7025 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
7026
7027 /*
7028 * If our portname is greater than the remote portname,
7029 * then we initiate Nport login.
7030 */
7031
7032 rc = memcmp(&vport->fc_portname, &sp->portName,
7033 sizeof(struct lpfc_name));
7034
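	/* A portname match means our own FLOGI has been looped back to us:
	 * on SLI-3 reinitialize the link, on SLI-4 abort the outstanding
	 * FLOGI.
	 */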
7035 if (!rc) {
7036 if (phba->sli_rev < LPFC_SLI_REV4) {
7037 mbox = mempool_alloc(phba->mbox_mem_pool,
7038 GFP_KERNEL);
7039 if (!mbox)
7040 return 1;
7041 lpfc_linkdown(phba);
7042 lpfc_init_link(phba, mbox,
7043 phba->cfg_topology,
7044 phba->cfg_link_speed);
7045 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
7046 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7047 mbox->vport = vport;
7048 rc = lpfc_sli_issue_mbox(phba, mbox,
7049 MBX_NOWAIT);
7050 lpfc_set_loopback_flag(phba);
7051 if (rc == MBX_NOT_FINISHED)
7052 mempool_free(mbox, phba->mbox_mem_pool);
7053 return 1;
7054 }
7055
7056 /* abort the flogi coming back to ourselves
7057 * due to external loopback on the port.
7058 */
7059 lpfc_els_abort_flogi(phba);
7060 return 0;
7061
7062 } else if (rc > 0) { /* greater than */
7063 spin_lock_irq(shost->host_lock);
7064 vport->fc_flag |= FC_PT2PT_PLOGI;
7065 spin_unlock_irq(shost->host_lock);
7066
7067 /* If we have the high WWPN we can assign our own
7068 * myDID; otherwise, we have to WAIT for a PLOGI
7069 * from the remote NPort to find out what it
7070 * will be.
7071 */
7072 vport->fc_myDID = PT2PT_LocalID;
7073 } else {
7074 vport->fc_myDID = PT2PT_RemoteID;
7075 }
7076
7077 /*
7078 * The vport state should go to LPFC_FLOGI only
7079 * AFTER we issue a FLOGI, not receive one.
7080 */
7081 spin_lock_irq(shost->host_lock);
7082 fc_flag = vport->fc_flag;
7083 port_state = vport->port_state;
7084 vport->fc_flag |= FC_PT2PT;
7085 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
7086
7087 /* Acking an unsol FLOGI. Count 1 for link bounce
7088 * work-around.
7089 */
7090 vport->rcv_flogi_cnt++;
7091 spin_unlock_irq(shost->host_lock);
7092 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7093 "3311 Rcv Flogi PS x%x new PS x%x "
7094 "fc_flag x%x new fc_flag x%x\n",
7095 port_state, vport->port_state,
7096 fc_flag, vport->fc_flag);
7097
7098 /*
7099 * We temporarily set fc_myDID to make it look like we are
7100 * a Fabric. This is done just so we end up with the right
7101 * did / sid on the FLOGI ACC rsp.
7102 */
7103 did = vport->fc_myDID;
7104 vport->fc_myDID = Fabric_DID;
7105
7106 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
7107
7108 /* Defer ACC response until AFTER we issue a FLOGI */
7109 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
7110 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
7111 phba->defer_flogi_acc_ox_id =
7112 cmdiocb->iocb.unsli3.rcvsli3.ox_id;
7113
7114 vport->fc_myDID = did;
7115
7116 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7117 "3344 Deferring FLOGI ACC: rx_id: x%x,"
7118 " ox_id: x%x, hba_flag x%x\n",
7119 phba->defer_flogi_acc_rx_id,
7120 phba->defer_flogi_acc_ox_id, phba->hba_flag);
7121
7122 phba->defer_flogi_acc_flag = true;
7123
7124 return 0;
7125 }
7126
7127 /* Send back ACC */
7128 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
7129
7130 /* Now let's put fc_myDID back to what it's supposed to be */
7131 vport->fc_myDID = did;
7132
7133 return 0;
7134 }
7135
7136 /**
7137 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
7138 * @vport: pointer to a host virtual N_Port data structure.
7139 * @cmdiocb: pointer to lpfc command iocb data structure.
7140 * @ndlp: pointer to a node-list data structure.
7141 *
7142 * This routine processes Request Node Identification Data (RNID) IOCB
7143 * received as an ELS unsolicited event. Only when the RNID specifies format
7144 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
7145 * does this routine invoke the lpfc_els_rsp_rnid_acc() routine to
7146 * Accept (ACC) the RNID ELS command. All the other RNID formats are
7147 * rejected by invoking the lpfc_els_rsp_reject() routine.
7148 *
7149 * Return code
7150 * 0 - Successfully processed rnid iocb (currently always return 0)
7151 **/
7152 static int
7153 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7154 struct lpfc_nodelist *ndlp)
7155 {
7156 struct lpfc_dmabuf *pcmd;
7157 uint32_t *lp;
7158 RNID *rn;
7159 struct ls_rjt stat;
7160
7161 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7162 lp = (uint32_t *) pcmd->virt;
7163
7164 lp++;
7165 rn = (RNID *) lp;
7166
7167 /* RNID received */
7168
7169 switch (rn->Format) {
7170 case 0:
7171 case RNID_TOPOLOGY_DISC:
7172 /* Send back ACC */
7173 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
7174 break;
7175 default:
7176 /* Reject this request because format not supported */
7177 stat.un.b.lsRjtRsvd0 = 0;
7178 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7179 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7180 stat.un.b.vendorUnique = 0;
7181 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
7182 NULL);
7183 }
7184 return 0;
7185 }
7186
7187 /**
7188 * lpfc_els_rcv_echo - Process an unsolicited echo iocb
7189 * @vport: pointer to a host virtual N_Port data structure.
7190 * @cmdiocb: pointer to lpfc command iocb data structure.
7191 * @ndlp: pointer to a node-list data structure.
7192 *
7193 * Return code
7194 * 0 - Successfully processed echo iocb (currently always return 0)
7195 **/
7196 static int
7197 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7198 struct lpfc_nodelist *ndlp)
7199 {
7200 uint8_t *pcmd;
7201
7202 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
7203
7204 /* skip over first word of echo command to find echo data */
7205 pcmd += sizeof(uint32_t);
7206
7207 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
7208 return 0;
7209 }
7210
7211 /**
7212 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
7213 * @vport: pointer to a host virtual N_Port data structure.
7214 * @cmdiocb: pointer to lpfc command iocb data structure.
7215 * @ndlp: pointer to a node-list data structure.
7216 *
7217 * This routine processes a Link Incident Report Registration (LIRR) IOCB
7218 * received as an ELS unsolicited event. Currently, this function just invokes
7219 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
7220 *
7221 * Return code
7222 * 0 - Successfully processed lirr iocb (currently always return 0)
7223 **/
7224 static int
7225 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7226 struct lpfc_nodelist *ndlp)
7227 {
7228 struct ls_rjt stat;
7229
7230 /* For now, unconditionally reject this command */
7231 stat.un.b.lsRjtRsvd0 = 0;
7232 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7233 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7234 stat.un.b.vendorUnique = 0;
7235 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7236 return 0;
7237 }
7238
7239 /**
7240 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
7241 * @vport: pointer to a host virtual N_Port data structure.
7242 * @cmdiocb: pointer to lpfc command iocb data structure.
7243 * @ndlp: pointer to a node-list data structure.
7244 *
7245 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
7246 * received as an ELS unsolicited event. A request to RRQ shall only
7247 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
7248 * Nx_Port N_Port_ID of the target Exchange is the same as the
7249 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
7250 * not accepted, an LS_RJT with reason code "Unable to perform
7251 * command request" and reason code explanation "Invalid Originator
7252 * S_ID" shall be returned. For now, we just unconditionally accept
7253 * RRQ from the target.
7254 **/
7255 static void
7256 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7257 struct lpfc_nodelist *ndlp)
7258 {
7259 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7260 if (vport->phba->sli_rev == LPFC_SLI_REV4)
7261 lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
7262 }
7263
7264 /**
7265 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
7266 * @phba: pointer to lpfc hba data structure.
7267 * @pmb: pointer to the driver internal queue element for mailbox command.
7268 *
7269 * This routine is the completion callback function for the MBX_READ_LNK_STAT
7270 * mailbox command. This callback function is to actually send the Accept
7271 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
7272 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
7273 * mailbox command, constructs the RLS response with the link statistics
7274 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the
7275 * ACC response to the RLS.
7276 *
7277 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7278 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7279 * will be stored into the context1 field of the IOCB for the completion
7280 * callback function to the RLS Accept Response ELS IOCB command.
7281 *
7282 **/
7283 static void
7284 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7285 {
7286 MAILBOX_t *mb;
7287 IOCB_t *icmd;
7288 struct RLS_RSP *rls_rsp;
7289 uint8_t *pcmd;
7290 struct lpfc_iocbq *elsiocb;
7291 struct lpfc_nodelist *ndlp;
7292 uint16_t oxid;
7293 uint16_t rxid;
7294 uint32_t cmdsize;
7295
7296 mb = &pmb->u.mb;
7297
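	/* The ox_id/rx_id of the original RLS exchange were packed into
	 * ctx_buf by lpfc_els_rcv_rls(); unpack them for the ACC response.
	 */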
7298 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
7299 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
7300 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
7301 pmb->ctx_buf = NULL;
7302 pmb->ctx_ndlp = NULL;
7303
7304 if (mb->mbxStatus) {
7305 mempool_free(pmb, phba->mbox_mem_pool);
7306 return;
7307 }
7308
7309 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
7310 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7311 lpfc_max_els_tries, ndlp,
7312 ndlp->nlp_DID, ELS_CMD_ACC);
7313
7314 /* Decrement the ndlp reference count from previous mbox command */
7315 lpfc_nlp_put(ndlp);
7316
7317 if (!elsiocb) {
7318 mempool_free(pmb, phba->mbox_mem_pool);
7319 return;
7320 }
7321
7322 icmd = &elsiocb->iocb;
7323 icmd->ulpContext = rxid;
7324 icmd->unsli3.rcvsli3.ox_id = oxid;
7325
7326 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7327 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7328 pcmd += sizeof(uint32_t); /* Skip past command */
7329 rls_rsp = (struct RLS_RSP *)pcmd;
7330
7331 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
7332 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
7333 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
7334 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
7335 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
7336 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
7337 mempool_free(pmb, phba->mbox_mem_pool);
7338 /* Xmit ELS RLS ACC response tag <ulpIoTag> */
7339 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
7340 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
7341 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
7342 elsiocb->iotag, elsiocb->iocb.ulpContext,
7343 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7344 ndlp->nlp_rpi);
7345 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7346 phba->fc_stat.elsXmitACC++;
7347 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7348 lpfc_els_free_iocb(phba, elsiocb);
7349 }
7350
7351 /**
7352 * lpfc_els_rcv_rls - Process an unsolicited rls iocb
7353 * @vport: pointer to a host virtual N_Port data structure.
7354 * @cmdiocb: pointer to lpfc command iocb data structure.
7355 * @ndlp: pointer to a node-list data structure.
7356 *
7357 * This routine processes Read Link Status (RLS) IOCB received as an
7358 * ELS unsolicited event. It first checks the remote port state. If the
7359 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
7360 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
7361 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
7362 * for reading the HBA link statistics. It is for the callback function,
7363 * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
7364 * to actually send out the RLS Accept (ACC) response.
7365 *
7366 * Return codes
7367 * 0 - Successfully processed rls iocb (currently always return 0)
7368 **/
7369 static int
7370 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7371 struct lpfc_nodelist *ndlp)
7372 {
7373 struct lpfc_hba *phba = vport->phba;
7374 LPFC_MBOXQ_t *mbox;
7375 struct ls_rjt stat;
7376
7377 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7378 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7379 /* reject the unsolicited RLS request and done with it */
7380 goto reject_out;
7381
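	/* Issue MBX_READ_LNK_STAT; pack this exchange's ox_id/rx_id into
	 * ctx_buf so the completion handler can address the ACC correctly.
	 */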
7382 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
7383 if (mbox) {
7384 lpfc_read_lnk_stat(phba, mbox);
7385 mbox->ctx_buf = (void *)((unsigned long)
7386 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
7387 cmdiocb->iocb.ulpContext)); /* rx_id */
7388 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
7389 mbox->vport = vport;
7390 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
7391 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
7392 != MBX_NOT_FINISHED)
7393 /* Mbox completion will send ELS Response */
7394 return 0;
7395 /* Decrement reference count used for the failed mbox
7396 * command.
7397 */
7398 lpfc_nlp_put(ndlp);
7399 mempool_free(mbox, phba->mbox_mem_pool);
7400 }
7401 reject_out:
7402 /* issue rejection response */
7403 stat.un.b.lsRjtRsvd0 = 0;
7404 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7405 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7406 stat.un.b.vendorUnique = 0;
7407 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7408 return 0;
7409 }
7410
7411 /**
7412 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
7413 * @vport: pointer to a host virtual N_Port data structure.
7414 * @cmdiocb: pointer to lpfc command iocb data structure.
7415 * @ndlp: pointer to a node-list data structure.
7416 *
7417 * This routine processes Read Timeout Value (RTV) IOCB received as an
7418 * ELS unsolicited event. It first checks the remote port state. If the
7419 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
7420 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
7421 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
7422 * Value (RTV) unsolicited IOCB event.
7423 *
7424 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7425 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7426 * will be stored into the context1 field of the IOCB for the completion
7427 * callback function to the RTV Accept Response ELS IOCB command.
7428 *
7429 * Return codes
7430 * 0 - Successfully processed rtv iocb (currently always return 0)
7431 **/
7432 static int
7433 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7434 struct lpfc_nodelist *ndlp)
7435 {
7436 struct lpfc_hba *phba = vport->phba;
7437 struct ls_rjt stat;
7438 struct RTV_RSP *rtv_rsp;
7439 uint8_t *pcmd;
7440 struct lpfc_iocbq *elsiocb;
7441 uint32_t cmdsize;
7442
7443
7444 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7445 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7446 /* reject the unsolicited RTV request and done with it */
7447 goto reject_out;
7448
7449 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
7450 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7451 lpfc_max_els_tries, ndlp,
7452 ndlp->nlp_DID, ELS_CMD_ACC);
7453
7454 if (!elsiocb)
7455 return 1;
7456
7457 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7458 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7459 pcmd += sizeof(uint32_t); /* Skip past command */
7460
7461 /* use the command's xri in the response */
7462 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */
7463 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
7464
7465 rtv_rsp = (struct RTV_RSP *)pcmd;
7466
7467 /* populate RTV payload */
7468 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
7469 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
7470 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
7471 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
7472 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
7473
7474 /* Xmit ELS RTV ACC response tag <ulpIoTag> */
7475 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
7476 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
7477 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
7478 "Data: x%x x%x x%x\n",
7479 elsiocb->iotag, elsiocb->iocb.ulpContext,
7480 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7481 ndlp->nlp_rpi,
7482 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
7483 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7484 phba->fc_stat.elsXmitACC++;
7485 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7486 lpfc_els_free_iocb(phba, elsiocb);
7487 return 0;
7488
7489 reject_out:
7490 /* issue rejection response */
7491 stat.un.b.lsRjtRsvd0 = 0;
7492 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7493 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7494 stat.un.b.vendorUnique = 0;
7495 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7496 return 0;
7497 }
7498
7499 /* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command
7500 * @vport: pointer to a host virtual N_Port data structure.
7501 * @ndlp: pointer to a node-list data structure.
7502 * @did: DID of the target.
7503 * @rrq: Pointer to the rrq struct.
7504 *
7505 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
7506 * successful, the completion handler will clear the RRQ.
7507 *
7508 * Return codes
7509 * 0 - Successfully sent rrq els iocb.
7510 * 1 - Failed to send rrq els iocb.
7511 **/
7512 static int
7513 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7514 uint32_t did, struct lpfc_node_rrq *rrq)
7515 {
7516 struct lpfc_hba *phba = vport->phba;
7517 struct RRQ *els_rrq;
7518 struct lpfc_iocbq *elsiocb;
7519 uint8_t *pcmd;
7520 uint16_t cmdsize;
7521 int ret;
7522
7523
7524 if (ndlp != rrq->ndlp)
7525 ndlp = rrq->ndlp;
7526 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
7527 return 1;
7528
7529 /* If ndlp is not NULL, we will bump the reference count on it */
7530 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
7531 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
7532 ELS_CMD_RRQ);
7533 if (!elsiocb)
7534 return 1;
7535
7536 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7537
7538 /* For RRQ request, remainder of payload is Exchange IDs */
7539 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
7540 pcmd += sizeof(uint32_t);
7541 els_rrq = (struct RRQ *) pcmd;
7542
7543 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
7544 bf_set(rrq_rxid, els_rrq, rrq->rxid);
7545 bf_set(rrq_did, els_rrq, vport->fc_myDID);
7546 els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
7547 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
7548
7549
7550 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7551 "Issue RRQ: did:x%x",
7552 did, rrq->xritag, rrq->rxid);
7553 elsiocb->context_un.rrq = rrq;
7554 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
7555 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7556
7557 if (ret == IOCB_ERROR) {
7558 lpfc_els_free_iocb(phba, elsiocb);
7559 return 1;
7560 }
7561 return 0;
7562 }
7563
7564 /**
7565 * lpfc_send_rrq - Sends ELS RRQ if needed.
7566 * @phba: pointer to lpfc hba data structure.
7567 * @rrq: pointer to the active rrq.
7568 *
7569 * This routine will call the lpfc_issue_els_rrq if the rrq is
7570 * still active for the xri. If this function returns a failure then
7571 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
7572 *
7573 * Returns 0 Success.
7574 * 1 Failure.
7575 **/
7576 int
7577 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
7578 {
7579 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
7580 rrq->nlp_DID);
7581 if (!ndlp)
7582 return 1;
7583
7584 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
7585 return lpfc_issue_els_rrq(rrq->vport, ndlp,
7586 rrq->nlp_DID, rrq);
7587 else
7588 return 1;
7589 }
7590
7591 /**
7592 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
7593 * @vport: pointer to a host virtual N_Port data structure.
7594 * @cmdsize: size of the ELS command.
7595 * @oldiocb: pointer to the original lpfc command iocb data structure.
7596 * @ndlp: pointer to a node-list data structure.
7597 *
7598 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
7599 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
7600 *
7601 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7602 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7603 * will be stored into the context1 field of the IOCB for the completion
7604 * callback function to the RPL Accept Response ELS command.
7605 *
7606 * Return code
7607 * 0 - Successfully issued ACC RPL ELS command
7608 * 1 - Failed to issue ACC RPL ELS command
7609 **/
7610 static int
7611 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
7612 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
7613 {
7614 struct lpfc_hba *phba = vport->phba;
7615 IOCB_t *icmd, *oldcmd;
7616 RPL_RSP rpl_rsp;
7617 struct lpfc_iocbq *elsiocb;
7618 uint8_t *pcmd;
7619
7620 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
7621 ndlp->nlp_DID, ELS_CMD_ACC);
7622
7623 if (!elsiocb)
7624 return 1;
7625
7626 icmd = &elsiocb->iocb;
7627 oldcmd = &oldiocb->iocb;
7628 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
7629 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
7630
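	/* Word 0 carries the ACC command code and the response payload length */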
7631 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7632 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7633 pcmd += sizeof(uint16_t);
7634 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
7635 pcmd += sizeof(uint16_t);
7636
7637 /* Setup the RPL ACC payload */
7638 rpl_rsp.listLen = be32_to_cpu(1);
7639 rpl_rsp.index = 0;
7640 rpl_rsp.port_num_blk.portNum = 0;
7641 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
7642 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
7643 sizeof(struct lpfc_name));
7644 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
7645 /* Xmit ELS RPL ACC response tag <ulpIoTag> */
7646 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7647 "0120 Xmit ELS RPL ACC response tag x%x "
7648 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
7649 "rpi x%x\n",
7650 elsiocb->iotag, elsiocb->iocb.ulpContext,
7651 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7652 ndlp->nlp_rpi);
7653 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7654 phba->fc_stat.elsXmitACC++;
7655 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
7656 IOCB_ERROR) {
7657 lpfc_els_free_iocb(phba, elsiocb);
7658 return 1;
7659 }
7660 return 0;
7661 }
7662
7663 /**
7664 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
7665 * @vport: pointer to a host virtual N_Port data structure.
7666 * @cmdiocb: pointer to lpfc command iocb data structure.
7667 * @ndlp: pointer to a node-list data structure.
7668 *
7669 * This routine processes Read Port List (RPL) IOCB received as an ELS
7670 * unsolicited event. It first checks the remote port state. If the remote
7671 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
7672 * invokes the lpfc_els_rsp_reject() routine to send reject response.
7673 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
7674 * to accept the RPL.
7675 *
7676 * Return code
7677 * 0 - Successfully processed rpl iocb (currently always return 0)
7678 **/
7679 static int
7680 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7681 struct lpfc_nodelist *ndlp)
7682 {
7683 struct lpfc_dmabuf *pcmd;
7684 uint32_t *lp;
7685 uint32_t maxsize;
7686 uint16_t cmdsize;
7687 RPL *rpl;
7688 struct ls_rjt stat;
7689
7690 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7691 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
7692 /* issue rejection response */
7693 stat.un.b.lsRjtRsvd0 = 0;
7694 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7695 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7696 stat.un.b.vendorUnique = 0;
7697 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
7698 NULL);
7699 /* rejected the unsolicited RPL request and done with it */
7700 return 0;
7701 }
7702
7703 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7704 lp = (uint32_t *) pcmd->virt;
7705 rpl = (RPL *) (lp + 1);
7706 maxsize = be32_to_cpu(rpl->maxsize);
7707
7708 /* We support only one port */
7709 if ((rpl->index == 0) &&
7710 ((maxsize == 0) ||
7711 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
7712 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
7713 } else {
7714 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
7715 }
7716 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
7717
7718 return 0;
7719 }
7720
7721 /**
7722 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
7723 * @vport: pointer to a virtual N_Port data structure.
7724 * @cmdiocb: pointer to lpfc command iocb data structure.
7725 * @ndlp: pointer to a node-list data structure.
7726 *
7727 * This routine processes Fibre Channel Address Resolution Protocol
7728 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
7729 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
7730 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
7731 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
7732 * remote PortName is compared against the FC PortName stored in the @vport
7733 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
7734 * compared against the FC NodeName stored in the @vport data structure.
7735 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
7736 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
7737 * invoked to send out FARP Response to the remote node. Before sending the
7738 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
7739 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
7740 * routine is invoked to log into the remote port first.
7741 *
7742 * Return code
7743 * 0 - Either the FARP Match Mode is not supported or it was successfully processed
7744 **/
7745 static int
7746 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7747 struct lpfc_nodelist *ndlp)
7748 {
7749 struct lpfc_dmabuf *pcmd;
7750 uint32_t *lp;
7751 IOCB_t *icmd;
7752 FARP *fp;
7753 uint32_t cnt, did;
7754
7755 icmd = &cmdiocb->iocb;
7756 did = icmd->un.elsreq64.remoteID;
7757 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7758 lp = (uint32_t *) pcmd->virt;
7759
7760 lp++;
7761 fp = (FARP *) lp;
7762 /* FARP-REQ received from DID <did> */
7763 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7764 "0601 FARP-REQ received from DID x%x\n", did);
7765 /* We will only support match on WWPN or WWNN */
7766 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
7767 return 0;
7768 }
7769
7770 cnt = 0;
7771 /* If this FARP command is searching for my portname */
7772 if (fp->Mflags & FARP_MATCH_PORT) {
7773 if (memcmp(&fp->RportName, &vport->fc_portname,
7774 sizeof(struct lpfc_name)) == 0)
7775 cnt = 1;
7776 }
7777
7778 /* If this FARP command is searching for my nodename */
7779 if (fp->Mflags & FARP_MATCH_NODE) {
7780 if (memcmp(&fp->RnodeName, &vport->fc_nodename,
7781 sizeof(struct lpfc_name)) == 0)
7782 cnt = 1;
7783 }
7784
7785 if (cnt) {
7786 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
7787 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
7788 /* Log back into the node before sending the FARP. */
7789 if (fp->Rflags & FARP_REQUEST_PLOGI) {
7790 ndlp->nlp_prev_state = ndlp->nlp_state;
7791 lpfc_nlp_set_state(vport, ndlp,
7792 NLP_STE_PLOGI_ISSUE);
7793 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
7794 }
7795
7796 /* Send a FARP response to that node */
7797 if (fp->Rflags & FARP_REQUEST_FARPR)
7798 lpfc_issue_els_farpr(vport, did, 0);
7799 }
7800 }
7801 return 0;
7802 }
7803
7804 /**
7805 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
7806 * @vport: pointer to a host virtual N_Port data structure.
7807 * @cmdiocb: pointer to lpfc command iocb data structure.
7808 * @ndlp: pointer to a node-list data structure.
7809 *
7810 * This routine processes Fibre Channel Address Resolution Protocol
7811 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
7812 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
7813 * the FARP response request.
7814 *
7815 * Return code
7816 * 0 - Successfully processed FARPR IOCB (currently always return 0)
7817 **/
7818 static int
7819 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7820 struct lpfc_nodelist *ndlp)
7821 {
7822 struct lpfc_dmabuf *pcmd;
7823 uint32_t *lp;
7824 IOCB_t *icmd;
7825 uint32_t did;
7826
7827 icmd = &cmdiocb->iocb;
7828 did = icmd->un.elsreq64.remoteID;
7829 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7830 lp = (uint32_t *) pcmd->virt;
7831
7832 lp++;
7833 /* FARP-RSP received from DID <did> */
7834 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7835 "0600 FARP-RSP received from DID x%x\n", did);
7836 /* ACCEPT the Farp resp request */
7837 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7838
7839 return 0;
7840 }
7841
7842 /**
7843 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
7844 * @vport: pointer to a host virtual N_Port data structure.
7845 * @cmdiocb: pointer to lpfc command iocb data structure.
7846 * @fan_ndlp: pointer to a node-list data structure.
7847 *
7848 * This routine processes a Fabric Address Notification (FAN) IOCB
7849 * command received as an ELS unsolicited event. The FAN ELS command will
7850 * only be processed on a physical port (i.e., the @vport represents the
7851 * physical port). The fabric NodeName and PortName from the FAN IOCB are
7852 * compared against those in the phba data structure. If any of those is
7853 * different, the lpfc_initial_flogi() routine is invoked to initialize
7854 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
7855 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
7856 * is invoked to register login to the fabric.
7857 *
7858 * Return code
7859 * 0 - Successfully processed fan iocb (currently always return 0).
7860 **/
7861 static int
7862 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7863 struct lpfc_nodelist *fan_ndlp)
7864 {
7865 struct lpfc_hba *phba = vport->phba;
7866 uint32_t *lp;
7867 FAN *fp;
7868
7869 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
7870 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
7871 fp = (FAN *) ++lp;
7872 /* FAN received; Fan does not have a reply sequence */
7873 if ((vport == phba->pport) &&
7874 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
7875 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
7876 sizeof(struct lpfc_name))) ||
7877 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
7878 sizeof(struct lpfc_name)))) {
7879 /* This port has switched fabrics. FLOGI is required */
7880 lpfc_issue_init_vfi(vport);
7881 } else {
7882 /* FAN verified - skip FLOGI */
7883 vport->fc_myDID = vport->fc_prevDID;
7884 if (phba->sli_rev < LPFC_SLI_REV4)
7885 lpfc_issue_fabric_reglogin(vport);
7886 else {
7887 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7888 "3138 Need register VFI: (x%x/%x)\n",
7889 vport->fc_prevDID, vport->fc_myDID);
7890 lpfc_issue_reg_vfi(vport);
7891 }
7892 }
7893 }
7894 return 0;
7895 }
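
/*
 * Illustrative sketch (not part of the driver and not compiled): the FAN
 * decision above reduces to a byte-wise comparison of the saved fabric
 * NodeName/PortName against the names carried in the FAN payload.  The
 * 8-byte name type below is a simplified stand-in for struct lpfc_name.
 */
#if 0
#include <stdbool.h>
#include <string.h>

struct demo_wwn { unsigned char b[8]; };	/* simplified lpfc_name */

/* Return true when a fresh FLOGI is required because the fabric changed. */
static bool demo_fan_requires_flogi(const struct demo_wwn *saved_node,
				    const struct demo_wwn *saved_port,
				    const struct demo_wwn *fan_node,
				    const struct demo_wwn *fan_port)
{
	return memcmp(saved_node, fan_node, sizeof(*fan_node)) != 0 ||
	       memcmp(saved_port, fan_port, sizeof(*fan_port)) != 0;
}
#endif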
7896
7897 /**
7898 * lpfc_els_timeout - Handler function for the els timer
7899 * @t: timer context used to obtain the vport.
7900 *
7901 * This routine is invoked by the ELS timer after timeout. It posts the ELS
7902 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
7903 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
7904 * up the worker thread. It is for the worker thread to invoke the routine
7905 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
7906 **/
7907 void
7908 lpfc_els_timeout(struct timer_list *t)
7909 {
7910 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
7911 struct lpfc_hba *phba = vport->phba;
7912 uint32_t tmo_posted;
7913 unsigned long iflag;
7914
7915 spin_lock_irqsave(&vport->work_port_lock, iflag);
7916 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
7917 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
7918 vport->work_port_events |= WORKER_ELS_TMO;
7919 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
7920
7921 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
7922 lpfc_worker_wake_up(phba);
7923 return;
7924 }
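
/*
 * Illustrative sketch (not part of the driver and not compiled): the timer
 * callback above follows a common "post an event bit, then wake the worker"
 * pattern.  This is a simplified userspace analogue using pthreads; all
 * names here are hypothetical.
 */
#if 0
#include <pthread.h>
#include <stdint.h>

#define DEMO_WORKER_ELS_TMO	0x1

struct demo_port {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	uint32_t work_events;
	int unloading;
};

/* Timer expiry: post the event at most once, and only if not unloading. */
static void demo_els_timeout(struct demo_port *p)
{
	int need_wake = 0;

	pthread_mutex_lock(&p->lock);
	if (!(p->work_events & DEMO_WORKER_ELS_TMO) && !p->unloading) {
		p->work_events |= DEMO_WORKER_ELS_TMO;
		need_wake = 1;
	}
	pthread_mutex_unlock(&p->lock);

	if (need_wake)
		pthread_cond_signal(&p->wake);	/* wake the worker thread */
}
#endif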
7925
7926
7927 /**
7928 * lpfc_els_timeout_handler - Process an els timeout event
7929 * @vport: pointer to a virtual N_Port data structure.
7930 *
7931 * This routine is the actual handler function that processes an ELS timeout
7932 * event. It walks the ELS ring to get and abort all the IOCBs (except the
7933 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
7934 * invoking the lpfc_sli_issue_abort_iotag() routine.
7935 **/
7936 void
7937 lpfc_els_timeout_handler(struct lpfc_vport *vport)
7938 {
7939 struct lpfc_hba *phba = vport->phba;
7940 struct lpfc_sli_ring *pring;
7941 struct lpfc_iocbq *tmp_iocb, *piocb;
7942 IOCB_t *cmd = NULL;
7943 struct lpfc_dmabuf *pcmd;
7944 uint32_t els_command = 0;
7945 uint32_t timeout;
7946 uint32_t remote_ID = 0xffffffff;
7947 LIST_HEAD(abort_list);
7948
7949
7950 timeout = (uint32_t)(phba->fc_ratov << 1);
7951
7952 pring = lpfc_phba_elsring(phba);
7953 if (unlikely(!pring))
7954 return;
7955
7956 if (phba->pport->load_flag & FC_UNLOADING)
7957 return;
7958
7959 spin_lock_irq(&phba->hbalock);
7960 if (phba->sli_rev == LPFC_SLI_REV4)
7961 spin_lock(&pring->ring_lock);
7962
7963 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
7964 cmd = &piocb->iocb;
7965
7966 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
7967 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
7968 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
7969 continue;
7970
7971 if (piocb->vport != vport)
7972 continue;
7973
7974 pcmd = (struct lpfc_dmabuf *) piocb->context2;
7975 if (pcmd)
7976 els_command = *(uint32_t *) (pcmd->virt);
7977
7978 if (els_command == ELS_CMD_FARP ||
7979 els_command == ELS_CMD_FARPR ||
7980 els_command == ELS_CMD_FDISC)
7981 continue;
7982
7983 if (piocb->drvrTimeout > 0) {
7984 if (piocb->drvrTimeout >= timeout)
7985 piocb->drvrTimeout -= timeout;
7986 else
7987 piocb->drvrTimeout = 0;
7988 continue;
7989 }
7990
7991 remote_ID = 0xffffffff;
7992 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
7993 remote_ID = cmd->un.elsreq64.remoteID;
7994 else {
7995 struct lpfc_nodelist *ndlp;
7996 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
7997 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
7998 remote_ID = ndlp->nlp_DID;
7999 }
8000 list_add_tail(&piocb->dlist, &abort_list);
8001 }
8002 if (phba->sli_rev == LPFC_SLI_REV4)
8003 spin_unlock(&pring->ring_lock);
8004 spin_unlock_irq(&phba->hbalock);
8005
8006 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
8007 cmd = &piocb->iocb;
8008 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8009 "0127 ELS timeout Data: x%x x%x x%x "
8010 "x%x\n", els_command,
8011 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
8012 spin_lock_irq(&phba->hbalock);
8013 list_del_init(&piocb->dlist);
8014 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
8015 spin_unlock_irq(&phba->hbalock);
8016 }
8017
8018 if (!list_empty(&pring->txcmplq))
8019 if (!(phba->pport->load_flag & FC_UNLOADING))
8020 mod_timer(&vport->els_tmofunc,
8021 jiffies + msecs_to_jiffies(1000 * timeout));
8022 }
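
/*
 * Illustrative sketch (not part of the driver and not compiled): the
 * per-command bookkeeping above.  The handler runs every "timeout"
 * (2 * RATOV) seconds and decrements each command's remaining budget;
 * a command is only queued for abort once its budget reaches zero.
 * The field below is a simplified stand-in for drvrTimeout.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* Return true when the command should be aborted on this pass. */
static bool demo_els_tmo_expired(uint32_t *remaining, uint32_t pass_interval)
{
	if (*remaining > 0) {
		*remaining = (*remaining >= pass_interval) ?
			     *remaining - pass_interval : 0;
		return false;	/* still has budget; check again next pass */
	}
	return true;		/* already at zero: time to abort */
}
#endif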
8023
8024 /**
8025 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
8026 * @vport: pointer to a host virtual N_Port data structure.
8027 *
8028 * This routine is used to clean up all the outstanding ELS commands on a
8029 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
8030 * routine. After that, it walks the ELS transmit queue to remove all the
8031 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
8032 * the IOCBs with a non-NULL completion callback function, the callback
8033 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
8034 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
8035 * callback function, the IOCB will simply be released. Finally, it walks
8036 * the ELS transmit completion queue to issue an abort IOCB to any transmit
8037 * completion queue IOCB that is associated with the @vport and is not
8038 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
8039 * part of the discovery state machine) out to HBA by invoking the
8040 * lpfc_sli_issue_abort_iotag() routine. Note that this function only issues
8041 * the abort IOCBs for the transmit completion queued IOCBs; it does not
8042 * guarantee that the IOCBs have been aborted when this function returns.
8043 **/
8044 void
8045 lpfc_els_flush_cmd(struct lpfc_vport *vport)
8046 {
8047 LIST_HEAD(abort_list);
8048 struct lpfc_hba *phba = vport->phba;
8049 struct lpfc_sli_ring *pring;
8050 struct lpfc_iocbq *tmp_iocb, *piocb;
8051 IOCB_t *cmd = NULL;
8052 unsigned long iflags = 0;
8053
8054 lpfc_fabric_abort_vport(vport);
8055
8056 /*
8057 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
8058 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
8059 * ultimately grabs the ring_lock, the driver must splice the list into
8060 * a working list and release the locks before calling the abort.
8061 */
8062 spin_lock_irqsave(&phba->hbalock, iflags);
8063 pring = lpfc_phba_elsring(phba);
8064
8065 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
8066 if (unlikely(!pring)) {
8067 spin_unlock_irqrestore(&phba->hbalock, iflags);
8068 return;
8069 }
8070
8071 if (phba->sli_rev == LPFC_SLI_REV4)
8072 spin_lock(&pring->ring_lock);
8073
8074 /* First we need to issue aborts to outstanding cmds on txcmpl */
8075 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
8076 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
8077 continue;
8078
8079 if (piocb->vport != vport)
8080 continue;
8081
8082 if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
8083 continue;
8084
8085 /* On the ELS ring we can have ELS_REQUESTs or
8086 * GEN_REQUESTs waiting for a response.
8087 */
8088 cmd = &piocb->iocb;
8089 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
8090 list_add_tail(&piocb->dlist, &abort_list);
8091
8092 /* If the link is down when flushing ELS commands
8093 * the firmware will not complete them till after
8094 * the link comes back up. This may confuse
8095 * discovery for the new link up, so we need to
8096 * change the compl routine to just clean up the iocb
8097 * and avoid any retry logic.
8098 */
8099 if (phba->link_state == LPFC_LINK_DOWN)
8100 piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
8101 }
8102 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
8103 list_add_tail(&piocb->dlist, &abort_list);
8104 }
8105
8106 if (phba->sli_rev == LPFC_SLI_REV4)
8107 spin_unlock(&pring->ring_lock);
8108 spin_unlock_irqrestore(&phba->hbalock, iflags);
8109
8110 /* Abort each txcmpl iocb on aborted list and remove the dlist links. */
8111 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
8112 spin_lock_irqsave(&phba->hbalock, iflags);
8113 list_del_init(&piocb->dlist);
8114 lpfc_sli_issue_abort_iotag(phba, pring, piocb);
8115 spin_unlock_irqrestore(&phba->hbalock, iflags);
8116 }
8117 if (!list_empty(&abort_list))
8118 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8119 "3387 abort list for txq not empty\n");
8120 INIT_LIST_HEAD(&abort_list);
8121
8122 spin_lock_irqsave(&phba->hbalock, iflags);
8123 if (phba->sli_rev == LPFC_SLI_REV4)
8124 spin_lock(&pring->ring_lock);
8125
8126 /* No need to abort the txq list,
8127 * just queue them up for lpfc_sli_cancel_iocbs
8128 */
8129 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
8130 cmd = &piocb->iocb;
8131
8132 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
8133 continue;
8134 }
8135
8136 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
8137 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
8138 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
8139 cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
8140 cmd->ulpCommand == CMD_ABORT_XRI_CN)
8141 continue;
8142
8143 if (piocb->vport != vport)
8144 continue;
8145
8146 list_del_init(&piocb->list);
8147 list_add_tail(&piocb->list, &abort_list);
8148 }
8149
8150 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
8151 if (vport == phba->pport) {
8152 list_for_each_entry_safe(piocb, tmp_iocb,
8153 &phba->fabric_iocb_list, list) {
8154 cmd = &piocb->iocb;
8155 list_del_init(&piocb->list);
8156 list_add_tail(&piocb->list, &abort_list);
8157 }
8158 }
8159
8160 if (phba->sli_rev == LPFC_SLI_REV4)
8161 spin_unlock(&pring->ring_lock);
8162 spin_unlock_irqrestore(&phba->hbalock, iflags);
8163
8164 /* Cancel all the IOCBs from the completions list */
8165 lpfc_sli_cancel_iocbs(phba, &abort_list,
8166 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
8167
8168 return;
8169 }
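
/*
 * Illustrative sketch (not part of the driver and not compiled): the flush
 * routines above use a "collect under lock, act after unlock" pattern so
 * that the abort path, which may take the same locks again, never runs
 * with them held.  The struct and callback below are hypothetical; only
 * the list/lock handling is the point.
 */
#if 0
struct demo_cmd {
	struct list_head list;
	/* ... command state ... */
};

static void demo_flush(spinlock_t *lock, struct list_head *pending,
		       void (*abort_one)(struct demo_cmd *))
{
	struct demo_cmd *cmd, *tmp;
	unsigned long flags;
	LIST_HEAD(work);

	/* Step 1: move the entries to a private list while holding the lock. */
	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(cmd, tmp, pending, list)
		list_move_tail(&cmd->list, &work);
	spin_unlock_irqrestore(lock, flags);

	/* Step 2: act on the private list with no locks held. */
	list_for_each_entry_safe(cmd, tmp, &work, list) {
		list_del_init(&cmd->list);
		abort_one(cmd);
	}
}
#endif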
8170
8171 /**
8172 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
8173 * @phba: pointer to lpfc hba data structure.
8174 *
8175 * This routine is used to clean up all the outstanding ELS commands on a
8176 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
8177 * routine. After that, it walks the ELS transmit queue to remove all the
8178 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
8179 * the IOCBs with the completion callback function associated, the callback
8180 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
8181 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
8182 * callback function associated, the IOCB will simply be released. Finally,
8183 * it walks the ELS transmit completion queue to issue an abort IOCB to any
8184 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
8185 * management plane IOCBs that are not part of the discovery state machine)
8186 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
8187 **/
8188 void
8189 lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
8190 {
8191 struct lpfc_vport *vport;
8192
8193 spin_lock_irq(&phba->port_list_lock);
8194 list_for_each_entry(vport, &phba->port_list, listentry)
8195 lpfc_els_flush_cmd(vport);
8196 spin_unlock_irq(&phba->port_list_lock);
8197
8198 return;
8199 }
8200
8201 /**
8202 * lpfc_send_els_failure_event - Posts an ELS command failure event
8203 * @phba: Pointer to hba context object.
8204 * @cmdiocbp: Pointer to command iocb which reported error.
8205 * @rspiocbp: Pointer to response iocb which reported error.
8206 *
8207 * This function sends an event when there is an ELS command
8208 * failure.
8209 **/
8210 void
8211 lpfc_send_els_failure_event(struct lpfc_hba *phba,
8212 struct lpfc_iocbq *cmdiocbp,
8213 struct lpfc_iocbq *rspiocbp)
8214 {
8215 struct lpfc_vport *vport = cmdiocbp->vport;
8216 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8217 struct lpfc_lsrjt_event lsrjt_event;
8218 struct lpfc_fabric_event_header fabric_event;
8219 struct ls_rjt stat;
8220 struct lpfc_nodelist *ndlp;
8221 uint32_t *pcmd;
8222
8223 ndlp = cmdiocbp->context1;
8224 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8225 return;
8226
8227 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
8228 lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
8229 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
8230 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
8231 sizeof(struct lpfc_name));
8232 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
8233 sizeof(struct lpfc_name));
8234 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8235 cmdiocbp->context2)->virt);
8236 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
8237 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
8238 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
8239 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
8240 fc_host_post_vendor_event(shost,
8241 fc_get_event_number(),
8242 sizeof(lsrjt_event),
8243 (char *)&lsrjt_event,
8244 LPFC_NL_VENDOR_ID);
8245 return;
8246 }
8247 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
8248 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
8249 fabric_event.event_type = FC_REG_FABRIC_EVENT;
8250 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
8251 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
8252 else
8253 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
8254 memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
8255 sizeof(struct lpfc_name));
8256 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
8257 sizeof(struct lpfc_name));
8258 fc_host_post_vendor_event(shost,
8259 fc_get_event_number(),
8260 sizeof(fabric_event),
8261 (char *)&fabric_event,
8262 LPFC_NL_VENDOR_ID);
8263 return;
8264 }
8265
8266 }
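
/*
 * Illustrative sketch (not part of the driver and not compiled): unpacking
 * the LS_RJT reason code and explanation from the 32-bit response word, as
 * the LSRJT event path above does through the ls_rjt union.  This assumes
 * the usual FC-LS layout (byte 1 = reason code, byte 2 = explanation) once
 * the word has been converted to CPU byte order.
 */
#if 0
#include <stdint.h>

struct demo_lsrjt {
	uint8_t reason;		/* e.g. "unable to perform command request" */
	uint8_t explanation;	/* e.g. "no additional explanation" */
};

static struct demo_lsrjt demo_decode_lsrjt(uint32_t word_cpu_order)
{
	struct demo_lsrjt r;

	r.reason      = (word_cpu_order >> 16) & 0xff;
	r.explanation = (word_cpu_order >> 8) & 0xff;
	return r;
}
#endif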
8267
8268 /**
8269 * lpfc_send_els_event - Posts unsolicited els event
8270 * @vport: Pointer to vport object.
8271 * @ndlp: Pointer FC node object.
8272 * @payload: ELS command code type.
8273 *
8274 * This function posts an event when there is an incoming
8275 * unsolicited ELS command.
8276 **/
8277 static void
8278 lpfc_send_els_event(struct lpfc_vport *vport,
8279 struct lpfc_nodelist *ndlp,
8280 uint32_t *payload)
8281 {
8282 struct lpfc_els_event_header *els_data = NULL;
8283 struct lpfc_logo_event *logo_data = NULL;
8284 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8285
8286 if (*payload == ELS_CMD_LOGO) {
8287 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
8288 if (!logo_data) {
8289 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8290 "0148 Failed to allocate memory "
8291 "for LOGO event\n");
8292 return;
8293 }
8294 els_data = &logo_data->header;
8295 } else {
8296 els_data = kmalloc(sizeof(struct lpfc_els_event_header),
8297 GFP_KERNEL);
8298 if (!els_data) {
8299 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8300 "0149 Failed to allocate memory "
8301 "for ELS event\n");
8302 return;
8303 }
8304 }
8305 els_data->event_type = FC_REG_ELS_EVENT;
8306 switch (*payload) {
8307 case ELS_CMD_PLOGI:
8308 els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
8309 break;
8310 case ELS_CMD_PRLO:
8311 els_data->subcategory = LPFC_EVENT_PRLO_RCV;
8312 break;
8313 case ELS_CMD_ADISC:
8314 els_data->subcategory = LPFC_EVENT_ADISC_RCV;
8315 break;
8316 case ELS_CMD_LOGO:
8317 els_data->subcategory = LPFC_EVENT_LOGO_RCV;
8318 /* Copy the WWPN in the LOGO payload */
8319 memcpy(logo_data->logo_wwpn, &payload[2],
8320 sizeof(struct lpfc_name));
8321 break;
8322 default:
8323 kfree(els_data);
8324 return;
8325 }
8326 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
8327 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
8328 if (*payload == ELS_CMD_LOGO) {
8329 fc_host_post_vendor_event(shost,
8330 fc_get_event_number(),
8331 sizeof(struct lpfc_logo_event),
8332 (char *)logo_data,
8333 LPFC_NL_VENDOR_ID);
8334 kfree(logo_data);
8335 } else {
8336 fc_host_post_vendor_event(shost,
8337 fc_get_event_number(),
8338 sizeof(struct lpfc_els_event_header),
8339 (char *)els_data,
8340 LPFC_NL_VENDOR_ID);
8341 kfree(els_data);
8342 }
8343
8344 return;
8345 }
8346
8347
8348 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
8349 FC_LS_TLV_DTAG_INIT);
8350
8351 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
8352 FC_FPIN_LI_EVT_TYPES_INIT);
8353
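/*
 * Illustrative sketch (not part of the driver and not compiled): the
 * DECLARE_ENUM2STR_LOOKUP() uses above generate small "value to name"
 * helpers for logging.  Conceptually they amount to a table scan like the
 * hypothetical one below; the real macro expansion may differ in detail.
 */
#if 0
struct demo_enum_nm {
	unsigned int value;
	const char *name;
};

static const char *demo_enum2str(const struct demo_enum_nm *tbl,
				 unsigned int count, unsigned int value)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (tbl[i].value == value)
			return tbl[i].name;
	return "unknown";
}
#endif
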
8354 /**
8355 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
8356 * @vport: Pointer to vport object.
8357 * @tlv: Pointer to the Link Integrity Notification Descriptor.
8358 *
8359 * This function processes a link integrity FPIN event by
8360 * logging a message
8361 **/
8362 static void
8363 lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv)
8364 {
8365 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
8366 const char *li_evt_str;
8367 u32 li_evt;
8368
8369 li_evt = be16_to_cpu(li->event_type);
8370 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
8371
8372 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8373 "4680 FPIN Link Integrity %s (x%x) "
8374 "Detecting PN x%016llx Attached PN x%016llx "
8375 "Duration %d mSecs Count %d Port Cnt %d\n",
8376 li_evt_str, li_evt,
8377 be64_to_cpu(li->detecting_wwpn),
8378 be64_to_cpu(li->attached_wwpn),
8379 be32_to_cpu(li->event_threshold),
8380 be32_to_cpu(li->event_count),
8381 be32_to_cpu(li->pname_count));
8382 }
8383
8384 static void
8385 lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin,
8386 u32 fpin_length)
8387 {
8388 struct fc_tlv_desc *tlv;
8389 const char *dtag_nm;
8390 uint32_t desc_cnt = 0, bytes_remain;
8391 u32 dtag;
8392
8393 /* FPINs handled only if we are in the right discovery state */
8394 if (vport->port_state < LPFC_DISC_AUTH)
8395 return;
8396
8397 /* make sure there is the full fpin header */
8398 if (fpin_length < sizeof(struct fc_els_fpin))
8399 return;
8400
8401 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
8402 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
8403 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
8404
8405 /* process each descriptor */
8406 while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
8407 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
8408
8409 dtag = be32_to_cpu(tlv->desc_tag);
8410 switch (dtag) {
8411 case ELS_DTAG_LNK_INTEGRITY:
8412 lpfc_els_rcv_fpin_li(vport, tlv);
8413 break;
8414 default:
8415 dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
8416 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8417 "4678 skipped FPIN descriptor[%d]: "
8418 "tag x%x (%s)\n",
8419 desc_cnt, dtag, dtag_nm);
8420 break;
8421 }
8422
8423 desc_cnt++;
8424 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
8425 tlv = fc_tlv_next_desc(tlv);
8426 }
8427
8428 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length,
8429 (char *)fpin);
8430 }
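
/*
 * Illustrative sketch (not part of the driver and not compiled): the FPIN
 * parser above walks a list of TLV descriptors, each a big-endian 32-bit
 * tag and 32-bit body length followed by the body itself.  A simplified
 * standalone walker with hypothetical names:
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() as a stand-in for be32_to_cpu() */

struct demo_tlv {
	uint32_t tag;	/* big-endian descriptor tag */
	uint32_t len;	/* big-endian body length in bytes */
	uint8_t body[];
};

static void demo_walk_tlvs(const uint8_t *buf, size_t buf_len,
			   void (*handle)(uint32_t tag, const uint8_t *body,
					  uint32_t body_len))
{
	size_t off = 0;

	while (off + sizeof(struct demo_tlv) <= buf_len) {
		const struct demo_tlv *tlv = (const void *)(buf + off);
		uint32_t body_len = ntohl(tlv->len);
		size_t desc_sz = sizeof(*tlv) + body_len;

		if (off + desc_sz > buf_len)
			break;		/* truncated descriptor: stop */
		handle(ntohl(tlv->tag), tlv->body, body_len);
		off += desc_sz;
	}
}
#endif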
8431
8432 /**
8433 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
8434 * @phba: pointer to lpfc hba data structure.
8435 * @pring: pointer to a SLI ring.
8436 * @vport: pointer to a host virtual N_Port data structure.
8437 * @elsiocb: pointer to lpfc els command iocb data structure.
8438 *
8439 * This routine is used for processing the IOCB associated with an unsolicited
8440 * event. It first determines whether there is an existing ndlp that matches
8441 * the DID from the unsolicited IOCB. If not, it will create a new one with
8442 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
8443 * IOCB is then used to invoke the proper routine and to set up proper state
8444 * of the discovery state machine.
8445 **/
8446 static void
8447 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8448 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
8449 {
8450 struct Scsi_Host *shost;
8451 struct lpfc_nodelist *ndlp;
8452 struct ls_rjt stat;
8453 uint32_t *payload, payload_len;
8454 uint32_t cmd, did, newnode;
8455 uint8_t rjt_exp, rjt_err = 0, init_link = 0;
8456 IOCB_t *icmd = &elsiocb->iocb;
8457 LPFC_MBOXQ_t *mbox;
8458
8459 if (!vport || !(elsiocb->context2))
8460 goto dropit;
8461
8462 newnode = 0;
8463 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
8464 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
8465 cmd = *payload;
8466 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
8467 lpfc_post_buffer(phba, pring, 1);
8468
8469 did = icmd->un.rcvels.remoteID;
8470 if (icmd->ulpStatus) {
8471 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8472 "RCV Unsol ELS: status:x%x/x%x did:x%x",
8473 icmd->ulpStatus, icmd->un.ulpWord[4], did);
8474 goto dropit;
8475 }
8476
8477 /* Check to see if link went down during discovery */
8478 if (lpfc_els_chk_latt(vport))
8479 goto dropit;
8480
8481 /* Ignore traffic received during vport shutdown. */
8482 if (vport->load_flag & FC_UNLOADING)
8483 goto dropit;
8484
8485 /* If NPort discovery is delayed, drop incoming ELS */
8486 if ((vport->fc_flag & FC_DISC_DELAYED) &&
8487 (cmd != ELS_CMD_PLOGI))
8488 goto dropit;
8489
8490 ndlp = lpfc_findnode_did(vport, did);
8491 if (!ndlp) {
8492 /* Cannot find existing Fabric ndlp, so allocate a new one */
8493 ndlp = lpfc_nlp_init(vport, did);
8494 if (!ndlp)
8495 goto dropit;
8496 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
8497 newnode = 1;
8498 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
8499 ndlp->nlp_type |= NLP_FABRIC;
8500 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
8501 ndlp = lpfc_enable_node(vport, ndlp,
8502 NLP_STE_UNUSED_NODE);
8503 if (!ndlp)
8504 goto dropit;
8505 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
8506 newnode = 1;
8507 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
8508 ndlp->nlp_type |= NLP_FABRIC;
8509 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
8510 /* This is similar to the new node path */
8511 ndlp = lpfc_nlp_get(ndlp);
8512 if (!ndlp)
8513 goto dropit;
8514 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
8515 newnode = 1;
8516 }
8517
8518 phba->fc_stat.elsRcvFrame++;
8519
8520 /*
8521 * Do not process any unsolicited ELS commands
8522 * if the ndlp is in DEV_LOSS
8523 */
8524 shost = lpfc_shost_from_vport(vport);
8525 spin_lock_irq(shost->host_lock);
8526 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
8527 spin_unlock_irq(shost->host_lock);
8528 if (newnode)
8529 lpfc_nlp_put(ndlp);
8530 goto dropit;
8531 }
8532 spin_unlock_irq(shost->host_lock);
8533
8534 elsiocb->context1 = lpfc_nlp_get(ndlp);
8535 elsiocb->vport = vport;
8536
8537 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
8538 cmd &= ELS_CMD_MASK;
8539 }
8540 /* ELS command <elsCmd> received from NPORT <did> */
8541 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8542 "0112 ELS command x%x received from NPORT x%x "
8543 "Data: x%x x%x x%x x%x\n",
8544 cmd, did, vport->port_state, vport->fc_flag,
8545 vport->fc_myDID, vport->fc_prevDID);
8546
8547 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
8548 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
8549 (cmd != ELS_CMD_FLOGI) &&
8550 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
8551 rjt_err = LSRJT_LOGICAL_BSY;
8552 rjt_exp = LSEXP_NOTHING_MORE;
8553 goto lsrjt;
8554 }
8555
8556 switch (cmd) {
8557 case ELS_CMD_PLOGI:
8558 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8559 "RCV PLOGI: did:x%x/ste:x%x flg:x%x",
8560 did, vport->port_state, ndlp->nlp_flag);
8561
8562 phba->fc_stat.elsRcvPLOGI++;
8563 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
8564 if (phba->sli_rev == LPFC_SLI_REV4 &&
8565 (phba->pport->fc_flag & FC_PT2PT)) {
8566 vport->fc_prevDID = vport->fc_myDID;
8567 /* Our DID needs to be updated before registering
8568 * the vfi. This is done in lpfc_rcv_plogi but
8569 * that is called after the reg_vfi.
8570 */
8571 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
8572 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8573 "3312 Remote port assigned DID x%x "
8574 "%x\n", vport->fc_myDID,
8575 vport->fc_prevDID);
8576 }
8577
8578 lpfc_send_els_event(vport, ndlp, payload);
8579
8580 /* If Nport discovery is delayed, reject PLOGIs */
8581 if (vport->fc_flag & FC_DISC_DELAYED) {
8582 rjt_err = LSRJT_UNABLE_TPC;
8583 rjt_exp = LSEXP_NOTHING_MORE;
8584 break;
8585 }
8586
8587 if (vport->port_state < LPFC_DISC_AUTH) {
8588 if (!(phba->pport->fc_flag & FC_PT2PT) ||
8589 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
8590 rjt_err = LSRJT_UNABLE_TPC;
8591 rjt_exp = LSEXP_NOTHING_MORE;
8592 break;
8593 }
8594 }
8595
8596 spin_lock_irq(shost->host_lock);
8597 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
8598 spin_unlock_irq(shost->host_lock);
8599
8600 lpfc_disc_state_machine(vport, ndlp, elsiocb,
8601 NLP_EVT_RCV_PLOGI);
8602
8603 break;
8604 case ELS_CMD_FLOGI:
8605 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8606 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
8607 did, vport->port_state, ndlp->nlp_flag);
8608
8609 phba->fc_stat.elsRcvFLOGI++;
8610
8611 /* If the driver believes fabric discovery is done and is ready,
8612 * bounce the link. There is some discrepancy.
8613 */
8614 if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
8615 vport->fc_flag & FC_PT2PT &&
8616 vport->rcv_flogi_cnt >= 1) {
8617 rjt_err = LSRJT_LOGICAL_BSY;
8618 rjt_exp = LSEXP_NOTHING_MORE;
8619 init_link++;
8620 goto lsrjt;
8621 }
8622
8623 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
8624 if (newnode)
8625 lpfc_nlp_put(ndlp);
8626 break;
8627 case ELS_CMD_LOGO:
8628 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8629 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
8630 did, vport->port_state, ndlp->nlp_flag);
8631
8632 phba->fc_stat.elsRcvLOGO++;
8633 lpfc_send_els_event(vport, ndlp, payload);
8634 if (vport->port_state < LPFC_DISC_AUTH) {
8635 rjt_err = LSRJT_UNABLE_TPC;
8636 rjt_exp = LSEXP_NOTHING_MORE;
8637 break;
8638 }
8639 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
8640 break;
8641 case ELS_CMD_PRLO:
8642 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8643 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
8644 did, vport->port_state, ndlp->nlp_flag);
8645
8646 phba->fc_stat.elsRcvPRLO++;
8647 lpfc_send_els_event(vport, ndlp, payload);
8648 if (vport->port_state < LPFC_DISC_AUTH) {
8649 rjt_err = LSRJT_UNABLE_TPC;
8650 rjt_exp = LSEXP_NOTHING_MORE;
8651 break;
8652 }
8653 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
8654 break;
8655 case ELS_CMD_LCB:
8656 phba->fc_stat.elsRcvLCB++;
8657 lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
8658 break;
8659 case ELS_CMD_RDP:
8660 phba->fc_stat.elsRcvRDP++;
8661 lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
8662 break;
8663 case ELS_CMD_RSCN:
8664 phba->fc_stat.elsRcvRSCN++;
8665 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
8666 if (newnode)
8667 lpfc_nlp_put(ndlp);
8668 break;
8669 case ELS_CMD_ADISC:
8670 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8671 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
8672 did, vport->port_state, ndlp->nlp_flag);
8673
8674 lpfc_send_els_event(vport, ndlp, payload);
8675 phba->fc_stat.elsRcvADISC++;
8676 if (vport->port_state < LPFC_DISC_AUTH) {
8677 rjt_err = LSRJT_UNABLE_TPC;
8678 rjt_exp = LSEXP_NOTHING_MORE;
8679 break;
8680 }
8681 lpfc_disc_state_machine(vport, ndlp, elsiocb,
8682 NLP_EVT_RCV_ADISC);
8683 break;
8684 case ELS_CMD_PDISC:
8685 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8686 "RCV PDISC: did:x%x/ste:x%x flg:x%x",
8687 did, vport->port_state, ndlp->nlp_flag);
8688
8689 phba->fc_stat.elsRcvPDISC++;
8690 if (vport->port_state < LPFC_DISC_AUTH) {
8691 rjt_err = LSRJT_UNABLE_TPC;
8692 rjt_exp = LSEXP_NOTHING_MORE;
8693 break;
8694 }
8695 lpfc_disc_state_machine(vport, ndlp, elsiocb,
8696 NLP_EVT_RCV_PDISC);
8697 break;
8698 case ELS_CMD_FARPR:
8699 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8700 "RCV FARPR: did:x%x/ste:x%x flg:x%x",
8701 did, vport->port_state, ndlp->nlp_flag);
8702
8703 phba->fc_stat.elsRcvFARPR++;
8704 lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
8705 break;
8706 case ELS_CMD_FARP:
8707 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8708 "RCV FARP: did:x%x/ste:x%x flg:x%x",
8709 did, vport->port_state, ndlp->nlp_flag);
8710
8711 phba->fc_stat.elsRcvFARP++;
8712 lpfc_els_rcv_farp(vport, elsiocb, ndlp);
8713 break;
8714 case ELS_CMD_FAN:
8715 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8716 "RCV FAN: did:x%x/ste:x%x flg:x%x",
8717 did, vport->port_state, ndlp->nlp_flag);
8718
8719 phba->fc_stat.elsRcvFAN++;
8720 lpfc_els_rcv_fan(vport, elsiocb, ndlp);
8721 break;
8722 case ELS_CMD_PRLI:
8723 case ELS_CMD_NVMEPRLI:
8724 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8725 "RCV PRLI: did:x%x/ste:x%x flg:x%x",
8726 did, vport->port_state, ndlp->nlp_flag);
8727
8728 phba->fc_stat.elsRcvPRLI++;
8729 if ((vport->port_state < LPFC_DISC_AUTH) &&
8730 (vport->fc_flag & FC_FABRIC)) {
8731 rjt_err = LSRJT_UNABLE_TPC;
8732 rjt_exp = LSEXP_NOTHING_MORE;
8733 break;
8734 }
8735 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
8736 break;
8737 case ELS_CMD_LIRR:
8738 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8739 "RCV LIRR: did:x%x/ste:x%x flg:x%x",
8740 did, vport->port_state, ndlp->nlp_flag);
8741
8742 phba->fc_stat.elsRcvLIRR++;
8743 lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
8744 if (newnode)
8745 lpfc_nlp_put(ndlp);
8746 break;
8747 case ELS_CMD_RLS:
8748 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8749 "RCV RLS: did:x%x/ste:x%x flg:x%x",
8750 did, vport->port_state, ndlp->nlp_flag);
8751
8752 phba->fc_stat.elsRcvRLS++;
8753 lpfc_els_rcv_rls(vport, elsiocb, ndlp);
8754 if (newnode)
8755 lpfc_nlp_put(ndlp);
8756 break;
8757 case ELS_CMD_RPL:
8758 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8759 "RCV RPL: did:x%x/ste:x%x flg:x%x",
8760 did, vport->port_state, ndlp->nlp_flag);
8761
8762 phba->fc_stat.elsRcvRPL++;
8763 lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
8764 if (newnode)
8765 lpfc_nlp_put(ndlp);
8766 break;
8767 case ELS_CMD_RNID:
8768 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8769 "RCV RNID: did:x%x/ste:x%x flg:x%x",
8770 did, vport->port_state, ndlp->nlp_flag);
8771
8772 phba->fc_stat.elsRcvRNID++;
8773 lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
8774 if (newnode)
8775 lpfc_nlp_put(ndlp);
8776 break;
8777 case ELS_CMD_RTV:
8778 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8779 "RCV RTV: did:x%x/ste:x%x flg:x%x",
8780 did, vport->port_state, ndlp->nlp_flag);
8781 phba->fc_stat.elsRcvRTV++;
8782 lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
8783 if (newnode)
8784 lpfc_nlp_put(ndlp);
8785 break;
8786 case ELS_CMD_RRQ:
8787 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8788 "RCV RRQ: did:x%x/ste:x%x flg:x%x",
8789 did, vport->port_state, ndlp->nlp_flag);
8790
8791 phba->fc_stat.elsRcvRRQ++;
8792 lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
8793 if (newnode)
8794 lpfc_nlp_put(ndlp);
8795 break;
8796 case ELS_CMD_ECHO:
8797 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8798 "RCV ECHO: did:x%x/ste:x%x flg:x%x",
8799 did, vport->port_state, ndlp->nlp_flag);
8800
8801 phba->fc_stat.elsRcvECHO++;
8802 lpfc_els_rcv_echo(vport, elsiocb, ndlp);
8803 if (newnode)
8804 lpfc_nlp_put(ndlp);
8805 break;
8806 case ELS_CMD_REC:
8807 /* receive this due to exchange closed */
8808 rjt_err = LSRJT_UNABLE_TPC;
8809 rjt_exp = LSEXP_INVALID_OX_RX;
8810 break;
8811 case ELS_CMD_FPIN:
8812 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8813 "RCV FPIN: did:x%x/ste:x%x flg:x%x",
8814 did, vport->port_state, ndlp->nlp_flag);
8815
8816 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
8817 payload_len);
8818
8819 /* There are no replies, so no rjt codes */
8820 break;
8821 default:
8822 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8823 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
8824 cmd, did, vport->port_state);
8825
8826 /* Unsupported ELS command, reject */
8827 rjt_err = LSRJT_CMD_UNSUPPORTED;
8828 rjt_exp = LSEXP_NOTHING_MORE;
8829
8830 /* Unknown ELS command <elsCmd> received from NPORT <did> */
8831 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8832 "0115 Unknown ELS command x%x "
8833 "received from NPORT x%x\n", cmd, did);
8834 if (newnode)
8835 lpfc_nlp_put(ndlp);
8836 break;
8837 }
8838
8839 lsrjt:
8840 /* check if we need to LS_RJT the received ELS cmd */
8841 if (rjt_err) {
8842 memset(&stat, 0, sizeof(stat));
8843 stat.un.b.lsRjtRsnCode = rjt_err;
8844 stat.un.b.lsRjtRsnCodeExp = rjt_exp;
8845 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
8846 NULL);
8847 }
8848
8849 lpfc_nlp_put(elsiocb->context1);
8850 elsiocb->context1 = NULL;
8851
8852 /* Special case. Driver received an unsolicited command that
8853 * is unsupportable given the driver's current state. Reset the
8854 * link and start over.
8855 */
8856 if (init_link) {
8857 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8858 if (!mbox)
8859 return;
8860 lpfc_linkdown(phba);
8861 lpfc_init_link(phba, mbox,
8862 phba->cfg_topology,
8863 phba->cfg_link_speed);
8864 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
8865 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
8866 mbox->vport = vport;
8867 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
8868 MBX_NOT_FINISHED)
8869 mempool_free(mbox, phba->mbox_mem_pool);
8870 }
8871
8872 return;
8873
8874 dropit:
8875 if (vport && !(vport->load_flag & FC_UNLOADING))
8876 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8877 "0111 Dropping received ELS cmd "
8878 "Data: x%x x%x x%x\n",
8879 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
8880 phba->fc_stat.elsRcvDrop++;
8881 }
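
/*
 * Illustrative sketch (not part of the driver and not compiled): the gate
 * near the top of the dispatch above.  Until the local FLOGI completes,
 * every unsolicited ELS is bounced with a "logical busy" LS_RJT, except
 * FLOGI itself, or PLOGI when running point-to-point (which assigns our
 * DID).  Names and the FC-LS command codes used here are simplified.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool demo_reject_before_flogi(uint8_t els_code, bool fabric_ready,
				     bool pt2pt)
{
	if (fabric_ready)
		return false;		/* fabric/FLOGI handshake complete */
	if (els_code == 0x04)		/* FLOGI (FC-LS command code 0x04) */
		return false;
	if (els_code == 0x03 && pt2pt)	/* PLOGI in pt2pt assigns our DID */
		return false;
	return true;			/* bounce with LSRJT_LOGICAL_BSY */
}
#endif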
8882
8883 /**
8884 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
8885 * @phba: pointer to lpfc hba data structure.
8886 * @pring: pointer to a SLI ring.
8887 * @elsiocb: pointer to lpfc els iocb data structure.
8888 *
8889 * This routine is used to process an unsolicited event received from a SLI
8890 * (Service Level Interface) ring. The actual processing of the data buffer
8891 * associated with the unsolicited event is done by invoking the routine
8892 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
8893 * SLI ring on which the unsolicited event was received.
8894 **/
8895 void
8896 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8897 struct lpfc_iocbq *elsiocb)
8898 {
8899 struct lpfc_vport *vport = phba->pport;
8900 IOCB_t *icmd = &elsiocb->iocb;
8901 dma_addr_t paddr;
8902 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
8903 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
8904
8905 elsiocb->context1 = NULL;
8906 elsiocb->context2 = NULL;
8907 elsiocb->context3 = NULL;
8908
8909 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
8910 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
8911 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
8912 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
8913 IOERR_RCV_BUFFER_WAITING) {
8914 phba->fc_stat.NoRcvBuf++;
8915 /* Not enough posted buffers; Try posting more buffers */
8916 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
8917 lpfc_post_buffer(phba, pring, 0);
8918 return;
8919 }
8920
8921 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
8922 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
8923 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
8924 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
8925 vport = phba->pport;
8926 else
8927 vport = lpfc_find_vport_by_vpid(phba,
8928 icmd->unsli3.rcvsli3.vpi);
8929 }
8930
8931 /* If there are no BDEs associated
8932 * with this IOCB, there is nothing to do.
8933 */
8934 if (icmd->ulpBdeCount == 0)
8935 return;
8936
8937 /* type of ELS cmd is first 32bit word
8938 * in packet
8939 */
8940 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
8941 elsiocb->context2 = bdeBuf1;
8942 } else {
8943 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
8944 icmd->un.cont64[0].addrLow);
8945 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
8946 paddr);
8947 }
8948
8949 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
8950 /*
8951 * The different unsolicited event handlers would tell us
8952 * if they are done with "mp" by setting context2 to NULL.
8953 */
8954 if (elsiocb->context2) {
8955 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
8956 elsiocb->context2 = NULL;
8957 }
8958
8959 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
8960 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
8961 icmd->ulpBdeCount == 2) {
8962 elsiocb->context2 = bdeBuf2;
8963 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
8964 /* free mp if we are done with it */
8965 if (elsiocb->context2) {
8966 lpfc_in_buf_free(phba, elsiocb->context2);
8967 elsiocb->context2 = NULL;
8968 }
8969 }
8970 }
8971
8972 static void
8973 lpfc_start_fdmi(struct lpfc_vport *vport)
8974 {
8975 struct lpfc_nodelist *ndlp;
8976
8977 /* If this is the first time, allocate an ndlp and initialize
8978 * it. Otherwise, make sure the node is enabled and then do the
8979 * login.
8980 */
8981 ndlp = lpfc_findnode_did(vport, FDMI_DID);
8982 if (!ndlp) {
8983 ndlp = lpfc_nlp_init(vport, FDMI_DID);
8984 if (ndlp) {
8985 ndlp->nlp_type |= NLP_FABRIC;
8986 } else {
8987 return;
8988 }
8989 }
8990 if (!NLP_CHK_NODE_ACT(ndlp))
8991 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
8992
8993 if (ndlp) {
8994 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8995 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
8996 }
8997 }
8998
8999 /**
9000 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
9001 * @phba: pointer to lpfc hba data structure.
9002 * @vport: pointer to a virtual N_Port data structure.
9003 *
9004 * This routine issues a Port Login (PLOGI) to the Name Server with
9005 * State Change Request (SCR) for a @vport. This routine will create an
9006 * ndlp for the Name Server associated to the @vport if such node does
9007 * not already exist. The PLOGI to Name Server is issued by invoking the
9008 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
9009 * (FDMI) is configured for the @vport, an FDMI node will be created and
9010 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
9011 **/
9012 void
9013 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
9014 {
9015 struct lpfc_nodelist *ndlp;
9016 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9017
9018 /*
9019 * If lpfc_delay_discovery parameter is set and the clean address
9020 * bit is cleared and fc fabric parameters changed, delay FC NPort
9021 * discovery.
9022 */
9023 spin_lock_irq(shost->host_lock);
9024 if (vport->fc_flag & FC_DISC_DELAYED) {
9025 spin_unlock_irq(shost->host_lock);
9026 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9027 "3334 Delay fc port discovery for %d seconds\n",
9028 phba->fc_ratov);
9029 mod_timer(&vport->delayed_disc_tmo,
9030 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
9031 return;
9032 }
9033 spin_unlock_irq(shost->host_lock);
9034
9035 ndlp = lpfc_findnode_did(vport, NameServer_DID);
9036 if (!ndlp) {
9037 ndlp = lpfc_nlp_init(vport, NameServer_DID);
9038 if (!ndlp) {
9039 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9040 lpfc_disc_start(vport);
9041 return;
9042 }
9043 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9044 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9045 "0251 NameServer login: no memory\n");
9046 return;
9047 }
9048 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
9049 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
9050 if (!ndlp) {
9051 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9052 lpfc_disc_start(vport);
9053 return;
9054 }
9055 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9056 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9057 "0348 NameServer login: node freed\n");
9058 return;
9059 }
9060 }
9061 ndlp->nlp_type |= NLP_FABRIC;
9062
9063 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
9064
9065 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
9066 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9067 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9068 "0252 Cannot issue NameServer login\n");
9069 return;
9070 }
9071
9072 if ((phba->cfg_enable_SmartSAN ||
9073 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
9074 (vport->load_flag & FC_ALLOW_FDMI))
9075 lpfc_start_fdmi(vport);
9076 }
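
/*
 * Illustrative sketch (not part of the driver and not compiled): the
 * delayed-discovery path above simply pushes a timer out by RATOV seconds
 * using the standard jiffies arithmetic.  The timer passed in here is
 * hypothetical.
 */
#if 0
static void demo_delay_disc(struct timer_list *tmo, unsigned int ratov_secs)
{
	/* Fire ratov_secs from now; mod_timer() re-arms a pending timer. */
	mod_timer(tmo, jiffies + msecs_to_jiffies(1000 * ratov_secs));
}
#endif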
9077
9078 /**
9079 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
9080 * @phba: pointer to lpfc hba data structure.
9081 * @pmb: pointer to the driver internal queue element for mailbox command.
9082 *
9083 * This routine is the completion callback function to register new vport
9084 * mailbox command. If the new vport mailbox command completes successfully,
9085 * the fabric registration login shall be performed on physical port (the
9086 * new vport created is actually a physical port, with VPI 0) or the port
9087 * login to Name Server for State Change Request (SCR) will be performed
9088 * on virtual port (real virtual port, with VPI greater than 0).
9089 **/
9090 static void
9091 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
9092 {
9093 struct lpfc_vport *vport = pmb->vport;
9094 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9095 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
9096 MAILBOX_t *mb = &pmb->u.mb;
9097 int rc;
9098
9099 spin_lock_irq(shost->host_lock);
9100 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
9101 spin_unlock_irq(shost->host_lock);
9102
9103 if (mb->mbxStatus) {
9104 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9105 "0915 Register VPI failed : Status: x%x"
9106 " upd bit: x%x \n", mb->mbxStatus,
9107 mb->un.varRegVpi.upd);
9108 if (phba->sli_rev == LPFC_SLI_REV4 &&
9109 mb->un.varRegVpi.upd)
9110 goto mbox_err_exit;
9111
9112 switch (mb->mbxStatus) {
9113 case 0x11: /* unsupported feature */
9114 case 0x9603: /* max_vpi exceeded */
9115 case 0x9602: /* Link event since CLEAR_LA */
9116 /* giving up on vport registration */
9117 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9118 spin_lock_irq(shost->host_lock);
9119 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
9120 spin_unlock_irq(shost->host_lock);
9121 lpfc_can_disctmo(vport);
9122 break;
9123 /* If reg_vpi fails with invalid VPI status, re-init VPI */
9124 case 0x20:
9125 spin_lock_irq(shost->host_lock);
9126 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
9127 spin_unlock_irq(shost->host_lock);
9128 lpfc_init_vpi(phba, pmb, vport->vpi);
9129 pmb->vport = vport;
9130 pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
9131 rc = lpfc_sli_issue_mbox(phba, pmb,
9132 MBX_NOWAIT);
9133 if (rc == MBX_NOT_FINISHED) {
9134 lpfc_printf_vlog(vport, KERN_ERR,
9135 LOG_TRACE_EVENT,
9136 "2732 Failed to issue INIT_VPI"
9137 " mailbox command\n");
9138 } else {
9139 lpfc_nlp_put(ndlp);
9140 return;
9141 }
9142 fallthrough;
9143 default:
9144 /* Try to recover from this error */
9145 if (phba->sli_rev == LPFC_SLI_REV4)
9146 lpfc_sli4_unreg_all_rpis(vport);
9147 lpfc_mbx_unreg_vpi(vport);
9148 spin_lock_irq(shost->host_lock);
9149 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
9150 spin_unlock_irq(shost->host_lock);
9151 if (mb->mbxStatus == MBX_NOT_FINISHED)
9152 break;
9153 if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
9154 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
9155 if (phba->sli_rev == LPFC_SLI_REV4)
9156 lpfc_issue_init_vfi(vport);
9157 else
9158 lpfc_initial_flogi(vport);
9159 } else {
9160 lpfc_initial_fdisc(vport);
9161 }
9162 break;
9163 }
9164 } else {
9165 spin_lock_irq(shost->host_lock);
9166 vport->vpi_state |= LPFC_VPI_REGISTERED;
9167 spin_unlock_irq(shost->host_lock);
9168 if (vport == phba->pport) {
9169 if (phba->sli_rev < LPFC_SLI_REV4)
9170 lpfc_issue_fabric_reglogin(vport);
9171 else {
9172 /*
9173 * If the physical port is instantiated using
9174 * FDISC, do not start vport discovery.
9175 */
9176 if (vport->port_state != LPFC_FDISC)
9177 lpfc_start_fdiscs(phba);
9178 lpfc_do_scr_ns_plogi(phba, vport);
9179 }
9180 } else
9181 lpfc_do_scr_ns_plogi(phba, vport);
9182 }
9183 mbox_err_exit:
9184 /* Now, we decrement the ndlp reference count held for this
9185 * callback function
9186 */
9187 lpfc_nlp_put(ndlp);
9188
9189 mempool_free(pmb, phba->mbox_mem_pool);
9190 return;
9191 }
9192
9193 /**
9194 * lpfc_register_new_vport - Register a new vport with a HBA
9195 * @phba: pointer to lpfc hba data structure.
9196 * @vport: pointer to a host virtual N_Port data structure.
9197 * @ndlp: pointer to a node-list data structure.
9198 *
9199 * This routine registers the @vport as a new virtual port with a HBA.
9200 * It is done through a registering vpi mailbox command.
9201 **/
9202 void
9203 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
9204 struct lpfc_nodelist *ndlp)
9205 {
9206 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9207 LPFC_MBOXQ_t *mbox;
9208
9209 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9210 if (mbox) {
9211 lpfc_reg_vpi(vport, mbox);
9212 mbox->vport = vport;
9213 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
9214 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
9215 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
9216 == MBX_NOT_FINISHED) {
9217 /* mailbox command was not successful; decrement the ndlp
9218 * reference count held for this command
9219 */
9220 lpfc_nlp_put(ndlp);
9221 mempool_free(mbox, phba->mbox_mem_pool);
9222
9223 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9224 "0253 Register VPI: Can't send mbox\n");
9225 goto mbox_err_exit;
9226 }
9227 } else {
9228 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9229 "0254 Register VPI: no memory\n");
9230 goto mbox_err_exit;
9231 }
9232 return;
9233
9234 mbox_err_exit:
9235 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9236 spin_lock_irq(shost->host_lock);
9237 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
9238 spin_unlock_irq(shost->host_lock);
9239 return;
9240 }
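
/*
 * Illustrative sketch (not part of the driver and not compiled): the
 * mailbox path above follows a common refcount discipline for asynchronous
 * requests.  Take a reference before submitting; on a submit failure the
 * caller drops it, otherwise the completion handler is responsible for
 * dropping it.  All names below are hypothetical.
 */
#if 0
#include <stdbool.h>

struct demo_node { int refcount; };

static void demo_node_get(struct demo_node *n) { n->refcount++; }
static void demo_node_put(struct demo_node *n) { n->refcount--; }

static bool demo_issue_with_ref(struct demo_node *n,
				bool (*submit)(struct demo_node *))
{
	demo_node_get(n);		/* reference owned by the request */
	if (!submit(n)) {
		demo_node_put(n);	/* submission failed: drop it here */
		return false;
	}
	return true;			/* completion handler drops it later */
}
#endif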
9241
9242 /**
9243 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
9244 * @phba: pointer to lpfc hba data structure.
9245 *
9246 * This routine cancels the retry delay timers to all the vports.
9247 **/
9248 void
9249 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
9250 {
9251 struct lpfc_vport **vports;
9252 struct lpfc_nodelist *ndlp;
9253 uint32_t link_state;
9254 int i;
9255
9256 /* Treat this failure as linkdown for all vports */
9257 link_state = phba->link_state;
9258 lpfc_linkdown(phba);
9259 phba->link_state = link_state;
9260
9261 vports = lpfc_create_vport_work_array(phba);
9262
9263 if (vports) {
9264 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9265 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
9266 if (ndlp)
9267 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
9268 lpfc_els_flush_cmd(vports[i]);
9269 }
9270 lpfc_destroy_vport_work_array(phba, vports);
9271 }
9272 }
9273
9274 /**
9275 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
9276 * @phba: pointer to lpfc hba data structure.
9277 *
9278 * This routine aborts all pending discovery commands and
9279 * starts a timer to retry FLOGI for the physical port
9280 * discovery.
9281 **/
9282 void
9283 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
9284 {
9285 struct lpfc_nodelist *ndlp;
9286 struct Scsi_Host *shost;
9287
9288 /* Cancel all the vports' retry delay timers */
9289 lpfc_cancel_all_vport_retry_delay_timer(phba);
9290
9291 /* If the fabric requires FLOGI, then re-instantiate the physical login */
9292 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
9293 if (!ndlp)
9294 return;
9295
9296 shost = lpfc_shost_from_vport(phba->pport);
9297 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
9298 spin_lock_irq(shost->host_lock);
9299 ndlp->nlp_flag |= NLP_DELAY_TMO;
9300 spin_unlock_irq(shost->host_lock);
9301 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
9302 phba->pport->port_state = LPFC_FLOGI;
9303 return;
9304 }
9305
9306 /**
9307 * lpfc_fabric_login_reqd - Check if FLOGI required.
9308 * @phba: pointer to lpfc hba data structure.
9309 * @cmdiocb: pointer to FDISC command iocb.
9310 * @rspiocb: pointer to FDISC response iocb.
9311 *
9312 * This routine checks if a FLOGI is required for FDISC
9313 * to succeed.
9314 **/
9315 static int
9316 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
9317 struct lpfc_iocbq *cmdiocb,
9318 struct lpfc_iocbq *rspiocb)
9319 {
9320
9321 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
9322 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
9323 return 0;
9324 else
9325 return 1;
9326 }
9327
9328 /**
9329 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
9330 * @phba: pointer to lpfc hba data structure.
9331 * @cmdiocb: pointer to lpfc command iocb data structure.
9332 * @rspiocb: pointer to lpfc response iocb data structure.
9333 *
9334 * This routine is the completion callback function to a Fabric Discover
9335 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
9336 * single threaded, each FDISC completion callback function will reset
9337 * the discovery timer for all vports such that the timers will not get
9338 * unnecessary timeouts. The function checks the FDISC IOCB status. If an error
9339 * is detected, the vport will be set to FC_VPORT_FAILED state. Otherwise, the
9340 * vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
9341 * assigned to the vport has been changed with the completion of the FDISC
9342 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
9343 * are unregistered from the HBA, and then the lpfc_register_new_vport()
9344 * routine is invoked to register new vport with the HBA. Otherwise, the
9345 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
9346 * Server for State Change Request (SCR).
9347 **/
9348 static void
9349 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9350 struct lpfc_iocbq *rspiocb)
9351 {
9352 struct lpfc_vport *vport = cmdiocb->vport;
9353 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9354 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
9355 struct lpfc_nodelist *np;
9356 struct lpfc_nodelist *next_np;
9357 IOCB_t *irsp = &rspiocb->iocb;
9358 struct lpfc_iocbq *piocb;
9359 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
9360 struct serv_parm *sp;
9361 uint8_t fabric_param_changed;
9362
9363 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9364 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
9365 irsp->ulpStatus, irsp->un.ulpWord[4],
9366 vport->fc_prevDID);
9367 /* Since all FDISCs are being single threaded, we
9368 * must reset the discovery timer for ALL vports
9369 * waiting to send FDISC when one completes.
9370 */
9371 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
9372 lpfc_set_disctmo(piocb->vport);
9373 }
9374
9375 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9376 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
9377 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
9378
9379 if (irsp->ulpStatus) {
9380
9381 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
9382 lpfc_retry_pport_discovery(phba);
9383 goto out;
9384 }
9385
9386 /* Check for retry */
9387 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
9388 goto out;
9389 /* FDISC failed */
9390 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9391 "0126 FDISC failed. (x%x/x%x)\n",
9392 irsp->ulpStatus, irsp->un.ulpWord[4]);
9393 goto fdisc_failed;
9394 }
9395 spin_lock_irq(shost->host_lock);
9396 vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
9397 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
9398 vport->fc_flag |= FC_FABRIC;
9399 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
9400 vport->fc_flag |= FC_PUBLIC_LOOP;
9401 spin_unlock_irq(shost->host_lock);
9402
9403 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
9404 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
9405 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
9406 if (!prsp)
9407 goto out;
9408 sp = prsp->virt + sizeof(uint32_t);
9409 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
9410 memcpy(&vport->fabric_portname, &sp->portName,
9411 sizeof(struct lpfc_name));
9412 memcpy(&vport->fabric_nodename, &sp->nodeName,
9413 sizeof(struct lpfc_name));
9414 if (fabric_param_changed &&
9415 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
9416 /* If our NportID changed, we need to ensure all
9417 * remaining NPORTs get unreg_login'ed so we can
9418 * issue unreg_vpi.
9419 */
9420 list_for_each_entry_safe(np, next_np,
9421 &vport->fc_nodes, nlp_listp) {
9422 if (!NLP_CHK_NODE_ACT(ndlp) ||
9423 (np->nlp_state != NLP_STE_NPR_NODE) ||
9424 !(np->nlp_flag & NLP_NPR_ADISC))
9425 continue;
9426 spin_lock_irq(shost->host_lock);
9427 np->nlp_flag &= ~NLP_NPR_ADISC;
9428 spin_unlock_irq(shost->host_lock);
9429 lpfc_unreg_rpi(vport, np);
9430 }
9431 lpfc_cleanup_pending_mbox(vport);
9432
9433 if (phba->sli_rev == LPFC_SLI_REV4)
9434 lpfc_sli4_unreg_all_rpis(vport);
9435
9436 lpfc_mbx_unreg_vpi(vport);
9437 spin_lock_irq(shost->host_lock);
9438 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
9439 if (phba->sli_rev == LPFC_SLI_REV4)
9440 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
9441 else
9442 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
9443 spin_unlock_irq(shost->host_lock);
9444 } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
9445 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
9446 /*
9447 * Driver needs to re-reg VPI in order for f/w
9448 * to update the MAC address.
9449 */
9450 lpfc_register_new_vport(phba, vport, ndlp);
9451 goto out;
9452 }
9453
9454 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
9455 lpfc_issue_init_vpi(vport);
9456 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
9457 lpfc_register_new_vport(phba, vport, ndlp);
9458 else
9459 lpfc_do_scr_ns_plogi(phba, vport);
9460 goto out;
9461 fdisc_failed:
9462 if (vport->fc_vport &&
9463 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
9464 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9465 /* Cancel discovery timer */
9466 lpfc_can_disctmo(vport);
9467 lpfc_nlp_put(ndlp);
9468 out:
9469 lpfc_els_free_iocb(phba, cmdiocb);
9470 }
9471
9472 /**
9473 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
9474 * @vport: pointer to a virtual N_Port data structure.
9475 * @ndlp: pointer to a node-list data structure.
9476 * @retry: number of retries to the command IOCB.
9477 *
9478 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
9479 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
9480 * routine to issue the IOCB, which makes sure only one outstanding fabric
9481 * IOCB will be sent off HBA at any given time.
9482 *
9483 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
9484 * will be incremented by 1 for holding the ndlp and the reference to ndlp
9485 * will be stored into the context1 field of the IOCB for the completion
9486 * callback function to the FDISC ELS command.
9487 *
9488 * Return code
9489 * 0 - Successfully issued fdisc iocb command
9490 * 1 - Failed to issue fdisc iocb command
9491 **/
9492 static int
9493 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
9494 uint8_t retry)
9495 {
9496 struct lpfc_hba *phba = vport->phba;
9497 IOCB_t *icmd;
9498 struct lpfc_iocbq *elsiocb;
9499 struct serv_parm *sp;
9500 uint8_t *pcmd;
9501 uint16_t cmdsize;
9502 int did = ndlp->nlp_DID;
9503 int rc;
9504
9505 vport->port_state = LPFC_FDISC;
9506 vport->fc_myDID = 0;
9507 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
9508 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
9509 ELS_CMD_FDISC);
9510 if (!elsiocb) {
9511 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9512 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9513 "0255 Issue FDISC: no IOCB\n");
9514 return 1;
9515 }
9516
9517 icmd = &elsiocb->iocb;
9518 icmd->un.elsreq64.myID = 0;
9519 icmd->un.elsreq64.fl = 1;
9520
9521 /*
9522 * SLI3 ports require a different context type value than SLI4.
9523 * Catch SLI3 ports here and override the prep.
9524 */
9525 if (phba->sli_rev == LPFC_SLI_REV3) {
9526 icmd->ulpCt_h = 1;
9527 icmd->ulpCt_l = 0;
9528 }
9529
9530 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
9531 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
9532 pcmd += sizeof(uint32_t); /* CSP Word 1 */
9533 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
9534 sp = (struct serv_parm *) pcmd;
9535 /* Setup CSPs accordingly for Fabric */
9536 sp->cmn.e_d_tov = 0;
9537 sp->cmn.w2.r_a_tov = 0;
9538 sp->cmn.virtual_fabric_support = 0;
9539 sp->cls1.classValid = 0;
9540 sp->cls2.seqDelivery = 1;
9541 sp->cls3.seqDelivery = 1;
9542
9543 pcmd += sizeof(uint32_t); /* CSP Word 2 */
9544 pcmd += sizeof(uint32_t); /* CSP Word 3 */
9545 pcmd += sizeof(uint32_t); /* CSP Word 4 */
9546 pcmd += sizeof(uint32_t); /* Port Name */
9547 memcpy(pcmd, &vport->fc_portname, 8);
9548 pcmd += sizeof(uint32_t); /* Node Name */
9549 pcmd += sizeof(uint32_t); /* Node Name */
9550 memcpy(pcmd, &vport->fc_nodename, 8);
9551 sp->cmn.valid_vendor_ver_level = 0;
9552 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
9553 lpfc_set_disctmo(vport);
9554
9555 phba->fc_stat.elsXmitFDISC++;
9556 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
9557
9558 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9559 "Issue FDISC: did:x%x",
9560 did, 0, 0);
9561
9562 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
9563 if (rc == IOCB_ERROR) {
9564 lpfc_els_free_iocb(phba, elsiocb);
9565 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9566 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9567 "0256 Issue FDISC: Cannot send IOCB\n");
9568 return 1;
9569 }
9570 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
9571 return 0;
9572 }
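
/*
 * Illustrative sketch only, not driver code: an initial-FDISC path for a
 * newly created NPIV vport could drive the routine above roughly as below.
 * The lookup of the fabric node and the error handling are assumptions made
 * for this example.
 *
 *	struct lpfc_nodelist *ndlp;
 *
 *	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 *	if (!ndlp || lpfc_issue_els_fdisc(vport, ndlp, 0)) {
 *		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 *		return;
 *	}
 *
 * On success the FDISC is either on the wire or queued on the fabric iocb
 * list, and lpfc_cmpl_els_fdisc() finishes vport bring-up when the LS_ACC
 * arrives.
 */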
9573
9574 /**
9575 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
9576 * @phba: pointer to lpfc hba data structure.
9577 * @cmdiocb: pointer to lpfc command iocb data structure.
9578 * @rspiocb: pointer to lpfc response iocb data structure.
9579 *
9580 * This routine is the completion callback function to the issuing of a LOGO
9581 * ELS command off a vport. It frees the command IOCB and then decrements the
9582 * reference count held on the ndlp for this completion function, indicating
9583 * that the reference to the ndlp is no longer needed. Note that the
9584 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
9585 * callback function, and an additional explicit ndlp reference decrement
9586 * will trigger the actual release of the ndlp.
9587 **/
9588 static void
9589 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9590 struct lpfc_iocbq *rspiocb)
9591 {
9592 struct lpfc_vport *vport = cmdiocb->vport;
9593 IOCB_t *irsp;
9594 struct lpfc_nodelist *ndlp;
9595 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9596
9597 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
9598 irsp = &rspiocb->iocb;
9599 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9600 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
9601 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
9602
9603 lpfc_els_free_iocb(phba, cmdiocb);
9604 vport->unreg_vpi_cmpl = VPORT_ERROR;
9605
9606 /* Trigger the release of the ndlp after logo */
9607 lpfc_nlp_put(ndlp);
9608
9609 /* NPIV LOGO completes to NPort <nlp_DID> */
9610 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9611 "2928 NPIV LOGO completes to NPort x%x "
9612 "Data: x%x x%x x%x x%x\n",
9613 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
9614 irsp->ulpTimeout, vport->num_disc_nodes);
9615
9616 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
9617 spin_lock_irq(shost->host_lock);
9618 vport->fc_flag &= ~FC_NDISC_ACTIVE;
9619 vport->fc_flag &= ~FC_FABRIC;
9620 spin_unlock_irq(shost->host_lock);
9621 lpfc_can_disctmo(vport);
9622 }
9623 }
9624
9625 /**
9626 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
9627 * @vport: pointer to a virtual N_Port data structure.
9628 * @ndlp: pointer to a node-list data structure.
9629 *
9630 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
9631 *
9632 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
9633 * will be incremented by 1 for holding the ndlp and the reference to ndlp
9634 * will be stored into the context1 field of the IOCB for the completion
9635 * callback function to the LOGO ELS command.
9636 *
9637 * Return codes
9638 * 0 - Successfully issued logo off the @vport
9639 * 1 - Failed to issue logo off the @vport
9640 **/
9641 int
9642 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
9643 {
9644 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9645 struct lpfc_hba *phba = vport->phba;
9646 struct lpfc_iocbq *elsiocb;
9647 uint8_t *pcmd;
9648 uint16_t cmdsize;
9649
9650 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
9651 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
9652 ELS_CMD_LOGO);
9653 if (!elsiocb)
9654 return 1;
9655
9656 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
9657 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
9658 pcmd += sizeof(uint32_t);
9659
9660 /* Fill in LOGO payload */
9661 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
9662 pcmd += sizeof(uint32_t);
9663 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
9664
9665 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9666 "Issue LOGO npiv did:x%x flg:x%x",
9667 ndlp->nlp_DID, ndlp->nlp_flag, 0);
9668
9669 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
9670 spin_lock_irq(shost->host_lock);
9671 ndlp->nlp_flag |= NLP_LOGO_SND;
9672 spin_unlock_irq(shost->host_lock);
9673 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
9674 IOCB_ERROR) {
9675 spin_lock_irq(shost->host_lock);
9676 ndlp->nlp_flag &= ~NLP_LOGO_SND;
9677 spin_unlock_irq(shost->host_lock);
9678 lpfc_els_free_iocb(phba, elsiocb);
9679 return 1;
9680 }
9681 return 0;
9682 }
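
/*
 * Illustrative sketch only: a vport teardown path might log the vport out
 * of the fabric with the routine above before unregistering its VPI. The
 * fabric-node lookup and the state test are assumptions made for this
 * example.
 *
 *	struct lpfc_nodelist *ndlp;
 *
 *	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 *	if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
 *	    !lpfc_issue_els_npiv_logo(vport, ndlp))
 *		return;
 *
 * and only continue tearing the vport down once the LOGO completion,
 * lpfc_cmpl_els_npiv_logo() above, has run.
 */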
9683
9684 /**
9685 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
9686 * @t: timer context used to obtain the lpfc hba.
9687 *
9688 * This routine is invoked by the fabric iocb block timer after
9689 * timeout. It posts the fabric iocb block timeout event by setting the
9690 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
9691 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
9692 * thread then invokes lpfc_unblock_fabric_iocbs() in response to the posted
9693 * WORKER_FABRIC_BLOCK_TMO event.
9694 **/
9695 void
9696 lpfc_fabric_block_timeout(struct timer_list *t)
9697 {
9698 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer);
9699 unsigned long iflags;
9700 uint32_t tmo_posted;
9701
9702 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
9703 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
9704 if (!tmo_posted)
9705 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
9706 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
9707
9708 if (!tmo_posted)
9709 lpfc_worker_wake_up(phba);
9710 return;
9711 }
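
/*
 * For reference, a minimal sketch of how this handler is wired up at HBA
 * initialization time (the actual call lives in the driver's setup code,
 * not here):
 *
 *	timer_setup(&phba->fabric_block_timer,
 *		    lpfc_fabric_block_timeout, 0);
 *
 * lpfc_block_fabric_iocbs() below then arms the timer for 100 ms with
 * mod_timer() whenever the fabric reports that it is busy.
 */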
9712
9713 /**
9714 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
9715 * @phba: pointer to lpfc hba data structure.
9716 *
9717 * This routine issues one fabric iocb from the driver internal list to
9718 * the HBA. It first checks whether it is ready to issue a fabric iocb to
9719 * the HBA (i.e. whether there is no fabric iocb already outstanding). If so,
9720 * it removes one pending fabric iocb from the driver internal list and
9721 * invokes the lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
9722 **/
9723 static void
9724 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
9725 {
9726 struct lpfc_iocbq *iocb;
9727 unsigned long iflags;
9728 int ret;
9729 IOCB_t *cmd;
9730
9731 repeat:
9732 iocb = NULL;
9733 spin_lock_irqsave(&phba->hbalock, iflags);
9734 /* Post any pending iocb to the SLI layer */
9735 if (atomic_read(&phba->fabric_iocb_count) == 0) {
9736 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
9737 list);
9738 if (iocb)
9739 /* Increment fabric iocb count to hold the position */
9740 atomic_inc(&phba->fabric_iocb_count);
9741 }
9742 spin_unlock_irqrestore(&phba->hbalock, iflags);
9743 if (iocb) {
9744 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
9745 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
9746 iocb->iocb_flag |= LPFC_IO_FABRIC;
9747
9748 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
9749 "Fabric sched1: ste:x%x",
9750 iocb->vport->port_state, 0, 0);
9751
9752 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
9753
9754 if (ret == IOCB_ERROR) {
9755 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
9756 iocb->fabric_iocb_cmpl = NULL;
9757 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
9758 cmd = &iocb->iocb;
9759 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
9760 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
9761 iocb->iocb_cmpl(phba, iocb, iocb);
9762
9763 atomic_dec(&phba->fabric_iocb_count);
9764 goto repeat;
9765 }
9766 }
9767
9768 return;
9769 }
9770
9771 /**
9772 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
9773 * @phba: pointer to lpfc hba data structure.
9774 *
9775 * This routine unblocks the issuing of fabric iocb commands. The function
9776 * clears the fabric iocb block bit and then invokes the routine
9777 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
9778 * from the driver internal fabric iocb list.
9779 **/
9780 void
9781 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
9782 {
9783 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9784
9785 lpfc_resume_fabric_iocbs(phba);
9786 return;
9787 }
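
/*
 * Illustrative sketch only: the worker thread is expected to consume the
 * WORKER_FABRIC_BLOCK_TMO event posted by lpfc_fabric_block_timeout()
 * roughly as follows (locking simplified for the example):
 *
 *	uint32_t evt;
 *
 *	spin_lock_irq(&phba->pport->work_port_lock);
 *	evt = phba->pport->work_port_events;
 *	phba->pport->work_port_events &= ~evt;
 *	spin_unlock_irq(&phba->pport->work_port_lock);
 *
 *	if (evt & WORKER_FABRIC_BLOCK_TMO)
 *		lpfc_unblock_fabric_iocbs(phba);
 */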
9788
9789 /**
9790 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
9791 * @phba: pointer to lpfc hba data structure.
9792 *
9793 * This routine blocks the issuing of fabric iocbs for a specified amount of
9794 * time (currently 100 ms). This is done by setting the fabric iocb block bit
9795 * and setting up a timeout timer for 100 ms. While the block bit is set, no
9796 * more fabric iocbs will be issued to the HBA.
9797 **/
9798 static void
9799 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
9800 {
9801 int blocked;
9802
9803 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9804 /* Start a timer to unblock fabric iocbs after 100ms */
9805 if (!blocked)
9806 mod_timer(&phba->fabric_block_timer,
9807 jiffies + msecs_to_jiffies(100));
9808
9809 return;
9810 }
9811
9812 /**
9813 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
9814 * @phba: pointer to lpfc hba data structure.
9815 * @cmdiocb: pointer to lpfc command iocb data structure.
9816 * @rspiocb: pointer to lpfc response iocb data structure.
9817 *
9818 * This routine is the callback function that is put to the fabric iocb's
9819 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
9820 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
9821 * function first restores and invokes the original iocb's callback function
9822 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
9823 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
9824 **/
9825 static void
9826 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9827 struct lpfc_iocbq *rspiocb)
9828 {
9829 struct ls_rjt stat;
9830
9831 BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
9832
9833 switch (rspiocb->iocb.ulpStatus) {
9834 case IOSTAT_NPORT_RJT:
9835 case IOSTAT_FABRIC_RJT:
9836 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
9837 lpfc_block_fabric_iocbs(phba);
9838 }
9839 break;
9840
9841 case IOSTAT_NPORT_BSY:
9842 case IOSTAT_FABRIC_BSY:
9843 lpfc_block_fabric_iocbs(phba);
9844 break;
9845
9846 case IOSTAT_LS_RJT:
9847 stat.un.lsRjtError =
9848 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
9849 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
9850 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
9851 lpfc_block_fabric_iocbs(phba);
9852 break;
9853 }
9854
9855 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
9856
9857 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
9858 cmdiocb->fabric_iocb_cmpl = NULL;
9859 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
9860 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
9861
9862 atomic_dec(&phba->fabric_iocb_count);
9863 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
9864 /* Post any pending iocbs to HBA */
9865 lpfc_resume_fabric_iocbs(phba);
9866 }
9867 }
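
/*
 * The completion-interposer pattern used by the fabric iocb code, end to
 * end (sketch of the steps already implemented above and in
 * lpfc_issue_fabric_iocb() below):
 *
 *	issue:     iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
 *	           iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
 *	           iocb->iocb_flag |= LPFC_IO_FABRIC;
 *
 *	complete:  cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
 *	           cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
 *	           atomic_dec(&phba->fabric_iocb_count);
 *	           lpfc_resume_fabric_iocbs(phba);
 *
 * so the ELS-level completion (for example lpfc_cmpl_els_fdisc) never needs
 * to know that its command was throttled through the fabric iocb list.
 */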
9868
9869 /**
9870 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
9871 * @phba: pointer to lpfc hba data structure.
9872 * @iocb: pointer to lpfc command iocb data structure.
9873 *
9874 * This routine is used as the top-level API for issuing a fabric iocb command
9875 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
9876 * function makes sure that only one fabric bound iocb will be outstanding at
9877 * any given time. As such, this function will first check to see whether there
9878 * is already an outstanding fabric iocb on the wire. If so, it will put the
9879 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
9880 * issued later. Otherwise, it will issue the iocb on the wire and update the
9881 * fabric iocb count to indicate that there is one fabric iocb on the wire.
9882 *
9883 * Note that this implementation can potentially send fabric IOCBs out of
9884 * order. The problem is that the "ready" check does not include the condition
9885 * that the internal fabric IOCB list is empty. As such, it is possible for a
9886 * fabric IOCB issued by this routine to jump ahead of the fabric IOCBs already
9887 * on the internal list.
9888 *
9889 * Return code
9890 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
9891 * IOCB_ERROR - failed to issue fabric iocb
9892 **/
9893 static int
9894 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
9895 {
9896 unsigned long iflags;
9897 int ready;
9898 int ret;
9899
9900 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
9901
9902 spin_lock_irqsave(&phba->hbalock, iflags);
9903 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
9904 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9905
9906 if (ready)
9907 /* Increment fabric iocb count to hold the position */
9908 atomic_inc(&phba->fabric_iocb_count);
9909 spin_unlock_irqrestore(&phba->hbalock, iflags);
9910 if (ready) {
9911 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
9912 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
9913 iocb->iocb_flag |= LPFC_IO_FABRIC;
9914
9915 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
9916 "Fabric sched2: ste:x%x",
9917 iocb->vport->port_state, 0, 0);
9918
9919 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
9920
9921 if (ret == IOCB_ERROR) {
9922 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
9923 iocb->fabric_iocb_cmpl = NULL;
9924 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
9925 atomic_dec(&phba->fabric_iocb_count);
9926 }
9927 } else {
9928 spin_lock_irqsave(&phba->hbalock, iflags);
9929 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
9930 spin_unlock_irqrestore(&phba->hbalock, iflags);
9931 ret = IOCB_SUCCESS;
9932 }
9933 return ret;
9934 }
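
/*
 * Usage sketch: fabric-bound ELS commands such as FDISC route through this
 * routine instead of calling lpfc_sli_issue_iocb() directly, as in
 * lpfc_issue_els_fdisc() above:
 *
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 *	if (rc == IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *
 * Note that IOCB_SUCCESS means either "on the wire" or "queued on
 * phba->fabric_iocb_list"; the caller cannot tell the difference and must
 * wait for the completion callback in both cases.
 */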
9935
9936 /**
9937 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
9938 * @vport: pointer to a virtual N_Port data structure.
9939 *
9940 * This routine aborts all the IOCBs associated with a @vport from the
9941 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
9942 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
9943 * list, removes each IOCB associated with the @vport off the list, sets the
9944 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
9945 * associated with the IOCB.
9946 **/
9947 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
9948 {
9949 LIST_HEAD(completions);
9950 struct lpfc_hba *phba = vport->phba;
9951 struct lpfc_iocbq *tmp_iocb, *piocb;
9952
9953 spin_lock_irq(&phba->hbalock);
9954 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
9955 list) {
9956
9957 if (piocb->vport != vport)
9958 continue;
9959
9960 list_move_tail(&piocb->list, &completions);
9961 }
9962 spin_unlock_irq(&phba->hbalock);
9963
9964 /* Cancel all the IOCBs from the completions list */
9965 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9966 IOERR_SLI_ABORTED);
9967 }
9968
9969 /**
9970 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
9971 * @ndlp: pointer to a node-list data structure.
9972 *
9973 * This routine aborts all the IOCBs associated with an @ndlp from the
9974 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
9975 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
9976 * list, removes each IOCB associated with the @ndlp off the list, sets the
9977 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
9978 * associated with the IOCB.
9979 **/
9980 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
9981 {
9982 LIST_HEAD(completions);
9983 struct lpfc_hba *phba = ndlp->phba;
9984 struct lpfc_iocbq *tmp_iocb, *piocb;
9985 struct lpfc_sli_ring *pring;
9986
9987 pring = lpfc_phba_elsring(phba);
9988
9989 if (unlikely(!pring))
9990 return;
9991
9992 spin_lock_irq(&phba->hbalock);
9993 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
9994 list) {
9995 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
9996
9997 list_move_tail(&piocb->list, &completions);
9998 }
9999 }
10000 spin_unlock_irq(&phba->hbalock);
10001
10002 /* Cancel all the IOCBs from the completions list */
10003 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10004 IOERR_SLI_ABORTED);
10005 }
10006
10007 /**
10008 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
10009 * @phba: pointer to lpfc hba data structure.
10010 *
10011 * This routine aborts all the IOCBs currently on the driver internal
10012 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
10013 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
10014 * list, removes each IOCB off the list, sets the status field to
10015 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
10016 * the IOCB.
10017 **/
10018 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
10019 {
10020 LIST_HEAD(completions);
10021
10022 spin_lock_irq(&phba->hbalock);
10023 list_splice_init(&phba->fabric_iocb_list, &completions);
10024 spin_unlock_irq(&phba->hbalock);
10025
10026 /* Cancel all the IOCBs from the completions list */
10027 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10028 IOERR_SLI_ABORTED);
10029 }
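
/*
 * All three abort helpers above share the same pattern: detach the victim
 * IOCBs onto a private list while holding hbalock, then complete them with
 * an error status after the lock is dropped. A generic sketch, where
 * some_pending_list and should_abort() are placeholders for this example:
 *
 *	LIST_HEAD(completions);
 *	struct lpfc_iocbq *piocb, *tmp_iocb;
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_for_each_entry_safe(piocb, tmp_iocb, &some_pending_list, list)
 *		if (should_abort(piocb))
 *			list_move_tail(&piocb->list, &completions);
 *	spin_unlock_irq(&phba->hbalock);
 *
 *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_ABORTED);
 *
 * Completing the IOCBs outside the lock matters because their callbacks may
 * take hbalock themselves.
 */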
10030
10031 /**
10032 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
10033 * @vport: pointer to lpfc vport data structure.
10034 *
10035 * This routine is invoked by the vport cleanup for deletions and the cleanup
10036 * for an ndlp on removal.
10037 **/
10038 void
10039 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
10040 {
10041 struct lpfc_hba *phba = vport->phba;
10042 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
10043 unsigned long iflag = 0;
10044
10045 spin_lock_irqsave(&phba->hbalock, iflag);
10046 spin_lock(&phba->sli4_hba.sgl_list_lock);
10047 list_for_each_entry_safe(sglq_entry, sglq_next,
10048 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
10049 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
10050 sglq_entry->ndlp = NULL;
10051 }
10052 spin_unlock(&phba->sli4_hba.sgl_list_lock);
10053 spin_unlock_irqrestore(&phba->hbalock, iflag);
10054 return;
10055 }
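
/*
 * Usage sketch (the exact call site is an assumption for this example):
 * vport delete or node cleanup on an SLI4 port would scrub the aborted-ELS
 * sgl list before the ndlps it references can be freed:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		lpfc_sli4_vport_delete_els_xri_aborted(vport);
 */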
10056
10057 /**
10058 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
10059 * @phba: pointer to lpfc hba data structure.
10060 * @axri: pointer to the els xri abort wcqe structure.
10061 *
10062 * This routine is invoked by the worker thread to process a SLI4 slow-path
10063 * ELS aborted xri.
10064 **/
10065 void
10066 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
10067 struct sli4_wcqe_xri_aborted *axri)
10068 {
10069 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
10070 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
10071 uint16_t lxri = 0;
10072
10073 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
10074 unsigned long iflag = 0;
10075 struct lpfc_nodelist *ndlp;
10076 struct lpfc_sli_ring *pring;
10077
10078 pring = lpfc_phba_elsring(phba);
10079
10080 spin_lock_irqsave(&phba->hbalock, iflag);
10081 spin_lock(&phba->sli4_hba.sgl_list_lock);
10082 list_for_each_entry_safe(sglq_entry, sglq_next,
10083 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
10084 if (sglq_entry->sli4_xritag == xri) {
10085 list_del(&sglq_entry->list);
10086 ndlp = sglq_entry->ndlp;
10087 sglq_entry->ndlp = NULL;
10088 list_add_tail(&sglq_entry->list,
10089 &phba->sli4_hba.lpfc_els_sgl_list);
10090 sglq_entry->state = SGL_FREED;
10091 spin_unlock(&phba->sli4_hba.sgl_list_lock);
10092 spin_unlock_irqrestore(&phba->hbalock, iflag);
10093 lpfc_set_rrq_active(phba, ndlp,
10094 sglq_entry->sli4_lxritag,
10095 rxid, 1);
10096
10097 /* Check if TXQ queue needs to be serviced */
10098 if (pring && !list_empty(&pring->txq))
10099 lpfc_worker_wake_up(phba);
10100 return;
10101 }
10102 }
10103 spin_unlock(&phba->sli4_hba.sgl_list_lock);
10104 lxri = lpfc_sli4_xri_inrange(phba, xri);
10105 if (lxri == NO_XRI) {
10106 spin_unlock_irqrestore(&phba->hbalock, iflag);
10107 return;
10108 }
10109 spin_lock(&phba->sli4_hba.sgl_list_lock);
10110 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
10111 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
10112 spin_unlock(&phba->sli4_hba.sgl_list_lock);
10113 spin_unlock_irqrestore(&phba->hbalock, iflag);
10114 return;
10115 }
10116 sglq_entry->state = SGL_XRI_ABORTED;
10117 spin_unlock(&phba->sli4_hba.sgl_list_lock);
10118 spin_unlock_irqrestore(&phba->hbalock, iflag);
10119 return;
10120 }
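
/*
 * Illustrative sketch only: the (lxri, rxid) pair handed to
 * lpfc_set_rrq_active() above keeps the aborted exchange ID out of
 * circulation for the RRQ window. Code that picks an sglq for a node is
 * expected to honor that roughly as follows (placement is an assumption
 * for this example):
 *
 *	if (lpfc_test_rrq_active(phba, ndlp, sglq_entry->sli4_lxritag))
 *		continue;
 *
 * i.e. skip that sglq and try another one while the RRQ window is open.
 */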
10121
10122 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
10123 * @vport: pointer to virtual port object.
10124 * @ndlp: nodelist pointer for the impacted node.
10125 *
10126 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
10127 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
10128 * the driver is required to send a LOGO to the remote node before it
10129 * attempts to recover its login to the remote node.
10130 */
10131 void
10132 lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
10133 struct lpfc_nodelist *ndlp)
10134 {
10135 struct Scsi_Host *shost;
10136 struct lpfc_hba *phba;
10137 unsigned long flags = 0;
10138
10139 shost = lpfc_shost_from_vport(vport);
10140 phba = vport->phba;
10141 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
10142 lpfc_printf_log(phba, KERN_INFO,
10143 LOG_SLI, "3093 No rport recovery needed. "
10144 "rport in state 0x%x\n", ndlp->nlp_state);
10145 return;
10146 }
10147 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10148 "3094 Start rport recovery on shost id 0x%x "
10149 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
10150 "flags 0x%x\n",
10151 shost->host_no, ndlp->nlp_DID,
10152 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
10153 ndlp->nlp_flag);
10154 /*
10155 * The rport is not responding. Remove the FCP-2 flag to prevent
10156 * an ADISC in the follow-up recovery code.
10157 */
10158 spin_lock_irqsave(shost->host_lock, flags);
10159 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
10160 ndlp->nlp_flag |= NLP_ISSUE_LOGO;
10161 spin_unlock_irqrestore(shost->host_lock, flags);
10162 lpfc_unreg_rpi(vport, ndlp);
10163 }
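
/*
 * Illustrative sketch only: NLP_ISSUE_LOGO set above is a deferred request.
 * Once the RPI unregistration completes, the recovery path is expected to
 * act on it roughly as follows (placement is an assumption for this
 * example):
 *
 *	if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
 *		spin_lock_irq(shost->host_lock);
 *		ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
 *		spin_unlock_irq(shost->host_lock);
 *		lpfc_issue_els_logo(vport, ndlp, 0);
 *	}
 *
 * which sends the LOGO required before the driver attempts to recover its
 * login to the remote node.
 */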
10164
10165