1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23
24 #include <linux/ctype.h>
25 #include <linux/delay.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/aer.h>
30 #include <linux/gfp.h>
31 #include <linux/kernel.h>
32
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <scsi/fc/fc_fs.h>
39
40 #include "lpfc_hw4.h"
41 #include "lpfc_hw.h"
42 #include "lpfc_sli.h"
43 #include "lpfc_sli4.h"
44 #include "lpfc_nl.h"
45 #include "lpfc_disc.h"
46 #include "lpfc.h"
47 #include "lpfc_scsi.h"
48 #include "lpfc_nvme.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_version.h"
51 #include "lpfc_compat.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_vport.h"
54 #include "lpfc_attr.h"
55
56 #define LPFC_DEF_DEVLOSS_TMO 30
57 #define LPFC_MIN_DEVLOSS_TMO 1
58 #define LPFC_MAX_DEVLOSS_TMO 255
59
60 #define LPFC_DEF_MRQ_POST 512
61 #define LPFC_MIN_MRQ_POST 512
62 #define LPFC_MAX_MRQ_POST 2048
63
64 /*
65 * Write key size should be a multiple of 4. If the write key is changed,
66 * make sure that the library write key is also changed.
67 */
68 #define LPFC_REG_WRITE_KEY_SIZE 4
69 #define LPFC_REG_WRITE_KEY "EMLX"
70
71 const char *const trunk_errmsg[] = { /* map errcode */
72 "", /* There is no such error code at index 0*/
73 "link negotiated speed does not match existing"
74 " trunk - link was \"low\" speed",
75 "link negotiated speed does not match"
76 " existing trunk - link was \"middle\" speed",
77 "link negotiated speed does not match existing"
78 " trunk - link was \"high\" speed",
79 "Attached to non-trunking port - F_Port",
80 "Attached to non-trunking port - N_Port",
81 "FLOGI response timeout",
82 "non-FLOGI frame received",
83 "Invalid FLOGI response",
84 "Trunking initialization protocol",
85 "Trunk peer device mismatch",
86 };
87
88 /**
89 * lpfc_jedec_to_ascii - Hex to ascii converter according to JEDEC rules
90 * @incr: integer to convert.
91 * @hdw: ascii string holding converted integer plus a string terminator.
92 *
93 * Description:
94 * JEDEC Joint Electron Device Engineering Council.
95 * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
96 * character string. The string is then terminated with a NULL in byte 9.
97 * Hex 0-9 becomes ascii '0' to '9'.
98 * Hex a-f becomes ascii 'a' to 'f' lower case.
99 *
100 * Notes:
101 * Coded for 32 bit integers only.
102 **/
103 static void
104 lpfc_jedec_to_ascii(int incr, char hdw[])
105 {
106 int i, j;
107 for (i = 0; i < 8; i++) {
108 j = (incr & 0xf);
109 if (j <= 9)
110 hdw[7 - i] = 0x30 + j;
111 else
112 hdw[7 - i] = 0x61 + j - 10;
113 incr = (incr >> 4);
114 }
115 hdw[8] = 0;
116 return;
117 }
118
119 /**
120 * lpfc_drvr_version_show - Return the Emulex driver string with version number
121 * @dev: class unused variable.
122 * @attr: device attribute, not used.
123 * @buf: on return contains the module description text.
124 *
125 * Returns: size of formatted string.
126 **/
127 static ssize_t
128 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
129 char *buf)
130 {
131 return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
132 }
133
134 /**
135 * lpfc_enable_fip_show - Return the fip mode of the HBA
136 * @dev: class unused variable.
137 * @attr: device attribute, not used.
138 * @buf: on return contains the module description text.
139 *
140 * Returns: size of formatted string.
141 **/
142 static ssize_t
143 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
144 char *buf)
145 {
146 struct Scsi_Host *shost = class_to_shost(dev);
147 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
148 struct lpfc_hba *phba = vport->phba;
149
150 if (phba->hba_flag & HBA_FIP_SUPPORT)
151 return scnprintf(buf, PAGE_SIZE, "1\n");
152 else
153 return scnprintf(buf, PAGE_SIZE, "0\n");
154 }
155
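/**
 * lpfc_nvme_info_show - Return NVME Transport attributes of the HBA
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the NVME initiator or target info and statistics.
 *
 * Returns: size of formatted string.
 **/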
156 static ssize_t
157 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
158 char *buf)
159 {
160 struct Scsi_Host *shost = class_to_shost(dev);
161 struct lpfc_vport *vport = shost_priv(shost);
162 struct lpfc_hba *phba = vport->phba;
163 struct lpfc_nvmet_tgtport *tgtp;
164 struct nvme_fc_local_port *localport;
165 struct lpfc_nvme_lport *lport;
166 struct lpfc_nvme_rport *rport;
167 struct lpfc_nodelist *ndlp;
168 struct nvme_fc_remote_port *nrport;
169 struct lpfc_fc4_ctrl_stat *cstat;
170 uint64_t data1, data2, data3;
171 uint64_t totin, totout, tot;
172 char *statep;
173 int i;
174 int len = 0;
175 char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};
176
177 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
178 len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
179 return len;
180 }
181 if (phba->nvmet_support) {
182 if (!phba->targetport) {
183 len = scnprintf(buf, PAGE_SIZE,
184 "NVME Target: x%llx is not allocated\n",
185 wwn_to_u64(vport->fc_portname.u.wwn));
186 return len;
187 }
188 /* Port state is only one of two values for now. */
189 if (phba->targetport->port_id)
190 statep = "REGISTERED";
191 else
192 statep = "INIT";
193 scnprintf(tmp, sizeof(tmp),
194 "NVME Target Enabled State %s\n",
195 statep);
196 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
197 goto buffer_done;
198
199 scnprintf(tmp, sizeof(tmp),
200 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
201 "NVME Target: lpfc",
202 phba->brd_no,
203 wwn_to_u64(vport->fc_portname.u.wwn),
204 wwn_to_u64(vport->fc_nodename.u.wwn),
205 phba->targetport->port_id);
206 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
207 goto buffer_done;
208
209 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
210 >= PAGE_SIZE)
211 goto buffer_done;
212
213 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
214 scnprintf(tmp, sizeof(tmp),
215 "LS: Rcv %08x Drop %08x Abort %08x\n",
216 atomic_read(&tgtp->rcv_ls_req_in),
217 atomic_read(&tgtp->rcv_ls_req_drop),
218 atomic_read(&tgtp->xmt_ls_abort));
219 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
220 goto buffer_done;
221
222 if (atomic_read(&tgtp->rcv_ls_req_in) !=
223 atomic_read(&tgtp->rcv_ls_req_out)) {
224 scnprintf(tmp, sizeof(tmp),
225 "Rcv LS: in %08x != out %08x\n",
226 atomic_read(&tgtp->rcv_ls_req_in),
227 atomic_read(&tgtp->rcv_ls_req_out));
228 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
229 goto buffer_done;
230 }
231
232 scnprintf(tmp, sizeof(tmp),
233 "LS: Xmt %08x Drop %08x Cmpl %08x\n",
234 atomic_read(&tgtp->xmt_ls_rsp),
235 atomic_read(&tgtp->xmt_ls_drop),
236 atomic_read(&tgtp->xmt_ls_rsp_cmpl));
237 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
238 goto buffer_done;
239
240 scnprintf(tmp, sizeof(tmp),
241 "LS: RSP Abort %08x xb %08x Err %08x\n",
242 atomic_read(&tgtp->xmt_ls_rsp_aborted),
243 atomic_read(&tgtp->xmt_ls_rsp_xb_set),
244 atomic_read(&tgtp->xmt_ls_rsp_error));
245 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
246 goto buffer_done;
247
248 scnprintf(tmp, sizeof(tmp),
249 "FCP: Rcv %08x Defer %08x Release %08x "
250 "Drop %08x\n",
251 atomic_read(&tgtp->rcv_fcp_cmd_in),
252 atomic_read(&tgtp->rcv_fcp_cmd_defer),
253 atomic_read(&tgtp->xmt_fcp_release),
254 atomic_read(&tgtp->rcv_fcp_cmd_drop));
255 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
256 goto buffer_done;
257
258 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
259 atomic_read(&tgtp->rcv_fcp_cmd_out)) {
260 scnprintf(tmp, sizeof(tmp),
261 "Rcv FCP: in %08x != out %08x\n",
262 atomic_read(&tgtp->rcv_fcp_cmd_in),
263 atomic_read(&tgtp->rcv_fcp_cmd_out));
264 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
265 goto buffer_done;
266 }
267
268 scnprintf(tmp, sizeof(tmp),
269 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
270 "drop %08x\n",
271 atomic_read(&tgtp->xmt_fcp_read),
272 atomic_read(&tgtp->xmt_fcp_read_rsp),
273 atomic_read(&tgtp->xmt_fcp_write),
274 atomic_read(&tgtp->xmt_fcp_rsp),
275 atomic_read(&tgtp->xmt_fcp_drop));
276 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
277 goto buffer_done;
278
279 scnprintf(tmp, sizeof(tmp),
280 "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
281 atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
282 atomic_read(&tgtp->xmt_fcp_rsp_error),
283 atomic_read(&tgtp->xmt_fcp_rsp_drop));
284 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
285 goto buffer_done;
286
287 scnprintf(tmp, sizeof(tmp),
288 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
289 atomic_read(&tgtp->xmt_fcp_rsp_aborted),
290 atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
291 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
292 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
293 goto buffer_done;
294
295 scnprintf(tmp, sizeof(tmp),
296 "ABORT: Xmt %08x Cmpl %08x\n",
297 atomic_read(&tgtp->xmt_fcp_abort),
298 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
299 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
300 goto buffer_done;
301
302 scnprintf(tmp, sizeof(tmp),
303 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
304 atomic_read(&tgtp->xmt_abort_sol),
305 atomic_read(&tgtp->xmt_abort_unsol),
306 atomic_read(&tgtp->xmt_abort_rsp),
307 atomic_read(&tgtp->xmt_abort_rsp_error));
308 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
309 goto buffer_done;
310
311 scnprintf(tmp, sizeof(tmp),
312 "DELAY: ctx %08x fod %08x wqfull %08x\n",
313 atomic_read(&tgtp->defer_ctx),
314 atomic_read(&tgtp->defer_fod),
315 atomic_read(&tgtp->defer_wqfull));
316 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
317 goto buffer_done;
318
319 /* Calculate outstanding IOs */
320 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
321 tot += atomic_read(&tgtp->xmt_fcp_release);
322 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
323
324 scnprintf(tmp, sizeof(tmp),
325 "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
326 "CTX Outstanding %08llx\n\n",
327 phba->sli4_hba.nvmet_xri_cnt,
328 phba->sli4_hba.nvmet_io_wait_cnt,
329 phba->sli4_hba.nvmet_io_wait_total,
330 tot);
331 strlcat(buf, tmp, PAGE_SIZE);
332 goto buffer_done;
333 }
334
335 localport = vport->localport;
336 if (!localport) {
337 len = scnprintf(buf, PAGE_SIZE,
338 "NVME Initiator x%llx is not allocated\n",
339 wwn_to_u64(vport->fc_portname.u.wwn));
340 return len;
341 }
342 lport = (struct lpfc_nvme_lport *)localport->private;
343 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
344 goto buffer_done;
345
346 scnprintf(tmp, sizeof(tmp),
347 "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
348 phba->brd_no,
349 phba->sli4_hba.max_cfg_param.max_xri,
350 phba->sli4_hba.io_xri_max,
351 lpfc_sli4_get_els_iocb_cnt(phba));
352 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
353 goto buffer_done;
354
355 /* Port state is only one of two values for now. */
356 if (localport->port_id)
357 statep = "ONLINE";
358 else
359 statep = "UNKNOWN ";
360
361 scnprintf(tmp, sizeof(tmp),
362 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
363 "NVME LPORT lpfc",
364 phba->brd_no,
365 wwn_to_u64(vport->fc_portname.u.wwn),
366 wwn_to_u64(vport->fc_nodename.u.wwn),
367 localport->port_id, statep);
368 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
369 goto buffer_done;
370
371 spin_lock_irq(shost->host_lock);
372
373 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
374 nrport = NULL;
375 spin_lock(&vport->phba->hbalock);
376 rport = lpfc_ndlp_get_nrport(ndlp);
377 if (rport)
378 nrport = rport->remoteport;
379 spin_unlock(&vport->phba->hbalock);
380 if (!nrport)
381 continue;
382
383 /* Port state is only one of two values for now. */
384 switch (nrport->port_state) {
385 case FC_OBJSTATE_ONLINE:
386 statep = "ONLINE";
387 break;
388 case FC_OBJSTATE_UNKNOWN:
389 statep = "UNKNOWN ";
390 break;
391 default:
392 statep = "UNSUPPORTED";
393 break;
394 }
395
396 /* Tab in to show lport ownership. */
397 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
398 goto unlock_buf_done;
399 if (phba->brd_no >= 10) {
400 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
401 goto unlock_buf_done;
402 }
403
404 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
405 nrport->port_name);
406 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
407 goto unlock_buf_done;
408
409 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
410 nrport->node_name);
411 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
412 goto unlock_buf_done;
413
414 scnprintf(tmp, sizeof(tmp), "DID x%06x ",
415 nrport->port_id);
416 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
417 goto unlock_buf_done;
418
419 /* An NVME rport can have multiple roles. */
420 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
421 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
422 goto unlock_buf_done;
423 }
424 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
425 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
426 goto unlock_buf_done;
427 }
428 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
429 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
430 goto unlock_buf_done;
431 }
432 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
433 FC_PORT_ROLE_NVME_TARGET |
434 FC_PORT_ROLE_NVME_DISCOVERY)) {
435 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
436 nrport->port_role);
437 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
438 goto unlock_buf_done;
439 }
440
441 scnprintf(tmp, sizeof(tmp), "%s\n", statep);
442 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
443 goto unlock_buf_done;
444 }
445 spin_unlock_irq(shost->host_lock);
446
447 if (!lport)
448 goto buffer_done;
449
450 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
451 goto buffer_done;
452
453 scnprintf(tmp, sizeof(tmp),
454 "LS: Xmt %010x Cmpl %010x Abort %08x\n",
455 atomic_read(&lport->fc4NvmeLsRequests),
456 atomic_read(&lport->fc4NvmeLsCmpls),
457 atomic_read(&lport->xmt_ls_abort));
458 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
459 goto buffer_done;
460
461 scnprintf(tmp, sizeof(tmp),
462 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
463 atomic_read(&lport->xmt_ls_err),
464 atomic_read(&lport->cmpl_ls_xb),
465 atomic_read(&lport->cmpl_ls_err));
466 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
467 goto buffer_done;
468
469 totin = 0;
470 totout = 0;
471 for (i = 0; i < phba->cfg_hdw_queue; i++) {
472 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
473 tot = cstat->io_cmpls;
474 totin += tot;
475 data1 = cstat->input_requests;
476 data2 = cstat->output_requests;
477 data3 = cstat->control_requests;
478 totout += (data1 + data2 + data3);
479 }
480 scnprintf(tmp, sizeof(tmp),
481 "Total FCP Cmpl %016llx Issue %016llx "
482 "OutIO %016llx\n",
483 totin, totout, totout - totin);
484 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
485 goto buffer_done;
486
487 scnprintf(tmp, sizeof(tmp),
488 "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
489 "wqerr %08x err %08x\n",
490 atomic_read(&lport->xmt_fcp_abort),
491 atomic_read(&lport->xmt_fcp_noxri),
492 atomic_read(&lport->xmt_fcp_bad_ndlp),
493 atomic_read(&lport->xmt_fcp_qdepth),
494 atomic_read(&lport->xmt_fcp_err),
495 atomic_read(&lport->xmt_fcp_wqerr));
496 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
497 goto buffer_done;
498
499 scnprintf(tmp, sizeof(tmp),
500 "FCP CMPL: xb %08x Err %08x\n",
501 atomic_read(&lport->cmpl_fcp_xb),
502 atomic_read(&lport->cmpl_fcp_err));
503 strlcat(buf, tmp, PAGE_SIZE);
504
505 /* host_lock is already unlocked. */
506 goto buffer_done;
507
508 unlock_buf_done:
509 spin_unlock_irq(shost->host_lock);
510
511 buffer_done:
512 len = strnlen(buf, PAGE_SIZE);
513
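	/* If the output filled the page, overwrite the tail of the buffer
	 * with a truncation marker so the reader can tell data was cut off.
	 */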
514 if (unlikely(len >= (PAGE_SIZE - 1))) {
515 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
516 "6314 Catching potential buffer "
517 "overflow > PAGE_SIZE = %lu bytes\n",
518 PAGE_SIZE);
519 strlcpy(buf + PAGE_SIZE - 1 -
520 strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
521 LPFC_NVME_INFO_MORE_STR,
522 strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
523 + 1);
524 }
525
526 return len;
527 }
528
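/**
 * lpfc_scsi_stat_show - Return per hardware queue SCSI statistics of the HBA
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the per-HDWQ read/write/control and completion
 *       counts in hex.
 *
 * Returns: size of formatted string, or zero if FCP is not enabled or the
 * adapter is not SLI4.
 **/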
529 static ssize_t
530 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
531 char *buf)
532 {
533 struct Scsi_Host *shost = class_to_shost(dev);
534 struct lpfc_vport *vport = shost_priv(shost);
535 struct lpfc_hba *phba = vport->phba;
536 int len;
537 struct lpfc_fc4_ctrl_stat *cstat;
538 u64 data1, data2, data3;
539 u64 tot, totin, totout;
540 int i;
541 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
542
543 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
544 (phba->sli_rev != LPFC_SLI_REV4))
545 return 0;
546
547 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
548
549 totin = 0;
550 totout = 0;
551 for (i = 0; i < phba->cfg_hdw_queue; i++) {
552 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
553 tot = cstat->io_cmpls;
554 totin += tot;
555 data1 = cstat->input_requests;
556 data2 = cstat->output_requests;
557 data3 = cstat->control_requests;
558 totout += (data1 + data2 + data3);
559
560 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
561 "IO %016llx ", i, data1, data2, data3);
562 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
563 goto buffer_done;
564
565 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
566 tot, ((data1 + data2 + data3) - tot));
567 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
568 goto buffer_done;
569 }
570 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
571 "OutIO %016llx\n", totin, totout, totout - totin);
572 strlcat(buf, tmp, PAGE_SIZE);
573
574 buffer_done:
575 len = strnlen(buf, PAGE_SIZE);
576
577 return len;
578 }
579
580 static ssize_t
581 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
582 char *buf)
583 {
584 struct Scsi_Host *shost = class_to_shost(dev);
585 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
586 struct lpfc_hba *phba = vport->phba;
587
588 if (phba->cfg_enable_bg) {
589 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
590 return scnprintf(buf, PAGE_SIZE,
591 "BlockGuard Enabled\n");
592 else
593 return scnprintf(buf, PAGE_SIZE,
594 "BlockGuard Not Supported\n");
595 } else
596 return scnprintf(buf, PAGE_SIZE,
597 "BlockGuard Disabled\n");
598 }
599
600 static ssize_t
601 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
602 char *buf)
603 {
604 struct Scsi_Host *shost = class_to_shost(dev);
605 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
606 struct lpfc_hba *phba = vport->phba;
607
608 return scnprintf(buf, PAGE_SIZE, "%llu\n",
609 (unsigned long long)phba->bg_guard_err_cnt);
610 }
611
612 static ssize_t
613 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
614 char *buf)
615 {
616 struct Scsi_Host *shost = class_to_shost(dev);
617 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
618 struct lpfc_hba *phba = vport->phba;
619
620 return scnprintf(buf, PAGE_SIZE, "%llu\n",
621 (unsigned long long)phba->bg_apptag_err_cnt);
622 }
623
624 static ssize_t
625 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
626 char *buf)
627 {
628 struct Scsi_Host *shost = class_to_shost(dev);
629 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
630 struct lpfc_hba *phba = vport->phba;
631
632 return scnprintf(buf, PAGE_SIZE, "%llu\n",
633 (unsigned long long)phba->bg_reftag_err_cnt);
634 }
635
636 /**
637 * lpfc_info_show - Return some pci info about the host in ascii
638 * @dev: class converted to a Scsi_host structure.
639 * @attr: device attribute, not used.
640 * @buf: on return contains the formatted text from lpfc_info().
641 *
642 * Returns: size of formatted string.
643 **/
644 static ssize_t
645 lpfc_info_show(struct device *dev, struct device_attribute *attr,
646 char *buf)
647 {
648 struct Scsi_Host *host = class_to_shost(dev);
649
650 return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
651 }
652
653 /**
654 * lpfc_serialnum_show - Return the hba serial number in ascii
655 * @dev: class converted to a Scsi_host structure.
656 * @attr: device attribute, not used.
657 * @buf: on return contains the formatted text serial number.
658 *
659 * Returns: size of formatted string.
660 **/
661 static ssize_t
662 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
663 char *buf)
664 {
665 struct Scsi_Host *shost = class_to_shost(dev);
666 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
667 struct lpfc_hba *phba = vport->phba;
668
669 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
670 }
671
672 /**
673 * lpfc_temp_sensor_show - Return the temperature sensor level
674 * @dev: class converted to a Scsi_host structure.
675 * @attr: device attribute, not used.
676 * @buf: on return contains the formatted support level.
677 *
678 * Description:
679 * Returns a number indicating the temperature sensor level currently
680 * supported, zero or one in ascii.
681 *
682 * Returns: size of formatted string.
683 **/
684 static ssize_t
685 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
686 char *buf)
687 {
688 struct Scsi_Host *shost = class_to_shost(dev);
689 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
690 struct lpfc_hba *phba = vport->phba;
691 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
692 }
693
694 /**
695 * lpfc_modeldesc_show - Return the model description of the hba
696 * @dev: class converted to a Scsi_host structure.
697 * @attr: device attribute, not used.
698 * @buf: on return contains the scsi vpd model description.
699 *
700 * Returns: size of formatted string.
701 **/
702 static ssize_t
703 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
704 char *buf)
705 {
706 struct Scsi_Host *shost = class_to_shost(dev);
707 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
708 struct lpfc_hba *phba = vport->phba;
709
710 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
711 }
712
713 /**
714 * lpfc_modelname_show - Return the model name of the hba
715 * @dev: class converted to a Scsi_host structure.
716 * @attr: device attribute, not used.
717 * @buf: on return contains the scsi vpd model name.
718 *
719 * Returns: size of formatted string.
720 **/
721 static ssize_t
722 lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
723 char *buf)
724 {
725 struct Scsi_Host *shost = class_to_shost(dev);
726 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
727 struct lpfc_hba *phba = vport->phba;
728
729 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
730 }
731
732 /**
733 * lpfc_programtype_show - Return the program type of the hba
734 * @dev: class converted to a Scsi_host structure.
735 * @attr: device attribute, not used.
736 * @buf: on return contains the scsi vpd program type.
737 *
738 * Returns: size of formatted string.
739 **/
740 static ssize_t
741 lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
742 char *buf)
743 {
744 struct Scsi_Host *shost = class_to_shost(dev);
745 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
746 struct lpfc_hba *phba = vport->phba;
747
748 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
749 }
750
751 /**
752 * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
753 * @dev: class converted to a Scsi_host structure.
754 * @attr: device attribute, not used.
755 * @buf: on return contains the Menlo Maintenance sli flag.
756 *
757 * Returns: size of formatted string.
758 **/
759 static ssize_t
760 lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
761 {
762 struct Scsi_Host *shost = class_to_shost(dev);
763 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
764 struct lpfc_hba *phba = vport->phba;
765
766 return scnprintf(buf, PAGE_SIZE, "%d\n",
767 (phba->sli.sli_flag & LPFC_MENLO_MAINT));
768 }
769
770 /**
771 * lpfc_vportnum_show - Return the port number in ascii of the hba
772 * @dev: class converted to a Scsi_host structure.
773 * @attr: device attribute, not used.
774 * @buf: on return contains scsi vpd program type.
775 *
776 * Returns: size of formatted string.
777 **/
778 static ssize_t
779 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
780 char *buf)
781 {
782 struct Scsi_Host *shost = class_to_shost(dev);
783 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
784 struct lpfc_hba *phba = vport->phba;
785
786 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
787 }
788
789 /**
790 * lpfc_fwrev_show - Return the firmware rev running in the hba
791 * @dev: class converted to a Scsi_host structure.
792 * @attr: device attribute, not used.
793 * @buf: on return contains the scsi vpd program type.
794 *
795 * Returns: size of formatted string.
796 **/
797 static ssize_t
798 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
799 char *buf)
800 {
801 struct Scsi_Host *shost = class_to_shost(dev);
802 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
803 struct lpfc_hba *phba = vport->phba;
804 uint32_t if_type;
805 uint8_t sli_family;
806 char fwrev[FW_REV_STR_SIZE];
807 int len;
808
809 lpfc_decode_firmware_rev(phba, fwrev, 1);
810 if_type = phba->sli4_hba.pc_sli4_params.if_type;
811 sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
812
813 if (phba->sli_rev < LPFC_SLI_REV4)
814 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
815 fwrev, phba->sli_rev);
816 else
817 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
818 fwrev, phba->sli_rev, if_type, sli_family);
819
820 return len;
821 }
822
823 /**
824 * lpfc_hdw_show - Return the jedec information about the hba
825 * @dev: class converted to a Scsi_host structure.
826 * @attr: device attribute, not used.
827 * @buf: on return contains the scsi vpd program type.
828 *
829 * Returns: size of formatted string.
830 **/
831 static ssize_t
832 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
833 {
834 char hdw[9];
835 struct Scsi_Host *shost = class_to_shost(dev);
836 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
837 struct lpfc_hba *phba = vport->phba;
838 lpfc_vpd_t *vp = &phba->vpd;
839
840 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
841 return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
842 vp->rev.smRev, vp->rev.smFwRev);
843 }
844
845 /**
846 * lpfc_option_rom_version_show - Return the adapter ROM FCode version
847 * @dev: class converted to a Scsi_host structure.
848 * @attr: device attribute, not used.
849 * @buf: on return contains the ROM and FCode ascii strings.
850 *
851 * Returns: size of formatted string.
852 **/
853 static ssize_t
854 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
855 char *buf)
856 {
857 struct Scsi_Host *shost = class_to_shost(dev);
858 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
859 struct lpfc_hba *phba = vport->phba;
860 char fwrev[FW_REV_STR_SIZE];
861
862 if (phba->sli_rev < LPFC_SLI_REV4)
863 return scnprintf(buf, PAGE_SIZE, "%s\n",
864 phba->OptionROMVersion);
865
866 lpfc_decode_firmware_rev(phba, fwrev, 1);
867 return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
868 }
869
870 /**
871 * lpfc_state_show - Return the link state of the port
872 * @dev: class converted to a Scsi_host structure.
873 * @attr: device attribute, not used.
874 * @buf: on return contains text describing the state of the link.
875 *
876 * Notes:
877 * The switch statement has no default so zero will be returned.
878 *
879 * Returns: size of formatted string.
880 **/
881 static ssize_t
882 lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
883 char *buf)
884 {
885 struct Scsi_Host *shost = class_to_shost(dev);
886 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
887 struct lpfc_hba *phba = vport->phba;
888 int len = 0;
889
890 switch (phba->link_state) {
891 case LPFC_LINK_UNKNOWN:
892 case LPFC_WARM_START:
893 case LPFC_INIT_START:
894 case LPFC_INIT_MBX_CMDS:
895 case LPFC_LINK_DOWN:
896 case LPFC_HBA_ERROR:
897 if (phba->hba_flag & LINK_DISABLED)
898 len += scnprintf(buf + len, PAGE_SIZE-len,
899 "Link Down - User disabled\n");
900 else
901 len += scnprintf(buf + len, PAGE_SIZE-len,
902 "Link Down\n");
903 break;
904 case LPFC_LINK_UP:
905 case LPFC_CLEAR_LA:
906 case LPFC_HBA_READY:
907 len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
908
909 switch (vport->port_state) {
910 case LPFC_LOCAL_CFG_LINK:
911 len += scnprintf(buf + len, PAGE_SIZE-len,
912 "Configuring Link\n");
913 break;
914 case LPFC_FDISC:
915 case LPFC_FLOGI:
916 case LPFC_FABRIC_CFG_LINK:
917 case LPFC_NS_REG:
918 case LPFC_NS_QRY:
919 case LPFC_BUILD_DISC_LIST:
920 case LPFC_DISC_AUTH:
921 len += scnprintf(buf + len, PAGE_SIZE - len,
922 "Discovery\n");
923 break;
924 case LPFC_VPORT_READY:
925 len += scnprintf(buf + len, PAGE_SIZE - len,
926 "Ready\n");
927 break;
928
929 case LPFC_VPORT_FAILED:
930 len += scnprintf(buf + len, PAGE_SIZE - len,
931 "Failed\n");
932 break;
933
934 case LPFC_VPORT_UNKNOWN:
935 len += scnprintf(buf + len, PAGE_SIZE - len,
936 "Unknown\n");
937 break;
938 }
939 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
940 len += scnprintf(buf + len, PAGE_SIZE-len,
941 " Menlo Maint Mode\n");
942 else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
943 if (vport->fc_flag & FC_PUBLIC_LOOP)
944 len += scnprintf(buf + len, PAGE_SIZE-len,
945 " Public Loop\n");
946 else
947 len += scnprintf(buf + len, PAGE_SIZE-len,
948 " Private Loop\n");
949 } else {
950 if (vport->fc_flag & FC_FABRIC)
951 len += scnprintf(buf + len, PAGE_SIZE-len,
952 " Fabric\n");
953 else
954 len += scnprintf(buf + len, PAGE_SIZE-len,
955 " Point-2-Point\n");
956 }
957 }
958
959 if ((phba->sli_rev == LPFC_SLI_REV4) &&
960 ((bf_get(lpfc_sli_intf_if_type,
961 &phba->sli4_hba.sli_intf) ==
962 LPFC_SLI_INTF_IF_TYPE_6))) {
963 struct lpfc_trunk_link link = phba->trunk_link;
964
965 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
966 len += scnprintf(buf + len, PAGE_SIZE - len,
967 "Trunk port 0: Link %s %s\n",
968 (link.link0.state == LPFC_LINK_UP) ?
969 "Up" : "Down. ",
970 trunk_errmsg[link.link0.fault]);
971
972 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
973 len += scnprintf(buf + len, PAGE_SIZE - len,
974 "Trunk port 1: Link %s %s\n",
975 (link.link1.state == LPFC_LINK_UP) ?
976 "Up" : "Down. ",
977 trunk_errmsg[link.link1.fault]);
978
979 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
980 len += scnprintf(buf + len, PAGE_SIZE - len,
981 "Trunk port 2: Link %s %s\n",
982 (link.link2.state == LPFC_LINK_UP) ?
983 "Up" : "Down. ",
984 trunk_errmsg[link.link2.fault]);
985
986 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
987 len += scnprintf(buf + len, PAGE_SIZE - len,
988 "Trunk port 3: Link %s %s\n",
989 (link.link3.state == LPFC_LINK_UP) ?
990 "Up" : "Down. ",
991 trunk_errmsg[link.link3.fault]);
992
993 }
994
995 return len;
996 }
997
998 /**
999 * lpfc_sli4_protocol_show - Return the fip mode of the HBA
1000 * @dev: class unused variable.
1001 * @attr: device attribute, not used.
1002 * @buf: on return contains the module description text.
1003 *
1004 * Returns: size of formatted string.
1005 **/
1006 static ssize_t
1007 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
1008 char *buf)
1009 {
1010 struct Scsi_Host *shost = class_to_shost(dev);
1011 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1012 struct lpfc_hba *phba = vport->phba;
1013
1014 if (phba->sli_rev < LPFC_SLI_REV4)
1015 return scnprintf(buf, PAGE_SIZE, "fc\n");
1016
1017 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
1018 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
1019 return scnprintf(buf, PAGE_SIZE, "fcoe\n");
1020 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
1021 return scnprintf(buf, PAGE_SIZE, "fc\n");
1022 }
1023 return scnprintf(buf, PAGE_SIZE, "unknown\n");
1024 }
1025
1026 /**
1027 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
1028 * (OAS) is supported.
1029 * @dev: class unused variable.
1030 * @attr: device attribute, not used.
1031 * @buf: on return contains the module description text.
1032 *
1033 * Returns: size of formatted string.
1034 **/
1035 static ssize_t
1036 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
1037 char *buf)
1038 {
1039 struct Scsi_Host *shost = class_to_shost(dev);
1040 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
1041 struct lpfc_hba *phba = vport->phba;
1042
1043 return scnprintf(buf, PAGE_SIZE, "%d\n",
1044 phba->sli4_hba.pc_sli4_params.oas_supported);
1045 }
1046
1047 /**
1048 * lpfc_link_state_store - Transition the link_state on an HBA port
1049 * @dev: class device that is converted into a Scsi_host.
1050 * @attr: device attribute, not used.
1051 * @buf: containing the string "up" or "down".
1052 * @count: not used.
1053 *
1054 * Returns:
1055 * -EINVAL if the buffer is not "up" or "down"
1056 * return from link state change function if non-zero
1057 * length of the buf on success
1058 **/
1059 static ssize_t
1060 lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
1061 const char *buf, size_t count)
1062 {
1063 struct Scsi_Host *shost = class_to_shost(dev);
1064 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1065 struct lpfc_hba *phba = vport->phba;
1066
1067 int status = -EINVAL;
1068
1069 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
1070 (phba->link_state == LPFC_LINK_DOWN))
1071 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1072 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
1073 (phba->link_state >= LPFC_LINK_UP))
1074 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
1075
1076 if (status == 0)
1077 return strlen(buf);
1078 else
1079 return status;
1080 }
1081
1082 /**
1083 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped ports
1084 * @dev: class device that is converted into a Scsi_host.
1085 * @attr: device attribute, not used.
1086 * @buf: on return contains the sum of fc mapped and unmapped.
1087 *
1088 * Description:
1089 * Returns the ascii text number of the sum of the fc mapped and unmapped
1090 * port counts.
1091 *
1092 * Returns: size of formatted string.
1093 **/
1094 static ssize_t
1095 lpfc_num_discovered_ports_show(struct device *dev,
1096 struct device_attribute *attr, char *buf)
1097 {
1098 struct Scsi_Host *shost = class_to_shost(dev);
1099 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1100
1101 return scnprintf(buf, PAGE_SIZE, "%d\n",
1102 vport->fc_map_cnt + vport->fc_unmap_cnt);
1103 }
1104
1105 /**
1106 * lpfc_issue_lip - Misnomer, name carried over from long ago
1107 * @shost: Scsi_Host pointer.
1108 *
1109 * Description:
1110 * Bring the link down gracefully then re-init the link. The firmware will
1111 * re-init the Fibre Channel interface as required. Does not issue a LIP.
1112 *
1113 * Returns:
1114 * -EPERM port offline or management commands are being blocked
1115 * -ENOMEM cannot allocate memory for the mailbox command
1116 * -EIO error sending the mailbox command
1117 * zero for success
1118 **/
1119 static int
1120 lpfc_issue_lip(struct Scsi_Host *shost)
1121 {
1122 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1123 struct lpfc_hba *phba = vport->phba;
1124 LPFC_MBOXQ_t *pmboxq;
1125 int mbxstatus = MBXERR_ERROR;
1126
1127 /*
1128 * If the link is offline, disabled or BLOCK_MGMT_IO
1129 * it doesn't make any sense to allow issue_lip
1130 */
1131 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1132 (phba->hba_flag & LINK_DISABLED) ||
1133 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
1134 return -EPERM;
1135
1136 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1137
1138 if (!pmboxq)
1139 return -ENOMEM;
1140
1141 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1142 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1143 pmboxq->u.mb.mbxOwner = OWN_HOST;
1144
1145 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
1146
1147 if ((mbxstatus == MBX_SUCCESS) &&
1148 (pmboxq->u.mb.mbxStatus == 0 ||
1149 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
1150 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1151 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
1152 phba->cfg_link_speed);
1153 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1154 phba->fc_ratov * 2);
1155 if ((mbxstatus == MBX_SUCCESS) &&
1156 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1157 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1158 "2859 SLI authentication is required "
1159 "for INIT_LINK but has not done yet\n");
1160 }
1161
1162 lpfc_set_loopback_flag(phba);
1163 if (mbxstatus != MBX_TIMEOUT)
1164 mempool_free(pmboxq, phba->mbox_mem_pool);
1165
1166 if (mbxstatus == MBXERR_ERROR)
1167 return -EIO;
1168
1169 return 0;
1170 }
1171
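/**
 * lpfc_emptyq_wait - Wait for a driver queue list to drain
 * @phba: lpfc_hba pointer.
 * @q: list head to wait on until it is empty.
 * @lock: spinlock protecting the list.
 *
 * Polls the list every 20 ms, dropping the lock between checks. Gives up
 * after roughly 5 seconds and logs a warning if I/O is still outstanding.
 *
 * Returns: 1 if the list drained, 0 on timeout.
 **/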
1172 int
1173 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
1174 {
1175 int cnt = 0;
1176
1177 spin_lock_irq(lock);
1178 while (!list_empty(q)) {
1179 spin_unlock_irq(lock);
1180 msleep(20);
1181 if (cnt++ > 250) { /* 5 secs */
1182 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1183 "0466 %s %s\n",
1184 "Outstanding IO when ",
1185 "bringing Adapter offline\n");
1186 return 0;
1187 }
1188 spin_lock_irq(lock);
1189 }
1190 spin_unlock_irq(lock);
1191 return 1;
1192 }
1193
1194 /**
1195 * lpfc_do_offline - Issues a mailbox command to bring the link down
1196 * @phba: lpfc_hba pointer.
1197 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
1198 *
1199 * Notes:
1200 * Assumes any error from lpfc_do_offline() will be negative.
1201 * Can wait up to 5 seconds for the port ring buffers count
1202 * to reach zero, prints a warning if it is not zero and continues.
1203 * lpfc_workq_post_event() returns zero when it fails to post the event.
1204 *
1205 * Returns:
1206 * -ENOMEM error posting the event, -EIO if the offline event failed
1207 * zero for success
1208 **/
1209 static int
1210 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
1211 {
1212 struct completion online_compl;
1213 struct lpfc_queue *qp = NULL;
1214 struct lpfc_sli_ring *pring;
1215 struct lpfc_sli *psli;
1216 int status = 0;
1217 int i;
1218 int rc;
1219
1220 init_completion(&online_compl);
1221 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1222 LPFC_EVT_OFFLINE_PREP);
1223 if (rc == 0)
1224 return -ENOMEM;
1225
1226 wait_for_completion(&online_compl);
1227
1228 if (status != 0)
1229 return -EIO;
1230
1231 psli = &phba->sli;
1232
1233 /*
1234 * If freeing the queues have already started, don't access them.
1235 * Otherwise set FREE_WAIT to indicate that queues are being used
1236 * to hold the freeing process until we finish.
1237 */
1238 spin_lock_irq(&phba->hbalock);
1239 if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
1240 psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
1241 } else {
1242 spin_unlock_irq(&phba->hbalock);
1243 goto skip_wait;
1244 }
1245 spin_unlock_irq(&phba->hbalock);
1246
1247 /* Wait a little for things to settle down, but not
1248 * long enough for dev loss timeout to expire.
1249 */
1250 if (phba->sli_rev != LPFC_SLI_REV4) {
1251 for (i = 0; i < psli->num_rings; i++) {
1252 pring = &psli->sli3_ring[i];
1253 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1254 &phba->hbalock))
1255 goto out;
1256 }
1257 } else {
1258 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1259 pring = qp->pring;
1260 if (!pring)
1261 continue;
1262 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1263 &pring->ring_lock))
1264 goto out;
1265 }
1266 }
1267 out:
1268 spin_lock_irq(&phba->hbalock);
1269 psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
1270 spin_unlock_irq(&phba->hbalock);
1271
1272 skip_wait:
1273 init_completion(&online_compl);
1274 rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
1275 if (rc == 0)
1276 return -ENOMEM;
1277
1278 wait_for_completion(&online_compl);
1279
1280 if (status != 0)
1281 return -EIO;
1282
1283 return 0;
1284 }
1285
1286 /**
1287 * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
1288 * @phba: lpfc_hba pointer.
1289 *
1290 * Description:
1291 * Issues a PCI secondary bus reset for the phba->pcidev.
1292 *
1293 * Notes:
1294 * First walks the bus_list to ensure only PCI devices with Emulex
1295 * vendor id, device ids that support hot reset, only one occurrence
1296 * of function 0, and all ports on the bus are in offline mode to ensure the
1297 * hot reset only affects one valid HBA.
1298 *
1299 * Returns:
1300 * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
1301 * -ENODEV, NULL ptr to pcidev
1302 * -EBADSLT, detected invalid device
1303 * -EBUSY, port is not in offline state
1304 * 0, successful
1305 */
1306 static int
1307 lpfc_reset_pci_bus(struct lpfc_hba *phba)
1308 {
1309 struct pci_dev *pdev = phba->pcidev;
1310 struct Scsi_Host *shost = NULL;
1311 struct lpfc_hba *phba_other = NULL;
1312 struct pci_dev *ptr = NULL;
1313 int res;
1314
1315 if (phba->cfg_enable_hba_reset != 2)
1316 return -ENOTSUPP;
1317
1318 if (!pdev) {
1319 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1320 return -ENODEV;
1321 }
1322
1323 res = lpfc_check_pci_resettable(phba);
1324 if (res)
1325 return res;
1326
1327 /* Walk the list of devices on the pci_dev's bus */
1328 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1329 /* Check port is offline */
1330 shost = pci_get_drvdata(ptr);
1331 if (shost) {
1332 phba_other =
1333 ((struct lpfc_vport *)shost->hostdata)->phba;
1334 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1335 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1336 "8349 WWPN = 0x%02x%02x%02x%02x"
1337 "%02x%02x%02x%02x is not "
1338 "offline!\n",
1339 phba_other->wwpn[0],
1340 phba_other->wwpn[1],
1341 phba_other->wwpn[2],
1342 phba_other->wwpn[3],
1343 phba_other->wwpn[4],
1344 phba_other->wwpn[5],
1345 phba_other->wwpn[6],
1346 phba_other->wwpn[7]);
1347 return -EBUSY;
1348 }
1349 }
1350 }
1351
1352 /* Issue PCI bus reset */
1353 res = pci_reset_bus(pdev);
1354 if (res) {
1355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1356 "8350 PCI reset bus failed: %d\n", res);
1357 }
1358
1359 return res;
1360 }
1361
1362 /**
1363 * lpfc_selective_reset - Offline then onlines the port
1364 * @phba: lpfc_hba pointer.
1365 *
1366 * Description:
1367 * If the port is configured to allow a reset then the hba is brought
1368 * offline then online.
1369 *
1370 * Notes:
1371 * Assumes any error from lpfc_do_offline() will be negative.
1372 * Do not make this function static.
1373 *
1374 * Returns:
1375 * lpfc_do_offline() return code if not zero
1376 * -EACCES reset not configured, -ENOMEM or -EIO error posting the event
1377 * zero for success
1378 **/
1379 int
1380 lpfc_selective_reset(struct lpfc_hba *phba)
1381 {
1382 struct completion online_compl;
1383 int status = 0;
1384 int rc;
1385
1386 if (!phba->cfg_enable_hba_reset)
1387 return -EACCES;
1388
1389 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
1390 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1391
1392 if (status != 0)
1393 return status;
1394 }
1395
1396 init_completion(&online_compl);
1397 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1398 LPFC_EVT_ONLINE);
1399 if (rc == 0)
1400 return -ENOMEM;
1401
1402 wait_for_completion(&online_compl);
1403
1404 if (status != 0)
1405 return -EIO;
1406
1407 return 0;
1408 }
1409
1410 /**
1411 * lpfc_issue_reset - Selectively resets an adapter
1412 * @dev: class device that is converted into a Scsi_host.
1413 * @attr: device attribute, not used.
1414 * @buf: containing the string "selective".
1415 * @count: unused variable.
1416 *
1417 * Description:
1418 * If the buf contains the string "selective" then lpfc_selective_reset()
1419 * is called to perform the reset.
1420 *
1421 * Notes:
1422 * Assumes any error from lpfc_selective_reset() will be negative.
1423 * If lpfc_selective_reset() returns zero then the length of the buffer
1424 * is returned which indicates success
1425 *
1426 * Returns:
1427 * -EINVAL if the buffer does not contain the string "selective"
1428 * length of buf if lpfc_selective_reset() succeeds
1429 * return value of lpfc_selective_reset() if the call fails
1430 **/
1431 static ssize_t
1432 lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
1433 const char *buf, size_t count)
1434 {
1435 struct Scsi_Host *shost = class_to_shost(dev);
1436 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1437 struct lpfc_hba *phba = vport->phba;
1438 int status = -EINVAL;
1439
1440 if (!phba->cfg_enable_hba_reset)
1441 return -EACCES;
1442
1443 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
1444 status = phba->lpfc_selective_reset(phba);
1445
1446 if (status == 0)
1447 return strlen(buf);
1448 else
1449 return status;
1450 }
1451
1452 /**
1453 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readiness
1454 * @phba: lpfc_hba pointer.
1455 *
1456 * Description:
1457 * Waits on the SLI4 interface type-2 device sliport status register for
1458 * readiness after performing a firmware reset.
1459 *
1460 * Returns:
1461 * zero for success, -EPERM when port does not have privilege to perform the
1462 * reset, -EIO when port timeout from recovering from the reset.
1463 *
1464 * Note:
1465 * As the caller will interpret the return code by value, be careful when
1466 * making changes or additions to the return codes.
1467 **/
1468 int
1469 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
1470 {
1471 struct lpfc_register portstat_reg = {0};
1472 int i;
1473
1474 msleep(100);
1475 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1476 &portstat_reg.word0))
1477 return -EIO;
1478
1479 /* verify if privileged for the request operation */
1480 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
1481 !bf_get(lpfc_sliport_status_err, &portstat_reg))
1482 return -EPERM;
1483
1484 /* wait for the SLI port firmware ready after firmware reset */
1485 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
1486 msleep(10);
1487 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1488 &portstat_reg.word0))
1489 continue;
1490 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
1491 continue;
1492 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
1493 continue;
1494 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
1495 continue;
1496 break;
1497 }
1498
1499 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
1500 return 0;
1501 else
1502 return -EIO;
1503 }
1504
1505 /**
1506 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
1507 * @phba: lpfc_hba pointer.
1508 *
1509 * Description:
1510 * Request SLI4 interface type-2 device to perform a physical register set
1511 * access specified by @opcode (LPFC_FW_DUMP, LPFC_FW_RESET or LPFC_DV_RESET).
1512 *
1513 * Returns:
1514 * zero for success, a negative error code otherwise
1515 **/
1516 static ssize_t
1517 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1518 {
1519 struct completion online_compl;
1520 struct pci_dev *pdev = phba->pcidev;
1521 uint32_t before_fc_flag;
1522 uint32_t sriov_nr_virtfn;
1523 uint32_t reg_val;
1524 int status = 0, rc = 0;
1525 int job_posted = 1, sriov_err;
1526
1527 if (!phba->cfg_enable_hba_reset)
1528 return -EACCES;
1529
1530 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1531 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1532 LPFC_SLI_INTF_IF_TYPE_2))
1533 return -EPERM;
1534
1535 /* Keep state if we need to restore back */
1536 before_fc_flag = phba->pport->fc_flag;
1537 sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
1538
1539 /* Disable SR-IOV virtual functions if enabled */
1540 if (phba->cfg_sriov_nr_virtfn) {
1541 pci_disable_sriov(pdev);
1542 phba->cfg_sriov_nr_virtfn = 0;
1543 }
1544
1545 if (opcode == LPFC_FW_DUMP)
1546 phba->hba_flag |= HBA_FW_DUMP_OP;
1547
1548 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1549
1550 if (status != 0) {
1551 phba->hba_flag &= ~HBA_FW_DUMP_OP;
1552 return status;
1553 }
1554
1555 /* wait for the device to be quiesced before firmware reset */
1556 msleep(100);
1557
1558 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
1559 LPFC_CTL_PDEV_CTL_OFFSET);
1560
1561 if (opcode == LPFC_FW_DUMP)
1562 reg_val |= LPFC_FW_DUMP_REQUEST;
1563 else if (opcode == LPFC_FW_RESET)
1564 reg_val |= LPFC_CTL_PDEV_CTL_FRST;
1565 else if (opcode == LPFC_DV_RESET)
1566 reg_val |= LPFC_CTL_PDEV_CTL_DRST;
1567
1568 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
1569 LPFC_CTL_PDEV_CTL_OFFSET);
1570 /* flush */
1571 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
1572
1573 /* delay driver action following IF_TYPE_2 reset */
1574 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1575
1576 if (rc == -EPERM) {
1577 /* no privilege for reset */
1578 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1579 "3150 No privilege to perform the requested "
1580 "access: x%x\n", reg_val);
1581 } else if (rc == -EIO) {
1582 /* reset failed, there is nothing more we can do */
1583 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1584 "3153 Fail to perform the requested "
1585 "access: x%x\n", reg_val);
1586 return rc;
1587 }
1588
1589 /* keep the original port state */
1590 if (before_fc_flag & FC_OFFLINE_MODE)
1591 goto out;
1592
1593 init_completion(&online_compl);
1594 job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
1595 LPFC_EVT_ONLINE);
1596 if (!job_posted)
1597 goto out;
1598
1599 wait_for_completion(&online_compl);
1600
1601 out:
1602 /* in any case, restore the virtual functions enabled as before */
1603 if (sriov_nr_virtfn) {
1604 sriov_err =
1605 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
1606 if (!sriov_err)
1607 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
1608 }
1609
1610 /* return proper error code */
1611 if (!rc) {
1612 if (!job_posted)
1613 rc = -ENOMEM;
1614 else if (status)
1615 rc = -EIO;
1616 }
1617 return rc;
1618 }
1619
1620 /**
1621 * lpfc_nport_evt_cnt_show - Return the number of nport events
1622 * @dev: class device that is converted into a Scsi_host.
1623 * @attr: device attribute, not used.
1624 * @buf: on return contains the ascii number of nport events.
1625 *
1626 * Returns: size of formatted string.
1627 **/
1628 static ssize_t
1629 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
1630 char *buf)
1631 {
1632 struct Scsi_Host *shost = class_to_shost(dev);
1633 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1634 struct lpfc_hba *phba = vport->phba;
1635
1636 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
1637 }
1638
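/**
 * lpfc_set_trunking - Set the adapter trunking mode via a mailbox command
 * @phba: lpfc_hba pointer.
 * @buff_out: user string, "enable 2", "enable 4" or "disable".
 *
 * Issues the FCOE_FC_SET_TRUNK_MODE mailbox command with the requested
 * trunk mode (disabled, two port or four port).
 *
 * Returns: zero once the mailbox command has been issued (mailbox failures
 * are only logged), negative error code for an invalid request or
 * allocation failure.
 **/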
1639 static int
1640 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
1641 {
1642 LPFC_MBOXQ_t *mbox = NULL;
1643 unsigned long val = 0;
1644 char *pval = NULL;
1645 int rc = 0;
1646
1647 if (!strncmp("enable", buff_out,
1648 strlen("enable"))) {
1649 pval = buff_out + strlen("enable") + 1;
1650 rc = kstrtoul(pval, 0, &val);
1651 if (rc)
1652 return rc; /* Invalid number */
1653 } else if (!strncmp("disable", buff_out,
1654 strlen("disable"))) {
1655 val = 0;
1656 } else {
1657 return -EINVAL; /* Invalid command */
1658 }
1659
1660 switch (val) {
1661 case 0:
1662 val = 0x0; /* Disable */
1663 break;
1664 case 2:
1665 val = 0x1; /* Enable two port trunk */
1666 break;
1667 case 4:
1668 val = 0x2; /* Enable four port trunk */
1669 break;
1670 default:
1671 return -EINVAL;
1672 }
1673
1674 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1675 "0070 Set trunk mode with val %ld ", val);
1676
1677 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1678 if (!mbox)
1679 return -ENOMEM;
1680
1681 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1682 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
1683 12, LPFC_SLI4_MBX_EMBED);
1684
1685 bf_set(lpfc_mbx_set_trunk_mode,
1686 &mbox->u.mqe.un.set_trunk_mode,
1687 val);
1688 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1689 if (rc)
1690 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1691 "0071 Set trunk mode failed with status: %d",
1692 rc);
1693 if (rc != MBX_TIMEOUT)
1694 mempool_free(mbox, phba->mbox_mem_pool);
1695
1696 return 0;
1697 }
1698
1699 /**
1700 * lpfc_board_mode_show - Return the state of the board
1701 * @dev: class device that is converted into a Scsi_host.
1702 * @attr: device attribute, not used.
1703 * @buf: on return contains the state of the adapter.
1704 *
1705 * Returns: size of formatted string.
1706 **/
1707 static ssize_t
1708 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
1709 char *buf)
1710 {
1711 struct Scsi_Host *shost = class_to_shost(dev);
1712 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1713 struct lpfc_hba *phba = vport->phba;
1714 char * state;
1715
1716 if (phba->link_state == LPFC_HBA_ERROR)
1717 state = "error";
1718 else if (phba->link_state == LPFC_WARM_START)
1719 state = "warm start";
1720 else if (phba->link_state == LPFC_INIT_START)
1721 state = "offline";
1722 else
1723 state = "online";
1724
1725 return scnprintf(buf, PAGE_SIZE, "%s\n", state);
1726 }
1727
1728 /**
1729 * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
1730 * @dev: class device that is converted into a Scsi_host.
1731 * @attr: device attribute, not used.
1732 * @buf: containing one of the strings "online", "offline", "warm" or "error".
1733 * @count: unused variable.
1734 *
1735 * Returns:
1736 * -EACCES if enable hba reset not enabled
1737 * -EINVAL if the buffer does not contain a valid string (see above)
1738 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
1739 * buf length greater than zero indicates success
1740 **/
1741 static ssize_t
1742 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1743 const char *buf, size_t count)
1744 {
1745 struct Scsi_Host *shost = class_to_shost(dev);
1746 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1747 struct lpfc_hba *phba = vport->phba;
1748 struct completion online_compl;
1749 char *board_mode_str = NULL;
1750 int status = 0;
1751 int rc;
1752
1753 if (!phba->cfg_enable_hba_reset) {
1754 status = -EACCES;
1755 goto board_mode_out;
1756 }
1757
1758 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1759 "3050 lpfc_board_mode set to %s\n", buf);
1760
1761 init_completion(&online_compl);
1762
1763 	if (strncmp(buf, "online", sizeof("online") - 1) == 0) {
1764 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1765 LPFC_EVT_ONLINE);
1766 if (rc == 0) {
1767 status = -ENOMEM;
1768 goto board_mode_out;
1769 }
1770 wait_for_completion(&online_compl);
1771 if (status)
1772 status = -EIO;
1773 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
1774 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1775 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
1776 if (phba->sli_rev == LPFC_SLI_REV4)
1777 status = -EINVAL;
1778 else
1779 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
1780 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
1781 if (phba->sli_rev == LPFC_SLI_REV4)
1782 status = -EINVAL;
1783 else
1784 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
1785 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
1786 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
1787 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
1788 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1789 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1790 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1791 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1792 == 0)
1793 status = lpfc_reset_pci_bus(phba);
1794 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1795 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1796 else
1797 status = -EINVAL;
1798
1799 board_mode_out:
1800 if (!status)
1801 return strlen(buf);
1802 else {
1803 board_mode_str = strchr(buf, '\n');
1804 if (board_mode_str)
1805 *board_mode_str = '\0';
1806 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1807 "3097 Failed \"%s\", status(%d), "
1808 "fc_flag(x%x)\n",
1809 buf, status, phba->pport->fc_flag);
1810 return status;
1811 }
1812 }
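/*
 * Example (editorial sketch, not part of the original source): board_mode is
 * registered below with DEVICE_ATTR(board_mode, ...), so on a typical system
 * it is expected to appear under the SCSI host sysfs directory, e.g.
 * /sys/class/scsi_host/hostN/board_mode.  Assuming that path, the state can
 * be changed from user space with commands such as:
 *
 *   echo offline > /sys/class/scsi_host/hostN/board_mode
 *   echo online  > /sys/class/scsi_host/hostN/board_mode
 *   echo "trunk enable 2" > /sys/class/scsi_host/hostN/board_mode
 */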
1813
1814 /**
1815  * lpfc_get_hba_info - Return various bits of information about the adapter
1816 * @phba: pointer to the adapter structure.
1817 * @mxri: max xri count.
1818 * @axri: available xri count.
1819 * @mrpi: max rpi count.
1820 * @arpi: available rpi count.
1821 * @mvpi: max vpi count.
1822 * @avpi: available vpi count.
1823 *
1824 * Description:
1825  * If an integer pointer for a count is not NULL, then the value for
1826  * that count is returned.
1827 *
1828 * Returns:
1829 * zero on error
1830 * one for success
1831 **/
1832 static int
1833 lpfc_get_hba_info(struct lpfc_hba *phba,
1834 uint32_t *mxri, uint32_t *axri,
1835 uint32_t *mrpi, uint32_t *arpi,
1836 uint32_t *mvpi, uint32_t *avpi)
1837 {
1838 struct lpfc_mbx_read_config *rd_config;
1839 LPFC_MBOXQ_t *pmboxq;
1840 MAILBOX_t *pmb;
1841 int rc = 0;
1842 uint32_t max_vpi;
1843
1844 /*
1845 * prevent udev from issuing mailbox commands until the port is
1846 * configured.
1847 */
1848 if (phba->link_state < LPFC_LINK_DOWN ||
1849 !phba->mbox_mem_pool ||
1850 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
1851 return 0;
1852
1853 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1854 return 0;
1855
1856 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1857 if (!pmboxq)
1858 return 0;
1859 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1860
1861 pmb = &pmboxq->u.mb;
1862 pmb->mbxCommand = MBX_READ_CONFIG;
1863 pmb->mbxOwner = OWN_HOST;
1864 pmboxq->ctx_buf = NULL;
1865
1866 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1867 rc = MBX_NOT_FINISHED;
1868 else
1869 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1870
1871 if (rc != MBX_SUCCESS) {
1872 if (rc != MBX_TIMEOUT)
1873 mempool_free(pmboxq, phba->mbox_mem_pool);
1874 return 0;
1875 }
1876
1877 if (phba->sli_rev == LPFC_SLI_REV4) {
1878 rd_config = &pmboxq->u.mqe.un.rd_config;
1879 if (mrpi)
1880 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
1881 if (arpi)
1882 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
1883 phba->sli4_hba.max_cfg_param.rpi_used;
1884 if (mxri)
1885 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
1886 if (axri)
1887 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
1888 phba->sli4_hba.max_cfg_param.xri_used;
1889
1890 /* Account for differences with SLI-3. Get vpi count from
1891 * mailbox data and subtract one for max vpi value.
1892 */
1893 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
1894 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
1895
1896 /* Limit the max we support */
1897 if (max_vpi > LPFC_MAX_VPI)
1898 max_vpi = LPFC_MAX_VPI;
1899 if (mvpi)
1900 *mvpi = max_vpi;
1901 if (avpi)
1902 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
1903 } else {
1904 if (mrpi)
1905 *mrpi = pmb->un.varRdConfig.max_rpi;
1906 if (arpi)
1907 *arpi = pmb->un.varRdConfig.avail_rpi;
1908 if (mxri)
1909 *mxri = pmb->un.varRdConfig.max_xri;
1910 if (axri)
1911 *axri = pmb->un.varRdConfig.avail_xri;
1912 if (mvpi)
1913 *mvpi = pmb->un.varRdConfig.max_vpi;
1914 if (avpi) {
1915 /* avail_vpi is only valid if link is up and ready */
1916 if (phba->link_state == LPFC_HBA_READY)
1917 *avpi = pmb->un.varRdConfig.avail_vpi;
1918 else
1919 *avpi = pmb->un.varRdConfig.max_vpi;
1920 }
1921 }
1922
1923 mempool_free(pmboxq, phba->mbox_mem_pool);
1924 return 1;
1925 }
1926
1927 /**
1928 * lpfc_max_rpi_show - Return maximum rpi
1929 * @dev: class device that is converted into a Scsi_host.
1930 * @attr: device attribute, not used.
1931 * @buf: on return contains the maximum rpi count in decimal or "Unknown".
1932 *
1933 * Description:
1934 * Calls lpfc_get_hba_info() asking for just the mrpi count.
1935 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1936 * to "Unknown" and the buffer length is returned, therefore the caller
1937 * must check for "Unknown" in the buffer to detect a failure.
1938 *
1939 * Returns: size of formatted string.
1940 **/
1941 static ssize_t
1942 lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
1943 char *buf)
1944 {
1945 struct Scsi_Host *shost = class_to_shost(dev);
1946 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1947 struct lpfc_hba *phba = vport->phba;
1948 uint32_t cnt;
1949
1950 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
1951 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
1952 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1953 }
1954
1955 /**
1956 * lpfc_used_rpi_show - Return maximum rpi minus available rpi
1957 * @dev: class device that is converted into a Scsi_host.
1958 * @attr: device attribute, not used.
1959 * @buf: containing the used rpi count in decimal or "Unknown".
1960 *
1961 * Description:
1962 * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
1963 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1964 * to "Unknown" and the buffer length is returned, therefore the caller
1965 * must check for "Unknown" in the buffer to detect a failure.
1966 *
1967 * Returns: size of formatted string.
1968 **/
1969 static ssize_t
1970 lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
1971 char *buf)
1972 {
1973 struct Scsi_Host *shost = class_to_shost(dev);
1974 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1975 struct lpfc_hba *phba = vport->phba;
1976 uint32_t cnt, acnt;
1977
1978 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
1979 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
1980 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
1981 }
1982
1983 /**
1984 * lpfc_max_xri_show - Return maximum xri
1985 * @dev: class device that is converted into a Scsi_host.
1986 * @attr: device attribute, not used.
1987 * @buf: on return contains the maximum xri count in decimal or "Unknown".
1988 *
1989 * Description:
1990  * Calls lpfc_get_hba_info() asking for just the mxri count.
1991 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
1992 * to "Unknown" and the buffer length is returned, therefore the caller
1993 * must check for "Unknown" in the buffer to detect a failure.
1994 *
1995 * Returns: size of formatted string.
1996 **/
1997 static ssize_t
1998 lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
1999 char *buf)
2000 {
2001 struct Scsi_Host *shost = class_to_shost(dev);
2002 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2003 struct lpfc_hba *phba = vport->phba;
2004 uint32_t cnt;
2005
2006 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
2007 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2008 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2009 }
2010
2011 /**
2012  * lpfc_used_xri_show - Return maximum xri minus the available xri
2013 * @dev: class device that is converted into a Scsi_host.
2014 * @attr: device attribute, not used.
2015 * @buf: on return contains the used xri count in decimal or "Unknown".
2016 *
2017 * Description:
2018 * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
2019 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2020 * to "Unknown" and the buffer length is returned, therefore the caller
2021 * must check for "Unknown" in the buffer to detect a failure.
2022 *
2023 * Returns: size of formatted string.
2024 **/
2025 static ssize_t
2026 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
2027 char *buf)
2028 {
2029 struct Scsi_Host *shost = class_to_shost(dev);
2030 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2031 struct lpfc_hba *phba = vport->phba;
2032 uint32_t cnt, acnt;
2033
2034 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
2035 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2036 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2037 }
2038
2039 /**
2040 * lpfc_max_vpi_show - Return maximum vpi
2041 * @dev: class device that is converted into a Scsi_host.
2042 * @attr: device attribute, not used.
2043 * @buf: on return contains the maximum vpi count in decimal or "Unknown".
2044 *
2045 * Description:
2046 * Calls lpfc_get_hba_info() asking for just the mvpi count.
2047 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2048 * to "Unknown" and the buffer length is returned, therefore the caller
2049 * must check for "Unknown" in the buffer to detect a failure.
2050 *
2051 * Returns: size of formatted string.
2052 **/
2053 static ssize_t
2054 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
2055 char *buf)
2056 {
2057 struct Scsi_Host *shost = class_to_shost(dev);
2058 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2059 struct lpfc_hba *phba = vport->phba;
2060 uint32_t cnt;
2061
2062 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
2063 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2064 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2065 }
2066
2067 /**
2068 * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
2069 * @dev: class device that is converted into a Scsi_host.
2070 * @attr: device attribute, not used.
2071 * @buf: on return contains the used vpi count in decimal or "Unknown".
2072 *
2073 * Description:
2074 * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
2075 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2076 * to "Unknown" and the buffer length is returned, therefore the caller
2077 * must check for "Unknown" in the buffer to detect a failure.
2078 *
2079 * Returns: size of formatted string.
2080 **/
2081 static ssize_t
2082 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
2083 char *buf)
2084 {
2085 struct Scsi_Host *shost = class_to_shost(dev);
2086 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2087 struct lpfc_hba *phba = vport->phba;
2088 uint32_t cnt, acnt;
2089
2090 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
2091 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2092 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2093 }
2094
2095 /**
2096 * lpfc_npiv_info_show - Return text about NPIV support for the adapter
2097 * @dev: class device that is converted into a Scsi_host.
2098 * @attr: device attribute, not used.
2099 * @buf: text that must be interpreted to determine if npiv is supported.
2100 *
2101 * Description:
2102  * Buffer will contain text indicating npiv is not supported on the port,
2103 * the port is an NPIV physical port, or it is an npiv virtual port with
2104 * the id of the vport.
2105 *
2106 * Returns: size of formatted string.
2107 **/
2108 static ssize_t
2109 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
2110 char *buf)
2111 {
2112 struct Scsi_Host *shost = class_to_shost(dev);
2113 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2114 struct lpfc_hba *phba = vport->phba;
2115
2116 if (!(phba->max_vpi))
2117 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
2118 if (vport->port_type == LPFC_PHYSICAL_PORT)
2119 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
2120 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
2121 }
2122
2123 /**
2124 * lpfc_poll_show - Return text about poll support for the adapter
2125 * @dev: class device that is converted into a Scsi_host.
2126 * @attr: device attribute, not used.
2127 * @buf: on return contains the cfg_poll in hex.
2128 *
2129 * Notes:
2130 * cfg_poll should be a lpfc_polling_flags type.
2131 *
2132 * Returns: size of formatted string.
2133 **/
2134 static ssize_t
2135 lpfc_poll_show(struct device *dev, struct device_attribute *attr,
2136 char *buf)
2137 {
2138 struct Scsi_Host *shost = class_to_shost(dev);
2139 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2140 struct lpfc_hba *phba = vport->phba;
2141
2142 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
2143 }
2144
2145 /**
2146 * lpfc_poll_store - Set the value of cfg_poll for the adapter
2147 * @dev: class device that is converted into a Scsi_host.
2148 * @attr: device attribute, not used.
2149 * @buf: one or more lpfc_polling_flags values.
2150 * @count: not used.
2151 *
2152 * Notes:
2153 * buf contents converted to integer and checked for a valid value.
2154 *
2155 * Returns:
2156  * -EINVAL if the buffer cannot be converted or is out of range
2157 * length of the buf on success
2158 **/
2159 static ssize_t
2160 lpfc_poll_store(struct device *dev, struct device_attribute *attr,
2161 const char *buf, size_t count)
2162 {
2163 struct Scsi_Host *shost = class_to_shost(dev);
2164 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2165 struct lpfc_hba *phba = vport->phba;
2166 uint32_t creg_val;
2167 uint32_t old_val;
2168 	int val = 0;
2169
2170 if (!isdigit(buf[0]))
2171 return -EINVAL;
2172
2173 if (sscanf(buf, "%i", &val) != 1)
2174 return -EINVAL;
2175
2176 if ((val & 0x3) != val)
2177 return -EINVAL;
2178
2179 if (phba->sli_rev == LPFC_SLI_REV4)
2180 val = 0;
2181
2182 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2183 "3051 lpfc_poll changed from %d to %d\n",
2184 phba->cfg_poll, val);
2185
2186 spin_lock_irq(&phba->hbalock);
2187
2188 old_val = phba->cfg_poll;
2189
2190 if (val & ENABLE_FCP_RING_POLLING) {
2191 if ((val & DISABLE_FCP_RING_INT) &&
2192 !(old_val & DISABLE_FCP_RING_INT)) {
2193 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2194 spin_unlock_irq(&phba->hbalock);
2195 return -EINVAL;
2196 }
2197 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
2198 writel(creg_val, phba->HCregaddr);
2199 readl(phba->HCregaddr); /* flush */
2200
2201 lpfc_poll_start_timer(phba);
2202 }
2203 } else if (val != 0x0) {
2204 spin_unlock_irq(&phba->hbalock);
2205 return -EINVAL;
2206 }
2207
2208 if (!(val & DISABLE_FCP_RING_INT) &&
2209 (old_val & DISABLE_FCP_RING_INT))
2210 {
2211 spin_unlock_irq(&phba->hbalock);
2212 del_timer(&phba->fcp_poll_timer);
2213 spin_lock_irq(&phba->hbalock);
2214 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2215 spin_unlock_irq(&phba->hbalock);
2216 return -EINVAL;
2217 }
2218 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2219 writel(creg_val, phba->HCregaddr);
2220 readl(phba->HCregaddr); /* flush */
2221 }
2222
2223 phba->cfg_poll = val;
2224
2225 spin_unlock_irq(&phba->hbalock);
2226
2227 return strlen(buf);
2228 }
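/*
 * Editorial note: based on the checks above, only values for which
 * (val & 0x3) == val are accepted, i.e. combinations of the
 * ENABLE_FCP_RING_POLLING and DISABLE_FCP_RING_INT polling flags.  Any other
 * value returns -EINVAL, and on SLI-4 adapters the value is forced to 0.
 */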
2229
2230 /**
2231 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
2232 * @dev: class converted to a Scsi_host structure.
2233 * @attr: device attribute, not used.
2234 * @buf: on return contains the formatted support level.
2235 *
2236 * Description:
2237 * Returns the maximum number of virtual functions a physical function can
2238  * support; 0 will be returned if called on a virtual function.
2239 *
2240 * Returns: size of formatted string.
2241 **/
2242 static ssize_t
2243 lpfc_sriov_hw_max_virtfn_show(struct device *dev,
2244 struct device_attribute *attr,
2245 char *buf)
2246 {
2247 struct Scsi_Host *shost = class_to_shost(dev);
2248 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2249 struct lpfc_hba *phba = vport->phba;
2250 uint16_t max_nr_virtfn;
2251
2252 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
2253 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
2254 }
2255
2256 static inline bool lpfc_rangecheck(uint val, uint min, uint max)
2257 {
2258 return val >= min && val <= max;
2259 }
2260
2261 /**
2262  * lpfc_enable_bbcr_set - Set the enable_bbcr attribute value
2263  * @phba: pointer to the adapter structure.
2264 * @val: integer attribute value.
2265 *
2266 * Description:
2267 * Validates the min and max values then sets the
2268  * adapter config field if in the valid range. Prints an error message
2269 * and does not set the parameter if invalid.
2270 *
2271 * Returns:
2272 * zero on success
2273 * -EINVAL if val is invalid
2274  **/
2275 static ssize_t
2276 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
2277 {
2278 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
2279 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2280 "3068 %s_enable_bbcr changed from %d to %d\n",
2281 LPFC_DRIVER_NAME, phba->cfg_enable_bbcr, val);
2282 phba->cfg_enable_bbcr = val;
2283 return 0;
2284 }
2285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2286 "0451 %s_enable_bbcr cannot set to %d, range is 0, 1\n",
2287 LPFC_DRIVER_NAME, val);
2288 return -EINVAL;
2289 }
2290
2291 /**
2292 * lpfc_param_show - Return a cfg attribute value in decimal
2293 *
2294 * Description:
2295 * Macro that given an attr e.g. hba_queue_depth expands
2296 * into a function with the name lpfc_hba_queue_depth_show.
2297 *
2298  * lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field.
2299 * @dev: class device that is converted into a Scsi_host.
2300 * @attr: device attribute, not used.
2301 * @buf: on return contains the attribute value in decimal.
2302 *
2303 * Returns: size of formatted string.
2304 **/
2305 #define lpfc_param_show(attr) \
2306 static ssize_t \
2307 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2308 char *buf) \
2309 { \
2310 struct Scsi_Host *shost = class_to_shost(dev);\
2311 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2312 struct lpfc_hba *phba = vport->phba;\
2313 return scnprintf(buf, PAGE_SIZE, "%d\n",\
2314 phba->cfg_##attr);\
2315 }
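/*
 * Example (editorial): invoking the macro as
 *
 *   lpfc_param_show(hba_queue_depth)
 *
 * generates a sysfs show routine named lpfc_hba_queue_depth_show() that
 * prints phba->cfg_hba_queue_depth in decimal.
 */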
2316
2317 /**
2318 * lpfc_param_hex_show - Return a cfg attribute value in hex
2319 *
2320 * Description:
2321 * Macro that given an attr e.g. hba_queue_depth expands
2322 * into a function with the name lpfc_hba_queue_depth_show
2323 *
2324  * lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field.
2325 * @dev: class device that is converted into a Scsi_host.
2326 * @attr: device attribute, not used.
2327 * @buf: on return contains the attribute value in hexadecimal.
2328 *
2329 * Returns: size of formatted string.
2330 **/
2331 #define lpfc_param_hex_show(attr) \
2332 static ssize_t \
2333 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2334 char *buf) \
2335 { \
2336 struct Scsi_Host *shost = class_to_shost(dev);\
2337 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2338 struct lpfc_hba *phba = vport->phba;\
2339 uint val = 0;\
2340 val = phba->cfg_##attr;\
2341 return scnprintf(buf, PAGE_SIZE, "%#x\n",\
2342 phba->cfg_##attr);\
2343 }
2344
2345 /**
2346 * lpfc_param_init - Initializes a cfg attribute
2347 *
2348 * Description:
2349 * Macro that given an attr e.g. hba_queue_depth expands
2350 * into a function with the name lpfc_hba_queue_depth_init. The macro also
2351 * takes a default argument, a minimum and maximum argument.
2352 *
2353 * lpfc_##attr##_init: Initializes an attribute.
2354  * @phba: pointer to the adapter structure.
2355 * @val: integer attribute value.
2356 *
2357 * Validates the min and max values then sets the adapter config field
2358 * accordingly, or uses the default if out of range and prints an error message.
2359 *
2360 * Returns:
2361 * zero on success
2362 * -EINVAL if default used
2363 **/
2364 #define lpfc_param_init(attr, default, minval, maxval) \
2365 static int \
2366 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
2367 { \
2368 if (lpfc_rangecheck(val, minval, maxval)) {\
2369 phba->cfg_##attr = val;\
2370 return 0;\
2371 }\
2372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2373 "0449 lpfc_"#attr" attribute cannot be set to %d, "\
2374 "allowed range is ["#minval", "#maxval"]\n", val); \
2375 phba->cfg_##attr = default;\
2376 return -EINVAL;\
2377 }
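/*
 * Example (editorial): lpfc_param_init(hba_queue_depth, 32, 1, 8192) would
 * generate lpfc_hba_queue_depth_init(), which stores the value if it lies in
 * [1, 8192] and otherwise logs message 0449 and falls back to the default of
 * 32.  The numbers here are illustrative only, not the driver's real limits.
 */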
2378
2379 /**
2380 * lpfc_param_set - Set a cfg attribute value
2381 *
2382 * Description:
2383 * Macro that given an attr e.g. hba_queue_depth expands
2384 * into a function with the name lpfc_hba_queue_depth_set
2385 *
2386 * lpfc_##attr##_set: Sets an attribute value.
2387  * @phba: pointer to the adapter structure.
2388 * @val: integer attribute value.
2389 *
2390 * Description:
2391 * Validates the min and max values then sets the
2392  * adapter config field if in the valid range. Prints an error message
2393 * and does not set the parameter if invalid.
2394 *
2395 * Returns:
2396 * zero on success
2397 * -EINVAL if val is invalid
2398 **/
2399 #define lpfc_param_set(attr, default, minval, maxval) \
2400 static int \
2401 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
2402 { \
2403 if (lpfc_rangecheck(val, minval, maxval)) {\
2404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2405 "3052 lpfc_" #attr " changed from %d to %d\n", \
2406 phba->cfg_##attr, val); \
2407 phba->cfg_##attr = val;\
2408 return 0;\
2409 }\
2410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2411 "0450 lpfc_"#attr" attribute cannot be set to %d, "\
2412 "allowed range is ["#minval", "#maxval"]\n", val); \
2413 return -EINVAL;\
2414 }
2415
2416 /**
2417 * lpfc_param_store - Set a vport attribute value
2418 *
2419 * Description:
2420 * Macro that given an attr e.g. hba_queue_depth expands
2421 * into a function with the name lpfc_hba_queue_depth_store.
2422 *
2423  * lpfc_##attr##_store: Set an attribute value.
2424 * @dev: class device that is converted into a Scsi_host.
2425 * @attr: device attribute, not used.
2426 * @buf: contains the attribute value in ascii.
2427 * @count: not used.
2428 *
2429 * Description:
2430 * Convert the ascii text number to an integer, then
2431 * use the lpfc_##attr##_set function to set the value.
2432 *
2433 * Returns:
2434 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2435 * length of buffer upon success.
2436 **/
2437 #define lpfc_param_store(attr) \
2438 static ssize_t \
2439 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2440 const char *buf, size_t count) \
2441 { \
2442 struct Scsi_Host *shost = class_to_shost(dev);\
2443 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2444 struct lpfc_hba *phba = vport->phba;\
2445 uint val = 0;\
2446 if (!isdigit(buf[0]))\
2447 return -EINVAL;\
2448 if (sscanf(buf, "%i", &val) != 1)\
2449 return -EINVAL;\
2450 if (lpfc_##attr##_set(phba, val) == 0) \
2451 return strlen(buf);\
2452 else \
2453 return -EINVAL;\
2454 }
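/*
 * Example (editorial): for a writable HBA attribute these macros are
 * typically paired, e.g.:
 *
 *   lpfc_param_set(hba_queue_depth, 32, 1, 8192)
 *   lpfc_param_store(hba_queue_depth)
 *
 * so that a write to the sysfs file is converted with sscanf("%i") and then
 * range checked by lpfc_hba_queue_depth_set() before cfg_hba_queue_depth is
 * updated.  Again, the numeric limits shown are placeholders.
 */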
2455
2456 /**
2457 * lpfc_vport_param_show - Return decimal formatted cfg attribute value
2458 *
2459 * Description:
2460 * Macro that given an attr e.g. hba_queue_depth expands
2461 * into a function with the name lpfc_hba_queue_depth_show
2462 *
2463 * lpfc_##attr##_show: prints the attribute value in decimal.
2464 * @dev: class device that is converted into a Scsi_host.
2465 * @attr: device attribute, not used.
2466 * @buf: on return contains the attribute value in decimal.
2467 *
2468 * Returns: length of formatted string.
2469 **/
2470 #define lpfc_vport_param_show(attr) \
2471 static ssize_t \
2472 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2473 char *buf) \
2474 { \
2475 struct Scsi_Host *shost = class_to_shost(dev);\
2476 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2477 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
2478 }
2479
2480 /**
2481 * lpfc_vport_param_hex_show - Return hex formatted attribute value
2482 *
2483 * Description:
2484 * Macro that given an attr e.g.
2485 * hba_queue_depth expands into a function with the name
2486 * lpfc_hba_queue_depth_show
2487 *
2488 * lpfc_##attr##_show: prints the attribute value in hexadecimal.
2489 * @dev: class device that is converted into a Scsi_host.
2490 * @attr: device attribute, not used.
2491 * @buf: on return contains the attribute value in hexadecimal.
2492 *
2493 * Returns: length of formatted string.
2494 **/
2495 #define lpfc_vport_param_hex_show(attr) \
2496 static ssize_t \
2497 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2498 char *buf) \
2499 { \
2500 struct Scsi_Host *shost = class_to_shost(dev);\
2501 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2502 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
2503 }
2504
2505 /**
2506 * lpfc_vport_param_init - Initialize a vport cfg attribute
2507 *
2508 * Description:
2509 * Macro that given an attr e.g. hba_queue_depth expands
2510 * into a function with the name lpfc_hba_queue_depth_init. The macro also
2511 * takes a default argument, a minimum and maximum argument.
2512 *
2513 * lpfc_##attr##_init: validates the min and max values then sets the
2514 * adapter config field accordingly, or uses the default if out of range
2515 * and prints an error message.
2516  * @phba: pointer to the adapter structure.
2517 * @val: integer attribute value.
2518 *
2519 * Returns:
2520 * zero on success
2521 * -EINVAL if default used
2522 **/
2523 #define lpfc_vport_param_init(attr, default, minval, maxval) \
2524 static int \
2525 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
2526 { \
2527 if (lpfc_rangecheck(val, minval, maxval)) {\
2528 vport->cfg_##attr = val;\
2529 return 0;\
2530 }\
2531 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2532 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
2533 "allowed range is ["#minval", "#maxval"]\n", val); \
2534 vport->cfg_##attr = default;\
2535 return -EINVAL;\
2536 }
2537
2538 /**
2539 * lpfc_vport_param_set - Set a vport cfg attribute
2540 *
2541 * Description:
2542 * Macro that given an attr e.g. hba_queue_depth expands
2543 * into a function with the name lpfc_hba_queue_depth_set
2544 *
2545 * lpfc_##attr##_set: validates the min and max values then sets the
2546  * adapter config field if in the valid range. Prints an error message
2547 * and does not set the parameter if invalid.
2548  * @phba: pointer to the adapter structure.
2549 * @val: integer attribute value.
2550 *
2551 * Returns:
2552 * zero on success
2553 * -EINVAL if val is invalid
2554 **/
2555 #define lpfc_vport_param_set(attr, default, minval, maxval) \
2556 static int \
2557 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
2558 { \
2559 if (lpfc_rangecheck(val, minval, maxval)) {\
2560 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2561 "3053 lpfc_" #attr \
2562 " changed from %d (x%x) to %d (x%x)\n", \
2563 vport->cfg_##attr, vport->cfg_##attr, \
2564 val, val); \
2565 vport->cfg_##attr = val;\
2566 return 0;\
2567 }\
2568 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2569 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
2570 "allowed range is ["#minval", "#maxval"]\n", val); \
2571 return -EINVAL;\
2572 }
2573
2574 /**
2575 * lpfc_vport_param_store - Set a vport attribute
2576 *
2577 * Description:
2578 * Macro that given an attr e.g. hba_queue_depth
2579 * expands into a function with the name lpfc_hba_queue_depth_store
2580 *
2581 * lpfc_##attr##_store: convert the ascii text number to an integer, then
2582 * use the lpfc_##attr##_set function to set the value.
2583  * @dev: class device that is converted into a Scsi_host.
2584 * @buf: contains the attribute value in decimal.
2585 * @count: not used.
2586 *
2587 * Returns:
2588 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2589 * length of buffer upon success.
2590 **/
2591 #define lpfc_vport_param_store(attr) \
2592 static ssize_t \
2593 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2594 const char *buf, size_t count) \
2595 { \
2596 struct Scsi_Host *shost = class_to_shost(dev);\
2597 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2598 uint val = 0;\
2599 if (!isdigit(buf[0]))\
2600 return -EINVAL;\
2601 if (sscanf(buf, "%i", &val) != 1)\
2602 return -EINVAL;\
2603 if (lpfc_##attr##_set(vport, val) == 0) \
2604 return strlen(buf);\
2605 else \
2606 return -EINVAL;\
2607 }
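/*
 * Editorial note: the lpfc_vport_param_* macros mirror the lpfc_param_*
 * macros above but operate on vport->cfg_##attr instead of phba->cfg_##attr,
 * so per-vport attributes get the same init/set/show/store boilerplate with
 * vport-scoped logging (messages 0423/0424/3053).
 */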
2608
2609
2610 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
2611 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
2612 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
2613 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
2614 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
2615 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
2616 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
2617 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
2618 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
2619 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
2620 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
2621 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
2622 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
2623 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
2624 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
2625 lpfc_link_state_store);
2626 static DEVICE_ATTR(option_rom_version, S_IRUGO,
2627 lpfc_option_rom_version_show, NULL);
2628 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
2629 lpfc_num_discovered_ports_show, NULL);
2630 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
2631 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
2632 static DEVICE_ATTR_RO(lpfc_drvr_version);
2633 static DEVICE_ATTR_RO(lpfc_enable_fip);
2634 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
2635 lpfc_board_mode_show, lpfc_board_mode_store);
2636 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
2637 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
2638 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
2639 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
2640 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
2641 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
2642 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
2643 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
2644 static DEVICE_ATTR_RO(lpfc_temp_sensor);
2645 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
2646 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2647 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2648 NULL);
2649
2650 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
2651 #define WWN_SZ 8
2652 /**
2653 * lpfc_wwn_set - Convert string to the 8 byte WWN value.
2654 * @buf: WWN string.
2655 * @cnt: Length of string.
2656 * @wwn: Array to receive converted wwn value.
2657 *
2658 * Returns:
2659 * -EINVAL if the buffer does not contain a valid wwn
2660 * 0 success
2661 **/
2662 static ssize_t
2663 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2664 {
2665 unsigned int i, j;
2666
2667 /* Count may include a LF at end of string */
2668 if (buf[cnt-1] == '\n')
2669 cnt--;
2670
2671 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2672 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2673 return -EINVAL;
2674
2675 memset(wwn, 0, WWN_SZ);
2676
2677 /* Validate and store the new name */
2678 for (i = 0, j = 0; i < 16; i++) {
2679 if ((*buf >= 'a') && (*buf <= 'f'))
2680 j = ((j << 4) | ((*buf++ - 'a') + 10));
2681 else if ((*buf >= 'A') && (*buf <= 'F'))
2682 j = ((j << 4) | ((*buf++ - 'A') + 10));
2683 else if ((*buf >= '0') && (*buf <= '9'))
2684 j = ((j << 4) | (*buf++ - '0'));
2685 else
2686 return -EINVAL;
2687 if (i % 2) {
2688 wwn[i/2] = j & 0xff;
2689 j = 0;
2690 }
2691 }
2692 return 0;
2693 }
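/*
 * Example (editorial): given the length and prefix checks above, all of the
 * following strings (with or without a trailing newline) are accepted and
 * produce the same 8 byte WWN 10:00:00:00:c9:ab:cd:ef:
 *
 *   "10000000c9abcdef"
 *   "x10000000c9abcdef"
 *   "0x10000000c9abcdef"
 */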
2694 /**
2695 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
2696 * @dev: class device that is converted into a Scsi_host.
2697 * @attr: device attribute, not used.
2698 * @buf: containing the string lpfc_soft_wwn_key.
2699 * @count: must be size of lpfc_soft_wwn_key.
2700 *
2701 * Returns:
2702 * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
2703 * length of buf indicates success
2704 **/
2705 static ssize_t
2706 lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
2707 const char *buf, size_t count)
2708 {
2709 struct Scsi_Host *shost = class_to_shost(dev);
2710 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2711 struct lpfc_hba *phba = vport->phba;
2712 unsigned int cnt = count;
2713 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
2714 u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
2715
2716 /*
2717 * We're doing a simple sanity check for soft_wwpn setting.
2718 * We require that the user write a specific key to enable
2719 * the soft_wwpn attribute to be settable. Once the attribute
2720 * is written, the enable key resets. If further updates are
2721 * desired, the key must be written again to re-enable the
2722 * attribute.
2723 *
2724 * The "key" is not secret - it is a hardcoded string shown
2725 * here. The intent is to protect against the random user or
2726 * application that is just writing attributes.
2727 */
2728 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
2729 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2730 "0051 "LPFC_DRIVER_NAME" soft wwpn can not"
2731 " be enabled: fawwpn is enabled\n");
2732 return -EINVAL;
2733 }
2734
2735 /* count may include a LF at end of string */
2736 if (buf[cnt-1] == '\n')
2737 cnt--;
2738
2739 if ((cnt != strlen(lpfc_soft_wwn_key)) ||
2740 (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
2741 return -EINVAL;
2742
2743 phba->soft_wwn_enable = 1;
2744
2745 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2746 "lpfc%d: soft_wwpn assignment has been enabled.\n",
2747 phba->brd_no);
2748 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2749 " The soft_wwpn feature is not supported by Broadcom.");
2750
2751 return count;
2752 }
2753 static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
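/*
 * Example (editorial sketch): assuming the attributes appear under
 * /sys/class/scsi_host/hostN/, the soft WWPN flow described above would look
 * like:
 *
 *   echo C99G71SL8032A      > .../lpfc_soft_wwn_enable
 *   echo 0x10000000c9abcdef > .../lpfc_soft_wwpn
 *
 * The key write arms the one-shot enable flag; the wwpn write (handled by
 * lpfc_soft_wwpn_store() below) consumes it and takes the adapter
 * offline/online to apply the new port name.  The wwpn value is illustrative.
 */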
2754
2755 /**
2756 * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
2757 * @dev: class device that is converted into a Scsi_host.
2758 * @attr: device attribute, not used.
2759 * @buf: on return contains the wwpn in hexadecimal.
2760 *
2761 * Returns: size of formatted string.
2762 **/
2763 static ssize_t
2764 lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
2765 char *buf)
2766 {
2767 struct Scsi_Host *shost = class_to_shost(dev);
2768 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2769 struct lpfc_hba *phba = vport->phba;
2770
2771 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2772 (unsigned long long)phba->cfg_soft_wwpn);
2773 }
2774
2775 /**
2776 * lpfc_soft_wwpn_store - Set the ww port name of the adapter
2777  * @dev: class device that is converted into a Scsi_host.
2778 * @attr: device attribute, not used.
2779 * @buf: contains the wwpn in hexadecimal.
2780 * @count: number of wwpn bytes in buf
2781 *
2782 * Returns:
2783 * -EACCES hba reset not enabled, adapter over temp
2784  * -EINVAL soft wwn not enabled, count is invalid, or a wwpn byte is invalid
2785 * -EIO error taking adapter offline or online
2786 * value of count on success
2787 **/
2788 static ssize_t
2789 lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2790 const char *buf, size_t count)
2791 {
2792 struct Scsi_Host *shost = class_to_shost(dev);
2793 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2794 struct lpfc_hba *phba = vport->phba;
2795 struct completion online_compl;
2796 int stat1 = 0, stat2 = 0;
2797 unsigned int cnt = count;
2798 u8 wwpn[WWN_SZ];
2799 int rc;
2800
2801 if (!phba->cfg_enable_hba_reset)
2802 return -EACCES;
2803 spin_lock_irq(&phba->hbalock);
2804 if (phba->over_temp_state == HBA_OVER_TEMP) {
2805 spin_unlock_irq(&phba->hbalock);
2806 return -EACCES;
2807 }
2808 spin_unlock_irq(&phba->hbalock);
2809 /* count may include a LF at end of string */
2810 if (buf[cnt-1] == '\n')
2811 cnt--;
2812
2813 if (!phba->soft_wwn_enable)
2814 return -EINVAL;
2815
2816 /* lock setting wwpn, wwnn down */
2817 phba->soft_wwn_enable = 0;
2818
2819 rc = lpfc_wwn_set(buf, cnt, wwpn);
2820 if (rc) {
2821 /* not able to set wwpn, unlock it */
2822 phba->soft_wwn_enable = 1;
2823 return rc;
2824 }
2825
2826 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
2827 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
2828 if (phba->cfg_soft_wwnn)
2829 fc_host_node_name(shost) = phba->cfg_soft_wwnn;
2830
2831 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2832 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
2833
2834 stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
2835 if (stat1)
2836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2837 "0463 lpfc_soft_wwpn attribute set failed to "
2838 "reinit adapter - %d\n", stat1);
2839 init_completion(&online_compl);
2840 rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
2841 LPFC_EVT_ONLINE);
2842 if (rc == 0)
2843 return -ENOMEM;
2844
2845 wait_for_completion(&online_compl);
2846 if (stat2)
2847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2848 "0464 lpfc_soft_wwpn attribute set failed to "
2849 "reinit adapter - %d\n", stat2);
2850 return (stat1 || stat2) ? -EIO : count;
2851 }
2852 static DEVICE_ATTR_RW(lpfc_soft_wwpn);
2853
2854 /**
2855 * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
2856 * @dev: class device that is converted into a Scsi_host.
2857 * @attr: device attribute, not used.
2858 * @buf: on return contains the wwnn in hexadecimal.
2859 *
2860 * Returns: size of formatted string.
2861 **/
2862 static ssize_t
2863 lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
2864 char *buf)
2865 {
2866 struct Scsi_Host *shost = class_to_shost(dev);
2867 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2868 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2869 (unsigned long long)phba->cfg_soft_wwnn);
2870 }
2871
2872 /**
2873 * lpfc_soft_wwnn_store - sets the ww node name of the adapter
2874  * @dev: class device that is converted into a Scsi_host.
2875 * @buf: contains the ww node name in hexadecimal.
2876 * @count: number of wwnn bytes in buf.
2877 *
2878 * Returns:
2879  * -EINVAL soft wwn not enabled, count is invalid, or a wwnn byte is invalid
2880 * value of count on success
2881 **/
2882 static ssize_t
2883 lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
2884 const char *buf, size_t count)
2885 {
2886 struct Scsi_Host *shost = class_to_shost(dev);
2887 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2888 unsigned int cnt = count;
2889 u8 wwnn[WWN_SZ];
2890 int rc;
2891
2892 /* count may include a LF at end of string */
2893 if (buf[cnt-1] == '\n')
2894 cnt--;
2895
2896 if (!phba->soft_wwn_enable)
2897 return -EINVAL;
2898
2899 rc = lpfc_wwn_set(buf, cnt, wwnn);
2900 if (rc) {
2901 /* Allow wwnn to be set many times, as long as the enable
2902 * is set. However, once the wwpn is set, everything locks.
2903 */
2904 return rc;
2905 }
2906
2907 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
2908
2909 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
2910 "lpfc%d: soft_wwnn set. Value will take effect upon "
2911 "setting of the soft_wwpn\n", phba->brd_no);
2912
2913 return count;
2914 }
2915 static DEVICE_ATTR_RW(lpfc_soft_wwnn);
2916
2917 /**
2918  * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
2919 * Optimized Access Storage (OAS) operations.
2920 * @dev: class device that is converted into a Scsi_host.
2921 * @attr: device attribute, not used.
2922 * @buf: buffer for passing information.
2923 *
2924 * Returns:
2925  * size of formatted string.
2926 **/
2927 static ssize_t
2928 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
2929 char *buf)
2930 {
2931 struct Scsi_Host *shost = class_to_shost(dev);
2932 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2933
2934 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2935 wwn_to_u64(phba->cfg_oas_tgt_wwpn));
2936 }
2937
2938 /**
2939  * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
2940 * Optimized Access Storage (OAS) operations.
2941 * @dev: class device that is converted into a Scsi_host.
2942 * @attr: device attribute, not used.
2943 * @buf: buffer for passing information.
2944 * @count: Size of the data buffer.
2945 *
2946 * Returns:
2947  * -EINVAL count is invalid or a wwpn byte is invalid
2948 * -EPERM oas is not supported by hba
2949 * value of count on success
2950 **/
2951 static ssize_t
2952 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2953 const char *buf, size_t count)
2954 {
2955 struct Scsi_Host *shost = class_to_shost(dev);
2956 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2957 unsigned int cnt = count;
2958 uint8_t wwpn[WWN_SZ];
2959 int rc;
2960
2961 if (!phba->cfg_fof)
2962 return -EPERM;
2963
2964 /* count may include a LF at end of string */
2965 if (buf[cnt-1] == '\n')
2966 cnt--;
2967
2968 rc = lpfc_wwn_set(buf, cnt, wwpn);
2969 if (rc)
2970 return rc;
2971
2972 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2973 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2974 if (wwn_to_u64(wwpn) == 0)
2975 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
2976 else
2977 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
2978 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2979 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
2980 return count;
2981 }
2982 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
2983 lpfc_oas_tgt_show, lpfc_oas_tgt_store);
2984
2985 /**
2986  * lpfc_oas_priority_show - Return the priority used for
2987  * Optimized Access Storage (OAS) operations.
2988 * @dev: class device that is converted into a Scsi_host.
2989 * @attr: device attribute, not used.
2990 * @buf: buffer for passing information.
2991 *
2992 * Returns:
2993  * size of formatted string.
2994 **/
2995 static ssize_t
2996 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
2997 char *buf)
2998 {
2999 struct Scsi_Host *shost = class_to_shost(dev);
3000 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3001
3002 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
3003 }
3004
3005 /**
3006  * lpfc_oas_priority_store - Store the priority to be used for
3007  * Optimized Access Storage (OAS) operations.
3008 * @dev: class device that is converted into a Scsi_host.
3009 * @attr: device attribute, not used.
3010 * @buf: buffer for passing information.
3011 * @count: Size of the data buffer.
3012 *
3013 * Returns:
3014  * -EINVAL the priority value is invalid or greater than 0x7f
3015 * -EPERM oas is not supported by hba
3016 * value of count on success
3017 **/
3018 static ssize_t
3019 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
3020 const char *buf, size_t count)
3021 {
3022 struct Scsi_Host *shost = class_to_shost(dev);
3023 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3024 unsigned int cnt = count;
3025 unsigned long val;
3026 int ret;
3027
3028 if (!phba->cfg_fof)
3029 return -EPERM;
3030
3031 /* count may include a LF at end of string */
3032 if (buf[cnt-1] == '\n')
3033 cnt--;
3034
3035 ret = kstrtoul(buf, 0, &val);
3036 if (ret || (val > 0x7f))
3037 return -EINVAL;
3038
3039 if (val)
3040 phba->cfg_oas_priority = (uint8_t)val;
3041 else
3042 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3043 return count;
3044 }
3045 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
3046 lpfc_oas_priority_show, lpfc_oas_priority_store);
3047
3048 /**
3049  * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
3050 * for Optimized Access Storage (OAS) operations.
3051 * @dev: class device that is converted into a Scsi_host.
3052 * @attr: device attribute, not used.
3053 * @buf: buffer for passing information.
3054 *
3055 * Returns:
3056  * size of formatted string.
3057 **/
3058 static ssize_t
3059 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
3060 char *buf)
3061 {
3062 struct Scsi_Host *shost = class_to_shost(dev);
3063 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3064
3065 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3066 wwn_to_u64(phba->cfg_oas_vpt_wwpn));
3067 }
3068
3069 /**
3070  * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
3071 * for Optimized Access Storage (OAS) operations.
3072 * @dev: class device that is converted into a Scsi_host.
3073 * @attr: device attribute, not used.
3074 * @buf: buffer for passing information.
3075 * @count: Size of the data buffer.
3076 *
3077 * Returns:
3078  * -EINVAL count is invalid or a wwpn byte is invalid
3079 * -EPERM oas is not supported by hba
3080 * value of count on success
3081 **/
3082 static ssize_t
3083 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
3084 const char *buf, size_t count)
3085 {
3086 struct Scsi_Host *shost = class_to_shost(dev);
3087 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3088 unsigned int cnt = count;
3089 uint8_t wwpn[WWN_SZ];
3090 int rc;
3091
3092 if (!phba->cfg_fof)
3093 return -EPERM;
3094
3095 /* count may include a LF at end of string */
3096 if (buf[cnt-1] == '\n')
3097 cnt--;
3098
3099 rc = lpfc_wwn_set(buf, cnt, wwpn);
3100 if (rc)
3101 return rc;
3102
3103 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3104 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3105 if (wwn_to_u64(wwpn) == 0)
3106 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
3107 else
3108 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
3109 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3110 if (phba->cfg_oas_priority == 0)
3111 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3112 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3113 return count;
3114 }
3115 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
3116 lpfc_oas_vpt_show, lpfc_oas_vpt_store);
3117
3118 /**
3119  * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
3120  * that will be applied to luns selected for
3121  * Optimized Access Storage (OAS) operations.
3122 * @dev: class device that is converted into a Scsi_host.
3123 * @attr: device attribute, not used.
3124 * @buf: buffer for passing information.
3125 *
3126 * Returns:
3127 * size of formatted string.
3128 **/
3129 static ssize_t
3130 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
3131 char *buf)
3132 {
3133 struct Scsi_Host *shost = class_to_shost(dev);
3134 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3135
3136 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
3137 }
3138
3139 /**
3140  * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
3141  * that will be applied to luns selected for
3142  * Optimized Access Storage (OAS) operations.
3143 * @dev: class device that is converted into a Scsi_host.
3144 * @attr: device attribute, not used.
3145 * @buf: buffer for passing information.
3146 * @count: Size of the data buffer.
3147 *
3148 * Returns:
3149  * -EINVAL the value is not 0 or 1
3150 * -EPERM oas is not supported by hba
3151 * value of count on success
3152 **/
3153 static ssize_t
3154 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
3155 const char *buf, size_t count)
3156 {
3157 struct Scsi_Host *shost = class_to_shost(dev);
3158 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3159 int val = 0;
3160
3161 if (!phba->cfg_fof)
3162 return -EPERM;
3163
3164 if (!isdigit(buf[0]))
3165 return -EINVAL;
3166
3167 if (sscanf(buf, "%i", &val) != 1)
3168 return -EINVAL;
3169
3170 if ((val != 0) && (val != 1))
3171 return -EINVAL;
3172
3173 phba->cfg_oas_lun_state = val;
3174 return strlen(buf);
3175 }
3176 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
3177 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
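/*
 * Example (editorial sketch): based on the attributes defined above, an OAS
 * lun is typically selected by writing the vport and target WWPNs and the
 * desired state before the lun itself is written, e.g. (sysfs paths and WWPN
 * values assumed for illustration):
 *
 *   echo 0x20000000c9abcdef > .../lpfc_xlane_vpt
 *   echo 0x10000000c9abcdef > .../lpfc_xlane_tgt
 *   echo 1                  > .../lpfc_xlane_lun_state
 *
 * followed by a write of the lun value, which is handled by
 * lpfc_oas_lun_store() further below (attribute name assumed to follow the
 * same lpfc_xlane_* naming scheme).
 */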
3178
3179 /**
3180 * lpfc_oas_lun_status_show - Return the status of the Optimized Access
3181 * Storage (OAS) lun returned by the
3182 * lpfc_oas_lun_show function.
3183 * @dev: class device that is converted into a Scsi_host.
3184 * @attr: device attribute, not used.
3185 * @buf: buffer for passing information.
3186 *
3187 * Returns:
3188 * size of formatted string.
3189 **/
3190 static ssize_t
3191 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
3192 char *buf)
3193 {
3194 struct Scsi_Host *shost = class_to_shost(dev);
3195 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3196
3197 if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
3198 return -EFAULT;
3199
3200 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
3201 }
3202 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
3203 lpfc_oas_lun_status_show, NULL);
3204
3205
3206 /**
3207 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
3208 * (OAS) operations.
3209 * @phba: lpfc_hba pointer.
3210  * @vpt_wwpn: vport wwpn by reference; @tgt_wwpn: target wwpn by reference.
3211 * @lun: the fc lun for setting oas state.
3212 * @oas_state: the oas state to be set to the lun.
3213 *
3214 * Returns:
3215 * SUCCESS : 0
3216 * -EPERM OAS is not enabled or not supported by this port.
3217 *
3218 */
3219 static ssize_t
3220 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3221 uint8_t tgt_wwpn[], uint64_t lun,
3222 uint32_t oas_state, uint8_t pri)
3223 {
3224
3225 int rc = 0;
3226
3227 if (!phba->cfg_fof)
3228 return -EPERM;
3229
3230 if (oas_state) {
3231 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3232 (struct lpfc_name *)tgt_wwpn,
3233 lun, pri))
3234 rc = -ENOMEM;
3235 } else {
3236 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3237 (struct lpfc_name *)tgt_wwpn, lun, pri);
3238 }
3239 return rc;
3240
3241 }
3242
3243 /**
3244 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
3245 * Access Storage (OAS) operations.
3246 * @phba: lpfc_hba pointer.
3247 * @vpt_wwpn: wwpn of the vport associated with the returned lun
3248 * @tgt_wwpn: wwpn of the target associated with the returned lun
3249 * @lun_status: status of the lun returned lun
3250 *
3251 * Returns the first or next lun enabled for OAS operations for the vport/target
3252  * specified. If a lun is found, its vport wwpn, target wwpn and status are
3253  * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
3254 *
3255 * Return:
3256 * lun that is OAS enabled for the vport/target
3257 * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
3258 */
3259 static uint64_t
3260 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3261 uint8_t tgt_wwpn[], uint32_t *lun_status,
3262 uint32_t *lun_pri)
3263 {
3264 uint64_t found_lun;
3265
3266 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
3267 return NOT_OAS_ENABLED_LUN;
3268 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
3269 phba->sli4_hba.oas_next_vpt_wwpn,
3270 (struct lpfc_name *)
3271 phba->sli4_hba.oas_next_tgt_wwpn,
3272 &phba->sli4_hba.oas_next_lun,
3273 (struct lpfc_name *)vpt_wwpn,
3274 (struct lpfc_name *)tgt_wwpn,
3275 &found_lun, lun_status, lun_pri))
3276 return found_lun;
3277 else
3278 return NOT_OAS_ENABLED_LUN;
3279 }
3280
3281 /**
3282 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
3283 * @phba: lpfc_hba pointer.
3284 * @vpt_wwpn: vport wwpn by reference.
3285 * @tgt_wwpn: target wwpn by reference.
3286 * @lun: the fc lun for setting oas state.
3287 * @oas_state: the oas state to be set to the oas_lun.
3288 *
3289 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
3290 * a lun for OAS operations.
3291 *
3292 * Return:
3293 * SUCCESS: 0
3294  * -ENOMEM: failed to enable a lun for OAS operations
3295 * -EPERM: OAS is not enabled
3296 */
3297 static ssize_t
3298 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3299 uint8_t tgt_wwpn[], uint64_t lun,
3300 uint32_t oas_state, uint8_t pri)
3301 {
3302
3303 int rc;
3304
3305 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
3306 oas_state, pri);
3307 return rc;
3308 }
3309
3310 /**
3311 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
3312 * @dev: class device that is converted into a Scsi_host.
3313 * @attr: device attribute, not used.
3314 * @buf: buffer for passing information.
3315 *
3316 * This routine returns a lun enabled for OAS each time the function
3317 * is called.
3318 *
3319 * Returns:
3320 * SUCCESS: size of formatted string.
3321 * -EFAULT: target or vport wwpn was not set properly.
3322 * -EPERM: oas is not enabled.
3323 **/
3324 static ssize_t
3325 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
3326 char *buf)
3327 {
3328 struct Scsi_Host *shost = class_to_shost(dev);
3329 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3330
3331 uint64_t oas_lun;
3332 int len = 0;
3333
3334 if (!phba->cfg_fof)
3335 return -EPERM;
3336
3337 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3338 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
3339 return -EFAULT;
3340
3341 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3342 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
3343 return -EFAULT;
3344
3345 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
3346 phba->cfg_oas_tgt_wwpn,
3347 &phba->cfg_oas_lun_status,
3348 &phba->cfg_oas_priority);
3349 if (oas_lun != NOT_OAS_ENABLED_LUN)
3350 phba->cfg_oas_flags |= OAS_LUN_VALID;
3351
3352 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
3353
3354 return len;
3355 }
3356
3357 /**
3358  * lpfc_oas_lun_store - Sets the OAS state for a lun
3359  * @dev: class device that is converted into a Scsi_host.
3360  * @attr: device attribute, not used.
3361  * @buf: buffer for passing information.
 * @count: size of the data in the buffer.
3362  *
3363  * This function sets the OAS state for a lun. Before this function is called,
3364  * the vport wwpn, target wwpn, and oas state need to be set.
3365  *
3366  * Returns:
3367  * SUCCESS: size of the buffer (count).
3368  * -EFAULT: target or vport wwpn was not set properly.
3369  * -EPERM: oas is not enabled.
3370  * -EINVAL: the lun could not be parsed from the buffer.
3371 **/
3372 static ssize_t
3373 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
3374 const char *buf, size_t count)
3375 {
3376 struct Scsi_Host *shost = class_to_shost(dev);
3377 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3378 uint64_t scsi_lun;
3379 uint32_t pri;
3380 ssize_t rc;
3381
3382 if (!phba->cfg_fof)
3383 return -EPERM;
3384
3385 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3386 return -EFAULT;
3387
3388 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3389 return -EFAULT;
3390
3391 if (!isdigit(buf[0]))
3392 return -EINVAL;
3393
3394 if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
3395 return -EINVAL;
3396
3397 pri = phba->cfg_oas_priority;
3398 if (pri == 0)
3399 pri = phba->cfg_XLanePriority;
3400
3401 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3402 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
3403 "priority 0x%x with oas state %d\n",
3404 wwn_to_u64(phba->cfg_oas_vpt_wwpn),
3405 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
3406 pri, phba->cfg_oas_lun_state);
3407
3408 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
3409 phba->cfg_oas_tgt_wwpn, scsi_lun,
3410 phba->cfg_oas_lun_state, pri);
3411 if (rc)
3412 return rc;
3413
3414 return count;
3415 }
3416 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
3417 lpfc_oas_lun_show, lpfc_oas_lun_store);
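/*
 * Illustrative usage sketch (not from the original source; the host number
 * is a placeholder).  The vport WWPN, target WWPN and desired OAS state are
 * assumed to have been staged first through the companion lpfc_xlane_*
 * attributes defined elsewhere in this file:
 *
 *   echo "0x0" > /sys/class/scsi_host/host0/lpfc_xlane_lun
 *   cat /sys/class/scsi_host/host0/lpfc_xlane_lun_status
 *
 * The first command requests the staged OAS state for LUN 0x0; the second
 * reads back the status of that request.
 */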
3418
3419 int lpfc_enable_nvmet_cnt;
3420 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
3421 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3422 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3423 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
3424 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
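/*
 * Illustrative example (the WWPN below is a placeholder):
 *   modprobe lpfc lpfc_enable_nvmet=0x10000090fa942779
 * configures the HBA port with that WWPN to run as an NVME target.
 */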
3425
3426 static int lpfc_poll = 0;
3427 module_param(lpfc_poll, int, S_IRUGO);
3428 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
3429 " 0 - none,"
3430 		 " 1 - poll with interrupts enabled,"
3431 " 3 - poll and disable FCP ring interrupts");
3432
3433 static DEVICE_ATTR_RW(lpfc_poll);
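/*
 * Illustrative example: "modprobe lpfc lpfc_poll=3" selects polled FCP
 * completions with ring interrupts disabled; the mode can also be changed
 * at runtime through the lpfc_poll sysfs attribute on the Scsi_Host.
 */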
3434
3435 int lpfc_no_hba_reset_cnt;
3436 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3437 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3438 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3439 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
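/*
 * Illustrative example (the WWPN below is a placeholder):
 *   modprobe lpfc lpfc_no_hba_reset=0x10000090fa942779
 * keeps the driver from resetting the HBA with that WWPN.
 */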
3440
3441 LPFC_ATTR(sli_mode, 0, 0, 3,
3442 "SLI mode selector:"
3443 " 0 - auto (SLI-3 if supported),"
3444 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
3445 " 3 - select SLI-3");
3446
3447 LPFC_ATTR_R(enable_npiv, 1, 0, 1,
3448 "Enable NPIV functionality");
3449
3450 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
3451 "FCF Fast failover=1 Priority failover=2");
3452
3453 /*
3454 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
3455 # 0x0 = disabled, XRI/OXID use not tracked.
3456 # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
3457 # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
3458 */
3459 LPFC_ATTR_R(enable_rrq, 2, 0, 2,
3460 "Enable RRQ functionality");
3461
3462 /*
3463 # lpfc_suppress_link_up: Bring link up at initialization
3464 # 0x0 = bring link up (issue MBX_INIT_LINK)
3465 # 0x1 = do NOT bring link up at initialization (MBX_INIT_LINK)
3466 # 0x2 = never bring up link
3467 # Default value is 0.
3468 */
3469 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
3470 LPFC_DELAY_INIT_LINK_INDEFINITELY,
3471 "Suppress Link Up at initialization");
3472
3473 static ssize_t
3474 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
3475 {
3476 struct Scsi_Host *shost = class_to_shost(dev);
3477 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3478
3479 return scnprintf(buf, PAGE_SIZE, "%d\n",
3480 phba->sli4_hba.pc_sli4_params.pls);
3481 }
3482 static DEVICE_ATTR(pls, 0444,
3483 lpfc_pls_show, NULL);
3484
3485 static ssize_t
3486 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
3487 {
3488 struct Scsi_Host *shost = class_to_shost(dev);
3489 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3490
3491 return scnprintf(buf, PAGE_SIZE, "%d\n",
3492 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
3493 }
3494 static DEVICE_ATTR(pt, 0444,
3495 lpfc_pt_show, NULL);
3496
3497 /*
3498 # lpfc_iocb_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
3499 # 1 - (1024)
3500 # 2 - (2048)
3501 # 3 - (3072)
3502 # 4 - (4096)
3503 # 5 - (5120)
3504 */
3505 static ssize_t
3506 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3507 {
3508 struct Scsi_Host *shost = class_to_shost(dev);
3509 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3510
3511 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
3512 }
3513
3514 static DEVICE_ATTR(iocb_hw, S_IRUGO,
3515 lpfc_iocb_hw_show, NULL);
3516 static ssize_t
3517 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3518 {
3519 struct Scsi_Host *shost = class_to_shost(dev);
3520 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3521 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3522
3523 return scnprintf(buf, PAGE_SIZE, "%d\n",
3524 pring ? pring->txq_max : 0);
3525 }
3526
3527 static DEVICE_ATTR(txq_hw, S_IRUGO,
3528 lpfc_txq_hw_show, NULL);
3529 static ssize_t
3530 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
3531 char *buf)
3532 {
3533 struct Scsi_Host *shost = class_to_shost(dev);
3534 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3535 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3536
3537 return scnprintf(buf, PAGE_SIZE, "%d\n",
3538 pring ? pring->txcmplq_max : 0);
3539 }
3540
3541 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
3542 lpfc_txcmplq_hw_show, NULL);
3543
3544 /*
3545 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
3546 # until the timer expires. Value range is [0,255]. Default value is 30.
3547 */
3548 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3549 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
3550 module_param(lpfc_nodev_tmo, int, 0);
3551 MODULE_PARM_DESC(lpfc_nodev_tmo,
3552 "Seconds driver will hold I/O waiting "
3553 "for a device to come back");
3554
3555 /**
3556 * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
3557 * @dev: class converted to a Scsi_host structure.
3558 * @attr: device attribute, not used.
3559 * @buf: on return contains the dev loss timeout in decimal.
3560 *
3561 * Returns: size of formatted string.
3562 **/
3563 static ssize_t
3564 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
3565 char *buf)
3566 {
3567 struct Scsi_Host *shost = class_to_shost(dev);
3568 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3569
3570 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
3571 }
3572
3573 /**
3574 * lpfc_nodev_tmo_init - Set the hba nodev timeout value
3575 * @vport: lpfc vport structure pointer.
3576 * @val: contains the nodev timeout value.
3577 *
3578 * Description:
3579  * If the devloss tmo is already set then nodev tmo is set to devloss tmo and
3580  * zero is returned (a kernel message is logged if val differs from the default).
3581 * Else if val is in range then nodev tmo and devloss tmo are set to val.
3582 * Otherwise nodev tmo is set to the default value.
3583 *
3584 * Returns:
3585 * zero if already set or if val is in range
3586 * -EINVAL val out of range
3587 **/
3588 static int
3589 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
3590 {
3591 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
3592 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
3593 if (val != LPFC_DEF_DEVLOSS_TMO)
3594 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3595 "0407 Ignoring lpfc_nodev_tmo module "
3596 "parameter because lpfc_devloss_tmo "
3597 "is set.\n");
3598 return 0;
3599 }
3600
3601 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3602 vport->cfg_nodev_tmo = val;
3603 vport->cfg_devloss_tmo = val;
3604 return 0;
3605 }
3606 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3607 "0400 lpfc_nodev_tmo attribute cannot be set to"
3608 " %d, allowed range is [%d, %d]\n",
3609 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3610 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3611 return -EINVAL;
3612 }
3613
3614 /**
3615 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
3616 * @vport: lpfc vport structure pointer.
3617 *
3618 * Description:
3619 * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
3620 **/
3621 static void
3622 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3623 {
3624 struct Scsi_Host *shost;
3625 struct lpfc_nodelist *ndlp;
3626 #if (IS_ENABLED(CONFIG_NVME_FC))
3627 struct lpfc_nvme_rport *rport;
3628 struct nvme_fc_remote_port *remoteport = NULL;
3629 #endif
3630
3631 shost = lpfc_shost_from_vport(vport);
3632 spin_lock_irq(shost->host_lock);
3633 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3634 if (!NLP_CHK_NODE_ACT(ndlp))
3635 continue;
3636 if (ndlp->rport)
3637 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3638 #if (IS_ENABLED(CONFIG_NVME_FC))
3639 spin_lock(&vport->phba->hbalock);
3640 rport = lpfc_ndlp_get_nrport(ndlp);
3641 if (rport)
3642 remoteport = rport->remoteport;
3643 spin_unlock(&vport->phba->hbalock);
3644 if (rport && remoteport)
3645 nvme_fc_set_remoteport_devloss(remoteport,
3646 vport->cfg_devloss_tmo);
3647 #endif
3648 }
3649 spin_unlock_irq(shost->host_lock);
3650 }
3651
3652 /**
3653 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
3654 * @vport: lpfc vport structure pointer.
3655 * @val: contains the tmo value.
3656 *
3657 * Description:
3658 * If the devloss tmo is already set or the vport dev loss tmo has changed
3659 * then a kernel error message is printed and zero is returned.
3660 * Else if val is in range then nodev tmo and devloss tmo are set to val.
3661 * Otherwise nodev tmo is set to the default value.
3662 *
3663 * Returns:
3664 * zero if already set or if val is in range
3665 * -EINVAL val out of range
3666 **/
3667 static int
3668 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
3669 {
3670 if (vport->dev_loss_tmo_changed ||
3671 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
3672 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3673 "0401 Ignoring change to lpfc_nodev_tmo "
3674 "because lpfc_devloss_tmo is set.\n");
3675 return 0;
3676 }
3677 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3678 vport->cfg_nodev_tmo = val;
3679 vport->cfg_devloss_tmo = val;
3680 /*
3681 * For compat: set the fc_host dev loss so new rports
3682 * will get the value.
3683 */
3684 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3685 lpfc_update_rport_devloss_tmo(vport);
3686 return 0;
3687 }
3688 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3689 "0403 lpfc_nodev_tmo attribute cannot be set to "
3690 "%d, allowed range is [%d, %d]\n",
3691 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3692 return -EINVAL;
3693 }
3694
3695 lpfc_vport_param_store(nodev_tmo)
3696
3697 static DEVICE_ATTR_RW(lpfc_nodev_tmo);
3698
3699 /*
3700 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
3701 # disappear until the timer expires. Value range is [0,255]. Default
3702 # value is 30.
3703 */
3704 module_param(lpfc_devloss_tmo, int, S_IRUGO);
3705 MODULE_PARM_DESC(lpfc_devloss_tmo,
3706 "Seconds driver will hold I/O waiting "
3707 "for a device to come back");
3708 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
3709 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
3710 lpfc_vport_param_show(devloss_tmo)
3711
3712 /**
3713 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
3714 * @vport: lpfc vport structure pointer.
3715 * @val: contains the tmo value.
3716 *
3717 * Description:
3718 * If val is in a valid range then set the vport nodev tmo,
3719 * devloss tmo, also set the vport dev loss tmo changed flag.
3720 * Else a kernel error message is printed.
3721 *
3722 * Returns:
3723 * zero if val is in range
3724 * -EINVAL val out of range
3725 **/
3726 static int
3727 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3728 {
3729 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3730 vport->cfg_nodev_tmo = val;
3731 vport->cfg_devloss_tmo = val;
3732 vport->dev_loss_tmo_changed = 1;
3733 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3734 lpfc_update_rport_devloss_tmo(vport);
3735 return 0;
3736 }
3737
3738 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3739 "0404 lpfc_devloss_tmo attribute cannot be set to "
3740 "%d, allowed range is [%d, %d]\n",
3741 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3742 return -EINVAL;
3743 }
3744
3745 lpfc_vport_param_store(devloss_tmo)
3746 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
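/*
 * Illustrative example (host number is a placeholder):
 *   echo 60 > /sys/class/scsi_host/host0/lpfc_devloss_tmo
 * raises the devloss timeout for every remote port on that host to 60
 * seconds and marks the vport so later lpfc_nodev_tmo changes are ignored.
 */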
3747
3748 /*
3749  * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
3750 * lpfc_suppress_rsp = 0 Disable
3751 * lpfc_suppress_rsp = 1 Enable (default)
3752 *
3753 */
3754 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3755 	    "Enable suppress rsp feature if firmware supports it");
3756
3757 /*
3758 * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
3759  * lpfc_nvmet_mrq = 0  driver will calculate optimal number of RQ pairs
3760 * lpfc_nvmet_mrq = 1 use a single RQ pair
3761 * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
3762 *
3763 */
3764 LPFC_ATTR_R(nvmet_mrq,
3765 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3766 "Specify number of RQ pairs for processing NVMET cmds");
3767
3768 /*
3769 * lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post
3770 * to each NVMET RQ. Range 64 to 2048, default is 512.
3771 */
3772 LPFC_ATTR_R(nvmet_mrq_post,
3773 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3774 LPFC_NVMET_RQE_DEF_COUNT,
3775 "Specify number of RQ buffers to initially post");
3776
3777 /*
3778 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3779 * Supported Values: 1 - register just FCP
3780 * 3 - register both FCP and NVME
3781 * Supported values are [1,3]. Default value is 3
3782 */
3783 LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
3784 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
3785 "Enable FC4 Protocol support - FCP / NVME");
3786
3787 /*
3788 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3789 # deluged with LOTS of information.
3790 # You can set a bit mask to record specific types of verbose messages:
3791 # See lpfc_logmsg.h for definitions.
3792 */
3793 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3794 "Verbose logging bit-mask");
3795
3796 /*
3797 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3798 # objects that have been registered with the nameserver after login.
3799 */
3800 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3801 "Deregister nameserver objects before LOGO");
3802
3803 /*
3804 # lun_queue_depth: This parameter is used to limit the number of outstanding
3805 # commands per FCP LUN.
3806 */
3807 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3808 "Max number of FCP commands we can queue to a specific LUN");
3809
3810 /*
3811 # tgt_queue_depth: This parameter is used to limit the number of outstanding
3812 # commands per target port. Value range is [10,65535]. Default value is 65535.
3813 */
3814 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
3815 module_param(lpfc_tgt_queue_depth, uint, 0444);
3816 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
3817 lpfc_vport_param_show(tgt_queue_depth);
3818 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
3819 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
3820
3821 /**
3822  * lpfc_tgt_queue_depth_set: Sets the max target queue depth for a vport.
3823  * @vport: lpfc vport structure pointer.
3824 * @val: integer attribute value.
3825 *
3826 * Description: Sets the parameter to the new value.
3827 *
3828 * Returns:
3829 * zero on success
3830 * -EINVAL if val is invalid
3831 */
3832 static int
3833 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
3834 {
3835 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3836 struct lpfc_nodelist *ndlp;
3837
3838 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
3839 return -EINVAL;
3840
3841 if (val == vport->cfg_tgt_queue_depth)
3842 return 0;
3843
3844 spin_lock_irq(shost->host_lock);
3845 vport->cfg_tgt_queue_depth = val;
3846
3847 /* Next loop thru nodelist and change cmd_qdepth */
3848 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3849 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3850
3851 spin_unlock_irq(shost->host_lock);
3852 return 0;
3853 }
3854
3855 lpfc_vport_param_store(tgt_queue_depth);
3856 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
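/*
 * Illustrative example (host number is a placeholder; 128 is assumed to be
 * within the supported range):
 *   echo 128 > /sys/class/scsi_host/host0/lpfc_tgt_queue_depth
 * limits every target on the vport to 128 outstanding FCP commands.
 */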
3857
3858 /*
3859 # hba_queue_depth: This parameter is used to limit the number of outstanding
3860 # commands per lpfc HBA. Value range is [32,8192]. If this parameter
3861 # value is greater than the maximum number of exchanges supported by the HBA,
3862 # then maximum number of exchanges supported by the HBA is used to determine
3863 # then the maximum number of exchanges supported by the HBA is used to determine
3864 */
3865 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
3866 "Max number of FCP commands we can queue to a lpfc HBA");
3867
3868 /*
3869 # peer_port_login: This parameter allows/prevents logins
3870 # between peer ports hosted on the same physical port.
3871 # When this parameter is set 0 peer ports of same physical port
3872 # are not allowed to login to each other.
3873 # When this parameter is set 1 peer ports of same physical port
3874 # are allowed to login to each other.
3875 # Default value of this parameter is 0.
3876 */
3877 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
3878 "Allow peer ports on the same physical port to login to each "
3879 "other.");
3880
3881 /*
3882 # restrict_login: This parameter allows/prevents logins
3883 # between Virtual Ports and remote initiators.
3884 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from
3885 # other initiators and will attempt to PLOGI all remote ports.
3886 # When this parameter is set (1) Virtual Ports will reject PLOGIs from
3887 # remote ports and will not attempt to PLOGI to other initiators.
3888 # This parameter does not restrict to the physical port.
3889 # This parameter does not restrict logins to Fabric resident remote ports.
3890 # Default value of this parameter is 1.
3891 */
3892 static int lpfc_restrict_login = 1;
3893 module_param(lpfc_restrict_login, int, S_IRUGO);
3894 MODULE_PARM_DESC(lpfc_restrict_login,
3895 "Restrict virtual ports login to remote initiators.");
3896 lpfc_vport_param_show(restrict_login);
3897
3898 /**
3899 * lpfc_restrict_login_init - Set the vport restrict login flag
3900 * @vport: lpfc vport structure pointer.
3901 * @val: contains the restrict login value.
3902 *
3903 * Description:
3904 * If val is not in a valid range then log a kernel error message and set
3905 * the vport restrict login to one.
3906 * If the port type is physical clear the restrict login flag and return.
3907 * Else set the restrict login flag to val.
3908 *
3909 * Returns:
3910 * zero if val is in range
3911 * -EINVAL val out of range
3912 **/
3913 static int
3914 lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
3915 {
3916 if (val < 0 || val > 1) {
3917 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3918 "0422 lpfc_restrict_login attribute cannot "
3919 "be set to %d, allowed range is [0, 1]\n",
3920 val);
3921 vport->cfg_restrict_login = 1;
3922 return -EINVAL;
3923 }
3924 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3925 vport->cfg_restrict_login = 0;
3926 return 0;
3927 }
3928 vport->cfg_restrict_login = val;
3929 return 0;
3930 }
3931
3932 /**
3933 * lpfc_restrict_login_set - Set the vport restrict login flag
3934 * @vport: lpfc vport structure pointer.
3935 * @val: contains the restrict login value.
3936 *
3937 * Description:
3938 * If val is not in a valid range then log a kernel error message and set
3939 * the vport restrict login to one.
3940 * If the port type is physical and the val is not zero log a kernel
3941 * error message, clear the restrict login flag and return zero.
3942 * Else set the restrict login flag to val.
3943 *
3944 * Returns:
3945 * zero if val is in range
3946 * -EINVAL val out of range
3947 **/
3948 static int
3949 lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
3950 {
3951 if (val < 0 || val > 1) {
3952 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3953 "0425 lpfc_restrict_login attribute cannot "
3954 "be set to %d, allowed range is [0, 1]\n",
3955 val);
3956 vport->cfg_restrict_login = 1;
3957 return -EINVAL;
3958 }
3959 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
3960 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3961 "0468 lpfc_restrict_login must be 0 for "
3962 "Physical ports.\n");
3963 vport->cfg_restrict_login = 0;
3964 return 0;
3965 }
3966 vport->cfg_restrict_login = val;
3967 return 0;
3968 }
3969 lpfc_vport_param_store(restrict_login);
3970 static DEVICE_ATTR_RW(lpfc_restrict_login);
3971
3972 /*
3973 # Some disk devices have a "select ID" or "select Target" capability.
3974 # From a protocol standpoint "select ID" usually means select the
3975 # Fibre channel "ALPA". In the FC-AL Profile there is an "informative
3976 # annex" which contains a table that maps a "select ID" (a number
3977 # between 0 and 7F) to an ALPA. By default, for compatibility with
3978 # older drivers, the lpfc driver scans this table from low ALPA to high
3979 # ALPA.
3980 #
3981 # Turning on the scan-down variable (on = 1, off = 0) will
3982 # cause the lpfc driver to use an inverted table, effectively
3983 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
3984 #
3985 # (Note: This "select ID" functionality is a LOOP ONLY characteristic
3986 # and will not work across a fabric. Also this parameter will take
3987 # effect only in the case when ALPA map is not available.)
3988 */
3989 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
3990 "Start scanning for devices from highest ALPA to lowest");
3991
3992 /*
3993 # lpfc_topology: link topology for init link
3994 # 0x0 = attempt loop mode then point-to-point
3995 # 0x01 = internal loopback mode
3996 # 0x02 = attempt point-to-point mode only
3997 # 0x04 = attempt loop mode only
3998 # 0x06 = attempt point-to-point mode then loop
3999 # Set point-to-point mode if you want to run as an N_Port.
4000 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
4001 # Default value is 0.
4002 */
4003 LPFC_ATTR(topology, 0, 0, 6,
4004 "Select Fibre Channel topology");
4005
4006 /**
4007  * lpfc_topology_store - Set the adapter's topology field
4008  * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: topology value, optionally prefixed with "nolip " to skip the lip.
4009  * @count: size of the data in the buffer.
4010 *
4011 * Description:
4012 * If val is in a valid range then set the adapter's topology field and
4013 * issue a lip; if the lip fails reset the topology to the old value.
4014 *
4015 * If the value is not in range log a kernel error message and return an error.
4016 *
4017 * Returns:
4018  * length of the buf if val is in range and the lip (when issued) succeeded
4019  * -EINVAL if val is out of range, the topology is not configurable on this
4020  * port, or the lip failed
4021 **/
4022 static ssize_t
4023 lpfc_topology_store(struct device *dev, struct device_attribute *attr,
4024 const char *buf, size_t count)
4025 {
4026 struct Scsi_Host *shost = class_to_shost(dev);
4027 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4028 struct lpfc_hba *phba = vport->phba;
4029 int val = 0;
4030 int nolip = 0;
4031 const char *val_buf = buf;
4032 int err;
4033 uint32_t prev_val;
4034
4035 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4036 nolip = 1;
4037 val_buf = &buf[strlen("nolip ")];
4038 }
4039
4040 if (!isdigit(val_buf[0]))
4041 return -EINVAL;
4042 if (sscanf(val_buf, "%i", &val) != 1)
4043 return -EINVAL;
4044
4045 if (val >= 0 && val <= 6) {
4046 prev_val = phba->cfg_topology;
4047 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
4048 val == 4) {
4049 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4050 "3113 Loop mode not supported at speed %d\n",
4051 val);
4052 return -EINVAL;
4053 }
4054 /*
4055 * The 'topology' is not a configurable parameter if :
4056 * - persistent topology enabled
4057 * - G7/G6 with no private loop support
4058 */
4059
4060 if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
4061 (!phba->sli4_hba.pc_sli4_params.pls &&
4062 (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
4063 phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) &&
4064 val == 4) {
4065 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4066 "3114 Loop mode not supported\n");
4067 return -EINVAL;
4068 }
4069 phba->cfg_topology = val;
4070 if (nolip)
4071 return strlen(buf);
4072
4073 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4074 "3054 lpfc_topology changed from %d to %d\n",
4075 prev_val, val);
4076 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
4077 phba->fc_topology_changed = 1;
4078 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4079 if (err) {
4080 phba->cfg_topology = prev_val;
4081 return -EINVAL;
4082 } else
4083 return strlen(buf);
4084 }
4085 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4086 "%d:0467 lpfc_topology attribute cannot be set to %d, "
4087 "allowed range is [0, 6]\n",
4088 phba->brd_no, val);
4089 return -EINVAL;
4090 }
4091
4092 lpfc_param_show(topology)
4093 static DEVICE_ATTR_RW(lpfc_topology);
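/*
 * Illustrative example (host number is a placeholder): select
 * point-to-point only topology without issuing a lip:
 *   echo "nolip 2" > /sys/class/scsi_host/host0/lpfc_topology
 * Omitting the "nolip " prefix makes the driver issue a lip so the new
 * topology takes effect immediately.
 */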
4094
4095 /**
4096 * lpfc_static_vport_show: Read callback function for
4097 * lpfc_static_vport sysfs file.
4098 * @dev: Pointer to class device object.
4099 * @attr: device attribute structure.
4100 * @buf: Data buffer.
4101 *
4102  * This function is the read call back function for the
4103  * lpfc_static_vport sysfs file. The lpfc_static_vport
4104  * sysfs file reports whether the vport is a static vport (1) or not (0).
4105 **/
4106 static ssize_t
4107 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
4108 char *buf)
4109 {
4110 struct Scsi_Host *shost = class_to_shost(dev);
4111 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4112 if (vport->vport_flag & STATIC_VPORT)
4113 sprintf(buf, "1\n");
4114 else
4115 sprintf(buf, "0\n");
4116
4117 return strlen(buf);
4118 }
4119
4120 /*
4121  * Sysfs attribute reporting whether the vport is a static vport.
4122 */
4123 static DEVICE_ATTR_RO(lpfc_static_vport);
4124
4125 /**
4126  * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
4127  * @dev: Pointer to class device.
 * @attr: Device attribute, not used.
4128  * @buf: Data buffer.
4129  * @count: Size of the data buffer.
4130  *
4131  * This function gets called when a user writes to the lpfc_stat_data_ctrl
4132  * sysfs file. It parses the command written to the sysfs file
4133  * and takes the appropriate action. These commands are used for controlling
4134  * driver statistical data collection.
4135  * The following commands are handled:
4136 *
4137 * setbucket <bucket_type> <base> <step>
4138 * = Set the latency buckets.
4139 * destroybucket = destroy all the buckets.
4140 * start = start data collection
4141 * stop = stop data collection
4142 * reset = reset the collected data
4143 **/
4144 static ssize_t
4145 lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
4146 const char *buf, size_t count)
4147 {
4148 struct Scsi_Host *shost = class_to_shost(dev);
4149 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4150 struct lpfc_hba *phba = vport->phba;
4151 #define LPFC_MAX_DATA_CTRL_LEN 1024
4152 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
4153 unsigned long i;
4154 char *str_ptr, *token;
4155 struct lpfc_vport **vports;
4156 struct Scsi_Host *v_shost;
4157 char *bucket_type_str, *base_str, *step_str;
4158 unsigned long base, step, bucket_type;
4159
4160 if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
4161 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
4162 return -EINVAL;
4163
4164 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
4165 str_ptr = &bucket_data[0];
4166 /* Ignore this token - this is command token */
4167 token = strsep(&str_ptr, "\t ");
4168 if (!token)
4169 return -EINVAL;
4170
4171 bucket_type_str = strsep(&str_ptr, "\t ");
4172 if (!bucket_type_str)
4173 return -EINVAL;
4174
4175 if (!strncmp(bucket_type_str, "linear", strlen("linear")))
4176 bucket_type = LPFC_LINEAR_BUCKET;
4177 else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
4178 bucket_type = LPFC_POWER2_BUCKET;
4179 else
4180 return -EINVAL;
4181
4182 base_str = strsep(&str_ptr, "\t ");
4183 if (!base_str)
4184 return -EINVAL;
4185 base = simple_strtoul(base_str, NULL, 0);
4186
4187 step_str = strsep(&str_ptr, "\t ");
4188 if (!step_str)
4189 return -EINVAL;
4190 step = simple_strtoul(step_str, NULL, 0);
4191 if (!step)
4192 return -EINVAL;
4193
4194 /* Block the data collection for every vport */
4195 vports = lpfc_create_vport_work_array(phba);
4196 if (vports == NULL)
4197 return -ENOMEM;
4198
4199 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4200 v_shost = lpfc_shost_from_vport(vports[i]);
4201 spin_lock_irq(v_shost->host_lock);
4202 /* Block and reset data collection */
4203 vports[i]->stat_data_blocked = 1;
4204 if (vports[i]->stat_data_enabled)
4205 lpfc_vport_reset_stat_data(vports[i]);
4206 spin_unlock_irq(v_shost->host_lock);
4207 }
4208
4209 /* Set the bucket attributes */
4210 phba->bucket_type = bucket_type;
4211 phba->bucket_base = base;
4212 phba->bucket_step = step;
4213
4214 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4215 v_shost = lpfc_shost_from_vport(vports[i]);
4216
4217 /* Unblock data collection */
4218 spin_lock_irq(v_shost->host_lock);
4219 vports[i]->stat_data_blocked = 0;
4220 spin_unlock_irq(v_shost->host_lock);
4221 }
4222 lpfc_destroy_vport_work_array(phba, vports);
4223 return strlen(buf);
4224 }
4225
4226 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
4227 vports = lpfc_create_vport_work_array(phba);
4228 if (vports == NULL)
4229 return -ENOMEM;
4230
4231 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4232 v_shost = lpfc_shost_from_vport(vports[i]);
4233 			spin_lock_irq(v_shost->host_lock);
4234 			vports[i]->stat_data_blocked = 1;
4235 			lpfc_free_bucket(vports[i]);
4236 			vports[i]->stat_data_enabled = 0;
4237 			vports[i]->stat_data_blocked = 0;
4238 			spin_unlock_irq(v_shost->host_lock);
4239 }
4240 lpfc_destroy_vport_work_array(phba, vports);
4241 phba->bucket_type = LPFC_NO_BUCKET;
4242 phba->bucket_base = 0;
4243 phba->bucket_step = 0;
4244 return strlen(buf);
4245 }
4246
4247 if (!strncmp(buf, "start", strlen("start"))) {
4248 /* If no buckets configured return error */
4249 if (phba->bucket_type == LPFC_NO_BUCKET)
4250 return -EINVAL;
4251 spin_lock_irq(shost->host_lock);
4252 if (vport->stat_data_enabled) {
4253 spin_unlock_irq(shost->host_lock);
4254 return strlen(buf);
4255 }
4256 lpfc_alloc_bucket(vport);
4257 vport->stat_data_enabled = 1;
4258 spin_unlock_irq(shost->host_lock);
4259 return strlen(buf);
4260 }
4261
4262 if (!strncmp(buf, "stop", strlen("stop"))) {
4263 spin_lock_irq(shost->host_lock);
4264 if (vport->stat_data_enabled == 0) {
4265 spin_unlock_irq(shost->host_lock);
4266 return strlen(buf);
4267 }
4268 lpfc_free_bucket(vport);
4269 vport->stat_data_enabled = 0;
4270 spin_unlock_irq(shost->host_lock);
4271 return strlen(buf);
4272 }
4273
4274 if (!strncmp(buf, "reset", strlen("reset"))) {
4275 if ((phba->bucket_type == LPFC_NO_BUCKET)
4276 || !vport->stat_data_enabled)
4277 return strlen(buf);
4278 spin_lock_irq(shost->host_lock);
4279 vport->stat_data_blocked = 1;
4280 lpfc_vport_reset_stat_data(vport);
4281 vport->stat_data_blocked = 0;
4282 spin_unlock_irq(shost->host_lock);
4283 return strlen(buf);
4284 }
4285 return -EINVAL;
4286 }
4287
4288
4289 /**
4290 * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
4291  * @dev: Pointer to class device object.
 * @attr: Device attribute, not used.
4292  * @buf: Data buffer.
4293  *
4294  * This function is the read call back function for the
4295  * lpfc_stat_data_ctrl sysfs file. It reports the
4296  * current statistical data collection state.
4297 **/
4298 static ssize_t
4299 lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
4300 char *buf)
4301 {
4302 struct Scsi_Host *shost = class_to_shost(dev);
4303 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4304 struct lpfc_hba *phba = vport->phba;
4305 int index = 0;
4306 int i;
4307 char *bucket_type;
4308 unsigned long bucket_value;
4309
4310 switch (phba->bucket_type) {
4311 case LPFC_LINEAR_BUCKET:
4312 bucket_type = "linear";
4313 break;
4314 case LPFC_POWER2_BUCKET:
4315 bucket_type = "power2";
4316 break;
4317 default:
4318 bucket_type = "No Bucket";
4319 break;
4320 }
4321
4322 sprintf(&buf[index], "Statistical Data enabled :%d, "
4323 "blocked :%d, Bucket type :%s, Bucket base :%d,"
4324 " Bucket step :%d\nLatency Ranges :",
4325 vport->stat_data_enabled, vport->stat_data_blocked,
4326 bucket_type, phba->bucket_base, phba->bucket_step);
4327 index = strlen(buf);
4328 if (phba->bucket_type != LPFC_NO_BUCKET) {
4329 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4330 if (phba->bucket_type == LPFC_LINEAR_BUCKET)
4331 bucket_value = phba->bucket_base +
4332 phba->bucket_step * i;
4333 else
4334 bucket_value = phba->bucket_base +
4335 (1 << i) * phba->bucket_step;
4336
4337 if (index + 10 > PAGE_SIZE)
4338 break;
4339 sprintf(&buf[index], "%08ld ", bucket_value);
4340 index = strlen(buf);
4341 }
4342 }
4343 sprintf(&buf[index], "\n");
4344 return strlen(buf);
4345 }
4346
4347 /*
4348 * Sysfs attribute to control the statistical data collection.
4349 */
4350 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
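/*
 * Illustrative example (host number, base and step are placeholders):
 *   echo "setbucket linear 0 100" > /sys/class/scsi_host/host0/lpfc_stat_data_ctrl
 *   echo "start" > /sys/class/scsi_host/host0/lpfc_stat_data_ctrl
 *   cat /sys/class/scsi_host/host0/lpfc_stat_data_ctrl
 * configures linear latency buckets, starts collection, and reports the
 * current collection state and bucket ranges.
 */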
4351
4352 /*
4353 * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
4354 */
4355
4356 /*
4357 * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
4358 * for each target.
4359 */
4360 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
4361 #define MAX_STAT_DATA_SIZE_PER_TARGET \
4362 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
4363
4364
4365 /**
4366 * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
4367 * @filp: sysfs file
4368 * @kobj: Pointer to the kernel object
4369 * @bin_attr: Attribute object
4370  * @buf: Buffer pointer
4371 * @off: File offset
4372 * @count: Buffer size
4373 *
4374 * This function is the read call back function for lpfc_drvr_stat_data
4375  * sysfs file. This function exports the statistical data to user
4376 * applications.
4377 **/
4378 static ssize_t
4379 sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
4380 struct bin_attribute *bin_attr,
4381 char *buf, loff_t off, size_t count)
4382 {
4383 struct device *dev = container_of(kobj, struct device,
4384 kobj);
4385 struct Scsi_Host *shost = class_to_shost(dev);
4386 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4387 struct lpfc_hba *phba = vport->phba;
4388 int i = 0, index = 0;
4389 unsigned long nport_index;
4390 struct lpfc_nodelist *ndlp = NULL;
4391 nport_index = (unsigned long)off /
4392 MAX_STAT_DATA_SIZE_PER_TARGET;
4393
4394 if (!vport->stat_data_enabled || vport->stat_data_blocked
4395 || (phba->bucket_type == LPFC_NO_BUCKET))
4396 return 0;
4397
4398 spin_lock_irq(shost->host_lock);
4399 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4400 if (!NLP_CHK_NODE_ACT(ndlp) || !ndlp->lat_data)
4401 continue;
4402
4403 if (nport_index > 0) {
4404 nport_index--;
4405 continue;
4406 }
4407
4408 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
4409 > count)
4410 break;
4411
4412 if (!ndlp->lat_data)
4413 continue;
4414
4415 /* Print the WWN */
4416 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
4417 ndlp->nlp_portname.u.wwn[0],
4418 ndlp->nlp_portname.u.wwn[1],
4419 ndlp->nlp_portname.u.wwn[2],
4420 ndlp->nlp_portname.u.wwn[3],
4421 ndlp->nlp_portname.u.wwn[4],
4422 ndlp->nlp_portname.u.wwn[5],
4423 ndlp->nlp_portname.u.wwn[6],
4424 ndlp->nlp_portname.u.wwn[7]);
4425
4426 index = strlen(buf);
4427
4428 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4429 sprintf(&buf[index], "%010u,",
4430 ndlp->lat_data[i].cmd_count);
4431 index = strlen(buf);
4432 }
4433 sprintf(&buf[index], "\n");
4434 index = strlen(buf);
4435 }
4436 spin_unlock_irq(shost->host_lock);
4437 return index;
4438 }
4439
4440 static struct bin_attribute sysfs_drvr_stat_data_attr = {
4441 .attr = {
4442 .name = "lpfc_drvr_stat_data",
4443 .mode = S_IRUSR,
4444 },
4445 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
4446 .read = sysfs_drvr_stat_data_read,
4447 .write = NULL,
4448 };
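/*
 * Illustrative example (host number is a placeholder): once collection has
 * been started through lpfc_stat_data_ctrl, the per-target counters can be
 * read as text, one line per target, "<WWPN>:" followed by comma-separated
 * per-bucket command counts:
 *   cat /sys/class/scsi_host/host0/lpfc_drvr_stat_data
 */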
4449
4450 /*
4451 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
4452 # connection.
4453 # Value is 0 (auto select) or a link speed supported by the adapter. Default value is 0.
4454 */
4455 /**
4456  * lpfc_link_speed_store - Set the adapter's link speed
4457  * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: link speed value, optionally prefixed with "nolip " to skip the lip.
4458  * @count: size of the data in the buffer.
4459 *
4460 * Description:
4461 * If val is in a valid range then set the adapter's link speed field and
4462 * issue a lip; if the lip fails reset the link speed to the old value.
4463 *
4464 * Notes:
4465 * If the value is not in range log a kernel error message and return an error.
4466 *
4467 * Returns:
4468  * length of the buf if val is in range and the lip (when issued) succeeded.
4469  * -EPERM if the firmware has forced the link speed on this port.
4470  * -EINVAL if val is out of range, unsupported, or the lip failed.
4471 **/
4472 static ssize_t
4473 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
4474 const char *buf, size_t count)
4475 {
4476 struct Scsi_Host *shost = class_to_shost(dev);
4477 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4478 struct lpfc_hba *phba = vport->phba;
4479 int val = LPFC_USER_LINK_SPEED_AUTO;
4480 int nolip = 0;
4481 const char *val_buf = buf;
4482 int err;
4483 uint32_t prev_val, if_type;
4484
4485 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4486 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4487 phba->hba_flag & HBA_FORCED_LINK_SPEED)
4488 return -EPERM;
4489
4490 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4491 nolip = 1;
4492 val_buf = &buf[strlen("nolip ")];
4493 }
4494
4495 if (!isdigit(val_buf[0]))
4496 return -EINVAL;
4497 if (sscanf(val_buf, "%i", &val) != 1)
4498 return -EINVAL;
4499
4500 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4501 "3055 lpfc_link_speed changed from %d to %d %s\n",
4502 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
4503
4504 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
4505 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
4506 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
4507 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
4508 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
4509 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
4510 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
4511 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
4512 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4513 "2879 lpfc_link_speed attribute cannot be set "
4514 "to %d. Speed is not supported by this port.\n",
4515 val);
4516 return -EINVAL;
4517 }
4518 if (val >= LPFC_USER_LINK_SPEED_16G &&
4519 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4521 "3112 lpfc_link_speed attribute cannot be set "
4522 "to %d. Speed is not supported in loop mode.\n",
4523 val);
4524 return -EINVAL;
4525 }
4526
4527 switch (val) {
4528 case LPFC_USER_LINK_SPEED_AUTO:
4529 case LPFC_USER_LINK_SPEED_1G:
4530 case LPFC_USER_LINK_SPEED_2G:
4531 case LPFC_USER_LINK_SPEED_4G:
4532 case LPFC_USER_LINK_SPEED_8G:
4533 case LPFC_USER_LINK_SPEED_16G:
4534 case LPFC_USER_LINK_SPEED_32G:
4535 case LPFC_USER_LINK_SPEED_64G:
4536 prev_val = phba->cfg_link_speed;
4537 phba->cfg_link_speed = val;
4538 if (nolip)
4539 return strlen(buf);
4540
4541 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4542 if (err) {
4543 phba->cfg_link_speed = prev_val;
4544 return -EINVAL;
4545 }
4546 return strlen(buf);
4547 default:
4548 break;
4549 }
4550
4551 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4552 "0469 lpfc_link_speed attribute cannot be set to %d, "
4553 "allowed values are [%s]\n",
4554 val, LPFC_LINK_SPEED_STRING);
4555 return -EINVAL;
4556
4557 }
4558
4559 static int lpfc_link_speed = 0;
4560 module_param(lpfc_link_speed, int, S_IRUGO);
4561 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
4562 lpfc_param_show(link_speed)
4563
4564 /**
4565 * lpfc_link_speed_init - Set the adapters link speed
4566 * @phba: lpfc_hba pointer.
4567 * @val: link speed value.
4568 *
4569 * Description:
4570 * If val is in a valid range then set the adapter's link speed field.
4571 *
4572 * Notes:
4573 * If the value is not in range log a kernel error message, clear the link
4574 * speed and return an error.
4575 *
4576 * Returns:
4577 * zero if val saved.
4578 * -EINVAL val out of range
4579 **/
4580 static int
4581 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
4582 {
4583 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
4584 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4585 "3111 lpfc_link_speed of %d cannot "
4586 "support loop mode, setting topology to default.\n",
4587 val);
4588 phba->cfg_topology = 0;
4589 }
4590
4591 switch (val) {
4592 case LPFC_USER_LINK_SPEED_AUTO:
4593 case LPFC_USER_LINK_SPEED_1G:
4594 case LPFC_USER_LINK_SPEED_2G:
4595 case LPFC_USER_LINK_SPEED_4G:
4596 case LPFC_USER_LINK_SPEED_8G:
4597 case LPFC_USER_LINK_SPEED_16G:
4598 case LPFC_USER_LINK_SPEED_32G:
4599 case LPFC_USER_LINK_SPEED_64G:
4600 phba->cfg_link_speed = val;
4601 return 0;
4602 default:
4603 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4604 "0405 lpfc_link_speed attribute cannot "
4605 "be set to %d, allowed values are "
4606 "["LPFC_LINK_SPEED_STRING"]\n", val);
4607 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
4608 return -EINVAL;
4609 }
4610 }
4611
4612 static DEVICE_ATTR_RW(lpfc_link_speed);
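/*
 * Illustrative example (host number is a placeholder; the numeric values
 * are assumed to map to the Gb rates listed above, and 16Gb is assumed to
 * be supported by the adapter):
 *   echo 16 > /sys/class/scsi_host/host0/lpfc_link_speed
 *   echo "nolip 0" > /sys/class/scsi_host/host0/lpfc_link_speed
 * The first command forces 16Gb and issues a lip; the second restores
 * auto-negotiation without bouncing the link.
 */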
4613
4614 /*
4615 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
4616 # 0 = aer disabled or not supported
4617 # 1 = aer supported and enabled (default)
4618 # Value range is [0,1]. Default value is 1.
4619 */
4620 LPFC_ATTR(aer_support, 1, 0, 1,
4621 "Enable PCIe device AER support");
4622 lpfc_param_show(aer_support)
4623
4624 /**
4625 * lpfc_aer_support_store - Set the adapter for aer support
4626 *
4627 * @dev: class device that is converted into a Scsi_host.
4628 * @attr: device attribute, not used.
4629 * @buf: containing enable or disable aer flag.
4630 * @count: unused variable.
4631 *
4632 * Description:
4633 * If the val is 1 and currently the device's AER capability was not
4634 * enabled, invoke the kernel's enable AER helper routine, trying to
4635 * enable the device's AER capability. If the helper routine enabling
4636 * AER returns success, update the device's cfg_aer_support flag to
4637 * indicate AER is supported by the device; otherwise, if the device
4638 * AER capability is already enabled to support AER, then do nothing.
4639 *
4640 * If the val is 0 and currently the device's AER support was enabled,
4641 * invoke the kernel's disable AER helper routine. After that, update
4642 * the device's cfg_aer_support flag to indicate AER is not supported
4643 * by the device; otherwise, if the device AER capability is already
4644 * disabled from supporting AER, then do nothing.
4645 *
4646 * Returns:
4647  * length of the buf on success if val is in range and the intended mode
4648 * is supported.
4649 * -EINVAL if val out of range or intended mode is not supported.
4650 **/
4651 static ssize_t
4652 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
4653 const char *buf, size_t count)
4654 {
4655 struct Scsi_Host *shost = class_to_shost(dev);
4656 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4657 struct lpfc_hba *phba = vport->phba;
4658 int val = 0, rc = -EINVAL;
4659
4660 if (!isdigit(buf[0]))
4661 return -EINVAL;
4662 if (sscanf(buf, "%i", &val) != 1)
4663 return -EINVAL;
4664
4665 switch (val) {
4666 case 0:
4667 if (phba->hba_flag & HBA_AER_ENABLED) {
4668 rc = pci_disable_pcie_error_reporting(phba->pcidev);
4669 if (!rc) {
4670 spin_lock_irq(&phba->hbalock);
4671 phba->hba_flag &= ~HBA_AER_ENABLED;
4672 spin_unlock_irq(&phba->hbalock);
4673 phba->cfg_aer_support = 0;
4674 rc = strlen(buf);
4675 } else
4676 rc = -EPERM;
4677 } else {
4678 phba->cfg_aer_support = 0;
4679 rc = strlen(buf);
4680 }
4681 break;
4682 case 1:
4683 if (!(phba->hba_flag & HBA_AER_ENABLED)) {
4684 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4685 if (!rc) {
4686 spin_lock_irq(&phba->hbalock);
4687 phba->hba_flag |= HBA_AER_ENABLED;
4688 spin_unlock_irq(&phba->hbalock);
4689 phba->cfg_aer_support = 1;
4690 rc = strlen(buf);
4691 } else
4692 rc = -EPERM;
4693 } else {
4694 phba->cfg_aer_support = 1;
4695 rc = strlen(buf);
4696 }
4697 break;
4698 default:
4699 rc = -EINVAL;
4700 break;
4701 }
4702 return rc;
4703 }
4704
4705 static DEVICE_ATTR_RW(lpfc_aer_support);
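/*
 * Illustrative example (host number is a placeholder):
 *   echo 0 > /sys/class/scsi_host/host0/lpfc_aer_support
 * disables PCIe AER on the adapter; writing 1 re-enables it if the kernel
 * helper succeeds.
 */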
4706
4707 /**
4708 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
4709 * @dev: class device that is converted into a Scsi_host.
4710 * @attr: device attribute, not used.
4711 * @buf: containing flag 1 for aer cleanup state.
4712 * @count: unused variable.
4713 *
4714 * Description:
4715 * If the @buf contains 1 and the device currently has the AER support
4716 * enabled, then invokes the kernel AER helper routine
4717 * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
4718 * error status register.
4719 *
4720 * Notes:
4721 *
4722 * Returns:
4723 * -EINVAL if the buf does not contain the 1 or the device is not currently
4724 * enabled with the AER support.
4725 **/
4726 static ssize_t
4727 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
4728 const char *buf, size_t count)
4729 {
4730 struct Scsi_Host *shost = class_to_shost(dev);
4731 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4732 struct lpfc_hba *phba = vport->phba;
4733 int val, rc = -1;
4734
4735 if (!isdigit(buf[0]))
4736 return -EINVAL;
4737 if (sscanf(buf, "%i", &val) != 1)
4738 return -EINVAL;
4739 if (val != 1)
4740 return -EINVAL;
4741
4742 if (phba->hba_flag & HBA_AER_ENABLED)
4743 rc = pci_aer_clear_nonfatal_status(phba->pcidev);
4744
4745 if (rc == 0)
4746 return strlen(buf);
4747 else
4748 return -EPERM;
4749 }
4750
4751 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
4752 lpfc_aer_cleanup_state);
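/*
 * Illustrative example (host number is a placeholder):
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_aer_state_cleanup
 * clears the non-fatal AER error status when AER is enabled on the adapter.
 */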
4753
4754 /**
4755 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
4756 *
4757 * @dev: class device that is converted into a Scsi_host.
4758 * @attr: device attribute, not used.
4759  * @buf: containing the string with the number of vfs to be enabled.
4760 * @count: unused variable.
4761 *
4762 * Description:
4763  * When this api is called through the user sysfs interface, the driver
4764  * shall try to enable or disable SR-IOV virtual functions according to the
4765 * following:
4766 *
4767  * If no virtual functions have been enabled on the physical function,
4768 * the driver shall invoke the pci enable virtual function api trying
4769 * to enable the virtual functions. If the nr_vfn provided is greater
4770 * than the maximum supported, the maximum virtual function number will
4771 * be used for invoking the api; otherwise, the nr_vfn provided shall
4772 * be used for invoking the api. If the api call returned success, the
4773 * actual number of virtual functions enabled will be set to the driver
4774 * cfg_sriov_nr_virtfn; otherwise, -EINVAL shall be returned and driver
4775 * cfg_sriov_nr_virtfn remains zero.
4776 *
4777  * If non-zero virtual functions have already been enabled on the
4778  * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
4779  * -EEXIST will be returned and the driver does nothing;
4780 *
4781  * If the nr_vfn provided is zero and non-zero virtual functions have
4782  * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
4783  * disabling virtual function api shall be invoked to disable all the
4784 * virtual functions and driver's cfg_sriov_nr_virtfn shall be set to
4785  * zero. Otherwise, if no virtual functions have been enabled, do
4786 * nothing.
4787 *
4788 * Returns:
4789  * length of the buf on success if val is in range and the intended mode
4790 * is supported.
4791 * -EINVAL if val out of range or intended mode is not supported.
4792 **/
4793 static ssize_t
4794 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4795 const char *buf, size_t count)
4796 {
4797 struct Scsi_Host *shost = class_to_shost(dev);
4798 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4799 struct lpfc_hba *phba = vport->phba;
4800 struct pci_dev *pdev = phba->pcidev;
4801 int val = 0, rc = -EINVAL;
4802
4803 /* Sanity check on user data */
4804 if (!isdigit(buf[0]))
4805 return -EINVAL;
4806 if (sscanf(buf, "%i", &val) != 1)
4807 return -EINVAL;
4808 if (val < 0)
4809 return -EINVAL;
4810
4811 /* Request disabling virtual functions */
4812 if (val == 0) {
4813 if (phba->cfg_sriov_nr_virtfn > 0) {
4814 pci_disable_sriov(pdev);
4815 phba->cfg_sriov_nr_virtfn = 0;
4816 }
4817 return strlen(buf);
4818 }
4819
4820 /* Request enabling virtual functions */
4821 if (phba->cfg_sriov_nr_virtfn > 0) {
4822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4823 "3018 There are %d virtual functions "
4824 "enabled on physical function.\n",
4825 phba->cfg_sriov_nr_virtfn);
4826 return -EEXIST;
4827 }
4828
4829 if (val <= LPFC_MAX_VFN_PER_PFN)
4830 phba->cfg_sriov_nr_virtfn = val;
4831 else {
4832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4833 "3019 Enabling %d virtual functions is not "
4834 "allowed.\n", val);
4835 return -EINVAL;
4836 }
4837
4838 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
4839 if (rc) {
4840 phba->cfg_sriov_nr_virtfn = 0;
4841 rc = -EPERM;
4842 } else
4843 rc = strlen(buf);
4844
4845 return rc;
4846 }
4847
4848 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
4849 "Enable PCIe device SR-IOV virtual fn");
4850
4851 lpfc_param_show(sriov_nr_virtfn)
4852 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
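/*
 * Illustrative example (host number and VF count are placeholders):
 *   echo 4 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn
 * enables four SR-IOV virtual functions when none are enabled yet;
 * writing 0 disables any enabled virtual functions.
 */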
4853
4854 /**
4855  * lpfc_request_firmware_upgrade_store - Request a Linux generic firmware upgrade
4856  *
4857  * @dev: class device that is converted into a Scsi_host.
4858  * @attr: device attribute, not used.
4859  * @buf: containing the string "1" to request the firmware upgrade.
4860  * @count: unused variable.
4861  *
4862  * Description:
4863  * Requests a firmware upgrade for the adapter through the Linux generic
 * firmware interface (lpfc_sli4_request_firmware_update).
 *
4864 * Returns:
4865  * length of the buf on success if val is in range and the intended mode
4866 * is supported.
4867 * -EINVAL if val out of range or intended mode is not supported.
4868 **/
4869 static ssize_t
4870 lpfc_request_firmware_upgrade_store(struct device *dev,
4871 struct device_attribute *attr,
4872 const char *buf, size_t count)
4873 {
4874 struct Scsi_Host *shost = class_to_shost(dev);
4875 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4876 struct lpfc_hba *phba = vport->phba;
4877 int val = 0, rc;
4878
4879 /* Sanity check on user data */
4880 if (!isdigit(buf[0]))
4881 return -EINVAL;
4882 if (sscanf(buf, "%i", &val) != 1)
4883 return -EINVAL;
4884 if (val != 1)
4885 return -EINVAL;
4886
4887 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
4888 if (rc)
4889 rc = -EPERM;
4890 else
4891 rc = strlen(buf);
4892 return rc;
4893 }
4894
4895 static int lpfc_req_fw_upgrade;
4896 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
4897 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
4898 lpfc_param_show(request_firmware_upgrade)
4899
4900 /**
4901 * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
4902 * @phba: lpfc_hba pointer.
4903 * @val: 0 or 1.
4904 *
4905 * Description:
4906 * Set the initial Linux generic firmware upgrade enable or disable flag.
4907 *
4908 * Returns:
4909 * zero if val saved.
4910 * -EINVAL val out of range
4911 **/
4912 static int
4913 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
4914 {
4915 if (val >= 0 && val <= 1) {
4916 phba->cfg_request_firmware_upgrade = val;
4917 return 0;
4918 }
4919 return -EINVAL;
4920 }
4921 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
4922 lpfc_request_firmware_upgrade_show,
4923 lpfc_request_firmware_upgrade_store);
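/*
 * Illustrative example (host number is a placeholder):
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_req_fw_upgrade
 * asks the driver to run the Linux generic firmware upgrade.
 */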
4924
4925 /**
4926  * lpfc_force_rscn_store - Force an RSCN to be sent to all remote NPorts
4927 *
4928 * @dev: class device that is converted into a Scsi_host.
4929 * @attr: device attribute, not used.
4930 * @buf: unused string
4931 * @count: unused variable.
4932 *
4933 * Description:
 * Force the switch to send an RSCN to all other NPorts in our zone.
 * If we are directly connected point-to-point, build the RSCN command
 * ourselves and send it to the other NPort. Not supported for private loop.
4937 *
4938 * Returns:
 * length of buf - on success
4940 * -EIO - if command is not sent
4941 **/
4942 static ssize_t
lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
4944 const char *buf, size_t count)
4945 {
4946 struct Scsi_Host *shost = class_to_shost(dev);
4947 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4948 int i;
4949
4950 i = lpfc_issue_els_rscn(vport, 0);
4951 if (i)
4952 return -EIO;
4953 return strlen(buf);
4954 }
4955
4956 /*
4957 * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
4958 * connected to the HBA.
4959 *
 * The value written is not used; any write triggers the RSCN.
4961 */
4962 static int lpfc_force_rscn;
4963 module_param(lpfc_force_rscn, int, 0644);
4964 MODULE_PARM_DESC(lpfc_force_rscn,
4965 "Force an RSCN to be sent to all remote NPorts");
lpfc_param_show(force_rscn)
4967
4968 /**
4969 * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
4970 * @phba: lpfc_hba pointer.
4971 * @val: unused value.
4972 *
4973 * Returns:
4974 * zero if val saved.
4975 **/
4976 static int
4977 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
4978 {
4979 return 0;
4980 }
4981 static DEVICE_ATTR_RW(lpfc_force_rscn);
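/*
 * Illustrative usage sketch (not part of the driver): an RSCN can be forced
 * from user space by writing any value to the attribute, e.g. on an assumed
 * host "host16":
 *
 *	echo 1 > /sys/class/scsi_host/host16/lpfc_force_rscn
 *
 * The write fails with -EIO if the RSCN ELS command could not be issued.
 */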
4982
4983 /**
 * lpfc_fcp_imax_store - Set the maximum fast-path FCP interrupt rate
4985 *
4986 * @dev: class device that is converted into a Scsi_host.
4987 * @attr: device attribute, not used.
4988 * @buf: string with the number of fast-path FCP interrupts per second.
4989 * @count: unused variable.
4990 *
4991 * Description:
 * If val is in the valid range [5000,5000000], or 0 to re-enable automatic
 * tuning, then set the adapter's maximum number of fast-path FCP interrupts
 * per second.
4994 *
4995 * Returns:
 * length of the buf on success if val is in range and the intended mode
 * is supported.
4998 * -EINVAL if val out of range or intended mode is not supported.
4999 **/
5000 static ssize_t
lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
5002 const char *buf, size_t count)
5003 {
5004 struct Scsi_Host *shost = class_to_shost(dev);
5005 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5006 struct lpfc_hba *phba = vport->phba;
5007 struct lpfc_eq_intr_info *eqi;
5008 uint32_t usdelay;
5009 int val = 0, i;
5010
5011 /* fcp_imax is only valid for SLI4 */
5012 if (phba->sli_rev != LPFC_SLI_REV4)
5013 return -EINVAL;
5014
5015 /* Sanity check on user data */
5016 if (!isdigit(buf[0]))
5017 return -EINVAL;
5018 if (sscanf(buf, "%i", &val) != 1)
5019 return -EINVAL;
5020
5021 /*
5022 * Value range for the HBA is [5000,5000000]
5023 * The value for each EQ depends on how many EQs are configured.
5024 * Allow value == 0
5025 */
5026 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5027 return -EINVAL;
5028
5029 phba->cfg_auto_imax = (val) ? 0 : 1;
5030 if (phba->cfg_fcp_imax && !val) {
5031 queue_delayed_work(phba->wq, &phba->eq_delay_work,
5032 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5033
5034 for_each_present_cpu(i) {
5035 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5036 eqi->icnt = 0;
5037 }
5038 }
5039
5040 phba->cfg_fcp_imax = (uint32_t)val;
5041
5042 if (phba->cfg_fcp_imax)
5043 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5044 else
5045 usdelay = 0;
5046
5047 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5048 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5049 usdelay);
5050
5051 return strlen(buf);
5052 }
5053
5054 /*
5055 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
5056 # for the HBA.
5057 #
5058 # Value range is [5,000 to 5,000,000]. Default value is 50,000.
5059 */
5060 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5061 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5062 MODULE_PARM_DESC(lpfc_fcp_imax,
5063 "Set the maximum number of FCP interrupts per second per HBA");
lpfc_param_show(fcp_imax)
5065
5066 /**
 * lpfc_fcp_imax_init - Set the initial maximum FCP interrupt rate
 * @phba: lpfc_hba pointer.
 * @val: maximum number of fast-path FCP interrupts per second.
5070 *
5071 * Description:
 * If val is in the valid range [5000,5000000], or 0 for automatic tuning,
 * then initialize the adapter's maximum number of fast-path FCP interrupts
 * per second.
5074 *
5075 * Returns:
5076 * zero if val saved.
5077 * -EINVAL val out of range
5078 **/
5079 static int
5080 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
5081 {
5082 if (phba->sli_rev != LPFC_SLI_REV4) {
5083 phba->cfg_fcp_imax = 0;
5084 return 0;
5085 }
5086
5087 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
5088 (val == 0)) {
5089 phba->cfg_fcp_imax = val;
5090 return 0;
5091 }
5092
5093 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5094 "3016 lpfc_fcp_imax: %d out of range, using default\n",
5095 val);
5096 phba->cfg_fcp_imax = LPFC_DEF_IMAX;
5097
5098 return 0;
5099 }
5100
5101 static DEVICE_ATTR_RW(lpfc_fcp_imax);
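/*
 * Worked example (illustrative only): with lpfc_fcp_imax set to 20000
 * interrupts per second, the store routine above computes an EQ delay of
 * usdelay = LPFC_SEC_TO_USEC / 20000, i.e. 50 microseconds assuming
 * LPFC_SEC_TO_USEC is one million. Writing 0 clears the fixed rate and
 * re-enables automatic EQ delay tuning (cfg_auto_imax).
 */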
5102
5103 /**
 * lpfc_cq_max_proc_limit_store - Set the maximum number of CQEs processed per CQ
5105 *
5106 * @dev: class device that is converted into a Scsi_host.
5107 * @attr: device attribute, not used.
5108 * @buf: string with the cq max processing limit of cqes
5109 * @count: unused variable.
5110 *
5111 * Description:
5112 * If val is in a valid range, then set value on each cq
5113 *
5114 * Returns:
5115 * The length of the buf: if successful
5116 * -ERANGE: if val is not in the valid range
5117 * -EINVAL: if bad value format or intended mode is not supported.
5118 **/
5119 static ssize_t
lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
5121 const char *buf, size_t count)
5122 {
5123 struct Scsi_Host *shost = class_to_shost(dev);
5124 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5125 struct lpfc_hba *phba = vport->phba;
5126 struct lpfc_queue *eq, *cq;
5127 unsigned long val;
5128 int i;
5129
5130 /* cq_max_proc_limit is only valid for SLI4 */
5131 if (phba->sli_rev != LPFC_SLI_REV4)
5132 return -EINVAL;
5133
5134 /* Sanity check on user data */
5135 if (!isdigit(buf[0]))
5136 return -EINVAL;
5137 if (kstrtoul(buf, 0, &val))
5138 return -EINVAL;
5139
5140 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
5141 return -ERANGE;
5142
5143 phba->cfg_cq_max_proc_limit = (uint32_t)val;
5144
5145 /* set the values on the cq's */
5146 for (i = 0; i < phba->cfg_irq_chann; i++) {
5147 /* Get the EQ corresponding to the IRQ vector */
5148 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
5149 if (!eq)
5150 continue;
5151
5152 list_for_each_entry(cq, &eq->child_list, list)
5153 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
5154 cq->entry_count);
5155 }
5156
5157 return strlen(buf);
5158 }
5159
5160 /*
 * lpfc_cq_max_proc_limit: The maximum number of CQE entries processed in an
 * iteration of CQ processing.
5163 */
5164 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5165 module_param(lpfc_cq_max_proc_limit, int, 0644);
5166 MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
5167 "Set the maximum number CQEs processed in an iteration of "
5168 "CQ processing");
5169 lpfc_param_show(cq_max_proc_limit)
5170
5171 /*
5172 * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
5173 * single handler call which should request a polled completion rather
5174 * than re-enabling interrupts.
5175 */
5176 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
5177 LPFC_CQ_MIN_THRESHOLD_TO_POLL,
5178 LPFC_CQ_MAX_THRESHOLD_TO_POLL,
5179 "CQE Processing Threshold to enable Polling");
5180
5181 /**
5182 * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
5183 * @phba: lpfc_hba pointer.
5184 * @val: entry limit
5185 *
5186 * Description:
5187 * If val is in a valid range, then initialize the adapter's maximum
5188 * value.
5189 *
5190 * Returns:
5191 * Always returns 0 for success, even if value not always set to
5192 * requested value. If value out of range or not supported, will fall
5193 * back to default.
5194 **/
5195 static int
lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
5197 {
5198 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5199
5200 if (phba->sli_rev != LPFC_SLI_REV4)
5201 return 0;
5202
5203 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
5204 phba->cfg_cq_max_proc_limit = val;
5205 return 0;
5206 }
5207
5208 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5209 "0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: "
5210 "%d out of range, using default\n",
			val);
5212
5213 return 0;
5214 }
5215
5216 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
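/*
 * Worked example (illustrative only): the store routine above clamps the
 * per-CQ limit to the CQ size. If lpfc_cq_max_proc_limit is set to 512 and
 * a child CQ was created with entry_count 256, that CQ's max_proc_limit
 * becomes min(512, 256) = 256; a CQ with 1024 entries would use the full
 * 512. The entry counts here are assumed values.
 */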
5217
5218 /**
 * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains text describing the CPU to IRQ vector mapping.
5223 *
5224 * Returns: size of formatted string.
5225 **/
5226 static ssize_t
lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
5228 char *buf)
5229 {
5230 struct Scsi_Host *shost = class_to_shost(dev);
5231 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5232 struct lpfc_hba *phba = vport->phba;
5233 struct lpfc_vector_map_info *cpup;
5234 int len = 0;
5235
5236 if ((phba->sli_rev != LPFC_SLI_REV4) ||
5237 (phba->intr_type != MSIX))
5238 return len;
5239
5240 switch (phba->cfg_fcp_cpu_map) {
5241 case 0:
5242 len += scnprintf(buf + len, PAGE_SIZE-len,
5243 "fcp_cpu_map: No mapping (%d)\n",
5244 phba->cfg_fcp_cpu_map);
5245 return len;
5246 case 1:
5247 len += scnprintf(buf + len, PAGE_SIZE-len,
5248 "fcp_cpu_map: HBA centric mapping (%d): "
5249 "%d of %d CPUs online from %d possible CPUs\n",
5250 phba->cfg_fcp_cpu_map, num_online_cpus(),
5251 num_present_cpus(),
5252 phba->sli4_hba.num_possible_cpu);
5253 break;
5254 }
5255
5256 while (phba->sli4_hba.curr_disp_cpu <
5257 phba->sli4_hba.num_possible_cpu) {
5258 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
5259
5260 if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5261 len += scnprintf(buf + len, PAGE_SIZE - len,
5262 "CPU %02d not present\n",
5263 phba->sli4_hba.curr_disp_cpu);
5264 else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
5265 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5266 len += scnprintf(
5267 buf + len, PAGE_SIZE - len,
5268 "CPU %02d hdwq None "
5269 "physid %d coreid %d ht %d ua %d\n",
5270 phba->sli4_hba.curr_disp_cpu,
5271 cpup->phys_id, cpup->core_id,
5272 (cpup->flag & LPFC_CPU_MAP_HYPER),
5273 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5274 else
5275 len += scnprintf(
5276 buf + len, PAGE_SIZE - len,
5277 "CPU %02d EQ None hdwq %04d "
5278 "physid %d coreid %d ht %d ua %d\n",
5279 phba->sli4_hba.curr_disp_cpu,
5280 cpup->hdwq, cpup->phys_id,
5281 cpup->core_id,
5282 (cpup->flag & LPFC_CPU_MAP_HYPER),
5283 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5284 } else {
5285 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5286 len += scnprintf(
5287 buf + len, PAGE_SIZE - len,
5288 "CPU %02d hdwq None "
5289 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5290 phba->sli4_hba.curr_disp_cpu,
5291 cpup->phys_id,
5292 cpup->core_id,
5293 (cpup->flag & LPFC_CPU_MAP_HYPER),
5294 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5295 lpfc_get_irq(cpup->eq));
5296 else
5297 len += scnprintf(
5298 buf + len, PAGE_SIZE - len,
5299 "CPU %02d EQ %04d hdwq %04d "
5300 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5301 phba->sli4_hba.curr_disp_cpu,
5302 cpup->eq, cpup->hdwq, cpup->phys_id,
5303 cpup->core_id,
5304 (cpup->flag & LPFC_CPU_MAP_HYPER),
5305 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5306 lpfc_get_irq(cpup->eq));
5307 }
5308
5309 phba->sli4_hba.curr_disp_cpu++;
5310
5311 /* display max number of CPUs keeping some margin */
5312 if (phba->sli4_hba.curr_disp_cpu <
5313 phba->sli4_hba.num_possible_cpu &&
5314 (len >= (PAGE_SIZE - 64))) {
5315 len += scnprintf(buf + len,
5316 PAGE_SIZE - len, "more...\n");
5317 break;
5318 }
5319 }
5320
5321 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5322 phba->sli4_hba.curr_disp_cpu = 0;
5323
5324 return len;
5325 }
5326
5327 /**
5328 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
5329 * @dev: class device that is converted into a Scsi_host.
5330 * @attr: device attribute, not used.
 * @buf: data buffer, not used (changing the mapping is not implemented).
5332 * @count: not used.
5333 *
5334 * Returns:
5335 * -EINVAL - Not implemented yet.
5336 **/
5337 static ssize_t
lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5339 const char *buf, size_t count)
5340 {
5341 return -EINVAL;
5342 }
5343
5344 /*
5345 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
5346 # for the HBA.
5347 #
5348 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
# 0 - Do not affinitize IRQ vectors
# 1 - Affinitize HBA vectors with respect to each HBA
5351 # (start with CPU0 for each HBA)
5352 # This also defines how Hardware Queues are mapped to specific CPUs.
5353 */
5354 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5355 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5356 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5357 "Defines how to map CPUs to IRQ vectors per HBA");
5358
5359 /**
 * lpfc_fcp_cpu_map_init - Set the initial CPU to IRQ vector mapping mode
 * @phba: lpfc_hba pointer.
 * @val: cpu map mode value.
 *
 * Description:
 * If val is in the valid range [0,1], then affinitize the adapter's
 * MSI-X vectors.
5367 *
5368 * Returns:
5369 * zero if val saved.
5370 * -EINVAL val out of range
5371 **/
5372 static int
lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5374 {
5375 if (phba->sli_rev != LPFC_SLI_REV4) {
5376 phba->cfg_fcp_cpu_map = 0;
5377 return 0;
5378 }
5379
5380 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5381 phba->cfg_fcp_cpu_map = val;
5382 return 0;
5383 }
5384
5385 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5386 "3326 lpfc_fcp_cpu_map: %d out of range, using "
5387 "default\n", val);
5388 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5389
5390 return 0;
5391 }
5392
5393 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
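/*
 * Illustrative usage sketch (not part of the driver): the mapping is
 * read-only in practice (the store routine returns -EINVAL) and can be
 * inspected with, e.g.:
 *
 *	cat /sys/class/scsi_host/host16/lpfc_fcp_cpu_map
 *
 * which prints one line per CPU in the format emitted by
 * lpfc_fcp_cpu_map_show(), such as
 * "CPU 00 EQ 0000 hdwq 0000 physid 0 coreid 0 ht 0 ua 0 IRQ 65"
 * (the host number, IRQ and topology values are assumed).
 */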
5394
5395 /*
5396 # lpfc_fcp_class: Determines FC class to use for the FCP protocol.
5397 # Value range is [2,3]. Default value is 3.
5398 */
5399 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5400 "Select Fibre Channel class of service for FCP sequences");
5401
5402 /*
5403 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
5404 # is [0,1]. Default value is 0.
5405 */
5406 LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
5407 "Use ADISC on rediscovery to authenticate FCP devices");
5408
5409 /*
5410 # lpfc_first_burst_size: First burst size to use on the NPorts
5411 # that support first burst.
5412 # Value range is [0,65536]. Default value is 0.
5413 */
5414 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5415 "First burst size for Targets that support first burst");
5416
5417 /*
5418 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
5419 * When the driver is configured as an NVME target, this value is
5420 * communicated to the NVME initiator in the PRLI response. It is
5421 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
5422 * parameters are set and the target is sending the PRLI RSP.
5423 * Parameter supported on physical port only - no NPIV support.
5424 * Value range is [0,65536]. Default value is 0.
5425 */
5426 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
5427 "NVME Target mode first burst size in 512B increments.");
5428
5429 /*
5430 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
5431 * For the Initiator (I), enabling this parameter means that an NVMET
5432 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
5433 * processed by the initiator for subsequent NVME FCP IO.
5434 * Currently, this feature is not supported on the NVME target
5435 * Value range is [0,1]. Default value is 0 (disabled).
5436 */
5437 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
5438 "Enable First Burst feature for NVME Initiator.");
5439
5440 /*
5441 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
5442 # depth. Default value is 0. When the value of this parameter is zero the
5443 # SCSI command completion time is not used for controlling I/O queue depth. When
5444 # the parameter is set to a non-zero value, the I/O queue depth is controlled
5445 # to limit the I/O completion time to the parameter value.
5446 # The value is set in milliseconds.
5447 */
5448 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
5449 "Use command completion time to control queue depth");
5450
5451 lpfc_vport_param_show(max_scsicmpl_time);
5452 static int
lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
5454 {
5455 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5456 struct lpfc_nodelist *ndlp, *next_ndlp;
5457
5458 if (val == vport->cfg_max_scsicmpl_time)
5459 return 0;
5460 if ((val < 0) || (val > 60000))
5461 return -EINVAL;
5462 vport->cfg_max_scsicmpl_time = val;
5463
5464 spin_lock_irq(shost->host_lock);
5465 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5466 if (!NLP_CHK_NODE_ACT(ndlp))
5467 continue;
5468 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
5469 continue;
5470 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
5471 }
5472 spin_unlock_irq(shost->host_lock);
5473 return 0;
5474 }
5475 lpfc_vport_param_store(max_scsicmpl_time);
5476 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
5477
5478 /*
5479 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
5480 # range is [0,1]. Default value is 0.
5481 */
5482 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
5483
5484 /*
5485 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
5486 # range is [0,1]. Default value is 1.
5487 */
5488 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
5489
5490 /*
 * lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
 * range is [0,1]. Default value is 1 (LPFC_FCP_SCHED_BY_CPU).
5493 * For [0], FCP commands are issued to Work Queues based on upper layer
5494 * hardware queue index.
5495 * For [1], FCP commands are issued to a Work Queue associated with the
5496 * current CPU.
5497 *
5498 * LPFC_FCP_SCHED_BY_HDWQ == 0
5499 * LPFC_FCP_SCHED_BY_CPU == 1
5500 *
5501 * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
5502 * affinity for FCP/NVME I/Os through Work Queues associated with the current
5503 * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
5504 * through WQs will be used.
5505 */
5506 LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5507 LPFC_FCP_SCHED_BY_HDWQ,
5508 LPFC_FCP_SCHED_BY_CPU,
5509 "Determine scheduling algorithm for "
5510 "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5511
5512 /*
 * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
5514 * range is [0,1]. Default value is 0.
5515 * For [0], GID_FT is used for NameServer queries after RSCN (default)
5516 * For [1], GID_PT is used for NameServer queries after RSCN
5517 *
5518 */
5519 LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5520 LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5521 "Determine algorithm NameServer queries after RSCN "
5522 "[0] - GID_FT, [1] - GID_PT");
5523
5524 /*
5525 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
5526 # range is [0,1]. Default value is 0.
5527 # For [0], bus reset issues target reset to ALL devices
5528 # For [1], bus reset issues target reset to non-FCP2 devices
5529 */
5530 LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5531 "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5532
5533
5534 /*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
5536 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
5537 # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
5538 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
5539 # cr_delay is set to 0.
5540 */
5541 LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5542 "interrupt response is generated");
5543
5544 LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5545 "interrupt response is generated");
5546
5547 /*
5548 # lpfc_multi_ring_support: Determines how many rings to spread available
5549 # cmd/rsp IOCB entries across.
5550 # Value range is [1,2]. Default value is 1.
5551 */
5552 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5553 "SLI rings to spread IOCB entries across");
5554
5555 /*
5556 # lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
5557 # identifies what rctl value to configure the additional ring for.
# Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
5559 */
5560 LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5561 255, "Identifies RCTL for additional ring configuration");
5562
5563 /*
5564 # lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
5565 # identifies what type value to configure the additional ring for.
5566 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
5567 */
5568 LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5569 255, "Identifies TYPE for additional ring configuration");
5570
5571 /*
5572 # lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
5573 # 0 = SmartSAN functionality disabled (default)
5574 # 1 = SmartSAN functionality enabled
5575 # This parameter will override the value of lpfc_fdmi_on module parameter.
5576 # Value range is [0,1]. Default value is 0.
5577 */
5578 LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5579
5580 /*
5581 # lpfc_fdmi_on: Controls FDMI support.
5582 # 0 No FDMI support
5583 # 1 Traditional FDMI support (default)
5584 # Traditional FDMI support means the driver will assume FDMI-2 support;
5585 # however, if that fails, it will fallback to FDMI-1.
5586 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
5587 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
5588 # lpfc_fdmi_on.
5589 # Value range [0,1]. Default value is 1.
5590 */
5591 LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5592
5593 /*
5594 # Specifies the maximum number of ELS cmds we can have outstanding (for
5595 # discovery). Value range is [1,64]. Default value = 32.
5596 */
5597 LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
5598 "during discovery");
5599
5600 /*
5601 # lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
5602 # will be scanned by the SCSI midlayer when sequential scanning is
5603 # used; and is also the highest LUN ID allowed when the SCSI midlayer
5604 # parses REPORT_LUN responses. The lpfc driver has no LUN count or
5605 # LUN ID limit, but the SCSI midlayer requires this field for the uses
5606 # above. The lpfc driver limits the default value to 255 for two reasons.
5607 # As it bounds the sequential scan loop, scanning for thousands of luns
5608 # on a target can take minutes of wall clock time. Additionally,
5609 # there are FC targets, such as JBODs, that only recognize 8-bits of
5610 # LUN ID. When they receive a value greater than 8 bits, they chop off
5611 # the high order bits. In other words, they see LUN IDs 0, 256, 512,
5612 # and so on all as LUN ID 0. This causes the linux kernel, which sees
5613 # valid responses at each of the LUN IDs, to believe there are multiple
5614 # devices present, when in fact, there is only 1.
5615 # A customer that is aware of their target behaviors, and the results as
5616 # indicated above, is welcome to increase the lpfc_max_luns value.
5617 # As mentioned, this value is not used by the lpfc driver, only the
5618 # SCSI midlayer.
5619 # Value range is [0,65535]. Default value is 255.
5620 # NOTE: The SCSI layer might probe all allowed LUN on some old targets.
5621 */
5622 LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
5623
5624 /*
# lpfc_poll_tmo: Milliseconds the driver will wait between polling the FCP ring.
5626 # Value range is [1,255], default value is 10.
5627 */
5628 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
5629 "Milliseconds driver will wait between polling FCP ring");
5630
5631 /*
5632 # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
5633 # to complete in seconds. Value range is [5,180], default value is 60.
5634 */
5635 LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
5636 "Maximum time to wait for task management commands to complete");
5637 /*
5638 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
5639 # support this feature
5640 # 0 = MSI disabled
5641 # 1 = MSI enabled
5642 # 2 = MSI-X enabled (default)
5643 # Value range is [0,2]. Default value is 2.
5644 */
5645 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
5646 "MSI-X (2), if possible");
5647
5648 /*
5649 * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
5650 *
5651 * 0 = NVME OAS disabled
5652 * 1 = NVME OAS enabled
5653 *
5654 * Value range is [0,1]. Default value is 0.
5655 */
5656 LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
5657 "Use OAS bit on NVME IOs");
5658
5659 /*
 * lpfc_nvme_embed_cmd: Embed the NVME Command in the WQE when sending NVME/NVMET IOs
5661 *
5662 * 0 = Put NVME Command in SGL
5663 * 1 = Embed NVME Command in WQE (unless G7)
5664 * 2 = Embed NVME Command in WQE (force)
5665 *
5666 * Value range is [0,2]. Default value is 1.
5667 */
5668 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5669 "Embed NVME Command in WQE");
5670
5671 /*
5672 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5673 * the driver will advertise it supports to the SCSI layer.
5674 *
5675 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
 * 1,256 = Manually specify the nr_hw_queues value to be advertised.
5677 *
5678 * Value range is [0,256]. Default value is 8.
5679 */
5680 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5681 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5682 "Set the number of SCSI Queues advertised");
5683
5684 /*
5685 * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5686 * will advertise it supports to the NVME and SCSI layers. This also
5687 * will map to the number of CQ/WQ pairs the driver will create.
5688 *
5689 * The NVME Layer will try to create this many, plus 1 administrative
 * hardware queue. The administrative queue will always map to WQ 0.
 * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
5692 *
5693 * 0 = Configure the number of hdw queues to the number of active CPUs.
5694 * 1,256 = Manually specify how many hdw queues to use.
5695 *
5696 * Value range is [0,256]. Default value is 0.
5697 */
5698 LPFC_ATTR_R(hdw_queue,
5699 LPFC_HBA_HDWQ_DEF,
5700 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5701 "Set the number of I/O Hardware Queues");
5702
5703 #if IS_ENABLED(CONFIG_X86)
5704 /**
 * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
5706 * irq_chann_mode
5707 * @phba: Pointer to HBA context object.
5708 **/
5709 static void
lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
5711 {
5712 unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
5713 const struct cpumask *sibling_mask;
5714 struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
5715
5716 cpumask_clear(aff_mask);
5717
5718 if (phba->irq_chann_mode == NUMA_MODE) {
5719 /* Check if we're a NUMA architecture */
5720 numa_node = dev_to_node(&phba->pcidev->dev);
5721 if (numa_node == NUMA_NO_NODE) {
5722 phba->irq_chann_mode = NORMAL_MODE;
5723 return;
5724 }
5725 }
5726
5727 for_each_possible_cpu(cpu) {
5728 switch (phba->irq_chann_mode) {
5729 case NUMA_MODE:
5730 if (cpu_to_node(cpu) == numa_node)
5731 cpumask_set_cpu(cpu, aff_mask);
5732 break;
5733 case NHT_MODE:
5734 sibling_mask = topology_sibling_cpumask(cpu);
5735 first_cpu = cpumask_first(sibling_mask);
5736 if (first_cpu < nr_cpu_ids)
5737 cpumask_set_cpu(first_cpu, aff_mask);
5738 break;
5739 default:
5740 break;
5741 }
5742 }
5743 }
5744 #endif
5745
5746 static void
lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
5748 {
5749 #if IS_ENABLED(CONFIG_X86)
5750 switch (boot_cpu_data.x86_vendor) {
5751 case X86_VENDOR_AMD:
5752 /* If AMD architecture, then default is NUMA_MODE */
5753 phba->irq_chann_mode = NUMA_MODE;
5754 break;
5755 case X86_VENDOR_INTEL:
5756 /* If Intel architecture, then default is no hyperthread mode */
5757 phba->irq_chann_mode = NHT_MODE;
5758 break;
5759 default:
5760 phba->irq_chann_mode = NORMAL_MODE;
5761 break;
5762 }
5763 lpfc_cpumask_irq_mode_init(phba);
5764 #else
5765 phba->irq_chann_mode = NORMAL_MODE;
5766 #endif
5767 }
5768
5769 /*
5770 * lpfc_irq_chann: Set the number of IRQ vectors that are available
5771 * for Hardware Queues to utilize. This also will map to the number
5772 * of EQ / MSI-X vectors the driver will create. This should never be
5773 * more than the number of Hardware Queues
5774 *
5775 * 0 = Configure number of IRQ Channels to:
5776 * if AMD architecture, number of CPUs on HBA's NUMA node
5777 * if Intel architecture, number of physical CPUs.
5778 * otherwise, number of active CPUs.
5779 * [1,256] = Manually specify how many IRQ Channels to use.
5780 *
5781 * Value range is [0,256]. Default value is [0].
5782 */
5783 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
5784 module_param(lpfc_irq_chann, uint, 0444);
5785 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
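/*
 * Illustrative usage sketch (not part of the driver): both the hardware
 * queue count and the IRQ vector count are module parameters and can be
 * pinned at load time, e.g.:
 *
 *	modprobe lpfc lpfc_hdw_queue=16 lpfc_irq_chann=16
 *
 * Leaving both at 0 (the default) lets the driver size them from the CPU
 * topology as described above; the value 16 is an arbitrary example.
 */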
5786
/**
 * lpfc_irq_chann_init - Set the hba irq_chann initial value
5788 * @phba: lpfc_hba pointer.
5789 * @val: contains the initial value
5790 *
5791 * Description:
5792 * Validates the initial value is within range and assigns it to the
5793 * adapter. If not in range, an error message is posted and the
5794 * default value is assigned.
5795 *
5796 * Returns:
5797 * zero if value is in range and is set
5798 * -EINVAL if value was out of range
5799 **/
5800 static int
lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
5802 {
5803 const struct cpumask *aff_mask;
5804
5805 if (phba->cfg_use_msi != 2) {
5806 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5807 "8532 use_msi = %u ignoring cfg_irq_numa\n",
5808 phba->cfg_use_msi);
5809 phba->irq_chann_mode = NORMAL_MODE;
5810 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5811 return 0;
5812 }
5813
5814 /* Check if default setting was passed */
5815 if (val == LPFC_IRQ_CHANN_DEF)
5816 lpfc_assign_default_irq_chann(phba);
5817
5818 if (phba->irq_chann_mode != NORMAL_MODE) {
5819 aff_mask = &phba->sli4_hba.irq_aff_mask;
5820
5821 if (cpumask_empty(aff_mask)) {
5822 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5823 "8533 Could not identify CPUS for "
5824 "mode %d, ignoring\n",
5825 phba->irq_chann_mode);
5826 phba->irq_chann_mode = NORMAL_MODE;
5827 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5828 } else {
5829 phba->cfg_irq_chann = cpumask_weight(aff_mask);
5830
5831 /* If no hyperthread mode, then set hdwq count to
5832 * aff_mask weight as well
5833 */
5834 if (phba->irq_chann_mode == NHT_MODE)
5835 phba->cfg_hdw_queue = phba->cfg_irq_chann;
5836
5837 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5838 "8543 lpfc_irq_chann set to %u "
5839 "(mode: %d)\n", phba->cfg_irq_chann,
5840 phba->irq_chann_mode);
5841 }
5842 } else {
5843 if (val > LPFC_IRQ_CHANN_MAX) {
5844 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5845 "8545 lpfc_irq_chann attribute cannot "
5846 "be set to %u, allowed range is "
5847 "[%u,%u]\n",
5848 val,
5849 LPFC_IRQ_CHANN_MIN,
5850 LPFC_IRQ_CHANN_MAX);
5851 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5852 return -EINVAL;
5853 }
5854 phba->cfg_irq_chann = val;
5855 }
5856
5857 return 0;
5858 }
5859
5860 /**
5861 * lpfc_irq_chann_show - Display value of irq_chann
5862 * @dev: class converted to a Scsi_host structure.
5863 * @attr: device attribute, not used.
 * @buf: on return contains the configured number of IRQ channels.
5865 *
5866 * Returns: size of formatted string.
5867 **/
5868 static ssize_t
lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
5870 char *buf)
5871 {
5872 struct Scsi_Host *shost = class_to_shost(dev);
5873 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5874 struct lpfc_hba *phba = vport->phba;
5875
5876 return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
5877 }
5878
5879 static DEVICE_ATTR_RO(lpfc_irq_chann);
5880
5881 /*
5882 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
5883 # 0 = HBA resets disabled
5884 # 1 = HBA resets enabled (default)
5885 # 2 = HBA reset via PCI bus reset enabled
5886 # Value range is [0,2]. Default value is 1.
5887 */
5888 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
5889
5890 /*
# lpfc_enable_hba_heartbeat: Enable or disable the HBA heartbeat timer.
#       0  = HBA Heartbeat disabled (default)
#       1  = HBA Heartbeat enabled
# Value range is [0,1]. Default value is 0.
5895 */
5896 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
5897
5898 /*
5899 # lpfc_EnableXLane: Enable Express Lane Feature
5900 # 0x0 Express Lane Feature disabled
5901 # 0x1 Express Lane Feature enabled
5902 # Value range is [0,1]. Default value is 0.
5903 */
5904 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
5905
5906 /*
5907 # lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
5908 # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
5909 # Value range is [0x0,0x7f]. Default value is 0
5910 */
5911 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
5912
5913 /*
5914 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
5915 # 0 = BlockGuard disabled (default)
5916 # 1 = BlockGuard enabled
5917 # Value range is [0,1]. Default value is 0.
5918 */
5919 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
5920
5921 /*
# lpfc_prot_mask:
5923 # - Bit mask of host protection capabilities used to register with the
5924 # SCSI mid-layer
5925 # - Only meaningful if BG is turned on (lpfc_enable_bg=1).
5926 # - Allows you to ultimately specify which profiles to use
5927 # - Default will result in registering capabilities for all profiles.
5928 # - SHOST_DIF_TYPE1_PROTECTION 1
5929 # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
5930 # - SHOST_DIX_TYPE0_PROTECTION 8
5931 # HBA supports DIX Type 0: Host to HBA protection only
5932 # - SHOST_DIX_TYPE1_PROTECTION 16
5933 # HBA supports DIX Type 1: Host to HBA Type 1 protection
5934 #
5935 */
5936 LPFC_ATTR(prot_mask,
5937 (SHOST_DIF_TYPE1_PROTECTION |
5938 SHOST_DIX_TYPE0_PROTECTION |
5939 SHOST_DIX_TYPE1_PROTECTION),
5940 0,
5941 (SHOST_DIF_TYPE1_PROTECTION |
5942 SHOST_DIX_TYPE0_PROTECTION |
5943 SHOST_DIX_TYPE1_PROTECTION),
5944 "T10-DIF host protection capabilities mask");
5945
5946 /*
# lpfc_prot_guard:
5948 # - Bit mask of protection guard types to register with the SCSI mid-layer
5949 # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
5950 # - Allows you to ultimately specify which profiles to use
5951 # - Default will result in registering capabilities for all guard types
5952 #
5953 */
5954 LPFC_ATTR(prot_guard,
5955 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
5956 "T10-DIF host protection guard type");
5957
5958 /*
5959 * Delay initial NPort discovery when Clean Address bit is cleared in
5960 * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
5961 * This parameter can have value 0 or 1.
5962 * When this parameter is set to 0, no delay is added to the initial
5963 * discovery.
 * When this parameter is set to a non-zero value, initial NPort discovery is
 * delayed by ra_tov seconds when the Clean Address bit is cleared in the
 * FLOGI/FDISC accept and the FCID/Fabric name/Fabric portname is changed.
 * The driver always delays NPort discovery for subsequent FLOGI/FDISC
 * completions when the Clean Address bit is cleared in the FLOGI/FDISC
 * accept and the FCID/Fabric name/Fabric portname is changed.
5970 * Default value is 0.
5971 */
5972 LPFC_ATTR(delay_discovery, 0, 0, 1,
5973 "Delay NPort discovery when Clean Address bit is cleared.");
5974
5975 /*
5976 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
5977 * This value can be set to values between 64 and 4096. The default value
5978 * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
5979 * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
5980 * Because of the additional overhead involved in setting up T10-DIF,
5981 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
5982 * and will be limited to 512 if BlockGuard is enabled under SLI3.
5983 */
5984 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
5985 module_param(lpfc_sg_seg_cnt, uint, 0444);
5986 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
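/*
 * Worked example (illustrative only): the comment above notes that the scsi
 * and nvme layers allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
 * Assuming 4KB scatter/gather segments, the default of 64 segments
 * corresponds to a 256KB maximum I/O, while raising lpfc_sg_seg_cnt to 512
 * would allow up to 2MB. The 4KB segment size is an assumption made here
 * only for the sake of the arithmetic.
 */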
5987
5988 /**
5989 * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
5990 * configured for the adapter
5991 * @dev: class converted to a Scsi_host structure.
5992 * @attr: device attribute, not used.
5993 * @buf: on return contains a string with the list sizes
5994 *
5995 * Returns: size of formatted string.
5996 **/
5997 static ssize_t
lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
5999 char *buf)
6000 {
6001 struct Scsi_Host *shost = class_to_shost(dev);
6002 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6003 struct lpfc_hba *phba = vport->phba;
6004 int len;
6005
6006 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
6007 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
6008
	len += scnprintf(buf + len, PAGE_SIZE - len, "Cfg: %d SCSI: %d NVME: %d\n",
6010 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
6011 phba->cfg_nvme_seg_cnt);
6012 return len;
6013 }
6014
6015 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
6016
6017 /**
6018 * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
6019 * @phba: lpfc_hba pointer.
6020 * @val: contains the initial value
6021 *
6022 * Description:
6023 * Validates the initial value is within range and assigns it to the
6024 * adapter. If not in range, an error message is posted and the
6025 * default value is assigned.
6026 *
6027 * Returns:
6028 * zero if value is in range and is set
6029 * -EINVAL if value was out of range
6030 **/
6031 static int
lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
6033 {
6034 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
6035 phba->cfg_sg_seg_cnt = val;
6036 return 0;
6037 }
6038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6039 "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
6040 "be set to %d, allowed range is [%d, %d]\n",
6041 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
6042 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6043 return -EINVAL;
6044 }
6045
6046 /*
6047 * lpfc_enable_mds_diags: Enable MDS Diagnostics
6048 * 0 = MDS Diagnostics disabled (default)
6049 * 1 = MDS Diagnostics enabled
6050 * Value range is [0,1]. Default value is 0.
6051 */
6052 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6053
6054 /*
6055 * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
6056 * 0 = Disable firmware logging (default)
 * [1-4] = Multiple of 1/4th MB of host memory for FW logging
6058 * Value range [0..4]. Default value is 0
6059 */
6060 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
6061 lpfc_param_show(ras_fwlog_buffsize);
6062
6063 static ssize_t
lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
6065 {
6066 int ret = 0;
6067 enum ras_state state;
6068
6069 if (!lpfc_rangecheck(val, 0, 4))
6070 return -EINVAL;
6071
6072 if (phba->cfg_ras_fwlog_buffsize == val)
6073 return 0;
6074
6075 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
6076 return -EINVAL;
6077
6078 spin_lock_irq(&phba->hbalock);
6079 state = phba->ras_fwlog.state;
6080 spin_unlock_irq(&phba->hbalock);
6081
6082 if (state == REG_INPROGRESS) {
6083 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
6084 "registration is in progress\n");
6085 return -EBUSY;
6086 }
6087
	/* To disable logging: stop the logs and free the DMA buffer.
	 * For a ras_fwlog_buffsize change we still need to free and
	 * reallocate the DMA buffer in lpfc_sli4_ras_fwlog_init.
6091 */
6092 phba->cfg_ras_fwlog_buffsize = val;
6093 if (state == ACTIVE) {
6094 lpfc_ras_stop_fwlog(phba);
6095 lpfc_sli4_ras_dma_free(phba);
6096 }
6097
6098 lpfc_sli4_ras_init(phba);
6099 if (phba->ras_fwlog.ras_enabled)
6100 ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6101 LPFC_RAS_ENABLE_LOGGING);
6102 return ret;
6103 }
6104
6105 lpfc_param_store(ras_fwlog_buffsize);
6106 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
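/*
 * Worked example (illustrative only): lpfc_ras_fwlog_buffsize is a multiple
 * of a quarter megabyte of host memory, so writing 4 reserves
 * 4 * 256KB = 1MB for firmware logging, while writing 0 stops logging and
 * frees the DMA buffer as implemented in the set routine above.
 */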
6107
6108 /*
6109 * lpfc_ras_fwlog_level: Firmware logging verbosity level
6110 * Valid only if firmware logging is enabled
 * 0 (least verbosity) to 4 (most verbosity)
6112 * Value range is [0..4]. Default value is 0
6113 */
6114 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
6115
6116 /*
6117 * lpfc_ras_fwlog_func: Firmware logging enabled on function number
6118 * Default function which has RAS support : 0
6119 * Value Range is [0..7].
6120 * FW logging is a global action and enablement is via a specific
6121 * port.
6122 */
6123 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
6124
6125 /*
6126 * lpfc_enable_bbcr: Enable BB Credit Recovery
6127 * 0 = BB Credit Recovery disabled
6128 * 1 = BB Credit Recovery enabled (default)
6129 * Value range is [0,1]. Default value is 1.
6130 */
6131 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
6132
6133 /*
6134 * lpfc_enable_dpp: Enable DPP on G7
6135 * 0 = DPP on G7 disabled
6136 * 1 = DPP on G7 enabled (default)
6137 * Value range is [0,1]. Default value is 1.
6138 */
6139 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
6140
6141 struct device_attribute *lpfc_hba_attrs[] = {
6142 &dev_attr_nvme_info,
6143 &dev_attr_scsi_stat,
6144 &dev_attr_bg_info,
6145 &dev_attr_bg_guard_err,
6146 &dev_attr_bg_apptag_err,
6147 &dev_attr_bg_reftag_err,
6148 &dev_attr_info,
6149 &dev_attr_serialnum,
6150 &dev_attr_modeldesc,
6151 &dev_attr_modelname,
6152 &dev_attr_programtype,
6153 &dev_attr_portnum,
6154 &dev_attr_fwrev,
6155 &dev_attr_hdw,
6156 &dev_attr_option_rom_version,
6157 &dev_attr_link_state,
6158 &dev_attr_num_discovered_ports,
6159 &dev_attr_menlo_mgmt_mode,
6160 &dev_attr_lpfc_drvr_version,
6161 &dev_attr_lpfc_enable_fip,
6162 &dev_attr_lpfc_temp_sensor,
6163 &dev_attr_lpfc_log_verbose,
6164 &dev_attr_lpfc_lun_queue_depth,
6165 &dev_attr_lpfc_tgt_queue_depth,
6166 &dev_attr_lpfc_hba_queue_depth,
6167 &dev_attr_lpfc_peer_port_login,
6168 &dev_attr_lpfc_nodev_tmo,
6169 &dev_attr_lpfc_devloss_tmo,
6170 &dev_attr_lpfc_enable_fc4_type,
6171 &dev_attr_lpfc_fcp_class,
6172 &dev_attr_lpfc_use_adisc,
6173 &dev_attr_lpfc_first_burst_size,
6174 &dev_attr_lpfc_ack0,
6175 &dev_attr_lpfc_xri_rebalancing,
6176 &dev_attr_lpfc_topology,
6177 &dev_attr_lpfc_scan_down,
6178 &dev_attr_lpfc_link_speed,
6179 &dev_attr_lpfc_fcp_io_sched,
6180 &dev_attr_lpfc_ns_query,
6181 &dev_attr_lpfc_fcp2_no_tgt_reset,
6182 &dev_attr_lpfc_cr_delay,
6183 &dev_attr_lpfc_cr_count,
6184 &dev_attr_lpfc_multi_ring_support,
6185 &dev_attr_lpfc_multi_ring_rctl,
6186 &dev_attr_lpfc_multi_ring_type,
6187 &dev_attr_lpfc_fdmi_on,
6188 &dev_attr_lpfc_enable_SmartSAN,
6189 &dev_attr_lpfc_max_luns,
6190 &dev_attr_lpfc_enable_npiv,
6191 &dev_attr_lpfc_fcf_failover_policy,
6192 &dev_attr_lpfc_enable_rrq,
6193 &dev_attr_nport_evt_cnt,
6194 &dev_attr_board_mode,
6195 &dev_attr_max_vpi,
6196 &dev_attr_used_vpi,
6197 &dev_attr_max_rpi,
6198 &dev_attr_used_rpi,
6199 &dev_attr_max_xri,
6200 &dev_attr_used_xri,
6201 &dev_attr_npiv_info,
6202 &dev_attr_issue_reset,
6203 &dev_attr_lpfc_poll,
6204 &dev_attr_lpfc_poll_tmo,
6205 &dev_attr_lpfc_task_mgmt_tmo,
6206 &dev_attr_lpfc_use_msi,
6207 &dev_attr_lpfc_nvme_oas,
6208 &dev_attr_lpfc_nvme_embed_cmd,
6209 &dev_attr_lpfc_fcp_imax,
6210 &dev_attr_lpfc_force_rscn,
6211 &dev_attr_lpfc_cq_poll_threshold,
6212 &dev_attr_lpfc_cq_max_proc_limit,
6213 &dev_attr_lpfc_fcp_cpu_map,
6214 &dev_attr_lpfc_fcp_mq_threshold,
6215 &dev_attr_lpfc_hdw_queue,
6216 &dev_attr_lpfc_irq_chann,
6217 &dev_attr_lpfc_suppress_rsp,
6218 &dev_attr_lpfc_nvmet_mrq,
6219 &dev_attr_lpfc_nvmet_mrq_post,
6220 &dev_attr_lpfc_nvme_enable_fb,
6221 &dev_attr_lpfc_nvmet_fb_size,
6222 &dev_attr_lpfc_enable_bg,
6223 &dev_attr_lpfc_soft_wwnn,
6224 &dev_attr_lpfc_soft_wwpn,
6225 &dev_attr_lpfc_soft_wwn_enable,
6226 &dev_attr_lpfc_enable_hba_reset,
6227 &dev_attr_lpfc_enable_hba_heartbeat,
6228 &dev_attr_lpfc_EnableXLane,
6229 &dev_attr_lpfc_XLanePriority,
6230 &dev_attr_lpfc_xlane_lun,
6231 &dev_attr_lpfc_xlane_tgt,
6232 &dev_attr_lpfc_xlane_vpt,
6233 &dev_attr_lpfc_xlane_lun_state,
6234 &dev_attr_lpfc_xlane_lun_status,
6235 &dev_attr_lpfc_xlane_priority,
6236 &dev_attr_lpfc_sg_seg_cnt,
6237 &dev_attr_lpfc_max_scsicmpl_time,
6238 &dev_attr_lpfc_stat_data_ctrl,
6239 &dev_attr_lpfc_aer_support,
6240 &dev_attr_lpfc_aer_state_cleanup,
6241 &dev_attr_lpfc_sriov_nr_virtfn,
6242 &dev_attr_lpfc_req_fw_upgrade,
6243 &dev_attr_lpfc_suppress_link_up,
6244 &dev_attr_iocb_hw,
6245 &dev_attr_pls,
6246 &dev_attr_pt,
6247 &dev_attr_txq_hw,
6248 &dev_attr_txcmplq_hw,
6249 &dev_attr_lpfc_sriov_hw_max_virtfn,
6250 &dev_attr_protocol,
6251 &dev_attr_lpfc_xlane_supported,
6252 &dev_attr_lpfc_enable_mds_diags,
6253 &dev_attr_lpfc_ras_fwlog_buffsize,
6254 &dev_attr_lpfc_ras_fwlog_level,
6255 &dev_attr_lpfc_ras_fwlog_func,
6256 &dev_attr_lpfc_enable_bbcr,
6257 &dev_attr_lpfc_enable_dpp,
6258 NULL,
6259 };
6260
6261 struct device_attribute *lpfc_vport_attrs[] = {
6262 &dev_attr_info,
6263 &dev_attr_link_state,
6264 &dev_attr_num_discovered_ports,
6265 &dev_attr_lpfc_drvr_version,
6266 &dev_attr_lpfc_log_verbose,
6267 &dev_attr_lpfc_lun_queue_depth,
6268 &dev_attr_lpfc_tgt_queue_depth,
6269 &dev_attr_lpfc_nodev_tmo,
6270 &dev_attr_lpfc_devloss_tmo,
6271 &dev_attr_lpfc_hba_queue_depth,
6272 &dev_attr_lpfc_peer_port_login,
6273 &dev_attr_lpfc_restrict_login,
6274 &dev_attr_lpfc_fcp_class,
6275 &dev_attr_lpfc_use_adisc,
6276 &dev_attr_lpfc_first_burst_size,
6277 &dev_attr_lpfc_max_luns,
6278 &dev_attr_nport_evt_cnt,
6279 &dev_attr_npiv_info,
6280 &dev_attr_lpfc_enable_da_id,
6281 &dev_attr_lpfc_max_scsicmpl_time,
6282 &dev_attr_lpfc_stat_data_ctrl,
6283 &dev_attr_lpfc_static_vport,
6284 NULL,
6285 };
6286
6287 /**
6288 * sysfs_ctlreg_write - Write method for writing to ctlreg
6289 * @filp: open sysfs file
6290 * @kobj: kernel kobject that contains the kernel class device.
6291 * @bin_attr: kernel attributes passed to us.
6292 * @buf: contains the data to be written to the adapter IOREG space.
6293 * @off: offset into buffer to beginning of data.
6294 * @count: bytes to transfer.
6295 *
6296 * Description:
6297 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6298 * Uses the adapter io control registers to send buf contents to the adapter.
6299 *
6300 * Returns:
6301 * -ERANGE off and count combo out of range
6302 * -EINVAL off, count or buff address invalid
6303 * -EPERM adapter is offline
6304 * value of count, buf contents written
6305 **/
6306 static ssize_t
sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
6308 struct bin_attribute *bin_attr,
6309 char *buf, loff_t off, size_t count)
6310 {
6311 size_t buf_off;
6312 struct device *dev = container_of(kobj, struct device, kobj);
6313 struct Scsi_Host *shost = class_to_shost(dev);
6314 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6315 struct lpfc_hba *phba = vport->phba;
6316
6317 if (phba->sli_rev >= LPFC_SLI_REV4)
6318 return -EPERM;
6319
6320 if ((off + count) > FF_REG_AREA_SIZE)
6321 return -ERANGE;
6322
6323 if (count <= LPFC_REG_WRITE_KEY_SIZE)
6324 return 0;
6325
6326 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6327 return -EINVAL;
6328
6329 /* This is to protect HBA registers from accidental writes. */
6330 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
6331 return -EINVAL;
6332
6333 if (!(vport->fc_flag & FC_OFFLINE_MODE))
6334 return -EPERM;
6335
6336 spin_lock_irq(&phba->hbalock);
6337 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
6338 buf_off += sizeof(uint32_t))
6339 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
6340 phba->ctrl_regs_memmap_p + off + buf_off);
6341
6342 spin_unlock_irq(&phba->hbalock);
6343
6344 return count;
6345 }
6346
6347 /**
6348 * sysfs_ctlreg_read - Read method for reading from ctlreg
6349 * @filp: open sysfs file
6350 * @kobj: kernel kobject that contains the kernel class device.
6351 * @bin_attr: kernel attributes passed to us.
6352 * @buf: if successful contains the data from the adapter IOREG space.
6353 * @off: offset into buffer to beginning of data.
6354 * @count: bytes to transfer.
6355 *
6356 * Description:
6357 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6358 * Uses the adapter io control registers to read data into buf.
6359 *
6360 * Returns:
6361 * -ERANGE off and count combo out of range
6362 * -EINVAL off, count or buff address invalid
6363 * value of count, buf contents read
6364 **/
6365 static ssize_t
sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
6367 struct bin_attribute *bin_attr,
6368 char *buf, loff_t off, size_t count)
6369 {
6370 size_t buf_off;
6371 uint32_t * tmp_ptr;
6372 struct device *dev = container_of(kobj, struct device, kobj);
6373 struct Scsi_Host *shost = class_to_shost(dev);
6374 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6375 struct lpfc_hba *phba = vport->phba;
6376
6377 if (phba->sli_rev >= LPFC_SLI_REV4)
6378 return -EPERM;
6379
6380 if (off > FF_REG_AREA_SIZE)
6381 return -ERANGE;
6382
6383 if ((off + count) > FF_REG_AREA_SIZE)
6384 count = FF_REG_AREA_SIZE - off;
6385
6386 if (count == 0) return 0;
6387
6388 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6389 return -EINVAL;
6390
6391 spin_lock_irq(&phba->hbalock);
6392
6393 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
6394 tmp_ptr = (uint32_t *)(buf + buf_off);
6395 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
6396 }
6397
6398 spin_unlock_irq(&phba->hbalock);
6399
6400 return count;
6401 }
6402
6403 static struct bin_attribute sysfs_ctlreg_attr = {
6404 .attr = {
6405 .name = "ctlreg",
6406 .mode = S_IRUSR | S_IWUSR,
6407 },
6408 .size = 256,
6409 .read = sysfs_ctlreg_read,
6410 .write = sysfs_ctlreg_write,
6411 };
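/*
 * Illustrative user space sketch (not part of the driver, assumptions noted
 * inline): sysfs_ctlreg_write() above requires the payload to be prefixed
 * with the "EMLX" write key and to be 4-byte aligned. A minimal program
 * that writes one 32-bit register value could look like this, assuming
 * host16 and register offset 0:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// "EMLX" key followed by one 32-bit value in host byte order
 *		uint8_t buf[8];
 *		uint32_t val = 0;
 *		int fd;
 *
 *		memcpy(buf, "EMLX", 4);
 *		memcpy(buf + 4, &val, 4);
 *		// assumed path; the host number varies per system
 *		fd = open("/sys/class/scsi_host/host16/ctlreg", O_WRONLY);
 *		if (fd < 0)
 *			return 1;
 *		// offset 0 into the HBA control register area
 *		if (pwrite(fd, buf, sizeof(buf), 0) != sizeof(buf))
 *			return 1;
 *		return close(fd);
 *	}
 *
 * The adapter must be offline (FC_OFFLINE_MODE) and must not be an SLI-4
 * port, otherwise the write is rejected with -EPERM.
 */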
6412
6413 /**
6414 * sysfs_mbox_write - Write method for writing information via mbox
6415 * @filp: open sysfs file
6416 * @kobj: kernel kobject that contains the kernel class device.
6417 * @bin_attr: kernel attributes passed to us.
6418 * @buf: contains the data to be written to sysfs mbox.
6419 * @off: offset into buffer to beginning of data.
6420 * @count: bytes to transfer.
6421 *
6422 * Description:
6423 * Deprecated function. All mailbox access from user space is performed via the
6424 * bsg interface.
6425 *
6426 * Returns:
6427 * -EPERM operation not permitted
6428 **/
6429 static ssize_t
sysfs_mbox_write(struct file *filp, struct kobject *kobj,
6431 struct bin_attribute *bin_attr,
6432 char *buf, loff_t off, size_t count)
6433 {
6434 return -EPERM;
6435 }
6436
6437 /**
6438 * sysfs_mbox_read - Read method for reading information via mbox
6439 * @filp: open sysfs file
6440 * @kobj: kernel kobject that contains the kernel class device.
6441 * @bin_attr: kernel attributes passed to us.
6442 * @buf: contains the data to be read from sysfs mbox.
6443 * @off: offset into buffer to beginning of data.
6444 * @count: bytes to transfer.
6445 *
6446 * Description:
6447 * Deprecated function. All mailbox access from user space is performed via the
6448 * bsg interface.
6449 *
6450 * Returns:
6451 * -EPERM operation not permitted
6452 **/
6453 static ssize_t
sysfs_mbox_read(struct file *filp, struct kobject *kobj,
6455 struct bin_attribute *bin_attr,
6456 char *buf, loff_t off, size_t count)
6457 {
6458 return -EPERM;
6459 }
6460
6461 static struct bin_attribute sysfs_mbox_attr = {
6462 .attr = {
6463 .name = "mbox",
6464 .mode = S_IRUSR | S_IWUSR,
6465 },
6466 .size = MAILBOX_SYSFS_MAX,
6467 .read = sysfs_mbox_read,
6468 .write = sysfs_mbox_write,
6469 };
6470
6471 /**
6472 * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
6473 * @vport: address of lpfc vport structure.
6474 *
6475 * Return codes:
6476 * zero on success
6477 * error return code from sysfs_create_bin_file()
6478 **/
6479 int
lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
6481 {
6482 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6483 int error;
6484
6485 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6486 &sysfs_drvr_stat_data_attr);
6487
6488 /* Virtual ports do not need ctrl_reg and mbox */
6489 if (error || vport->port_type == LPFC_NPIV_PORT)
6490 goto out;
6491
6492 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6493 &sysfs_ctlreg_attr);
6494 if (error)
6495 goto out_remove_stat_attr;
6496
6497 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6498 &sysfs_mbox_attr);
6499 if (error)
6500 goto out_remove_ctlreg_attr;
6501
6502 return 0;
6503 out_remove_ctlreg_attr:
6504 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6505 out_remove_stat_attr:
6506 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6507 &sysfs_drvr_stat_data_attr);
6508 out:
6509 return error;
6510 }
6511
6512 /**
6513 * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
6514 * @vport: address of lpfc vport structure.
6515 **/
6516 void
lpfc_free_sysfs_attr(struct lpfc_vport *vport)
6518 {
6519 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6520 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6521 &sysfs_drvr_stat_data_attr);
6522 /* Virtual ports do not need ctrl_reg and mbox */
6523 if (vport->port_type == LPFC_NPIV_PORT)
6524 return;
6525 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
6526 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6527 }
6528
6529 /*
6530 * Dynamic FC Host Attributes Support
6531 */
6532
6533 /**
6534 * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
6535 * @shost: kernel scsi host pointer.
6536 **/
6537 static void
lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
6539 {
6540 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6541
6542 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
6543 sizeof fc_host_symbolic_name(shost));
6544 }
6545
6546 /**
6547 * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
6548 * @shost: kernel scsi host pointer.
6549 **/
6550 static void
lpfc_get_host_port_id(struct Scsi_Host *shost)
6552 {
6553 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6554
6555 /* note: fc_myDID already in cpu endianness */
6556 fc_host_port_id(shost) = vport->fc_myDID;
6557 }
6558
6559 /**
6560 * lpfc_get_host_port_type - Set the value of the scsi host port type
6561 * @shost: kernel scsi host pointer.
6562 **/
6563 static void
6564 lpfc_get_host_port_type(struct Scsi_Host *shost)
6565 {
6566 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6567 struct lpfc_hba *phba = vport->phba;
6568
6569 spin_lock_irq(shost->host_lock);
6570
6571 if (vport->port_type == LPFC_NPIV_PORT) {
6572 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
6573 } else if (lpfc_is_link_up(phba)) {
6574 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6575 if (vport->fc_flag & FC_PUBLIC_LOOP)
6576 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
6577 else
6578 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
6579 } else {
6580 if (vport->fc_flag & FC_FABRIC)
6581 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
6582 else
6583 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
6584 }
6585 } else
6586 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
6587
6588 spin_unlock_irq(shost->host_lock);
6589 }
6590
6591 /**
6592 * lpfc_get_host_port_state - Set the value of the scsi host port state
6593 * @shost: kernel scsi host pointer.
6594 **/
6595 static void
6596 lpfc_get_host_port_state(struct Scsi_Host *shost)
6597 {
6598 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6599 struct lpfc_hba *phba = vport->phba;
6600
6601 spin_lock_irq(shost->host_lock);
6602
6603 if (vport->fc_flag & FC_OFFLINE_MODE)
6604 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
6605 else {
6606 switch (phba->link_state) {
6607 case LPFC_LINK_UNKNOWN:
6608 case LPFC_LINK_DOWN:
6609 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
6610 break;
6611 case LPFC_LINK_UP:
6612 case LPFC_CLEAR_LA:
6613 case LPFC_HBA_READY:
6614 /* Link is up, report the port state accordingly */
6615 if (vport->port_state < LPFC_VPORT_READY)
6616 fc_host_port_state(shost) =
6617 FC_PORTSTATE_BYPASSED;
6618 else
6619 fc_host_port_state(shost) =
6620 FC_PORTSTATE_ONLINE;
6621 break;
6622 case LPFC_HBA_ERROR:
6623 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
6624 break;
6625 default:
6626 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
6627 break;
6628 }
6629 }
6630
6631 spin_unlock_irq(shost->host_lock);
6632 }
6633
6634 /**
6635 * lpfc_get_host_speed - Set the value of the scsi host speed
6636 * @shost: kernel scsi host pointer.
6637 **/
6638 static void
6639 lpfc_get_host_speed(struct Scsi_Host *shost)
6640 {
6641 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6642 struct lpfc_hba *phba = vport->phba;
6643
6644 spin_lock_irq(shost->host_lock);
6645
6646 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
6647 switch (phba->fc_linkspeed) {
6648 case LPFC_LINK_SPEED_1GHZ:
6649 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6650 break;
6651 case LPFC_LINK_SPEED_2GHZ:
6652 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
6653 break;
6654 case LPFC_LINK_SPEED_4GHZ:
6655 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
6656 break;
6657 case LPFC_LINK_SPEED_8GHZ:
6658 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
6659 break;
6660 case LPFC_LINK_SPEED_10GHZ:
6661 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6662 break;
6663 case LPFC_LINK_SPEED_16GHZ:
6664 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
6665 break;
6666 case LPFC_LINK_SPEED_32GHZ:
6667 fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
6668 break;
6669 case LPFC_LINK_SPEED_64GHZ:
6670 fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6671 break;
6672 case LPFC_LINK_SPEED_128GHZ:
6673 fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6674 break;
6675 default:
6676 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6677 break;
6678 }
6679 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
6680 switch (phba->fc_linkspeed) {
6681 case LPFC_ASYNC_LINK_SPEED_1GBPS:
6682 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6683 break;
6684 case LPFC_ASYNC_LINK_SPEED_10GBPS:
6685 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6686 break;
6687 case LPFC_ASYNC_LINK_SPEED_20GBPS:
6688 fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
6689 break;
6690 case LPFC_ASYNC_LINK_SPEED_25GBPS:
6691 fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
6692 break;
6693 case LPFC_ASYNC_LINK_SPEED_40GBPS:
6694 fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
6695 break;
6696 case LPFC_ASYNC_LINK_SPEED_100GBPS:
6697 fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
6698 break;
6699 default:
6700 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6701 break;
6702 }
6703 } else
6704 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6705
6706 spin_unlock_irq(shost->host_lock);
6707 }
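/*
 * The speed selected here is what the FC transport class reports through
 * sysfs, e.g. (illustrative path) /sys/class/fc_host/hostN/speed showing
 * "16 Gbit" when FC_PORTSPEED_16GBIT is set.
 */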
6708
6709 /**
6710 * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
6711 * @shost: kernel scsi host pointer.
6712 **/
6713 static void
6714 lpfc_get_host_fabric_name(struct Scsi_Host *shost)
6715 {
6716 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6717 struct lpfc_hba *phba = vport->phba;
6718 u64 node_name;
6719
6720 spin_lock_irq(shost->host_lock);
6721
6722 if ((vport->port_state > LPFC_FLOGI) &&
6723 ((vport->fc_flag & FC_FABRIC) ||
6724 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
6725 (vport->fc_flag & FC_PUBLIC_LOOP))))
6726 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
6727 else
6728 /* fabric is local port if there is no F/FL_Port */
6729 node_name = 0;
6730
6731 spin_unlock_irq(shost->host_lock);
6732
6733 fc_host_fabric_name(shost) = node_name;
6734 }
6735
6736 /**
6737 * lpfc_get_stats - Return statistical information about the adapter
6738 * @shost: kernel scsi host pointer.
6739 *
6740 * Notes:
6741 * NULL on error: link not initialized, no mbox pool, SLI not active,
6742 * management I/O blocked, memory allocation error, or mbox error.
6743 *
6744 * Returns:
6745 * NULL for error
6746 * address of the adapter host statistics
6747 **/
6748 static struct fc_host_statistics *
6749 lpfc_get_stats(struct Scsi_Host *shost)
6750 {
6751 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6752 struct lpfc_hba *phba = vport->phba;
6753 struct lpfc_sli *psli = &phba->sli;
6754 struct fc_host_statistics *hs = &phba->link_stats;
6755 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6756 LPFC_MBOXQ_t *pmboxq;
6757 MAILBOX_t *pmb;
6758 int rc = 0;
6759
6760 /*
6761 * prevent udev from issuing mailbox commands until the port is
6762 * configured.
6763 */
6764 if (phba->link_state < LPFC_LINK_DOWN ||
6765 !phba->mbox_mem_pool ||
6766 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
6767 return NULL;
6768
6769 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6770 return NULL;
6771
6772 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6773 if (!pmboxq)
6774 return NULL;
6775 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6776
6777 pmb = &pmboxq->u.mb;
6778 pmb->mbxCommand = MBX_READ_STATUS;
6779 pmb->mbxOwner = OWN_HOST;
6780 pmboxq->ctx_buf = NULL;
6781 pmboxq->vport = vport;
6782
6783 if (vport->fc_flag & FC_OFFLINE_MODE)
6784 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6785 else
6786 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6787
6788 if (rc != MBX_SUCCESS) {
6789 if (rc != MBX_TIMEOUT)
6790 mempool_free(pmboxq, phba->mbox_mem_pool);
6791 return NULL;
6792 }
6793
6794 memset(hs, 0, sizeof (struct fc_host_statistics));
6795
6796 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
6797 /*
6798 * MBX_READ_STATUS returns the byte counts in kilobytes, which have
6799 * to be converted to 4-byte words (1 KB = 256 words), hence the * 256
6800 */
6801 hs->tx_words = (uint64_t)
6802 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
6803 * (uint64_t)256);
6804 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
6805 hs->rx_words = (uint64_t)
6806 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
6807 * (uint64_t)256);
6808
6809 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
6810 pmb->mbxCommand = MBX_READ_LNK_STAT;
6811 pmb->mbxOwner = OWN_HOST;
6812 pmboxq->ctx_buf = NULL;
6813 pmboxq->vport = vport;
6814
6815 if (vport->fc_flag & FC_OFFLINE_MODE)
6816 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6817 else
6818 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6819
6820 if (rc != MBX_SUCCESS) {
6821 if (rc != MBX_TIMEOUT)
6822 mempool_free(pmboxq, phba->mbox_mem_pool);
6823 return NULL;
6824 }
6825
6826 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6827 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6828 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6829 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6830 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6831 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6832 hs->error_frames = pmb->un.varRdLnk.crcCnt;
6833
6834 hs->link_failure_count -= lso->link_failure_count;
6835 hs->loss_of_sync_count -= lso->loss_of_sync_count;
6836 hs->loss_of_signal_count -= lso->loss_of_signal_count;
6837 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
6838 hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
6839 hs->invalid_crc_count -= lso->invalid_crc_count;
6840 hs->error_frames -= lso->error_frames;
6841
6842 if (phba->hba_flag & HBA_FCOE_MODE) {
6843 hs->lip_count = -1;
6844 hs->nos_count = (phba->link_events >> 1);
6845 hs->nos_count -= lso->link_events;
6846 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6847 hs->lip_count = (phba->fc_eventTag >> 1);
6848 hs->lip_count -= lso->link_events;
6849 hs->nos_count = -1;
6850 } else {
6851 hs->lip_count = -1;
6852 hs->nos_count = (phba->fc_eventTag >> 1);
6853 hs->nos_count -= lso->link_events;
6854 }
6855
6856 hs->dumped_frames = -1;
6857
6858 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
6859
6860 mempool_free(pmboxq, phba->mbox_mem_pool);
6861
6862 return hs;
6863 }
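/*
 * The counters gathered above are exposed by the FC transport class under
 * sysfs, e.g. (illustrative path) /sys/class/fc_host/hostN/statistics/.
 * Counters that do not apply to the current topology are reported as -1.
 */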
6864
6865 /**
6866 * lpfc_reset_stats - Copy the adapter link stats information
6867 * @shost: kernel scsi host pointer.
6868 **/
6869 static void
6870 lpfc_reset_stats(struct Scsi_Host *shost)
6871 {
6872 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6873 struct lpfc_hba *phba = vport->phba;
6874 struct lpfc_sli *psli = &phba->sli;
6875 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6876 LPFC_MBOXQ_t *pmboxq;
6877 MAILBOX_t *pmb;
6878 int rc = 0;
6879
6880 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6881 return;
6882
6883 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6884 if (!pmboxq)
6885 return;
6886 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6887
6888 pmb = &pmboxq->u.mb;
6889 pmb->mbxCommand = MBX_READ_STATUS;
6890 pmb->mbxOwner = OWN_HOST;
6891 pmb->un.varWords[0] = 0x1; /* reset request */
6892 pmboxq->ctx_buf = NULL;
6893 pmboxq->vport = vport;
6894
6895 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6896 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
6897 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6898 else
6899 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6900
6901 if (rc != MBX_SUCCESS) {
6902 if (rc != MBX_TIMEOUT)
6903 mempool_free(pmboxq, phba->mbox_mem_pool);
6904 return;
6905 }
6906
6907 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6908 pmb->mbxCommand = MBX_READ_LNK_STAT;
6909 pmb->mbxOwner = OWN_HOST;
6910 pmboxq->ctx_buf = NULL;
6911 pmboxq->vport = vport;
6912
6913 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6914 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
6915 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6916 else
6917 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6918
6919 if (rc != MBX_SUCCESS) {
6920 if (rc != MBX_TIMEOUT)
6921 mempool_free(pmboxq, phba->mbox_mem_pool);
6922 return;
6923 }
6924
6925 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6926 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6927 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6928 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6929 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6930 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6931 lso->error_frames = pmb->un.varRdLnk.crcCnt;
6932 if (phba->hba_flag & HBA_FCOE_MODE)
6933 lso->link_events = (phba->link_events >> 1);
6934 else
6935 lso->link_events = (phba->fc_eventTag >> 1);
6936
6937 psli->stats_start = ktime_get_seconds();
6938
6939 mempool_free(pmboxq, phba->mbox_mem_pool);
6940
6941 return;
6942 }
6943
6944 /*
6945 * The LPFC driver treats linkdown handling as target loss events so there
6946 * are no sysfs handlers for link_down_tmo.
6947 */
6948
6949 /**
6950 * lpfc_get_node_by_target - Return the nodelist for a target
6951 * @starget: kernel scsi target pointer.
6952 *
6953 * Returns:
6954 * address of the node list if found
6955 * NULL target not found
6956 **/
6957 static struct lpfc_nodelist *
6958 lpfc_get_node_by_target(struct scsi_target *starget)
6959 {
6960 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
6961 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6962 struct lpfc_nodelist *ndlp;
6963
6964 spin_lock_irq(shost->host_lock);
6965 /* Search the node list for this mapped target ID */
6966 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6967 if (NLP_CHK_NODE_ACT(ndlp) &&
6968 ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
6969 starget->id == ndlp->nlp_sid) {
6970 spin_unlock_irq(shost->host_lock);
6971 return ndlp;
6972 }
6973 }
6974 spin_unlock_irq(shost->host_lock);
6975 return NULL;
6976 }
6977
6978 /**
6979 * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
6980 * @starget: kernel scsi target pointer.
6981 **/
6982 static void
6983 lpfc_get_starget_port_id(struct scsi_target *starget)
6984 {
6985 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
6986
6987 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
6988 }
6989
6990 /**
6991 * lpfc_get_starget_node_name - Set the target node name
6992 * @starget: kernel scsi target pointer.
6993 *
6994 * Description: Set the target node name to the ndlp node name wwn or zero.
6995 **/
6996 static void
6997 lpfc_get_starget_node_name(struct scsi_target *starget)
6998 {
6999 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7000
7001 fc_starget_node_name(starget) =
7002 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
7003 }
7004
7005 /**
7006 * lpfc_get_starget_port_name - Set the target port name
7007 * @starget: kernel scsi target pointer.
7008 *
7009 * Description: Set the target port name to the ndlp port name wwn or zero.
7010 **/
7011 static void
7012 lpfc_get_starget_port_name(struct scsi_target *starget)
7013 {
7014 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7015
7016 fc_starget_port_name(starget) =
7017 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
7018 }
7019
7020 /**
7021 * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
7022 * @rport: fc rport address.
7023 * @timeout: new value for dev loss tmo.
7024 *
7025 * Description:
7026 * If timeout is non-zero, set dev_loss_tmo to timeout; otherwise set
7027 * dev_loss_tmo to one.
7028 **/
7029 static void
7030 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
7031 {
7032 struct lpfc_rport_data *rdata = rport->dd_data;
7033 struct lpfc_nodelist *ndlp = rdata->pnode;
7034 #if (IS_ENABLED(CONFIG_NVME_FC))
7035 struct lpfc_nvme_rport *nrport = NULL;
7036 #endif
7037
7038 if (timeout)
7039 rport->dev_loss_tmo = timeout;
7040 else
7041 rport->dev_loss_tmo = 1;
7042
7043 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
7044 dev_info(&rport->dev, "Cannot find remote node to "
7045 "set rport dev loss tmo, port_id x%x\n",
7046 rport->port_id);
7047 return;
7048 }
7049
7050 #if (IS_ENABLED(CONFIG_NVME_FC))
7051 nrport = lpfc_ndlp_get_nrport(ndlp);
7052
7053 if (nrport && nrport->remoteport)
7054 nvme_fc_set_remoteport_devloss(nrport->remoteport,
7055 rport->dev_loss_tmo);
7056 #endif
7057 }
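/*
 * This callback runs when userspace writes the remote port's dev_loss_tmo
 * attribute, e.g. (illustrative command)
 *	echo 60 > /sys/class/fc_remote_ports/rport-0:0-1/dev_loss_tmo
 * and, when NVMe over FC is enabled, it propagates the new value to the
 * matching NVMe remoteport.
 */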
7058
7059 /**
7060 * lpfc_rport_show_function - Return rport target information
7061 *
7062 * Description:
7063 * Macro that uses @field to generate a function named lpfc_show_rport_##field.
7064 *
7065 * lpfc_show_rport_##field: returns the field's value formatted in buf.
7066 * @cdev: class device converted to an fc_rport.
7067 * @buf: on return contains the target's field value or zero.
7068 *
7069 * Returns: size of formatted string.
7070 **/
7071 #define lpfc_rport_show_function(field, format_string, sz, cast) \
7072 static ssize_t \
7073 lpfc_show_rport_##field (struct device *dev, \
7074 struct device_attribute *attr, \
7075 char *buf) \
7076 { \
7077 struct fc_rport *rport = transport_class_to_rport(dev); \
7078 struct lpfc_rport_data *rdata = rport->hostdata; \
7079 return scnprintf(buf, sz, format_string, \
7080 (rdata->target) ? cast rdata->target->field : 0); \
7081 }
7082
7083 #define lpfc_rport_rd_attr(field, format_string, sz) \
7084 lpfc_rport_show_function(field, format_string, sz, ) \
7085 static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
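/*
 * Hypothetical instantiation for illustration only (the field name below is
 * not necessarily one used by this driver): lpfc_rport_rd_attr(example_field,
 * "%u\n", 20) would expand to lpfc_show_rport_example_field(), which formats
 * rdata->target->example_field into buf, plus a read-only FC_RPORT_ATTR
 * registration for it.
 */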
7086
7087 /**
7088 * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
7089 * @fc_vport: The fc_vport whose symbolic name has been changed.
7090 *
7091 * Description:
7092 * This function is called by the transport after the @fc_vport's symbolic name
7093 * has been changed. This function re-registers the symbolic name with the
7094 * switch to propagate the change into the fabric if the vport is active.
7095 **/
7096 static void
7097 lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
7098 {
7099 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
7100
7101 if (vport->port_state == LPFC_VPORT_READY)
7102 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
7103 }
7104
7105 /**
7106 * lpfc_hba_log_verbose_init - Set hba's log verbose level
7107 * @phba: Pointer to lpfc_hba struct.
7108 * @verbose: verbose logging level mask.
7109 *
7110 * This function is called by the lpfc_get_cfgparam() routine to copy the
7111 * module parameter lpfc_log_verbose into the @phba cfg_log_verbose field,
7112 * so log messages can be filtered before any hba port or vport is created.
7113 **/
7114 static void
7115 lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
7116 {
7117 phba->cfg_log_verbose = verbose;
7118 }
7119
7120 struct fc_function_template lpfc_transport_functions = {
7121 /* fixed attributes the driver supports */
7122 .show_host_node_name = 1,
7123 .show_host_port_name = 1,
7124 .show_host_supported_classes = 1,
7125 .show_host_supported_fc4s = 1,
7126 .show_host_supported_speeds = 1,
7127 .show_host_maxframe_size = 1,
7128
7129 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7130 .show_host_symbolic_name = 1,
7131
7132 /* dynamic attributes the driver supports */
7133 .get_host_port_id = lpfc_get_host_port_id,
7134 .show_host_port_id = 1,
7135
7136 .get_host_port_type = lpfc_get_host_port_type,
7137 .show_host_port_type = 1,
7138
7139 .get_host_port_state = lpfc_get_host_port_state,
7140 .show_host_port_state = 1,
7141
7142 /* active_fc4s is shown but doesn't change (thus no get function) */
7143 .show_host_active_fc4s = 1,
7144
7145 .get_host_speed = lpfc_get_host_speed,
7146 .show_host_speed = 1,
7147
7148 .get_host_fabric_name = lpfc_get_host_fabric_name,
7149 .show_host_fabric_name = 1,
7150
7151 /*
7152 * The LPFC driver treats linkdown handling as target loss events
7153 * so there are no sysfs handlers for link_down_tmo.
7154 */
7155
7156 .get_fc_host_stats = lpfc_get_stats,
7157 .reset_fc_host_stats = lpfc_reset_stats,
7158
7159 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7160 .show_rport_maxframe_size = 1,
7161 .show_rport_supported_classes = 1,
7162
7163 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7164 .show_rport_dev_loss_tmo = 1,
7165
7166 .get_starget_port_id = lpfc_get_starget_port_id,
7167 .show_starget_port_id = 1,
7168
7169 .get_starget_node_name = lpfc_get_starget_node_name,
7170 .show_starget_node_name = 1,
7171
7172 .get_starget_port_name = lpfc_get_starget_port_name,
7173 .show_starget_port_name = 1,
7174
7175 .issue_fc_host_lip = lpfc_issue_lip,
7176 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7177 .terminate_rport_io = lpfc_terminate_rport_io,
7178
7179 .dd_fcvport_size = sizeof(struct lpfc_vport *),
7180
7181 .vport_disable = lpfc_vport_disable,
7182
7183 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7184
7185 .bsg_request = lpfc_bsg_request,
7186 .bsg_timeout = lpfc_bsg_timeout,
7187 };
7188
7189 struct fc_function_template lpfc_vport_transport_functions = {
7190 /* fixed attributes the driver supports */
7191 .show_host_node_name = 1,
7192 .show_host_port_name = 1,
7193 .show_host_supported_classes = 1,
7194 .show_host_supported_fc4s = 1,
7195 .show_host_supported_speeds = 1,
7196 .show_host_maxframe_size = 1,
7197
7198 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7199 .show_host_symbolic_name = 1,
7200
7201 /* dynamic attributes the driver supports */
7202 .get_host_port_id = lpfc_get_host_port_id,
7203 .show_host_port_id = 1,
7204
7205 .get_host_port_type = lpfc_get_host_port_type,
7206 .show_host_port_type = 1,
7207
7208 .get_host_port_state = lpfc_get_host_port_state,
7209 .show_host_port_state = 1,
7210
7211 /* active_fc4s is shown but doesn't change (thus no get function) */
7212 .show_host_active_fc4s = 1,
7213
7214 .get_host_speed = lpfc_get_host_speed,
7215 .show_host_speed = 1,
7216
7217 .get_host_fabric_name = lpfc_get_host_fabric_name,
7218 .show_host_fabric_name = 1,
7219
7220 /*
7221 * The LPFC driver treats linkdown handling as target loss events
7222 * so there are no sysfs handlers for link_down_tmo.
7223 */
7224
7225 .get_fc_host_stats = lpfc_get_stats,
7226 .reset_fc_host_stats = lpfc_reset_stats,
7227
7228 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7229 .show_rport_maxframe_size = 1,
7230 .show_rport_supported_classes = 1,
7231
7232 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7233 .show_rport_dev_loss_tmo = 1,
7234
7235 .get_starget_port_id = lpfc_get_starget_port_id,
7236 .show_starget_port_id = 1,
7237
7238 .get_starget_node_name = lpfc_get_starget_node_name,
7239 .show_starget_node_name = 1,
7240
7241 .get_starget_port_name = lpfc_get_starget_port_name,
7242 .show_starget_port_name = 1,
7243
7244 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7245 .terminate_rport_io = lpfc_terminate_rport_io,
7246
7247 .vport_disable = lpfc_vport_disable,
7248
7249 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7250 };
7251
7252 /**
7253 * lpfc_get_hba_function_mode - Determine whether the HBA operates in FCoE mode
7255 * @phba: lpfc_hba pointer.
7256 **/
7257 static void
7258 lpfc_get_hba_function_mode(struct lpfc_hba *phba)
7259 {
7260 /* If the adapter supports FCoE mode */
7261 switch (phba->pcidev->device) {
7262 case PCI_DEVICE_ID_SKYHAWK:
7263 case PCI_DEVICE_ID_SKYHAWK_VF:
7264 case PCI_DEVICE_ID_LANCER_FCOE:
7265 case PCI_DEVICE_ID_LANCER_FCOE_VF:
7266 case PCI_DEVICE_ID_ZEPHYR_DCSP:
7267 case PCI_DEVICE_ID_HORNET:
7268 case PCI_DEVICE_ID_TIGERSHARK:
7269 case PCI_DEVICE_ID_TOMCAT:
7270 phba->hba_flag |= HBA_FCOE_MODE;
7271 break;
7272 default:
7273 /* for others, clear the flag */
7274 phba->hba_flag &= ~HBA_FCOE_MODE;
7275 }
7276 }
7277
7278 /**
7279 * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
7280 * @phba: lpfc_hba pointer.
7281 **/
7282 void
7283 lpfc_get_cfgparam(struct lpfc_hba *phba)
7284 {
7285 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
7286 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7287 lpfc_ns_query_init(phba, lpfc_ns_query);
7288 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
7289 lpfc_cr_delay_init(phba, lpfc_cr_delay);
7290 lpfc_cr_count_init(phba, lpfc_cr_count);
7291 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
7292 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
7293 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
7294 lpfc_ack0_init(phba, lpfc_ack0);
7295 lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
7296 lpfc_topology_init(phba, lpfc_topology);
7297 lpfc_link_speed_init(phba, lpfc_link_speed);
7298 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
7299 lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
7300 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
7301 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
7302 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
7303 lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
7304 lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
7305 lpfc_use_msi_init(phba, lpfc_use_msi);
7306 lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
7307 lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
7308 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7309 lpfc_force_rscn_init(phba, lpfc_force_rscn);
7310 lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7311 lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
7312 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
7313 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
7314 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
7315
7316 lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
7317 if (phba->sli_rev != LPFC_SLI_REV4)
7318 phba->cfg_EnableXLane = 0;
7319 lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
7320
7321 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
7322 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
7323 phba->cfg_oas_lun_state = 0;
7324 phba->cfg_oas_lun_status = 0;
7325 phba->cfg_oas_flags = 0;
7326 phba->cfg_oas_priority = 0;
7327 lpfc_enable_bg_init(phba, lpfc_enable_bg);
7328 lpfc_prot_mask_init(phba, lpfc_prot_mask);
7329 lpfc_prot_guard_init(phba, lpfc_prot_guard);
7330 if (phba->sli_rev == LPFC_SLI_REV4)
7331 phba->cfg_poll = 0;
7332 else
7333 phba->cfg_poll = lpfc_poll;
7334
7335 /* Get the function mode */
7336 lpfc_get_hba_function_mode(phba);
7337
7338 /* BlockGuard allowed for FC only. */
7339 if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
7340 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7341 "0581 BlockGuard feature not supported\n");
7342 /* If set, clear the BlockGuard support param */
7343 phba->cfg_enable_bg = 0;
7344 } else if (phba->cfg_enable_bg) {
7345 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7346 }
7347
7348 lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
7349
7350 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
7351 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
7352 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
7353
7354 /* Initialize first burst. Target vs Initiator are different. */
7355 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7356 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7357 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7358 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7359 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7360 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
7361 lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
7362
7363 if (phba->sli_rev != LPFC_SLI_REV4) {
7364 /* NVME only supported on SLI4 */
7365 phba->nvmet_support = 0;
7366 phba->cfg_nvmet_mrq = 0;
7367 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
7368 phba->cfg_enable_bbcr = 0;
7369 phba->cfg_xri_rebalancing = 0;
7370 } else {
7371 /* We MUST have FCP support */
7372 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
7373 phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
7374 }
7375
7376 phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
7377
7378 phba->cfg_enable_pbde = 0;
7379
7380 /* A value of 0 means use the number of CPUs found in the system */
7381 if (phba->cfg_hdw_queue == 0)
7382 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7383 if (phba->cfg_irq_chann == 0)
7384 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7385 if (phba->cfg_irq_chann > phba->cfg_hdw_queue)
7386 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7387
7388 phba->cfg_soft_wwnn = 0L;
7389 phba->cfg_soft_wwpn = 0L;
7390 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
7391 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
7392 lpfc_aer_support_init(phba, lpfc_aer_support);
7393 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
7394 lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
7395 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
7396 lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
7397 lpfc_sli_mode_init(phba, lpfc_sli_mode);
7398 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7399 lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7400 lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7401 lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7402
7403 return;
7404 }
7405
7406 /**
7407 * lpfc_nvme_mod_param_dep - Adjust module parameter values for protocol and role dependencies
7409 * @phba: lpfc_hba pointer.
7410 **/
7411 void
7412 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
7413 {
7414 int logit = 0;
7415
7416 if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
7417 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7418 logit = 1;
7419 }
7420 if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
7421 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7422 logit = 1;
7423 }
7424 if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
7425 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7426 logit = 1;
7427 }
7428 if (logit)
7429 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7430 "2006 Reducing Queues - CPU limitation: "
7431 "IRQ %d HDWQ %d\n",
7432 phba->cfg_irq_chann,
7433 phba->cfg_hdw_queue);
7434
7435 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
7436 phba->nvmet_support) {
7437 phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
7438
7439 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
7440 "6013 %s x%x fb_size x%x, fb_max x%x\n",
7441 "NVME Target PRLI ACC enable_fb ",
7442 phba->cfg_nvme_enable_fb,
7443 phba->cfg_nvmet_fb_size,
7444 LPFC_NVMET_FB_SZ_MAX);
7445
7446 if (phba->cfg_nvme_enable_fb == 0)
7447 phba->cfg_nvmet_fb_size = 0;
7448 else {
7449 if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
7450 phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
7451 }
7452
7453 if (!phba->cfg_nvmet_mrq)
7454 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7455
7456 /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
7457 if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
7458 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7459 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
7460 "6018 Adjust lpfc_nvmet_mrq to %d\n",
7461 phba->cfg_nvmet_mrq);
7462 }
7463 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7464 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7465
7466 } else {
7467 /* Not NVME Target mode. Turn off Target parameters. */
7468 phba->nvmet_support = 0;
7469 phba->cfg_nvmet_mrq = 0;
7470 phba->cfg_nvmet_fb_size = 0;
7471 }
7472 }
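/*
 * Worked example (hypothetical values): on a system with 8 present CPUs,
 * module parameters requesting 16 hardware queues and 16 IRQ channels are
 * both clamped to 8 above, and message 2006 is logged once.
 */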
7473
7474 /**
7475 * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
7476 * @vport: lpfc_vport pointer.
7477 **/
7478 void
7479 lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
7480 {
7481 lpfc_log_verbose_init(vport, lpfc_log_verbose);
7482 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
7483 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
7484 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
7485 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
7486 lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
7487 lpfc_restrict_login_init(vport, lpfc_restrict_login);
7488 lpfc_fcp_class_init(vport, lpfc_fcp_class);
7489 lpfc_use_adisc_init(vport, lpfc_use_adisc);
7490 lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
7491 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
7492 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
7493 lpfc_max_luns_init(vport, lpfc_max_luns);
7494 lpfc_scan_down_init(vport, lpfc_scan_down);
7495 lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
7496 return;
7497 }
7498