1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23
24 #include <linux/ctype.h>
25 #include <linux/delay.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28 #include <linux/module.h>
29 #include <linux/aer.h>
30 #include <linux/gfp.h>
31 #include <linux/kernel.h>
32
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_tcq.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <scsi/fc/fc_fs.h>
39
40 #include "lpfc_hw4.h"
41 #include "lpfc_hw.h"
42 #include "lpfc_sli.h"
43 #include "lpfc_sli4.h"
44 #include "lpfc_nl.h"
45 #include "lpfc_disc.h"
46 #include "lpfc.h"
47 #include "lpfc_scsi.h"
48 #include "lpfc_nvme.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_version.h"
51 #include "lpfc_compat.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_vport.h"
54 #include "lpfc_attr.h"
55
56 #define LPFC_DEF_DEVLOSS_TMO 30
57 #define LPFC_MIN_DEVLOSS_TMO 1
58 #define LPFC_MAX_DEVLOSS_TMO 255
59
60 #define LPFC_MAX_INFO_TMP_LEN 100
61 #define LPFC_INFO_MORE_STR "\nCould be more info...\n"
62 /*
63 * Write key size should be multiple of 4. If write key is changed
64 * make sure that library write key is also changed.
65 */
66 #define LPFC_REG_WRITE_KEY_SIZE 4
67 #define LPFC_REG_WRITE_KEY "EMLX"
68
69 const char *const trunk_errmsg[] = { /* map errcode */
70 "", /* There is no such error code at index 0*/
71 "link negotiated speed does not match existing"
72 " trunk - link was \"low\" speed",
73 "link negotiated speed does not match"
74 " existing trunk - link was \"middle\" speed",
75 "link negotiated speed does not match existing"
76 " trunk - link was \"high\" speed",
77 "Attached to non-trunking port - F_Port",
78 "Attached to non-trunking port - N_Port",
79 "FLOGI response timeout",
80 "non-FLOGI frame received",
81 "Invalid FLOGI response",
82 "Trunking initialization protocol",
83 "Trunk peer device mismatch",
84 };
85
86 /**
87 * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
88 * @incr: integer to convert.
89 * @hdw: ascii string holding converted integer plus a string terminator.
90 *
91 * Description:
92 * JEDEC Joint Electron Device Engineering Council.
93 * Convert a 32 bit integer composed of 8 nibbles into an 8 byte ascii
94 * character string. The string is then terminated with a NULL in byte 9.
95 * Hex 0-9 becomes ascii '0' to '9'.
96 * Hex a-f becomes ascii 'a' to 'f' (lower case).
97 *
98 * Notes:
99 * Coded for 32 bit integers only.
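* Example: incr = 0x0120cdef produces hdw = "0120cdef".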
100 **/
101 static void
102 lpfc_jedec_to_ascii(int incr, char hdw[])
103 {
104 int i, j;
105 for (i = 0; i < 8; i++) {
106 j = (incr & 0xf);
107 if (j <= 9)
108 hdw[7 - i] = 0x30 + j;
109 else
110 hdw[7 - i] = 0x61 + j - 10;
111 incr = (incr >> 4);
112 }
113 hdw[8] = 0;
114 return;
115 }
116
117 static ssize_t
118 lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
119 char *buf)
120 {
121 struct Scsi_Host *shost = class_to_shost(dev);
122 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
123 struct lpfc_hba *phba = vport->phba;
124 struct lpfc_cgn_info *cp = NULL;
125 struct lpfc_cgn_stat *cgs;
126 int len = 0;
127 int cpu;
128 u64 rcv, total;
129 char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
130
131 if (phba->cgn_i)
132 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
133
134 scnprintf(tmp, sizeof(tmp),
135 "Congestion Mgmt Info: E2Eattr %d Ver %d "
136 "CMF %d cnt %d\n",
137 phba->sli4_hba.pc_sli4_params.mi_ver,
138 cp ? cp->cgn_info_version : 0,
139 phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);
140
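/* strlcat() returns the total length it tried to create, so a value >= PAGE_SIZE means the sysfs page is full and we stop appending. */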
141 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
142 goto buffer_done;
143
144 if (!phba->sli4_hba.pc_sli4_params.cmf)
145 goto buffer_done;
146
147 switch (phba->cgn_init_reg_signal) {
148 case EDC_CG_SIG_WARN_ONLY:
149 scnprintf(tmp, sizeof(tmp),
150 "Register: Init: Signal:WARN ");
151 break;
152 case EDC_CG_SIG_WARN_ALARM:
153 scnprintf(tmp, sizeof(tmp),
154 "Register: Init: Signal:WARN|ALARM ");
155 break;
156 default:
157 scnprintf(tmp, sizeof(tmp),
158 "Register: Init: Signal:NONE ");
159 break;
160 }
161 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
162 goto buffer_done;
163
164 switch (phba->cgn_init_reg_fpin) {
165 case LPFC_CGN_FPIN_WARN:
166 scnprintf(tmp, sizeof(tmp),
167 "FPIN:WARN\n");
168 break;
169 case LPFC_CGN_FPIN_ALARM:
170 scnprintf(tmp, sizeof(tmp),
171 "FPIN:ALARM\n");
172 break;
173 case LPFC_CGN_FPIN_BOTH:
174 scnprintf(tmp, sizeof(tmp),
175 "FPIN:WARN|ALARM\n");
176 break;
177 default:
178 scnprintf(tmp, sizeof(tmp),
179 "FPIN:NONE\n");
180 break;
181 }
182 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
183 goto buffer_done;
184
185 switch (phba->cgn_reg_signal) {
186 case EDC_CG_SIG_WARN_ONLY:
187 scnprintf(tmp, sizeof(tmp),
188 " Current: Signal:WARN ");
189 break;
190 case EDC_CG_SIG_WARN_ALARM:
191 scnprintf(tmp, sizeof(tmp),
192 " Current: Signal:WARN|ALARM ");
193 break;
194 default:
195 scnprintf(tmp, sizeof(tmp),
196 " Current: Signal:NONE ");
197 break;
198 }
199 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
200 goto buffer_done;
201
202 switch (phba->cgn_reg_fpin) {
203 case LPFC_CGN_FPIN_WARN:
204 scnprintf(tmp, sizeof(tmp),
205 "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt);
206 break;
207 case LPFC_CGN_FPIN_ALARM:
208 scnprintf(tmp, sizeof(tmp),
209 "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
210 break;
211 case LPFC_CGN_FPIN_BOTH:
212 scnprintf(tmp, sizeof(tmp),
213 "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
214 break;
215 default:
216 scnprintf(tmp, sizeof(tmp),
217 "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt);
218 break;
219 }
220 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
221 goto buffer_done;
222
223 if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
224 switch (phba->cmf_active_mode) {
225 case LPFC_CFG_OFF:
226 scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
227 break;
228 case LPFC_CFG_MANAGED:
229 scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n");
230 break;
231 case LPFC_CFG_MONITOR:
232 scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n");
233 break;
234 default:
235 scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n");
236 }
237 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
238 goto buffer_done;
239 }
240
241 switch (phba->cgn_p.cgn_param_mode) {
242 case LPFC_CFG_OFF:
243 scnprintf(tmp, sizeof(tmp), "Config: Mode:Off ");
244 break;
245 case LPFC_CFG_MANAGED:
246 scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed ");
247 break;
248 case LPFC_CFG_MONITOR:
249 scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor ");
250 break;
251 default:
252 scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown ");
253 }
254 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
255 goto buffer_done;
256
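/* Sum the per-CPU congestion byte counters across all present CPUs. */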
257 total = 0;
258 rcv = 0;
259 for_each_present_cpu(cpu) {
260 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
261 total += atomic64_read(&cgs->total_bytes);
262 rcv += atomic64_read(&cgs->rcv_bytes);
263 }
264
265 scnprintf(tmp, sizeof(tmp),
266 "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n",
267 atomic_read(&phba->cmf_busy),
268 phba->cmf_active_info, rcv, total);
269 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
270 goto buffer_done;
271
272 scnprintf(tmp, sizeof(tmp),
273 "Port_speed:%d Link_byte_cnt:%ld "
274 "Max_byte_per_interval:%ld\n",
275 lpfc_sli_port_speed_get(phba),
276 (unsigned long)phba->cmf_link_byte_count,
277 (unsigned long)phba->cmf_max_bytes_per_interval);
278 strlcat(buf, tmp, PAGE_SIZE);
279
280 buffer_done:
281 len = strnlen(buf, PAGE_SIZE);
282
283 if (unlikely(len >= (PAGE_SIZE - 1))) {
284 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
285 "6312 Catching potential buffer "
286 "overflow > PAGE_SIZE = %lu bytes\n",
287 PAGE_SIZE);
288 strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
289 LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
290 }
291 return len;
292 }
293
294 /**
295 * lpfc_drvr_version_show - Return the Emulex driver string with version number
296 * @dev: class unused variable.
297 * @attr: device attribute, not used.
298 * @buf: on return contains the module description text.
299 *
300 * Returns: size of formatted string.
301 **/
302 static ssize_t
303 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
304 char *buf)
305 {
306 return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
307 }
308
309 /**
310 * lpfc_enable_fip_show - Return the fip mode of the HBA
311 * @dev: class unused variable.
312 * @attr: device attribute, not used.
313 * @buf: on return contains the module description text.
314 *
315 * Returns: size of formatted string.
316 **/
317 static ssize_t
318 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
319 char *buf)
320 {
321 struct Scsi_Host *shost = class_to_shost(dev);
322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
323 struct lpfc_hba *phba = vport->phba;
324
325 if (phba->hba_flag & HBA_FIP_SUPPORT)
326 return scnprintf(buf, PAGE_SIZE, "1\n");
327 else
328 return scnprintf(buf, PAGE_SIZE, "0\n");
329 }
330
331 static ssize_t
332 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
333 char *buf)
334 {
335 struct Scsi_Host *shost = class_to_shost(dev);
336 struct lpfc_vport *vport = shost_priv(shost);
337 struct lpfc_hba *phba = vport->phba;
338 struct lpfc_nvmet_tgtport *tgtp;
339 struct nvme_fc_local_port *localport;
340 struct lpfc_nvme_lport *lport;
341 struct lpfc_nvme_rport *rport;
342 struct lpfc_nodelist *ndlp;
343 struct nvme_fc_remote_port *nrport;
344 struct lpfc_fc4_ctrl_stat *cstat;
345 uint64_t data1, data2, data3;
346 uint64_t totin, totout, tot;
347 char *statep;
348 int i;
349 int len = 0;
350 char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
351
352 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
353 len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
354 return len;
355 }
356 if (phba->nvmet_support) {
357 if (!phba->targetport) {
358 len = scnprintf(buf, PAGE_SIZE,
359 "NVME Target: x%llx is not allocated\n",
360 wwn_to_u64(vport->fc_portname.u.wwn));
361 return len;
362 }
363 /* Port state is only one of two values for now. */
364 if (phba->targetport->port_id)
365 statep = "REGISTERED";
366 else
367 statep = "INIT";
368 scnprintf(tmp, sizeof(tmp),
369 "NVME Target Enabled State %s\n",
370 statep);
371 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
372 goto buffer_done;
373
374 scnprintf(tmp, sizeof(tmp),
375 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
376 "NVME Target: lpfc",
377 phba->brd_no,
378 wwn_to_u64(vport->fc_portname.u.wwn),
379 wwn_to_u64(vport->fc_nodename.u.wwn),
380 phba->targetport->port_id);
381 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
382 goto buffer_done;
383
384 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
385 >= PAGE_SIZE)
386 goto buffer_done;
387
388 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
389 scnprintf(tmp, sizeof(tmp),
390 "LS: Rcv %08x Drop %08x Abort %08x\n",
391 atomic_read(&tgtp->rcv_ls_req_in),
392 atomic_read(&tgtp->rcv_ls_req_drop),
393 atomic_read(&tgtp->xmt_ls_abort));
394 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
395 goto buffer_done;
396
397 if (atomic_read(&tgtp->rcv_ls_req_in) !=
398 atomic_read(&tgtp->rcv_ls_req_out)) {
399 scnprintf(tmp, sizeof(tmp),
400 "Rcv LS: in %08x != out %08x\n",
401 atomic_read(&tgtp->rcv_ls_req_in),
402 atomic_read(&tgtp->rcv_ls_req_out));
403 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
404 goto buffer_done;
405 }
406
407 scnprintf(tmp, sizeof(tmp),
408 "LS: Xmt %08x Drop %08x Cmpl %08x\n",
409 atomic_read(&tgtp->xmt_ls_rsp),
410 atomic_read(&tgtp->xmt_ls_drop),
411 atomic_read(&tgtp->xmt_ls_rsp_cmpl));
412 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
413 goto buffer_done;
414
415 scnprintf(tmp, sizeof(tmp),
416 "LS: RSP Abort %08x xb %08x Err %08x\n",
417 atomic_read(&tgtp->xmt_ls_rsp_aborted),
418 atomic_read(&tgtp->xmt_ls_rsp_xb_set),
419 atomic_read(&tgtp->xmt_ls_rsp_error));
420 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
421 goto buffer_done;
422
423 scnprintf(tmp, sizeof(tmp),
424 "FCP: Rcv %08x Defer %08x Release %08x "
425 "Drop %08x\n",
426 atomic_read(&tgtp->rcv_fcp_cmd_in),
427 atomic_read(&tgtp->rcv_fcp_cmd_defer),
428 atomic_read(&tgtp->xmt_fcp_release),
429 atomic_read(&tgtp->rcv_fcp_cmd_drop));
430 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
431 goto buffer_done;
432
433 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
434 atomic_read(&tgtp->rcv_fcp_cmd_out)) {
435 scnprintf(tmp, sizeof(tmp),
436 "Rcv FCP: in %08x != out %08x\n",
437 atomic_read(&tgtp->rcv_fcp_cmd_in),
438 atomic_read(&tgtp->rcv_fcp_cmd_out));
439 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
440 goto buffer_done;
441 }
442
443 scnprintf(tmp, sizeof(tmp),
444 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
445 "drop %08x\n",
446 atomic_read(&tgtp->xmt_fcp_read),
447 atomic_read(&tgtp->xmt_fcp_read_rsp),
448 atomic_read(&tgtp->xmt_fcp_write),
449 atomic_read(&tgtp->xmt_fcp_rsp),
450 atomic_read(&tgtp->xmt_fcp_drop));
451 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
452 goto buffer_done;
453
454 scnprintf(tmp, sizeof(tmp),
455 "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
456 atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
457 atomic_read(&tgtp->xmt_fcp_rsp_error),
458 atomic_read(&tgtp->xmt_fcp_rsp_drop));
459 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
460 goto buffer_done;
461
462 scnprintf(tmp, sizeof(tmp),
463 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
464 atomic_read(&tgtp->xmt_fcp_rsp_aborted),
465 atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
466 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
467 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
468 goto buffer_done;
469
470 scnprintf(tmp, sizeof(tmp),
471 "ABORT: Xmt %08x Cmpl %08x\n",
472 atomic_read(&tgtp->xmt_fcp_abort),
473 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
474 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
475 goto buffer_done;
476
477 scnprintf(tmp, sizeof(tmp),
478 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
479 atomic_read(&tgtp->xmt_abort_sol),
480 atomic_read(&tgtp->xmt_abort_unsol),
481 atomic_read(&tgtp->xmt_abort_rsp),
482 atomic_read(&tgtp->xmt_abort_rsp_error));
483 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
484 goto buffer_done;
485
486 scnprintf(tmp, sizeof(tmp),
487 "DELAY: ctx %08x fod %08x wqfull %08x\n",
488 atomic_read(&tgtp->defer_ctx),
489 atomic_read(&tgtp->defer_fod),
490 atomic_read(&tgtp->defer_wqfull));
491 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
492 goto buffer_done;
493
494 /* Calculate outstanding IOs */
495 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
496 tot += atomic_read(&tgtp->xmt_fcp_release);
497 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
498
499 scnprintf(tmp, sizeof(tmp),
500 "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
501 "CTX Outstanding %08llx\n\n",
502 phba->sli4_hba.nvmet_xri_cnt,
503 phba->sli4_hba.nvmet_io_wait_cnt,
504 phba->sli4_hba.nvmet_io_wait_total,
505 tot);
506 strlcat(buf, tmp, PAGE_SIZE);
507 goto buffer_done;
508 }
509
510 localport = vport->localport;
511 if (!localport) {
512 len = scnprintf(buf, PAGE_SIZE,
513 "NVME Initiator x%llx is not allocated\n",
514 wwn_to_u64(vport->fc_portname.u.wwn));
515 return len;
516 }
517 lport = (struct lpfc_nvme_lport *)localport->private;
518 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
519 goto buffer_done;
520
521 scnprintf(tmp, sizeof(tmp),
522 "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
523 phba->brd_no,
524 phba->sli4_hba.max_cfg_param.max_xri,
525 phba->sli4_hba.io_xri_max,
526 lpfc_sli4_get_els_iocb_cnt(phba));
527 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
528 goto buffer_done;
529
530 /* Port state is only one of two values for now. */
531 if (localport->port_id)
532 statep = "ONLINE";
533 else
534 statep = "UNKNOWN ";
535
536 scnprintf(tmp, sizeof(tmp),
537 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
538 "NVME LPORT lpfc",
539 phba->brd_no,
540 wwn_to_u64(vport->fc_portname.u.wwn),
541 wwn_to_u64(vport->fc_nodename.u.wwn),
542 localport->port_id, statep);
543 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
544 goto buffer_done;
545
546 spin_lock_irq(shost->host_lock);
547
548 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
549 nrport = NULL;
550 spin_lock(&ndlp->lock);
551 rport = lpfc_ndlp_get_nrport(ndlp);
552 if (rport)
553 nrport = rport->remoteport;
554 spin_unlock(&ndlp->lock);
555 if (!nrport)
556 continue;
557
558 /* Port state is only one of two values for now. */
559 switch (nrport->port_state) {
560 case FC_OBJSTATE_ONLINE:
561 statep = "ONLINE";
562 break;
563 case FC_OBJSTATE_UNKNOWN:
564 statep = "UNKNOWN ";
565 break;
566 default:
567 statep = "UNSUPPORTED";
568 break;
569 }
570
571 /* Tab in to show lport ownership. */
572 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
573 goto unlock_buf_done;
574 if (phba->brd_no >= 10) {
575 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
576 goto unlock_buf_done;
577 }
578
579 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
580 nrport->port_name);
581 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
582 goto unlock_buf_done;
583
584 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
585 nrport->node_name);
586 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
587 goto unlock_buf_done;
588
589 scnprintf(tmp, sizeof(tmp), "DID x%06x ",
590 nrport->port_id);
591 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
592 goto unlock_buf_done;
593
594 /* An NVME rport can have multiple roles. */
595 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
596 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
597 goto unlock_buf_done;
598 }
599 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
600 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
601 goto unlock_buf_done;
602 }
603 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
604 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
605 goto unlock_buf_done;
606 }
607 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
608 FC_PORT_ROLE_NVME_TARGET |
609 FC_PORT_ROLE_NVME_DISCOVERY)) {
610 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
611 nrport->port_role);
612 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
613 goto unlock_buf_done;
614 }
615
616 scnprintf(tmp, sizeof(tmp), "%s\n", statep);
617 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
618 goto unlock_buf_done;
619 }
620 spin_unlock_irq(shost->host_lock);
621
622 if (!lport)
623 goto buffer_done;
624
625 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
626 goto buffer_done;
627
628 scnprintf(tmp, sizeof(tmp),
629 "LS: Xmt %010x Cmpl %010x Abort %08x\n",
630 atomic_read(&lport->fc4NvmeLsRequests),
631 atomic_read(&lport->fc4NvmeLsCmpls),
632 atomic_read(&lport->xmt_ls_abort));
633 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
634 goto buffer_done;
635
636 scnprintf(tmp, sizeof(tmp),
637 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
638 atomic_read(&lport->xmt_ls_err),
639 atomic_read(&lport->cmpl_ls_xb),
640 atomic_read(&lport->cmpl_ls_err));
641 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
642 goto buffer_done;
643
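/* Accumulate NVME FCP completion and request counts across all hardware queues. */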
644 totin = 0;
645 totout = 0;
646 for (i = 0; i < phba->cfg_hdw_queue; i++) {
647 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
648 tot = cstat->io_cmpls;
649 totin += tot;
650 data1 = cstat->input_requests;
651 data2 = cstat->output_requests;
652 data3 = cstat->control_requests;
653 totout += (data1 + data2 + data3);
654 }
655 scnprintf(tmp, sizeof(tmp),
656 "Total FCP Cmpl %016llx Issue %016llx "
657 "OutIO %016llx\n",
658 totin, totout, totout - totin);
659 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
660 goto buffer_done;
661
662 scnprintf(tmp, sizeof(tmp),
663 "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
664 "wqerr %08x err %08x\n",
665 atomic_read(&lport->xmt_fcp_abort),
666 atomic_read(&lport->xmt_fcp_noxri),
667 atomic_read(&lport->xmt_fcp_bad_ndlp),
668 atomic_read(&lport->xmt_fcp_qdepth),
669 atomic_read(&lport->xmt_fcp_wqerr),
670 atomic_read(&lport->xmt_fcp_err));
671 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
672 goto buffer_done;
673
674 scnprintf(tmp, sizeof(tmp),
675 "FCP CMPL: xb %08x Err %08x\n",
676 atomic_read(&lport->cmpl_fcp_xb),
677 atomic_read(&lport->cmpl_fcp_err));
678 strlcat(buf, tmp, PAGE_SIZE);
679
680 /* host_lock is already unlocked. */
681 goto buffer_done;
682
683 unlock_buf_done:
684 spin_unlock_irq(shost->host_lock);
685
686 buffer_done:
687 len = strnlen(buf, PAGE_SIZE);
688
689 if (unlikely(len >= (PAGE_SIZE - 1))) {
690 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
691 "6314 Catching potential buffer "
692 "overflow > PAGE_SIZE = %lu bytes\n",
693 PAGE_SIZE);
694 strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
695 LPFC_INFO_MORE_STR,
696 sizeof(LPFC_INFO_MORE_STR) + 1);
697 }
698
699 return len;
700 }
701
702 static ssize_t
703 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
704 char *buf)
705 {
706 struct Scsi_Host *shost = class_to_shost(dev);
707 struct lpfc_vport *vport = shost_priv(shost);
708 struct lpfc_hba *phba = vport->phba;
709 int len;
710 struct lpfc_fc4_ctrl_stat *cstat;
711 u64 data1, data2, data3;
712 u64 tot, totin, totout;
713 int i;
714 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
715
716 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
717 (phba->sli_rev != LPFC_SLI_REV4))
718 return 0;
719
720 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
721
722 totin = 0;
723 totout = 0;
724 for (i = 0; i < phba->cfg_hdw_queue; i++) {
725 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
726 tot = cstat->io_cmpls;
727 totin += tot;
728 data1 = cstat->input_requests;
729 data2 = cstat->output_requests;
730 data3 = cstat->control_requests;
731 totout += (data1 + data2 + data3);
732
733 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
734 "IO %016llx ", i, data1, data2, data3);
735 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
736 goto buffer_done;
737
738 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
739 tot, ((data1 + data2 + data3) - tot));
740 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
741 goto buffer_done;
742 }
743 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
744 "OutIO %016llx\n", totin, totout, totout - totin);
745 strlcat(buf, tmp, PAGE_SIZE);
746
747 buffer_done:
748 len = strnlen(buf, PAGE_SIZE);
749
750 return len;
751 }
752
753 static ssize_t
754 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
755 char *buf)
756 {
757 struct Scsi_Host *shost = class_to_shost(dev);
758 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
759 struct lpfc_hba *phba = vport->phba;
760
761 if (phba->cfg_enable_bg) {
762 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
763 return scnprintf(buf, PAGE_SIZE,
764 "BlockGuard Enabled\n");
765 else
766 return scnprintf(buf, PAGE_SIZE,
767 "BlockGuard Not Supported\n");
768 } else
769 return scnprintf(buf, PAGE_SIZE,
770 "BlockGuard Disabled\n");
771 }
772
773 static ssize_t
774 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
775 char *buf)
776 {
777 struct Scsi_Host *shost = class_to_shost(dev);
778 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
779 struct lpfc_hba *phba = vport->phba;
780
781 return scnprintf(buf, PAGE_SIZE, "%llu\n",
782 (unsigned long long)phba->bg_guard_err_cnt);
783 }
784
785 static ssize_t
786 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
787 char *buf)
788 {
789 struct Scsi_Host *shost = class_to_shost(dev);
790 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
791 struct lpfc_hba *phba = vport->phba;
792
793 return scnprintf(buf, PAGE_SIZE, "%llu\n",
794 (unsigned long long)phba->bg_apptag_err_cnt);
795 }
796
797 static ssize_t
798 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
799 char *buf)
800 {
801 struct Scsi_Host *shost = class_to_shost(dev);
802 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
803 struct lpfc_hba *phba = vport->phba;
804
805 return scnprintf(buf, PAGE_SIZE, "%llu\n",
806 (unsigned long long)phba->bg_reftag_err_cnt);
807 }
808
809 /**
810 * lpfc_info_show - Return some pci info about the host in ascii
811 * @dev: class converted to a Scsi_host structure.
812 * @attr: device attribute, not used.
813 * @buf: on return contains the formatted text from lpfc_info().
814 *
815 * Returns: size of formatted string.
816 **/
817 static ssize_t
818 lpfc_info_show(struct device *dev, struct device_attribute *attr,
819 char *buf)
820 {
821 struct Scsi_Host *host = class_to_shost(dev);
822
823 return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
824 }
825
826 /**
827 * lpfc_serialnum_show - Return the hba serial number in ascii
828 * @dev: class converted to a Scsi_host structure.
829 * @attr: device attribute, not used.
830 * @buf: on return contains the formatted text serial number.
831 *
832 * Returns: size of formatted string.
833 **/
834 static ssize_t
835 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
836 char *buf)
837 {
838 struct Scsi_Host *shost = class_to_shost(dev);
839 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
840 struct lpfc_hba *phba = vport->phba;
841
842 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
843 }
844
845 /**
846 * lpfc_temp_sensor_show - Return the temperature sensor level
847 * @dev: class converted to a Scsi_host structure.
848 * @attr: device attribute, not used.
849 * @buf: on return contains the formatted support level.
850 *
851 * Description:
852 * Returns a number indicating the temperature sensor level currently
853 * supported, zero or one in ascii.
854 *
855 * Returns: size of formatted string.
856 **/
857 static ssize_t
858 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
859 char *buf)
860 {
861 struct Scsi_Host *shost = class_to_shost(dev);
862 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
863 struct lpfc_hba *phba = vport->phba;
864 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
865 }
866
867 /**
868 * lpfc_modeldesc_show - Return the model description of the hba
869 * @dev: class converted to a Scsi_host structure.
870 * @attr: device attribute, not used.
871 * @buf: on return contains the scsi vpd model description.
872 *
873 * Returns: size of formatted string.
874 **/
875 static ssize_t
876 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
877 char *buf)
878 {
879 struct Scsi_Host *shost = class_to_shost(dev);
880 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
881 struct lpfc_hba *phba = vport->phba;
882
883 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
884 }
885
886 /**
887 * lpfc_modelname_show - Return the model name of the hba
888 * @dev: class converted to a Scsi_host structure.
889 * @attr: device attribute, not used.
890 * @buf: on return contains the scsi vpd model name.
891 *
892 * Returns: size of formatted string.
893 **/
894 static ssize_t
895 lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
896 char *buf)
897 {
898 struct Scsi_Host *shost = class_to_shost(dev);
899 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
900 struct lpfc_hba *phba = vport->phba;
901
902 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
903 }
904
905 /**
906 * lpfc_programtype_show - Return the program type of the hba
907 * @dev: class converted to a Scsi_host structure.
908 * @attr: device attribute, not used.
909 * @buf: on return contains the scsi vpd program type.
910 *
911 * Returns: size of formatted string.
912 **/
913 static ssize_t
914 lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
915 char *buf)
916 {
917 struct Scsi_Host *shost = class_to_shost(dev);
918 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
919 struct lpfc_hba *phba = vport->phba;
920
921 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
922 }
923
924 /**
925 * lpfc_mlomgmt_show - Return the Menlo Maintenance sli flag
926 * @dev: class converted to a Scsi_host structure.
927 * @attr: device attribute, not used.
928 * @buf: on return contains the Menlo Maintenance sli flag.
929 *
930 * Returns: size of formatted string.
931 **/
932 static ssize_t
933 lpfc_mlomgmt_show(struct device *dev, struct device_attribute *attr, char *buf)
934 {
935 struct Scsi_Host *shost = class_to_shost(dev);
936 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
937 struct lpfc_hba *phba = vport->phba;
938
939 return scnprintf(buf, PAGE_SIZE, "%d\n",
940 (phba->sli.sli_flag & LPFC_MENLO_MAINT));
941 }
942
943 /**
944 * lpfc_vportnum_show - Return the port number in ascii of the hba
945 * @dev: class converted to a Scsi_host structure.
946 * @attr: device attribute, not used.
947 * @buf: on return contains scsi vpd program type.
948 *
949 * Returns: size of formatted string.
950 **/
951 static ssize_t
952 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
953 char *buf)
954 {
955 struct Scsi_Host *shost = class_to_shost(dev);
956 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
957 struct lpfc_hba *phba = vport->phba;
958
959 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
960 }
961
962 /**
963 * lpfc_fwrev_show - Return the firmware rev running in the hba
964 * @dev: class converted to a Scsi_host structure.
965 * @attr: device attribute, not used.
966 * @buf: on return contains the scsi vpd program type.
967 *
968 * Returns: size of formatted string.
969 **/
970 static ssize_t
971 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
972 char *buf)
973 {
974 struct Scsi_Host *shost = class_to_shost(dev);
975 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
976 struct lpfc_hba *phba = vport->phba;
977 uint32_t if_type;
978 uint8_t sli_family;
979 char fwrev[FW_REV_STR_SIZE];
980 int len;
981
982 lpfc_decode_firmware_rev(phba, fwrev, 1);
983 if_type = phba->sli4_hba.pc_sli4_params.if_type;
984 sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
985
986 if (phba->sli_rev < LPFC_SLI_REV4)
987 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
988 fwrev, phba->sli_rev);
989 else
990 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
991 fwrev, phba->sli_rev, if_type, sli_family);
992
993 return len;
994 }
995
996 /**
997 * lpfc_hdw_show - Return the jedec information about the hba
998 * @dev: class converted to a Scsi_host structure.
999 * @attr: device attribute, not used.
1000 * @buf: on return contains the scsi vpd program type.
1001 *
1002 * Returns: size of formatted string.
1003 **/
1004 static ssize_t
1005 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
1006 {
1007 char hdw[9];
1008 struct Scsi_Host *shost = class_to_shost(dev);
1009 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1010 struct lpfc_hba *phba = vport->phba;
1011 lpfc_vpd_t *vp = &phba->vpd;
1012
1013 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
1014 return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
1015 vp->rev.smRev, vp->rev.smFwRev);
1016 }
1017
1018 /**
1019 * lpfc_option_rom_version_show - Return the adapter ROM FCode version
1020 * @dev: class converted to a Scsi_host structure.
1021 * @attr: device attribute, not used.
1022 * @buf: on return contains the ROM and FCode ascii strings.
1023 *
1024 * Returns: size of formatted string.
1025 **/
1026 static ssize_t
1027 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
1028 char *buf)
1029 {
1030 struct Scsi_Host *shost = class_to_shost(dev);
1031 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1032 struct lpfc_hba *phba = vport->phba;
1033 char fwrev[FW_REV_STR_SIZE];
1034
1035 if (phba->sli_rev < LPFC_SLI_REV4)
1036 return scnprintf(buf, PAGE_SIZE, "%s\n",
1037 phba->OptionROMVersion);
1038
1039 lpfc_decode_firmware_rev(phba, fwrev, 1);
1040 return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
1041 }
1042
1043 /**
1044 * lpfc_link_state_show - Return the link state of the port
1045 * @dev: class converted to a Scsi_host structure.
1046 * @attr: device attribute, not used.
1047 * @buf: on return contains text describing the state of the link.
1048 *
1049 * Notes:
1050 * The switch statement has no default so zero will be returned.
1051 *
1052 * Returns: size of formatted string.
1053 **/
1054 static ssize_t
1055 lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
1056 char *buf)
1057 {
1058 struct Scsi_Host *shost = class_to_shost(dev);
1059 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1060 struct lpfc_hba *phba = vport->phba;
1061 int len = 0;
1062
1063 switch (phba->link_state) {
1064 case LPFC_LINK_UNKNOWN:
1065 case LPFC_WARM_START:
1066 case LPFC_INIT_START:
1067 case LPFC_INIT_MBX_CMDS:
1068 case LPFC_LINK_DOWN:
1069 case LPFC_HBA_ERROR:
1070 if (phba->hba_flag & LINK_DISABLED)
1071 len += scnprintf(buf + len, PAGE_SIZE-len,
1072 "Link Down - User disabled\n");
1073 else
1074 len += scnprintf(buf + len, PAGE_SIZE-len,
1075 "Link Down\n");
1076 break;
1077 case LPFC_LINK_UP:
1078 case LPFC_CLEAR_LA:
1079 case LPFC_HBA_READY:
1080 len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
1081
1082 switch (vport->port_state) {
1083 case LPFC_LOCAL_CFG_LINK:
1084 len += scnprintf(buf + len, PAGE_SIZE-len,
1085 "Configuring Link\n");
1086 break;
1087 case LPFC_FDISC:
1088 case LPFC_FLOGI:
1089 case LPFC_FABRIC_CFG_LINK:
1090 case LPFC_NS_REG:
1091 case LPFC_NS_QRY:
1092 case LPFC_BUILD_DISC_LIST:
1093 case LPFC_DISC_AUTH:
1094 len += scnprintf(buf + len, PAGE_SIZE - len,
1095 "Discovery\n");
1096 break;
1097 case LPFC_VPORT_READY:
1098 len += scnprintf(buf + len, PAGE_SIZE - len,
1099 "Ready\n");
1100 break;
1101
1102 case LPFC_VPORT_FAILED:
1103 len += scnprintf(buf + len, PAGE_SIZE - len,
1104 "Failed\n");
1105 break;
1106
1107 case LPFC_VPORT_UNKNOWN:
1108 len += scnprintf(buf + len, PAGE_SIZE - len,
1109 "Unknown\n");
1110 break;
1111 }
1112 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
1113 len += scnprintf(buf + len, PAGE_SIZE-len,
1114 " Menlo Maint Mode\n");
1115 else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
1116 if (vport->fc_flag & FC_PUBLIC_LOOP)
1117 len += scnprintf(buf + len, PAGE_SIZE-len,
1118 " Public Loop\n");
1119 else
1120 len += scnprintf(buf + len, PAGE_SIZE-len,
1121 " Private Loop\n");
1122 } else {
1123 if (vport->fc_flag & FC_FABRIC)
1124 len += scnprintf(buf + len, PAGE_SIZE-len,
1125 " Fabric\n");
1126 else
1127 len += scnprintf(buf + len, PAGE_SIZE-len,
1128 " Point-2-Point\n");
1129 }
1130 }
1131
1132 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1133 ((bf_get(lpfc_sli_intf_if_type,
1134 &phba->sli4_hba.sli_intf) ==
1135 LPFC_SLI_INTF_IF_TYPE_6))) {
1136 struct lpfc_trunk_link link = phba->trunk_link;
1137
1138 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
1139 len += scnprintf(buf + len, PAGE_SIZE - len,
1140 "Trunk port 0: Link %s %s\n",
1141 (link.link0.state == LPFC_LINK_UP) ?
1142 "Up" : "Down. ",
1143 trunk_errmsg[link.link0.fault]);
1144
1145 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
1146 len += scnprintf(buf + len, PAGE_SIZE - len,
1147 "Trunk port 1: Link %s %s\n",
1148 (link.link1.state == LPFC_LINK_UP) ?
1149 "Up" : "Down. ",
1150 trunk_errmsg[link.link1.fault]);
1151
1152 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
1153 len += scnprintf(buf + len, PAGE_SIZE - len,
1154 "Trunk port 2: Link %s %s\n",
1155 (link.link2.state == LPFC_LINK_UP) ?
1156 "Up" : "Down. ",
1157 trunk_errmsg[link.link2.fault]);
1158
1159 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
1160 len += scnprintf(buf + len, PAGE_SIZE - len,
1161 "Trunk port 3: Link %s %s\n",
1162 (link.link3.state == LPFC_LINK_UP) ?
1163 "Up" : "Down. ",
1164 trunk_errmsg[link.link3.fault]);
1165
1166 }
1167
1168 return len;
1169 }
1170
1171 /**
1172 * lpfc_sli4_protocol_show - Return the fip mode of the HBA
1173 * @dev: class unused variable.
1174 * @attr: device attribute, not used.
1175 * @buf: on return contains the module description text.
1176 *
1177 * Returns: size of formatted string.
1178 **/
1179 static ssize_t
1180 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
1181 char *buf)
1182 {
1183 struct Scsi_Host *shost = class_to_shost(dev);
1184 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1185 struct lpfc_hba *phba = vport->phba;
1186
1187 if (phba->sli_rev < LPFC_SLI_REV4)
1188 return scnprintf(buf, PAGE_SIZE, "fc\n");
1189
1190 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
1191 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
1192 return scnprintf(buf, PAGE_SIZE, "fcoe\n");
1193 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
1194 return scnprintf(buf, PAGE_SIZE, "fc\n");
1195 }
1196 return scnprintf(buf, PAGE_SIZE, "unknown\n");
1197 }
1198
1199 /**
1200 * lpfc_oas_supported_show - Return whether or not Optimized Access Storage
1201 * (OAS) is supported.
1202 * @dev: class unused variable.
1203 * @attr: device attribute, not used.
1204 * @buf: on return contains the module description text.
1205 *
1206 * Returns: size of formatted string.
1207 **/
1208 static ssize_t
1209 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
1210 char *buf)
1211 {
1212 struct Scsi_Host *shost = class_to_shost(dev);
1213 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
1214 struct lpfc_hba *phba = vport->phba;
1215
1216 return scnprintf(buf, PAGE_SIZE, "%d\n",
1217 phba->sli4_hba.pc_sli4_params.oas_supported);
1218 }
1219
1220 /**
1221 * lpfc_link_state_store - Transition the link_state on an HBA port
1222 * @dev: class device that is converted into a Scsi_host.
1223 * @attr: device attribute, not used.
1224 * @buf: one or more lpfc_polling_flags values.
1225 * @count: not used.
1226 *
1227 * Returns:
1228 * -EINVAL if the buffer is not "up" or "down"
1229 * return from link state change function if non-zero
1230 * length of the buf on success
1231 **/
1232 static ssize_t
1233 lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
1234 const char *buf, size_t count)
1235 {
1236 struct Scsi_Host *shost = class_to_shost(dev);
1237 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1238 struct lpfc_hba *phba = vport->phba;
1239
1240 int status = -EINVAL;
1241
1242 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
1243 (phba->link_state == LPFC_LINK_DOWN))
1244 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1245 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
1246 (phba->link_state >= LPFC_LINK_UP))
1247 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
1248
1249 if (status == 0)
1250 return strlen(buf);
1251 else
1252 return status;
1253 }
1254
1255 /**
1256 * lpfc_num_discovered_ports_show - Return sum of mapped and unmapped vports
1257 * @dev: class device that is converted into a Scsi_host.
1258 * @attr: device attribute, not used.
1259 * @buf: on return contains the sum of fc mapped and unmapped.
1260 *
1261 * Description:
1262 * Returns the ascii text number of the sum of the fc mapped and unmapped
1263 * vport counts.
1264 *
1265 * Returns: size of formatted string.
1266 **/
1267 static ssize_t
1268 lpfc_num_discovered_ports_show(struct device *dev,
1269 struct device_attribute *attr, char *buf)
1270 {
1271 struct Scsi_Host *shost = class_to_shost(dev);
1272 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1273
1274 return scnprintf(buf, PAGE_SIZE, "%d\n",
1275 vport->fc_map_cnt + vport->fc_unmap_cnt);
1276 }
1277
1278 /**
1279 * lpfc_issue_lip - Misnomer, name carried over from long ago
1280 * @shost: Scsi_Host pointer.
1281 *
1282 * Description:
1283 * Bring the link down gracefully then re-init the link. The firmware will
1284 * re-init the Fibre Channel interface as required. Does not issue a LIP.
1285 *
1286 * Returns:
1287 * -EPERM port offline or management commands are being blocked
1288 * -ENOMEM cannot allocate memory for the mailbox command
1289 * -EIO error sending the mailbox command
1290 * zero for success
1291 **/
1292 static int
1293 lpfc_issue_lip(struct Scsi_Host *shost)
1294 {
1295 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1296 struct lpfc_hba *phba = vport->phba;
1297 LPFC_MBOXQ_t *pmboxq;
1298 int mbxstatus = MBXERR_ERROR;
1299
1300 /*
1301 * If the link is offline, disabled or BLOCK_MGMT_IO
1302 * it doesn't make any sense to allow issue_lip
1303 */
1304 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1305 (phba->hba_flag & LINK_DISABLED) ||
1306 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
1307 return -EPERM;
1308
1309 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1310
1311 if (!pmboxq)
1312 return -ENOMEM;
1313
1314 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1315 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1316 pmboxq->u.mb.mbxOwner = OWN_HOST;
1317
1318 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
1319
1320 if ((mbxstatus == MBX_SUCCESS) &&
1321 (pmboxq->u.mb.mbxStatus == 0 ||
1322 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
1323 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1324 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
1325 phba->cfg_link_speed);
1326 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1327 phba->fc_ratov * 2);
1328 if ((mbxstatus == MBX_SUCCESS) &&
1329 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1330 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1331 "2859 SLI authentication is required "
1332 "for INIT_LINK but has not done yet\n");
1333 }
1334
1335 lpfc_set_loopback_flag(phba);
1336 if (mbxstatus != MBX_TIMEOUT)
1337 mempool_free(pmboxq, phba->mbox_mem_pool);
1338
1339 if (mbxstatus == MBXERR_ERROR)
1340 return -EIO;
1341
1342 return 0;
1343 }
1344
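/*
 * lpfc_emptyq_wait - Poll, taking @lock around each check, until list @q
 * drains or roughly 5 seconds (250 x 20ms) elapse.  Returns 1 when the list
 * empties, 0 on timeout.
 */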
1345 int
1346 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
1347 {
1348 int cnt = 0;
1349
1350 spin_lock_irq(lock);
1351 while (!list_empty(q)) {
1352 spin_unlock_irq(lock);
1353 msleep(20);
1354 if (cnt++ > 250) { /* 5 secs */
1355 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1356 "0466 Outstanding IO when "
1357 "bringing Adapter offline\n");
1358 return 0;
1359 }
1360 spin_lock_irq(lock);
1361 }
1362 spin_unlock_irq(lock);
1363 return 1;
1364 }
1365
1366 /**
1367 * lpfc_do_offline - Issues a mailbox command to bring the link down
1368 * @phba: lpfc_hba pointer.
1369 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START, LPFC_EVT_KILL.
1370 *
1371 * Notes:
1372 * Assumes any error from lpfc_do_offline() will be negative.
1373 * Can wait up to 5 seconds for the port ring buffers count
1374 * to reach zero, prints a warning if it is not zero and continues.
1375 * lpfc_workq_post_event() returns a non-zero return code if call fails.
1376 *
1377 * Returns:
1378 * -EIO error posting the event
1379 * zero for success
1380 **/
1381 static int
1382 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
1383 {
1384 struct completion online_compl;
1385 struct lpfc_queue *qp = NULL;
1386 struct lpfc_sli_ring *pring;
1387 struct lpfc_sli *psli;
1388 int status = 0;
1389 int i;
1390 int rc;
1391
1392 init_completion(&online_compl);
1393 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1394 LPFC_EVT_OFFLINE_PREP);
1395 if (rc == 0)
1396 return -ENOMEM;
1397
1398 wait_for_completion(&online_compl);
1399
1400 if (status != 0)
1401 return -EIO;
1402
1403 psli = &phba->sli;
1404
1405 /*
1406 * If freeing the queues has already started, don't access them.
1407 * Otherwise set FREE_WAIT to indicate that queues are being used
1408 * to hold the freeing process until we finish.
1409 */
1410 spin_lock_irq(&phba->hbalock);
1411 if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
1412 psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
1413 } else {
1414 spin_unlock_irq(&phba->hbalock);
1415 goto skip_wait;
1416 }
1417 spin_unlock_irq(&phba->hbalock);
1418
1419 /* Wait a little for things to settle down, but not
1420 * long enough for dev loss timeout to expire.
1421 */
1422 if (phba->sli_rev != LPFC_SLI_REV4) {
1423 for (i = 0; i < psli->num_rings; i++) {
1424 pring = &psli->sli3_ring[i];
1425 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1426 &phba->hbalock))
1427 goto out;
1428 }
1429 } else {
1430 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1431 pring = qp->pring;
1432 if (!pring)
1433 continue;
1434 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1435 &pring->ring_lock))
1436 goto out;
1437 }
1438 }
1439 out:
1440 spin_lock_irq(&phba->hbalock);
1441 psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
1442 spin_unlock_irq(&phba->hbalock);
1443
1444 skip_wait:
1445 init_completion(&online_compl);
1446 rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
1447 if (rc == 0)
1448 return -ENOMEM;
1449
1450 wait_for_completion(&online_compl);
1451
1452 if (status != 0)
1453 return -EIO;
1454
1455 return 0;
1456 }
1457
1458 /**
1459 * lpfc_reset_pci_bus - resets PCI bridge controller's secondary bus of an HBA
1460 * @phba: lpfc_hba pointer.
1461 *
1462 * Description:
1463 * Issues a PCI secondary bus reset for the phba->pcidev.
1464 *
1465 * Notes:
1466 * First walks the bus_list to ensure only PCI devices with Emulex
1467 * vendor id, device ids that support hot reset, only one occurrence
1468 * of function 0, and all ports on the bus are in offline mode to ensure the
1469 * hot reset only affects one valid HBA.
1470 *
1471 * Returns:
1472 * -ENOTSUPP, cfg_enable_hba_reset must be of value 2
1473 * -ENODEV, NULL ptr to pcidev
1474 * -EBADSLT, detected invalid device
1475 * -EBUSY, port is not in offline state
1476 * 0, successful
1477 */
1478 static int
1479 lpfc_reset_pci_bus(struct lpfc_hba *phba)
1480 {
1481 struct pci_dev *pdev = phba->pcidev;
1482 struct Scsi_Host *shost = NULL;
1483 struct lpfc_hba *phba_other = NULL;
1484 struct pci_dev *ptr = NULL;
1485 int res;
1486
1487 if (phba->cfg_enable_hba_reset != 2)
1488 return -ENOTSUPP;
1489
1490 if (!pdev) {
1491 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1492 return -ENODEV;
1493 }
1494
1495 res = lpfc_check_pci_resettable(phba);
1496 if (res)
1497 return res;
1498
1499 /* Walk the list of devices on the pci_dev's bus */
1500 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1501 /* Check port is offline */
1502 shost = pci_get_drvdata(ptr);
1503 if (shost) {
1504 phba_other =
1505 ((struct lpfc_vport *)shost->hostdata)->phba;
1506 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1507 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1508 "8349 WWPN = 0x%02x%02x%02x%02x"
1509 "%02x%02x%02x%02x is not "
1510 "offline!\n",
1511 phba_other->wwpn[0],
1512 phba_other->wwpn[1],
1513 phba_other->wwpn[2],
1514 phba_other->wwpn[3],
1515 phba_other->wwpn[4],
1516 phba_other->wwpn[5],
1517 phba_other->wwpn[6],
1518 phba_other->wwpn[7]);
1519 return -EBUSY;
1520 }
1521 }
1522 }
1523
1524 /* Issue PCI bus reset */
1525 res = pci_reset_bus(pdev);
1526 if (res) {
1527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1528 "8350 PCI reset bus failed: %d\n", res);
1529 }
1530
1531 return res;
1532 }
1533
1534 /**
1535 * lpfc_selective_reset - Offline then onlines the port
1536 * @phba: lpfc_hba pointer.
1537 *
1538 * Description:
1539 * If the port is configured to allow a reset then the hba is brought
1540 * offline then online.
1541 *
1542 * Notes:
1543 * Assumes any error from lpfc_do_offline() will be negative.
1544 * Do not make this function static.
1545 *
1546 * Returns:
1547 * lpfc_do_offline() return code if not zero
1548 * -EIO reset not configured or error posting the event
1549 * zero for success
1550 **/
1551 int
1552 lpfc_selective_reset(struct lpfc_hba *phba)
1553 {
1554 struct completion online_compl;
1555 int status = 0;
1556 int rc;
1557
1558 if (!phba->cfg_enable_hba_reset)
1559 return -EACCES;
1560
1561 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
1562 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1563
1564 if (status != 0)
1565 return status;
1566 }
1567
1568 init_completion(&online_compl);
1569 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1570 LPFC_EVT_ONLINE);
1571 if (rc == 0)
1572 return -ENOMEM;
1573
1574 wait_for_completion(&online_compl);
1575
1576 if (status != 0)
1577 return -EIO;
1578
1579 return 0;
1580 }
1581
1582 /**
1583 * lpfc_issue_reset - Selectively resets an adapter
1584 * @dev: class device that is converted into a Scsi_host.
1585 * @attr: device attribute, not used.
1586 * @buf: containing the string "selective".
1587 * @count: unused variable.
1588 *
1589 * Description:
1590 * If the buf contains the string "selective" then lpfc_selective_reset()
1591 * is called to perform the reset.
1592 *
1593 * Notes:
1594 * Assumes any error from lpfc_selective_reset() will be negative.
1595 * If lpfc_selective_reset() returns zero then the length of the buffer
1596 * is returned which indicates success
1597 *
1598 * Returns:
1599 * -EINVAL if the buffer does not contain the string "selective"
1600 * length of buf if lpfc_selective_reset() succeeds
1601 * return value of lpfc_selective_reset() if the call fails
1602 **/
1603 static ssize_t
1604 lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
1605 const char *buf, size_t count)
1606 {
1607 struct Scsi_Host *shost = class_to_shost(dev);
1608 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1609 struct lpfc_hba *phba = vport->phba;
1610 int status = -EINVAL;
1611
1612 if (!phba->cfg_enable_hba_reset)
1613 return -EACCES;
1614
1615 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
1616 status = phba->lpfc_selective_reset(phba);
1617
1618 if (status == 0)
1619 return strlen(buf);
1620 else
1621 return status;
1622 }
1623
1624 /**
1625 * lpfc_sli4_pdev_status_reg_wait - Wait for pdev status register for readiness
1626 * @phba: lpfc_hba pointer.
1627 *
1628 * Description:
1629 * SLI4 interface type-2 device to wait on the sliport status register for
1630 * the readiness after performing a firmware reset.
1631 *
1632 * Returns:
1633 * zero for success, -EPERM when port does not have privilege to perform the
1634 * reset, -EIO when port timeout from recovering from the reset.
1635 *
1636 * Note:
1637 * As the caller will interpret the return code by value, be careful in making
1638 * change or addition to return codes.
1639 **/
1640 int
1641 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
1642 {
1643 struct lpfc_register portstat_reg = {0};
1644 int i;
1645
1646 msleep(100);
1647 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1648 &portstat_reg.word0))
1649 return -EIO;
1650
1651 /* verify if privileged for the request operation */
1652 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
1653 !bf_get(lpfc_sliport_status_err, &portstat_reg))
1654 return -EPERM;
1655
1656 /* wait for the SLI port firmware ready after firmware reset */
1657 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
1658 msleep(10);
1659 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1660 &portstat_reg.word0))
1661 continue;
1662 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
1663 continue;
1664 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
1665 continue;
1666 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
1667 continue;
1668 break;
1669 }
1670
1671 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
1672 return 0;
1673 else
1674 return -EIO;
1675 }
1676
1677 /**
1678 * lpfc_sli4_pdev_reg_request - Request physical dev to perform a register acc
1679 * @phba: lpfc_hba pointer.
1680 * @opcode: The sli4 config command opcode.
1681 *
1682 * Description:
1683 * Request SLI4 interface type-2 device to perform a physical register set
1684 * access.
1685 *
1686 * Returns:
1687 * zero for success
1688 **/
1689 static ssize_t
1690 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1691 {
1692 struct completion online_compl;
1693 struct pci_dev *pdev = phba->pcidev;
1694 uint32_t before_fc_flag;
1695 uint32_t sriov_nr_virtfn;
1696 uint32_t reg_val;
1697 int status = 0, rc = 0;
1698 int job_posted = 1, sriov_err;
1699
1700 if (!phba->cfg_enable_hba_reset)
1701 return -EACCES;
1702
1703 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1704 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1705 LPFC_SLI_INTF_IF_TYPE_2))
1706 return -EPERM;
1707
1708 /* Keep state if we need to restore back */
1709 before_fc_flag = phba->pport->fc_flag;
1710 sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
1711
1712 /* Disable SR-IOV virtual functions if enabled */
1713 if (phba->cfg_sriov_nr_virtfn) {
1714 pci_disable_sriov(pdev);
1715 phba->cfg_sriov_nr_virtfn = 0;
1716 }
1717
1718 if (opcode == LPFC_FW_DUMP)
1719 phba->hba_flag |= HBA_FW_DUMP_OP;
1720
1721 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1722
1723 if (status != 0) {
1724 phba->hba_flag &= ~HBA_FW_DUMP_OP;
1725 return status;
1726 }
1727
1728 /* wait for the device to be quiesced before firmware reset */
1729 msleep(100);
1730
1731 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
1732 LPFC_CTL_PDEV_CTL_OFFSET);
1733
1734 if (opcode == LPFC_FW_DUMP)
1735 reg_val |= LPFC_FW_DUMP_REQUEST;
1736 else if (opcode == LPFC_FW_RESET)
1737 reg_val |= LPFC_CTL_PDEV_CTL_FRST;
1738 else if (opcode == LPFC_DV_RESET)
1739 reg_val |= LPFC_CTL_PDEV_CTL_DRST;
1740
1741 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
1742 LPFC_CTL_PDEV_CTL_OFFSET);
1743 /* flush */
1744 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
1745
1746 /* delay driver action following IF_TYPE_2 reset */
1747 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1748
1749 if (rc == -EPERM) {
1750 /* no privilege for reset */
1751 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1752 "3150 No privilege to perform the requested "
1753 "access: x%x\n", reg_val);
1754 } else if (rc == -EIO) {
1755 /* reset failed, there is nothing more we can do */
1756 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1757 "3153 Fail to perform the requested "
1758 "access: x%x\n", reg_val);
1759 return rc;
1760 }
1761
1762 /* keep the original port state */
1763 if (before_fc_flag & FC_OFFLINE_MODE)
1764 goto out;
1765
1766 init_completion(&online_compl);
1767 job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
1768 LPFC_EVT_ONLINE);
1769 if (!job_posted)
1770 goto out;
1771
1772 wait_for_completion(&online_compl);
1773
1774 out:
1775 /* in any case, restore the virtual functions enabled as before */
1776 if (sriov_nr_virtfn) {
1777 sriov_err =
1778 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
1779 if (!sriov_err)
1780 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
1781 }
1782
1783 /* return proper error code */
1784 if (!rc) {
1785 if (!job_posted)
1786 rc = -ENOMEM;
1787 else if (status)
1788 rc = -EIO;
1789 }
1790 return rc;
1791 }
1792
1793 /**
1794 * lpfc_nport_evt_cnt_show - Return the number of nport events
1795 * @dev: class device that is converted into a Scsi_host.
1796 * @attr: device attribute, not used.
1797 * @buf: on return contains the ascii number of nport events.
1798 *
1799 * Returns: size of formatted string.
1800 **/
1801 static ssize_t
1802 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
1803 char *buf)
1804 {
1805 struct Scsi_Host *shost = class_to_shost(dev);
1806 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1807 struct lpfc_hba *phba = vport->phba;
1808
1809 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
1810 }
1811
1812 static int
1813 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
1814 {
1815 LPFC_MBOXQ_t *mbox = NULL;
1816 unsigned long val = 0;
1817 char *pval = NULL;
1818 int rc = 0;
1819
1820 if (!strncmp("enable", buff_out,
1821 strlen("enable"))) {
1822 pval = buff_out + strlen("enable") + 1;
1823 rc = kstrtoul(pval, 0, &val);
1824 if (rc)
1825 return rc; /* Invalid number */
1826 } else if (!strncmp("disable", buff_out,
1827 strlen("disable"))) {
1828 val = 0;
1829 } else {
1830 return -EINVAL; /* Invalid command */
1831 }
1832
1833 switch (val) {
1834 case 0:
1835 val = 0x0; /* Disable */
1836 break;
1837 case 2:
1838 val = 0x1; /* Enable two port trunk */
1839 break;
1840 case 4:
1841 val = 0x2; /* Enable four port trunk */
1842 break;
1843 default:
1844 return -EINVAL;
1845 }
1846
1847 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1848 "0070 Set trunk mode with val %ld ", val);
1849
1850 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1851 if (!mbox)
1852 return -ENOMEM;
1853
1854 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1855 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
1856 12, LPFC_SLI4_MBX_EMBED);
1857
1858 bf_set(lpfc_mbx_set_trunk_mode,
1859 &mbox->u.mqe.un.set_trunk_mode,
1860 val);
1861 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1862 if (rc)
1863 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1864 "0071 Set trunk mode failed with status: %d",
1865 rc);
1866 mempool_free(mbox, phba->mbox_mem_pool);
1867
1868 return 0;
1869 }
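/*
 * Example use of the trunking interface (a sketch; the sysfs path and host
 * number are assumptions about a typical installation). lpfc_board_mode_store()
 * below strips the leading "trunk " keyword and passes the remainder of the
 * string to lpfc_set_trunking():
 *
 *   echo "trunk enable 2" > /sys/class/scsi_host/host0/board_mode
 *   echo "trunk enable 4" > /sys/class/scsi_host/host0/board_mode
 *   echo "trunk disable"  > /sys/class/scsi_host/host0/board_mode
 */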
1870
1871 /**
1872 * lpfc_board_mode_show - Return the state of the board
1873 * @dev: class device that is converted into a Scsi_host.
1874 * @attr: device attribute, not used.
1875 * @buf: on return contains the state of the adapter.
1876 *
1877 * Returns: size of formatted string.
1878 **/
1879 static ssize_t
1880 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
1881 char *buf)
1882 {
1883 struct Scsi_Host *shost = class_to_shost(dev);
1884 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1885 struct lpfc_hba *phba = vport->phba;
1886 char *state;
1887
1888 if (phba->link_state == LPFC_HBA_ERROR)
1889 state = "error";
1890 else if (phba->link_state == LPFC_WARM_START)
1891 state = "warm start";
1892 else if (phba->link_state == LPFC_INIT_START)
1893 state = "offline";
1894 else
1895 state = "online";
1896
1897 return scnprintf(buf, PAGE_SIZE, "%s\n", state);
1898 }
1899
1900 /**
1901 * lpfc_board_mode_store - Puts the hba in online, offline, warm or error state
1902 * @dev: class device that is converted into a Scsi_host.
1903 * @attr: device attribute, not used.
1904 * @buf: containing one of the strings "online", "offline", "warm", "error",
 *       "dump", "fw_reset", "dv_reset", "pci_bus_reset", "heartbeat" or "trunk".
1905 * @count: unused variable.
1906 *
1907 * Returns:
1908 * -EACCES if enable hba reset not enabled
1909 * -EINVAL if the buffer does not contain a valid string (see above)
1910 * -EIO if lpfc_workq_post_event() or lpfc_do_offline() fails
1911 * buf length greater than zero indicates success
1912 **/
1913 static ssize_t
1914 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1915 const char *buf, size_t count)
1916 {
1917 struct Scsi_Host *shost = class_to_shost(dev);
1918 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1919 struct lpfc_hba *phba = vport->phba;
1920 struct completion online_compl;
1921 char *board_mode_str = NULL;
1922 int status = 0;
1923 int rc;
1924
1925 if (!phba->cfg_enable_hba_reset) {
1926 status = -EACCES;
1927 goto board_mode_out;
1928 }
1929
1930 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1931 "3050 lpfc_board_mode set to %s\n", buf);
1932
1933 init_completion(&online_compl);
1934
1935 if (strncmp(buf, "online", sizeof("online") - 1) == 0) {
1936 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1937 LPFC_EVT_ONLINE);
1938 if (rc == 0) {
1939 status = -ENOMEM;
1940 goto board_mode_out;
1941 }
1942 wait_for_completion(&online_compl);
1943 if (status)
1944 status = -EIO;
1945 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
1946 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1947 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
1948 if (phba->sli_rev == LPFC_SLI_REV4)
1949 status = -EINVAL;
1950 else
1951 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
1952 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
1953 if (phba->sli_rev == LPFC_SLI_REV4)
1954 status = -EINVAL;
1955 else
1956 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
1957 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
1958 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
1959 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
1960 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1961 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1962 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1963 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1964 == 0)
1965 status = lpfc_reset_pci_bus(phba);
1966 else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0)
1967 lpfc_issue_hb_tmo(phba);
1968 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1969 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1970 else
1971 status = -EINVAL;
1972
1973 board_mode_out:
1974 if (!status)
1975 return strlen(buf);
1976 else {
1977 board_mode_str = strchr(buf, '\n');
1978 if (board_mode_str)
1979 *board_mode_str = '\0';
1980 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1981 "3097 Failed \"%s\", status(%d), "
1982 "fc_flag(x%x)\n",
1983 buf, status, phba->pport->fc_flag);
1984 return status;
1985 }
1986 }
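/*
 * Example board_mode transitions (a sketch; the sysfs path assumes the
 * standard scsi_host layout and the host number will differ per system):
 *
 *   echo offline  > /sys/class/scsi_host/host0/board_mode
 *   echo online   > /sys/class/scsi_host/host0/board_mode
 *   echo fw_reset > /sys/class/scsi_host/host0/board_mode
 *
 * "warm" and "error" are rejected with -EINVAL on SLI-4 adapters, while
 * "dump", "fw_reset" and "dv_reset" are routed through
 * lpfc_sli4_pdev_reg_request() and therefore additionally require
 * lpfc_enable_hba_reset and an if_type-2 (or later) SLI-4 port.
 */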
1987
1988 /**
1989 * lpfc_get_hba_info - Return various bits of information about the adapter
1990 * @phba: pointer to the adapter structure.
1991 * @mxri: max xri count.
1992 * @axri: available xri count.
1993 * @mrpi: max rpi count.
1994 * @arpi: available rpi count.
1995 * @mvpi: max vpi count.
1996 * @avpi: available vpi count.
1997 *
1998 * Description:
1999 * If an integer pointer for a count is not NULL then the value for that
2000 * count is returned.
2001 *
2002 * Returns:
2003 * zero on error
2004 * one for success
2005 **/
2006 static int
2007 lpfc_get_hba_info(struct lpfc_hba *phba,
2008 uint32_t *mxri, uint32_t *axri,
2009 uint32_t *mrpi, uint32_t *arpi,
2010 uint32_t *mvpi, uint32_t *avpi)
2011 {
2012 struct lpfc_mbx_read_config *rd_config;
2013 LPFC_MBOXQ_t *pmboxq;
2014 MAILBOX_t *pmb;
2015 int rc = 0;
2016 uint32_t max_vpi;
2017
2018 /*
2019 * prevent udev from issuing mailbox commands until the port is
2020 * configured.
2021 */
2022 if (phba->link_state < LPFC_LINK_DOWN ||
2023 !phba->mbox_mem_pool ||
2024 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
2025 return 0;
2026
2027 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
2028 return 0;
2029
2030 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2031 if (!pmboxq)
2032 return 0;
2033 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
2034
2035 pmb = &pmboxq->u.mb;
2036 pmb->mbxCommand = MBX_READ_CONFIG;
2037 pmb->mbxOwner = OWN_HOST;
2038 pmboxq->ctx_buf = NULL;
2039
2040 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2041 rc = MBX_NOT_FINISHED;
2042 else
2043 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
2044
2045 if (rc != MBX_SUCCESS) {
2046 if (rc != MBX_TIMEOUT)
2047 mempool_free(pmboxq, phba->mbox_mem_pool);
2048 return 0;
2049 }
2050
2051 if (phba->sli_rev == LPFC_SLI_REV4) {
2052 rd_config = &pmboxq->u.mqe.un.rd_config;
2053 if (mrpi)
2054 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
2055 if (arpi)
2056 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
2057 phba->sli4_hba.max_cfg_param.rpi_used;
2058 if (mxri)
2059 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
2060 if (axri)
2061 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
2062 phba->sli4_hba.max_cfg_param.xri_used;
2063
2064 /* Account for differences with SLI-3. Get vpi count from
2065 * mailbox data and subtract one for max vpi value.
2066 */
2067 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
2068 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
2069
2070 /* Limit the max we support */
2071 if (max_vpi > LPFC_MAX_VPI)
2072 max_vpi = LPFC_MAX_VPI;
2073 if (mvpi)
2074 *mvpi = max_vpi;
2075 if (avpi)
2076 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
2077 } else {
2078 if (mrpi)
2079 *mrpi = pmb->un.varRdConfig.max_rpi;
2080 if (arpi)
2081 *arpi = pmb->un.varRdConfig.avail_rpi;
2082 if (mxri)
2083 *mxri = pmb->un.varRdConfig.max_xri;
2084 if (axri)
2085 *axri = pmb->un.varRdConfig.avail_xri;
2086 if (mvpi)
2087 *mvpi = pmb->un.varRdConfig.max_vpi;
2088 if (avpi) {
2089 /* avail_vpi is only valid if link is up and ready */
2090 if (phba->link_state == LPFC_HBA_READY)
2091 *avpi = pmb->un.varRdConfig.avail_vpi;
2092 else
2093 *avpi = pmb->un.varRdConfig.max_vpi;
2094 }
2095 }
2096
2097 mempool_free(pmboxq, phba->mbox_mem_pool);
2098 return 1;
2099 }
2100
2101 /**
2102 * lpfc_max_rpi_show - Return maximum rpi
2103 * @dev: class device that is converted into a Scsi_host.
2104 * @attr: device attribute, not used.
2105 * @buf: on return contains the maximum rpi count in decimal or "Unknown".
2106 *
2107 * Description:
2108 * Calls lpfc_get_hba_info() asking for just the mrpi count.
2109 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2110 * to "Unknown" and the buffer length is returned, therefore the caller
2111 * must check for "Unknown" in the buffer to detect a failure.
2112 *
2113 * Returns: size of formatted string.
2114 **/
2115 static ssize_t
2116 lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
2117 char *buf)
2118 {
2119 struct Scsi_Host *shost = class_to_shost(dev);
2120 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2121 struct lpfc_hba *phba = vport->phba;
2122 uint32_t cnt;
2123
2124 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
2125 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2126 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2127 }
2128
2129 /**
2130 * lpfc_used_rpi_show - Return maximum rpi minus available rpi
2131 * @dev: class device that is converted into a Scsi_host.
2132 * @attr: device attribute, not used.
2133 * @buf: on return contains the used rpi count in decimal or "Unknown".
2134 *
2135 * Description:
2136 * Calls lpfc_get_hba_info() asking for just the mrpi and arpi counts.
2137 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2138 * to "Unknown" and the buffer length is returned, therefore the caller
2139 * must check for "Unknown" in the buffer to detect a failure.
2140 *
2141 * Returns: size of formatted string.
2142 **/
2143 static ssize_t
2144 lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
2145 char *buf)
2146 {
2147 struct Scsi_Host *shost = class_to_shost(dev);
2148 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2149 struct lpfc_hba *phba = vport->phba;
2150 uint32_t cnt, acnt;
2151
2152 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
2153 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2154 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2155 }
2156
2157 /**
2158 * lpfc_max_xri_show - Return maximum xri
2159 * @dev: class device that is converted into a Scsi_host.
2160 * @attr: device attribute, not used.
2161 * @buf: on return contains the maximum xri count in decimal or "Unknown".
2162 *
2163 * Description:
2164 * Calls lpfc_get_hba_info() asking for just the mxri count.
2165 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2166 * to "Unknown" and the buffer length is returned, therefore the caller
2167 * must check for "Unknown" in the buffer to detect a failure.
2168 *
2169 * Returns: size of formatted string.
2170 **/
2171 static ssize_t
2172 lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
2173 char *buf)
2174 {
2175 struct Scsi_Host *shost = class_to_shost(dev);
2176 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2177 struct lpfc_hba *phba = vport->phba;
2178 uint32_t cnt;
2179
2180 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
2181 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2182 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2183 }
2184
2185 /**
2186 * lpfc_used_xri_show - Return maximum xri minus the available xri
2187 * @dev: class device that is converted into a Scsi_host.
2188 * @attr: device attribute, not used.
2189 * @buf: on return contains the used xri count in decimal or "Unknown".
2190 *
2191 * Description:
2192 * Calls lpfc_get_hba_info() asking for just the mxri and axri counts.
2193 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2194 * to "Unknown" and the buffer length is returned, therefore the caller
2195 * must check for "Unknown" in the buffer to detect a failure.
2196 *
2197 * Returns: size of formatted string.
2198 **/
2199 static ssize_t
2200 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
2201 char *buf)
2202 {
2203 struct Scsi_Host *shost = class_to_shost(dev);
2204 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2205 struct lpfc_hba *phba = vport->phba;
2206 uint32_t cnt, acnt;
2207
2208 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
2209 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2210 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2211 }
2212
2213 /**
2214 * lpfc_max_vpi_show - Return maximum vpi
2215 * @dev: class device that is converted into a Scsi_host.
2216 * @attr: device attribute, not used.
2217 * @buf: on return contains the maximum vpi count in decimal or "Unknown".
2218 *
2219 * Description:
2220 * Calls lpfc_get_hba_info() asking for just the mvpi count.
2221 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2222 * to "Unknown" and the buffer length is returned, therefore the caller
2223 * must check for "Unknown" in the buffer to detect a failure.
2224 *
2225 * Returns: size of formatted string.
2226 **/
2227 static ssize_t
2228 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
2229 char *buf)
2230 {
2231 struct Scsi_Host *shost = class_to_shost(dev);
2232 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2233 struct lpfc_hba *phba = vport->phba;
2234 uint32_t cnt;
2235
2236 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
2237 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2238 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2239 }
2240
2241 /**
2242 * lpfc_used_vpi_show - Return maximum vpi minus the available vpi
2243 * @dev: class device that is converted into a Scsi_host.
2244 * @attr: device attribute, not used.
2245 * @buf: on return contains the used vpi count in decimal or "Unknown".
2246 *
2247 * Description:
2248 * Calls lpfc_get_hba_info() asking for just the mvpi and avpi counts.
2249 * If lpfc_get_hba_info() returns zero (failure) the buffer text is set
2250 * to "Unknown" and the buffer length is returned, therefore the caller
2251 * must check for "Unknown" in the buffer to detect a failure.
2252 *
2253 * Returns: size of formatted string.
2254 **/
2255 static ssize_t
2256 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
2257 char *buf)
2258 {
2259 struct Scsi_Host *shost = class_to_shost(dev);
2260 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2261 struct lpfc_hba *phba = vport->phba;
2262 uint32_t cnt, acnt;
2263
2264 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
2265 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2266 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2267 }
2268
2269 /**
2270 * lpfc_npiv_info_show - Return text about NPIV support for the adapter
2271 * @dev: class device that is converted into a Scsi_host.
2272 * @attr: device attribute, not used.
2273 * @buf: text that must be interpreted to determine if npiv is supported.
2274 *
2275 * Description:
2276 * Buffer will contain text indicating npiv is not supported on the port,
2277 * the port is an NPIV physical port, or it is an npiv virtual port with
2278 * the id of the vport.
2279 *
2280 * Returns: size of formatted string.
2281 **/
2282 static ssize_t
2283 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
2284 char *buf)
2285 {
2286 struct Scsi_Host *shost = class_to_shost(dev);
2287 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2288 struct lpfc_hba *phba = vport->phba;
2289
2290 if (!(phba->max_vpi))
2291 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
2292 if (vport->port_type == LPFC_PHYSICAL_PORT)
2293 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
2294 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
2295 }
2296
2297 /**
2298 * lpfc_poll_show - Return text about poll support for the adapter
2299 * @dev: class device that is converted into a Scsi_host.
2300 * @attr: device attribute, not used.
2301 * @buf: on return contains the cfg_poll in hex.
2302 *
2303 * Notes:
2304 * cfg_poll should be a lpfc_polling_flags type.
2305 *
2306 * Returns: size of formatted string.
2307 **/
2308 static ssize_t
2309 lpfc_poll_show(struct device *dev, struct device_attribute *attr,
2310 char *buf)
2311 {
2312 struct Scsi_Host *shost = class_to_shost(dev);
2313 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2314 struct lpfc_hba *phba = vport->phba;
2315
2316 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
2317 }
2318
2319 /**
2320 * lpfc_poll_store - Set the value of cfg_poll for the adapter
2321 * @dev: class device that is converted into a Scsi_host.
2322 * @attr: device attribute, not used.
2323 * @buf: one or more lpfc_polling_flags values.
2324 * @count: not used.
2325 *
2326 * Notes:
2327 * buf contents converted to integer and checked for a valid value.
2328 *
2329 * Returns:
2330 * -EINVAL if the buffer cannot be converted or is out of range
2331 * length of the buf on success
2332 **/
2333 static ssize_t
2334 lpfc_poll_store(struct device *dev, struct device_attribute *attr,
2335 const char *buf, size_t count)
2336 {
2337 struct Scsi_Host *shost = class_to_shost(dev);
2338 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2339 struct lpfc_hba *phba = vport->phba;
2340 uint32_t creg_val;
2341 uint32_t old_val;
2342 int val = 0;
2343
2344 if (!isdigit(buf[0]))
2345 return -EINVAL;
2346
2347 if (sscanf(buf, "%i", &val) != 1)
2348 return -EINVAL;
2349
2350 if ((val & 0x3) != val)
2351 return -EINVAL;
2352
2353 if (phba->sli_rev == LPFC_SLI_REV4)
2354 val = 0;
2355
2356 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2357 "3051 lpfc_poll changed from %d to %d\n",
2358 phba->cfg_poll, val);
2359
2360 spin_lock_irq(&phba->hbalock);
2361
2362 old_val = phba->cfg_poll;
2363
2364 if (val & ENABLE_FCP_RING_POLLING) {
2365 if ((val & DISABLE_FCP_RING_INT) &&
2366 !(old_val & DISABLE_FCP_RING_INT)) {
2367 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2368 spin_unlock_irq(&phba->hbalock);
2369 return -EINVAL;
2370 }
2371 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
2372 writel(creg_val, phba->HCregaddr);
2373 readl(phba->HCregaddr); /* flush */
2374
2375 lpfc_poll_start_timer(phba);
2376 }
2377 } else if (val != 0x0) {
2378 spin_unlock_irq(&phba->hbalock);
2379 return -EINVAL;
2380 }
2381
2382 if (!(val & DISABLE_FCP_RING_INT) &&
2383 (old_val & DISABLE_FCP_RING_INT))
2384 {
2385 spin_unlock_irq(&phba->hbalock);
2386 del_timer(&phba->fcp_poll_timer);
2387 spin_lock_irq(&phba->hbalock);
2388 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2389 spin_unlock_irq(&phba->hbalock);
2390 return -EINVAL;
2391 }
2392 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2393 writel(creg_val, phba->HCregaddr);
2394 readl(phba->HCregaddr); /* flush */
2395 }
2396
2397 phba->cfg_poll = val;
2398
2399 spin_unlock_irq(&phba->hbalock);
2400
2401 return strlen(buf);
2402 }
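/*
 * Example polling-mode settings (a sketch; assumes the lpfc_poll attribute
 * is registered elsewhere in this file and that ENABLE_FCP_RING_POLLING and
 * DISABLE_FCP_RING_INT are the 0x1 and 0x2 bits tested above):
 *
 *   echo 0x0 > /sys/class/scsi_host/host0/lpfc_poll   # interrupt driven
 *   echo 0x1 > /sys/class/scsi_host/host0/lpfc_poll   # poll the FCP ring
 *   echo 0x3 > /sys/class/scsi_host/host0/lpfc_poll   # poll, ring IRQ masked
 *
 * On SLI-4 adapters the value is forced to 0 above, so these modes only
 * apply to SLI-3 parts.
 */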
2403
2404 /**
2405 * lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
2406 * @dev: class converted to a Scsi_host structure.
2407 * @attr: device attribute, not used.
2408 * @buf: on return contains the maximum number of supported virtual functions.
2409 *
2410 * Description:
2411 * Returns the maximum number of virtual functions a physical function can
2412 * support; 0 is returned if called on a virtual function.
2413 *
2414 * Returns: size of formatted string.
2415 **/
2416 static ssize_t
2417 lpfc_sriov_hw_max_virtfn_show(struct device *dev,
2418 struct device_attribute *attr,
2419 char *buf)
2420 {
2421 struct Scsi_Host *shost = class_to_shost(dev);
2422 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2423 struct lpfc_hba *phba = vport->phba;
2424 uint16_t max_nr_virtfn;
2425
2426 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
2427 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
2428 }
2429
2430 /**
2431 * lpfc_enable_bbcr_set - Set the lpfc_enable_bbcr attribute value
2432 * @phba: pointer to the adapter structure.
2433 * @val: integer attribute value.
2434 *
2435 * Description:
2436 * Validates the min and max values then sets the
2437 * adapter config field if it is within the valid range. Prints an error
2438 * message and does not set the parameter if the value is invalid.
2439 *
2440 * Returns:
2441 * zero on success
2442 * -EINVAL if val is invalid
2443 */
2444 static ssize_t
2445 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
2446 {
2447 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
2448 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2449 "3068 lpfc_enable_bbcr changed from %d to "
2450 "%d\n", phba->cfg_enable_bbcr, val);
2451 phba->cfg_enable_bbcr = val;
2452 return 0;
2453 }
2454 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2455 "0451 lpfc_enable_bbcr cannot set to %d, range is 0, "
2456 "1\n", val);
2457 return -EINVAL;
2458 }
2459
2460 /*
2461 * lpfc_param_show - Return a cfg attribute value in decimal
2462 *
2463 * Description:
2464 * Macro that given an attr e.g. hba_queue_depth expands
2465 * into a function with the name lpfc_hba_queue_depth_show.
2466 *
2467 * lpfc_##attr##_show: Return the decimal value of an adapter's cfg_xxx field.
2468 * @dev: class device that is converted into a Scsi_host.
2469 * @attr: device attribute, not used.
2470 * @buf: on return contains the attribute value in decimal.
2471 *
2472 * Returns: size of formatted string.
2473 **/
2474 #define lpfc_param_show(attr) \
2475 static ssize_t \
2476 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2477 char *buf) \
2478 { \
2479 struct Scsi_Host *shost = class_to_shost(dev);\
2480 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2481 struct lpfc_hba *phba = vport->phba;\
2482 return scnprintf(buf, PAGE_SIZE, "%d\n",\
2483 phba->cfg_##attr);\
2484 }
2485
2486 /*
2487 * lpfc_param_hex_show - Return a cfg attribute value in hex
2488 *
2489 * Description:
2490 * Macro that given an attr e.g. hba_queue_depth expands
2491 * into a function with the name lpfc_hba_queue_depth_show
2492 *
2493 * lpfc_##attr##_show: Return the hex value of an adapter's cfg_xxx field.
2494 * @dev: class device that is converted into a Scsi_host.
2495 * @attr: device attribute, not used.
2496 * @buf: on return contains the attribute value in hexadecimal.
2497 *
2498 * Returns: size of formatted string.
2499 **/
2500 #define lpfc_param_hex_show(attr) \
2501 static ssize_t \
2502 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2503 char *buf) \
2504 { \
2505 struct Scsi_Host *shost = class_to_shost(dev);\
2506 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2507 struct lpfc_hba *phba = vport->phba;\
2508 uint val = 0;\
2509 val = phba->cfg_##attr;\
2510 return scnprintf(buf, PAGE_SIZE, "%#x\n",\
2511 phba->cfg_##attr);\
2512 }
2513
2514 /*
2515 * lpfc_param_init - Initializes a cfg attribute
2516 *
2517 * Description:
2518 * Macro that given an attr e.g. hba_queue_depth expands
2519 * into a function with the name lpfc_hba_queue_depth_init. The macro also
2520 * takes a default argument, a minimum and maximum argument.
2521 *
2522 * lpfc_##attr##_init: Initializes an attribute.
2523 * @phba: pointer to the adapter structure.
2524 * @val: integer attribute value.
2525 *
2526 * Validates the min and max values then sets the adapter config field
2527 * accordingly, or uses the default if out of range and prints an error message.
2528 *
2529 * Returns:
2530 * zero on success
2531 * -EINVAL if default used
2532 **/
2533 #define lpfc_param_init(attr, default, minval, maxval) \
2534 static int \
2535 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
2536 { \
2537 if (lpfc_rangecheck(val, minval, maxval)) {\
2538 phba->cfg_##attr = val;\
2539 return 0;\
2540 }\
2541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2542 "0449 lpfc_"#attr" attribute cannot be set to %d, "\
2543 "allowed range is ["#minval", "#maxval"]\n", val); \
2544 phba->cfg_##attr = default;\
2545 return -EINVAL;\
2546 }
2547
2548 /*
2549 * lpfc_param_set - Set a cfg attribute value
2550 *
2551 * Description:
2552 * Macro that given an attr e.g. hba_queue_depth expands
2553 * into a function with the name lpfc_hba_queue_depth_set
2554 *
2555 * lpfc_##attr##_set: Sets an attribute value.
2556 * @phba: pointer to the adapter structure.
2557 * @val: integer attribute value.
2558 *
2559 * Description:
2560 * Validates the min and max values then sets the
2561 * adapter config field if it is within the valid range. Prints an error
2562 * message and does not set the parameter if the value is invalid.
2563 *
2564 * Returns:
2565 * zero on success
2566 * -EINVAL if val is invalid
2567 **/
2568 #define lpfc_param_set(attr, default, minval, maxval) \
2569 static int \
2570 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
2571 { \
2572 if (lpfc_rangecheck(val, minval, maxval)) {\
2573 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2574 "3052 lpfc_" #attr " changed from %d to %d\n", \
2575 phba->cfg_##attr, val); \
2576 phba->cfg_##attr = val;\
2577 return 0;\
2578 }\
2579 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2580 "0450 lpfc_"#attr" attribute cannot be set to %d, "\
2581 "allowed range is ["#minval", "#maxval"]\n", val); \
2582 return -EINVAL;\
2583 }
2584
2585 /*
2586 * lpfc_param_store - Set a vport attribute value
2587 *
2588 * Description:
2589 * Macro that given an attr e.g. hba_queue_depth expands
2590 * into a function with the name lpfc_hba_queue_depth_store.
2591 *
2592 * lpfc_##attr##_store: Set an attribute value.
2593 * @dev: class device that is converted into a Scsi_host.
2594 * @attr: device attribute, not used.
2595 * @buf: contains the attribute value in ascii.
2596 * @count: not used.
2597 *
2598 * Description:
2599 * Convert the ascii text number to an integer, then
2600 * use the lpfc_##attr##_set function to set the value.
2601 *
2602 * Returns:
2603 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2604 * length of buffer upon success.
2605 **/
2606 #define lpfc_param_store(attr) \
2607 static ssize_t \
2608 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2609 const char *buf, size_t count) \
2610 { \
2611 struct Scsi_Host *shost = class_to_shost(dev);\
2612 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2613 struct lpfc_hba *phba = vport->phba;\
2614 uint val = 0;\
2615 if (!isdigit(buf[0]))\
2616 return -EINVAL;\
2617 if (sscanf(buf, "%i", &val) != 1)\
2618 return -EINVAL;\
2619 if (lpfc_##attr##_set(phba, val) == 0) \
2620 return strlen(buf);\
2621 else \
2622 return -EINVAL;\
2623 }
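/*
 * Typical combination of the lpfc_param_* helpers (a sketch; the attribute
 * name and range below are illustrative only, not a driver parameter):
 *
 *   lpfc_param_init(example_attr, 10, 1, 255)
 *   lpfc_param_set(example_attr, 10, 1, 255)
 *   lpfc_param_show(example_attr)
 *   lpfc_param_store(example_attr)
 *
 * expands into lpfc_example_attr_init/_set/_show/_store, all operating on
 * phba->cfg_example_attr, which can then be bound to a sysfs attribute, for
 * example with DEVICE_ATTR(lpfc_example_attr, 0644, lpfc_example_attr_show,
 * lpfc_example_attr_store).
 */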
2624
2625 /*
2626 * lpfc_vport_param_show - Return decimal formatted cfg attribute value
2627 *
2628 * Description:
2629 * Macro that given an attr e.g. hba_queue_depth expands
2630 * into a function with the name lpfc_hba_queue_depth_show
2631 *
2632 * lpfc_##attr##_show: prints the attribute value in decimal.
2633 * @dev: class device that is converted into a Scsi_host.
2634 * @attr: device attribute, not used.
2635 * @buf: on return contains the attribute value in decimal.
2636 *
2637 * Returns: length of formatted string.
2638 **/
2639 #define lpfc_vport_param_show(attr) \
2640 static ssize_t \
2641 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2642 char *buf) \
2643 { \
2644 struct Scsi_Host *shost = class_to_shost(dev);\
2645 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2646 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
2647 }
2648
2649 /*
2650 * lpfc_vport_param_hex_show - Return hex formatted attribute value
2651 *
2652 * Description:
2653 * Macro that given an attr e.g.
2654 * hba_queue_depth expands into a function with the name
2655 * lpfc_hba_queue_depth_show
2656 *
2657 * lpfc_##attr##_show: prints the attribute value in hexadecimal.
2658 * @dev: class device that is converted into a Scsi_host.
2659 * @attr: device attribute, not used.
2660 * @buf: on return contains the attribute value in hexadecimal.
2661 *
2662 * Returns: length of formatted string.
2663 **/
2664 #define lpfc_vport_param_hex_show(attr) \
2665 static ssize_t \
2666 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2667 char *buf) \
2668 { \
2669 struct Scsi_Host *shost = class_to_shost(dev);\
2670 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2671 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
2672 }
2673
2674 /*
2675 * lpfc_vport_param_init - Initialize a vport cfg attribute
2676 *
2677 * Description:
2678 * Macro that given an attr e.g. hba_queue_depth expands
2679 * into a function with the name lpfc_hba_queue_depth_init. The macro also
2680 * takes a default argument, a minimum and maximum argument.
2681 *
2682 * lpfc_##attr##_init: validates the min and max values then sets the
2683 * adapter config field accordingly, or uses the default if out of range
2684 * and prints an error message.
2685 * @phba: pointer to the adapter structure.
2686 * @val: integer attribute value.
2687 *
2688 * Returns:
2689 * zero on success
2690 * -EINVAL if default used
2691 **/
2692 #define lpfc_vport_param_init(attr, default, minval, maxval) \
2693 static int \
2694 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
2695 { \
2696 if (lpfc_rangecheck(val, minval, maxval)) {\
2697 vport->cfg_##attr = val;\
2698 return 0;\
2699 }\
2700 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2701 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
2702 "allowed range is ["#minval", "#maxval"]\n", val); \
2703 vport->cfg_##attr = default;\
2704 return -EINVAL;\
2705 }
2706
2707 /*
2708 * lpfc_vport_param_set - Set a vport cfg attribute
2709 *
2710 * Description:
2711 * Macro that given an attr e.g. hba_queue_depth expands
2712 * into a function with the name lpfc_hba_queue_depth_set
2713 *
2714 * lpfc_##attr##_set: validates the min and max values then sets the
2715 * adapter config field if it is within the valid range. Prints an error
2716 * message and does not set the parameter if the value is invalid.
2717 * @phba: pointer to the adapter structure.
2718 * @val: integer attribute value.
2719 *
2720 * Returns:
2721 * zero on success
2722 * -EINVAL if val is invalid
2723 **/
2724 #define lpfc_vport_param_set(attr, default, minval, maxval) \
2725 static int \
2726 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
2727 { \
2728 if (lpfc_rangecheck(val, minval, maxval)) {\
2729 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2730 "3053 lpfc_" #attr \
2731 " changed from %d (x%x) to %d (x%x)\n", \
2732 vport->cfg_##attr, vport->cfg_##attr, \
2733 val, val); \
2734 vport->cfg_##attr = val;\
2735 return 0;\
2736 }\
2737 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2738 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
2739 "allowed range is ["#minval", "#maxval"]\n", val); \
2740 return -EINVAL;\
2741 }
2742
2743 /*
2744 * lpfc_vport_param_store - Set a vport attribute
2745 *
2746 * Description:
2747 * Macro that given an attr e.g. hba_queue_depth
2748 * expands into a function with the name lpfc_hba_queue_depth_store
2749 *
2750 * lpfc_##attr##_store: convert the ascii text number to an integer, then
2751 * use the lpfc_##attr##_set function to set the value.
2752 * @dev: class device that is converted into a Scsi_host.
2753 * @buf: contains the attribute value in decimal.
2754 * @count: not used.
2755 *
2756 * Returns:
2757 * -EINVAL if val is invalid or lpfc_##attr##_set() fails
2758 * length of buffer upon success.
2759 **/
2760 #define lpfc_vport_param_store(attr) \
2761 static ssize_t \
2762 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2763 const char *buf, size_t count) \
2764 { \
2765 struct Scsi_Host *shost = class_to_shost(dev);\
2766 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2767 uint val = 0;\
2768 if (!isdigit(buf[0]))\
2769 return -EINVAL;\
2770 if (sscanf(buf, "%i", &val) != 1)\
2771 return -EINVAL;\
2772 if (lpfc_##attr##_set(vport, val) == 0) \
2773 return strlen(buf);\
2774 else \
2775 return -EINVAL;\
2776 }
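/*
 * The lpfc_vport_param_* variants follow the same pattern but operate on a
 * per-vport field, vport->cfg_<attr>, instead of phba->cfg_<attr>. A sketch
 * with an illustrative name and range (not a real driver parameter):
 *
 *   lpfc_vport_param_init(example_vattr, 3, 0, 7)
 *   lpfc_vport_param_set(example_vattr, 3, 0, 7)
 *   lpfc_vport_param_show(example_vattr)
 *   lpfc_vport_param_store(example_vattr)
 *
 * generates lpfc_example_vattr_init/_set/_show/_store against
 * vport->cfg_example_vattr for per-vport tunables.
 */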
2777
2778
2779 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
2780 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
2781 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
2782 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
2783 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
2784 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
2785 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
2786 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
2787 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
2788 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
2789 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
2790 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
2791 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
2792 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
2793 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
2794 lpfc_link_state_store);
2795 static DEVICE_ATTR(option_rom_version, S_IRUGO,
2796 lpfc_option_rom_version_show, NULL);
2797 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
2798 lpfc_num_discovered_ports_show, NULL);
2799 static DEVICE_ATTR(menlo_mgmt_mode, S_IRUGO, lpfc_mlomgmt_show, NULL);
2800 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
2801 static DEVICE_ATTR_RO(lpfc_drvr_version);
2802 static DEVICE_ATTR_RO(lpfc_enable_fip);
2803 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
2804 lpfc_board_mode_show, lpfc_board_mode_store);
2805 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
2806 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
2807 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
2808 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
2809 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
2810 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
2811 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
2812 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
2813 static DEVICE_ATTR_RO(lpfc_temp_sensor);
2814 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
2815 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2816 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2817 NULL);
2818 static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
2819
2820 static char *lpfc_soft_wwn_key = "C99G71SL8032A";
2821 #define WWN_SZ 8
2822 /**
2823 * lpfc_wwn_set - Convert string to the 8 byte WWN value.
2824 * @buf: WWN string.
2825 * @cnt: Length of string.
2826 * @wwn: Array to receive converted wwn value.
2827 *
2828 * Returns:
2829 * -EINVAL if the buffer does not contain a valid wwn
2830 * 0 success
2831 **/
2832 static size_t
2833 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2834 {
2835 unsigned int i, j;
2836
2837 /* Count may include a LF at end of string */
2838 if (buf[cnt-1] == '\n')
2839 cnt--;
2840
2841 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2842 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2843 return -EINVAL;
2844
2845 memset(wwn, 0, WWN_SZ);
2846
2847 /* Validate and store the new name */
2848 for (i = 0, j = 0; i < 16; i++) {
2849 if ((*buf >= 'a') && (*buf <= 'f'))
2850 j = ((j << 4) | ((*buf++ - 'a') + 10));
2851 else if ((*buf >= 'A') && (*buf <= 'F'))
2852 j = ((j << 4) | ((*buf++ - 'A') + 10));
2853 else if ((*buf >= '0') && (*buf <= '9'))
2854 j = ((j << 4) | (*buf++ - '0'));
2855 else
2856 return -EINVAL;
2857 if (i % 2) {
2858 wwn[i/2] = j & 0xff;
2859 j = 0;
2860 }
2861 }
2862 return 0;
2863 }
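/*
 * lpfc_wwn_set() accepts exactly 16 hex digits, optionally prefixed with
 * "x" or "0x" and optionally terminated by a newline. Illustrative inputs
 * (the WWN values are made up):
 *
 *   "10000000c9abcdef"        accepted
 *   "0x10000000c9abcdef\n"    accepted
 *   "10:00:00:00:c9:ab:cd:ef" rejected, the colon-separated form is not parsed
 */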
2864 /**
2865 * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid
2866 * @dev: class device that is converted into a Scsi_host.
2867 * @attr: device attribute, not used.
2868 * @buf: containing the string lpfc_soft_wwn_key.
2869 * @count: must be size of lpfc_soft_wwn_key.
2870 *
2871 * Returns:
2872 * -EINVAL if the buffer does not contain lpfc_soft_wwn_key
2873 * length of buf indicates success
2874 **/
2875 static ssize_t
2876 lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
2877 const char *buf, size_t count)
2878 {
2879 struct Scsi_Host *shost = class_to_shost(dev);
2880 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2881 struct lpfc_hba *phba = vport->phba;
2882 unsigned int cnt = count;
2883 uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
2884 u32 *fawwpn_key = (uint32_t *)&vport->fc_sparam.un.vendorVersion[0];
2885
2886 /*
2887 * We're doing a simple sanity check for soft_wwpn setting.
2888 * We require that the user write a specific key to enable
2889 * the soft_wwpn attribute to be settable. Once the attribute
2890 * is written, the enable key resets. If further updates are
2891 * desired, the key must be written again to re-enable the
2892 * attribute.
2893 *
2894 * The "key" is not secret - it is a hardcoded string shown
2895 * here. The intent is to protect against the random user or
2896 * application that is just writing attributes.
2897 */
2898 if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) {
2899 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2900 "0051 lpfc soft wwpn can not be enabled: "
2901 "fawwpn is enabled\n");
2902 return -EINVAL;
2903 }
2904
2905 /* count may include a LF at end of string */
2906 if (buf[cnt-1] == '\n')
2907 cnt--;
2908
2909 if ((cnt != strlen(lpfc_soft_wwn_key)) ||
2910 (strncmp(buf, lpfc_soft_wwn_key, strlen(lpfc_soft_wwn_key)) != 0))
2911 return -EINVAL;
2912
2913 phba->soft_wwn_enable = 1;
2914
2915 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2916 "lpfc%d: soft_wwpn assignment has been enabled.\n",
2917 phba->brd_no);
2918 dev_printk(KERN_WARNING, &phba->pcidev->dev,
2919 " The soft_wwpn feature is not supported by Broadcom.");
2920
2921 return count;
2922 }
2923 static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
2924
2925 /**
2926 * lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
2927 * @dev: class device that is converted into a Scsi_host.
2928 * @attr: device attribute, not used.
2929 * @buf: on return contains the wwpn in hexadecimal.
2930 *
2931 * Returns: size of formatted string.
2932 **/
2933 static ssize_t
2934 lpfc_soft_wwpn_show(struct device *dev, struct device_attribute *attr,
2935 char *buf)
2936 {
2937 struct Scsi_Host *shost = class_to_shost(dev);
2938 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2939 struct lpfc_hba *phba = vport->phba;
2940
2941 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2942 (unsigned long long)phba->cfg_soft_wwpn);
2943 }
2944
2945 /**
2946 * lpfc_soft_wwpn_store - Set the ww port name of the adapter
2947 * @dev: class device that is converted into a Scsi_host.
2948 * @attr: device attribute, not used.
2949 * @buf: contains the wwpn in hexadecimal.
2950 * @count: number of wwpn bytes in buf
2951 *
2952 * Returns:
2953 * -EACCES hba reset not enabled, adapter over temp
2954 * -EINVAL soft wwn not enabled, count is invalid, or a wwpn byte is invalid
2955 * -EIO error taking adapter offline or online
2956 * value of count on success
2957 **/
2958 static ssize_t
2959 lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
2960 const char *buf, size_t count)
2961 {
2962 struct Scsi_Host *shost = class_to_shost(dev);
2963 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2964 struct lpfc_hba *phba = vport->phba;
2965 struct completion online_compl;
2966 int stat1 = 0, stat2 = 0;
2967 unsigned int cnt = count;
2968 u8 wwpn[WWN_SZ];
2969 int rc;
2970
2971 if (!phba->cfg_enable_hba_reset)
2972 return -EACCES;
2973 spin_lock_irq(&phba->hbalock);
2974 if (phba->over_temp_state == HBA_OVER_TEMP) {
2975 spin_unlock_irq(&phba->hbalock);
2976 return -EACCES;
2977 }
2978 spin_unlock_irq(&phba->hbalock);
2979 /* count may include a LF at end of string */
2980 if (buf[cnt-1] == '\n')
2981 cnt--;
2982
2983 if (!phba->soft_wwn_enable)
2984 return -EINVAL;
2985
2986 /* lock setting wwpn, wwnn down */
2987 phba->soft_wwn_enable = 0;
2988
2989 rc = lpfc_wwn_set(buf, cnt, wwpn);
2990 if (rc) {
2991 /* not able to set wwpn, unlock it */
2992 phba->soft_wwn_enable = 1;
2993 return rc;
2994 }
2995
2996 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
2997 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
2998 if (phba->cfg_soft_wwnn)
2999 fc_host_node_name(shost) = phba->cfg_soft_wwnn;
3000
3001 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
3002 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
3003
3004 stat1 = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
3005 if (stat1)
3006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3007 "0463 lpfc_soft_wwpn attribute set failed to "
3008 "reinit adapter - %d\n", stat1);
3009 init_completion(&online_compl);
3010 rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
3011 LPFC_EVT_ONLINE);
3012 if (rc == 0)
3013 return -ENOMEM;
3014
3015 wait_for_completion(&online_compl);
3016 if (stat2)
3017 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3018 "0464 lpfc_soft_wwpn attribute set failed to "
3019 "reinit adapter - %d\n", stat2);
3020 return (stat1 || stat2) ? -EIO : count;
3021 }
3022 static DEVICE_ATTR_RW(lpfc_soft_wwpn);
3023
3024 /**
3025 * lpfc_soft_wwnn_show - Return the cfg soft ww node name for the adapter
3026 * @dev: class device that is converted into a Scsi_host.
3027 * @attr: device attribute, not used.
3028 * @buf: on return contains the wwnn in hexadecimal.
3029 *
3030 * Returns: size of formatted string.
3031 **/
3032 static ssize_t
3033 lpfc_soft_wwnn_show(struct device *dev, struct device_attribute *attr,
3034 char *buf)
3035 {
3036 struct Scsi_Host *shost = class_to_shost(dev);
3037 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3038 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3039 (unsigned long long)phba->cfg_soft_wwnn);
3040 }
3041
3042 /**
3043 * lpfc_soft_wwnn_store - sets the ww node name of the adapter
3044 * @dev: class device that is converted into a Scsi_host.
3045 * @attr: device attribute, not used.
3046 * @buf: contains the ww node name in hexadecimal.
3047 * @count: number of wwnn bytes in buf.
3048 *
3049 * Returns:
3050 * -EINVAL soft wwn not enabled, count is invalid, or a wwnn byte is invalid
3051 * value of count on success
3052 **/
3053 static ssize_t
3054 lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
3055 const char *buf, size_t count)
3056 {
3057 struct Scsi_Host *shost = class_to_shost(dev);
3058 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3059 unsigned int cnt = count;
3060 u8 wwnn[WWN_SZ];
3061 int rc;
3062
3063 /* count may include a LF at end of string */
3064 if (buf[cnt-1] == '\n')
3065 cnt--;
3066
3067 if (!phba->soft_wwn_enable)
3068 return -EINVAL;
3069
3070 rc = lpfc_wwn_set(buf, cnt, wwnn);
3071 if (rc) {
3072 /* Allow wwnn to be set many times, as long as the enable
3073 * is set. However, once the wwpn is set, everything locks.
3074 */
3075 return rc;
3076 }
3077
3078 phba->cfg_soft_wwnn = wwn_to_u64(wwnn);
3079
3080 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
3081 "lpfc%d: soft_wwnn set. Value will take effect upon "
3082 "setting of the soft_wwpn\n", phba->brd_no);
3083
3084 return count;
3085 }
3086 static DEVICE_ATTR_RW(lpfc_soft_wwnn);
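/*
 * Example soft WWN assignment sequence (a sketch; the sysfs path is an
 * assumption and the WWN values are made up). The enable key must be written
 * first, the optional node name next, and the port name last, since writing
 * lpfc_soft_wwpn consumes the enable and restarts the port:
 *
 *   echo C99G71SL8032A      > /sys/class/scsi_host/host0/lpfc_soft_wwn_enable
 *   echo 0x20000000c9abcdef > /sys/class/scsi_host/host0/lpfc_soft_wwnn
 *   echo 0x10000000c9abcdef > /sys/class/scsi_host/host0/lpfc_soft_wwpn
 */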
3087
3088 /**
3089 * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for
3090 * Optimized Access Storage (OAS) operations.
3091 * @dev: class device that is converted into a Scsi_host.
3092 * @attr: device attribute, not used.
3093 * @buf: buffer for passing information.
3094 *
3095 * Returns:
3096 * size of formatted string.
3097 **/
3098 static ssize_t
3099 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
3100 char *buf)
3101 {
3102 struct Scsi_Host *shost = class_to_shost(dev);
3103 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3104
3105 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3106 wwn_to_u64(phba->cfg_oas_tgt_wwpn));
3107 }
3108
3109 /**
3110 * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for
3111 * Optimized Access Storage (OAS) operations.
3112 * @dev: class device that is converted into a Scsi_host.
3113 * @attr: device attribute, not used.
3114 * @buf: buffer for passing information.
3115 * @count: Size of the data buffer.
3116 *
3117 * Returns:
3118 * -EINVAL count is invalid or a wwpn byte is invalid
3119 * -EPERM oas is not supported by hba
3120 * value of count on success
3121 **/
3122 static ssize_t
3123 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
3124 const char *buf, size_t count)
3125 {
3126 struct Scsi_Host *shost = class_to_shost(dev);
3127 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3128 unsigned int cnt = count;
3129 uint8_t wwpn[WWN_SZ];
3130 int rc;
3131
3132 if (!phba->cfg_fof)
3133 return -EPERM;
3134
3135 /* count may include a LF at end of string */
3136 if (buf[cnt-1] == '\n')
3137 cnt--;
3138
3139 rc = lpfc_wwn_set(buf, cnt, wwpn);
3140 if (rc)
3141 return rc;
3142
3143 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3144 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3145 if (wwn_to_u64(wwpn) == 0)
3146 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
3147 else
3148 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
3149 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3150 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3151 return count;
3152 }
3153 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
3154 lpfc_oas_tgt_show, lpfc_oas_tgt_store);
3155
3156 /**
3157 * lpfc_oas_priority_show - Return the priority used for luns enabled for
3158 * Optimized Access Storage (OAS) operations.
3159 * @dev: class device that is converted into a Scsi_host.
3160 * @attr: device attribute, not used.
3161 * @buf: buffer for passing information.
3162 *
3163 * Returns:
3164 * size of formatted string.
3165 **/
3166 static ssize_t
3167 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
3168 char *buf)
3169 {
3170 struct Scsi_Host *shost = class_to_shost(dev);
3171 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3172
3173 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
3174 }
3175
3176 /**
3177 * lpfc_oas_priority_store - Store the priority to be used for luns enabled
3178 * for Optimized Access Storage (OAS) operations.
3179 * @dev: class device that is converted into a Scsi_host.
3180 * @attr: device attribute, not used.
3181 * @buf: buffer for passing information.
3182 * @count: Size of the data buffer.
3183 *
3184 * Returns:
3185 * -EINVAL count is invalid or the priority value is out of range
3186 * -EPERM oas is not supported by hba
3187 * value of count on success
3188 **/
3189 static ssize_t
3190 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
3191 const char *buf, size_t count)
3192 {
3193 struct Scsi_Host *shost = class_to_shost(dev);
3194 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3195 unsigned int cnt = count;
3196 unsigned long val;
3197 int ret;
3198
3199 if (!phba->cfg_fof)
3200 return -EPERM;
3201
3202 /* count may include a LF at end of string */
3203 if (buf[cnt-1] == '\n')
3204 cnt--;
3205
3206 ret = kstrtoul(buf, 0, &val);
3207 if (ret || (val > 0x7f))
3208 return -EINVAL;
3209
3210 if (val)
3211 phba->cfg_oas_priority = (uint8_t)val;
3212 else
3213 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3214 return count;
3215 }
3216 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
3217 lpfc_oas_priority_show, lpfc_oas_priority_store);
3218
3219 /**
3220 * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled
3221 * for Optimized Access Storage (OAS) operations.
3222 * @dev: class device that is converted into a Scsi_host.
3223 * @attr: device attribute, not used.
3224 * @buf: buffer for passing information.
3225 *
3226 * Returns:
3227 * size of formatted string.
3228 **/
3229 static ssize_t
3230 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
3231 char *buf)
3232 {
3233 struct Scsi_Host *shost = class_to_shost(dev);
3234 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3235
3236 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3237 wwn_to_u64(phba->cfg_oas_vpt_wwpn));
3238 }
3239
3240 /**
3241 * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled
3242 * for Optimized Access Storage (OAS) operations.
3243 * @dev: class device that is converted into a Scsi_host.
3244 * @attr: device attribute, not used.
3245 * @buf: buffer for passing information.
3246 * @count: Size of the data buffer.
3247 *
3248 * Returns:
3249 * -EINVAL count is invalid or a wwpn byte is invalid
3250 * -EPERM oas is not supported by hba
3251 * value of count on success
3252 **/
3253 static ssize_t
3254 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
3255 const char *buf, size_t count)
3256 {
3257 struct Scsi_Host *shost = class_to_shost(dev);
3258 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3259 unsigned int cnt = count;
3260 uint8_t wwpn[WWN_SZ];
3261 int rc;
3262
3263 if (!phba->cfg_fof)
3264 return -EPERM;
3265
3266 /* count may include a LF at end of string */
3267 if (buf[cnt-1] == '\n')
3268 cnt--;
3269
3270 rc = lpfc_wwn_set(buf, cnt, wwpn);
3271 if (rc)
3272 return rc;
3273
3274 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3275 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3276 if (wwn_to_u64(wwpn) == 0)
3277 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
3278 else
3279 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
3280 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3281 if (phba->cfg_oas_priority == 0)
3282 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3283 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3284 return count;
3285 }
3286 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
3287 lpfc_oas_vpt_show, lpfc_oas_vpt_store);
3288
3289 /**
3290 * lpfc_oas_lun_state_show - Return the current state (enabled or disabled)
3291 * that will be applied to luns for
3292 * Optimized Access Storage (OAS) operations.
3293 * @dev: class device that is converted into a Scsi_host.
3294 * @attr: device attribute, not used.
3295 * @buf: buffer for passing information.
3296 *
3297 * Returns:
3298 * size of formatted string.
3299 **/
3300 static ssize_t
3301 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
3302 char *buf)
3303 {
3304 struct Scsi_Host *shost = class_to_shost(dev);
3305 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3306
3307 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
3308 }
3309
3310 /**
3311 * lpfc_oas_lun_state_store - Store the state (enabled or disabled)
3312 * that will be applied to luns for
3313 * Optimized Access Storage (OAS) operations.
3314 * @dev: class device that is converted into a Scsi_host.
3315 * @attr: device attribute, not used.
3316 * @buf: buffer for passing information.
3317 * @count: Size of the data buffer.
3318 *
3319 * Returns:
3320 * -EINVAL the value is not 0 or 1, or cannot be converted
3321 * -EPERM oas is not supported by hba
3322 * value of count on success
3323 **/
3324 static ssize_t
3325 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
3326 const char *buf, size_t count)
3327 {
3328 struct Scsi_Host *shost = class_to_shost(dev);
3329 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3330 int val = 0;
3331
3332 if (!phba->cfg_fof)
3333 return -EPERM;
3334
3335 if (!isdigit(buf[0]))
3336 return -EINVAL;
3337
3338 if (sscanf(buf, "%i", &val) != 1)
3339 return -EINVAL;
3340
3341 if ((val != 0) && (val != 1))
3342 return -EINVAL;
3343
3344 phba->cfg_oas_lun_state = val;
3345 return strlen(buf);
3346 }
3347 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
3348 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
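
/*
 * Illustrative usage sketch: lpfc_xlane_lun_state only latches the desired
 * state (0 = disable, 1 = enable) into cfg_oas_lun_state; it is applied
 * when a LUN is subsequently written to lpfc_xlane_lun.  "hostN" is a
 * placeholder.
 *
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_xlane_lun_state
 */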
3349
3350 /**
3351 * lpfc_oas_lun_status_show - Return the status of the Optimized Access
3352 * Storage (OAS) lun returned by the
3353 * lpfc_oas_lun_show function.
3354 * @dev: class device that is converted into a Scsi_host.
3355 * @attr: device attribute, not used.
3356 * @buf: buffer for passing information.
3357 *
3358 * Returns:
3359 * size of formatted string.
3360 **/
3361 static ssize_t
3362 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
3363 char *buf)
3364 {
3365 struct Scsi_Host *shost = class_to_shost(dev);
3366 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3367
3368 if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
3369 return -EFAULT;
3370
3371 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
3372 }
3373 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
3374 lpfc_oas_lun_status_show, NULL);
3375
3376
3377 /**
3378 * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage
3379 * (OAS) operations.
3380 * @phba: lpfc_hba pointer.
3381 * @vpt_wwpn: wwpn of the vport associated with the returned lun
3382 * @tgt_wwpn: wwpn of the target associated with the returned lun
3383 * @lun: the fc lun for setting oas state.
3384 * @oas_state: the oas state to be set to the lun.
3385 * @pri: priority
3386 *
3387 * Returns:
3388 * SUCCESS : 0
3389 * -EPERM OAS is not enabled or not supported by this port.
3390 *
3391 */
3392 static size_t
3393 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3394 uint8_t tgt_wwpn[], uint64_t lun,
3395 uint32_t oas_state, uint8_t pri)
3396 {
3397
3398 int rc = 0;
3399
3400 if (!phba->cfg_fof)
3401 return -EPERM;
3402
3403 if (oas_state) {
3404 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3405 (struct lpfc_name *)tgt_wwpn,
3406 lun, pri))
3407 rc = -ENOMEM;
3408 } else {
3409 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3410 (struct lpfc_name *)tgt_wwpn, lun, pri);
3411 }
3412 return rc;
3413
3414 }
3415
3416 /**
3417 * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized
3418 * Access Storage (OAS) operations.
3419 * @phba: lpfc_hba pointer.
3420 * @vpt_wwpn: wwpn of the vport associated with the returned lun
3421 * @tgt_wwpn: wwpn of the target associated with the returned lun
3422 * @lun_status: status of the returned lun
3423 * @lun_pri: priority of the returned lun
3424 *
3425 * Returns the first or next lun enabled for OAS operations for the vport/target
3426 * specified. If a lun is found, its vport wwpn, target wwpn and status are
3427 * returned. If the lun is not found, NOT_OAS_ENABLED_LUN is returned.
3428 *
3429 * Return:
3430 * lun that is OAS enabled for the vport/target
3431 * NOT_OAS_ENABLED_LUN when no oas enabled lun found.
3432 */
3433 static uint64_t
3434 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3435 uint8_t tgt_wwpn[], uint32_t *lun_status,
3436 uint32_t *lun_pri)
3437 {
3438 uint64_t found_lun;
3439
3440 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
3441 return NOT_OAS_ENABLED_LUN;
3442 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
3443 phba->sli4_hba.oas_next_vpt_wwpn,
3444 (struct lpfc_name *)
3445 phba->sli4_hba.oas_next_tgt_wwpn,
3446 &phba->sli4_hba.oas_next_lun,
3447 (struct lpfc_name *)vpt_wwpn,
3448 (struct lpfc_name *)tgt_wwpn,
3449 &found_lun, lun_status, lun_pri))
3450 return found_lun;
3451 else
3452 return NOT_OAS_ENABLED_LUN;
3453 }
3454
3455 /**
3456 * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations
3457 * @phba: lpfc_hba pointer.
3458 * @vpt_wwpn: vport wwpn by reference.
3459 * @tgt_wwpn: target wwpn by reference.
3460 * @lun: the fc lun for setting oas state.
3461 * @oas_state: the oas state to be set to the oas_lun.
3462 * @pri: priority
3463 *
3464 * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE)
3465 * a lun for OAS operations.
3466 *
3467 * Return:
3468 * SUCCESS: 0
3469 * -ENOMEM: failed to enable a lun for OAS operations
3470 * -EPERM: OAS is not enabled
3471 */
3472 static ssize_t
3473 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3474 uint8_t tgt_wwpn[], uint64_t lun,
3475 uint32_t oas_state, uint8_t pri)
3476 {
3477
3478 int rc;
3479
3480 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
3481 oas_state, pri);
3482 return rc;
3483 }
3484
3485 /**
3486 * lpfc_oas_lun_show - Return oas enabled luns from a chosen target
3487 * @dev: class device that is converted into a Scsi_host.
3488 * @attr: device attribute, not used.
3489 * @buf: buffer for passing information.
3490 *
3491 * This routine returns a lun enabled for OAS each time the function
3492 * is called.
3493 *
3494 * Returns:
3495 * SUCCESS: size of formatted string.
3496 * -EFAULT: target or vport wwpn was not set properly.
3497 * -EPERM: oas is not enabled.
3498 **/
3499 static ssize_t
3500 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
3501 char *buf)
3502 {
3503 struct Scsi_Host *shost = class_to_shost(dev);
3504 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3505
3506 uint64_t oas_lun;
3507 int len = 0;
3508
3509 if (!phba->cfg_fof)
3510 return -EPERM;
3511
3512 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3513 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
3514 return -EFAULT;
3515
3516 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3517 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
3518 return -EFAULT;
3519
3520 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
3521 phba->cfg_oas_tgt_wwpn,
3522 &phba->cfg_oas_lun_status,
3523 &phba->cfg_oas_priority);
3524 if (oas_lun != NOT_OAS_ENABLED_LUN)
3525 phba->cfg_oas_flags |= OAS_LUN_VALID;
3526
3527 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
3528
3529 return len;
3530 }
3531
3532 /**
3533 * lpfc_oas_lun_store - Sets the OAS state for lun
3534 * @dev: class device that is converted into a Scsi_host.
3535 * @attr: device attribute, not used.
3536 * @buf: buffer for passing information.
3537 * @count: size of the formatting string
3538 *
3539 * This function sets the OAS state for lun. Before this function is called,
3540 * the vport wwpn, target wwpn, and oas state need to be set.
3541 *
3542 * Returns:
3543 * SUCCESS: size of formatted string.
3544 * -EFAULT: target or vport wwpn was not set properly.
3545 * -EPERM: oas is not enabled.
3547 **/
3548 static ssize_t
3549 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
3550 const char *buf, size_t count)
3551 {
3552 struct Scsi_Host *shost = class_to_shost(dev);
3553 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3554 uint64_t scsi_lun;
3555 uint32_t pri;
3556 ssize_t rc;
3557
3558 if (!phba->cfg_fof)
3559 return -EPERM;
3560
3561 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3562 return -EFAULT;
3563
3564 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3565 return -EFAULT;
3566
3567 if (!isdigit(buf[0]))
3568 return -EINVAL;
3569
3570 if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
3571 return -EINVAL;
3572
3573 pri = phba->cfg_oas_priority;
3574 if (pri == 0)
3575 pri = phba->cfg_XLanePriority;
3576
3577 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3578 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
3579 "priority 0x%x with oas state %d\n",
3580 wwn_to_u64(phba->cfg_oas_vpt_wwpn),
3581 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
3582 pri, phba->cfg_oas_lun_state);
3583
3584 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
3585 phba->cfg_oas_tgt_wwpn, scsi_lun,
3586 phba->cfg_oas_lun_state, pri);
3587 if (rc)
3588 return rc;
3589
3590 return count;
3591 }
3592 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
3593 lpfc_oas_lun_show, lpfc_oas_lun_store);
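
/*
 * Illustrative end-to-end sketch for enabling OAS on one LUN through sysfs.
 * "hostN", the WWPNs and the LUN are placeholders; the target WWPN
 * attribute (lpfc_xlane_tgt) is defined earlier in this file.
 *
 *   echo 0x10000090fa942779 > /sys/class/scsi_host/hostN/lpfc_xlane_vpt
 *   echo 0x5005076801401234 > /sys/class/scsi_host/hostN/lpfc_xlane_tgt
 *   echo 1   > /sys/class/scsi_host/hostN/lpfc_xlane_lun_state
 *   echo 0x0 > /sys/class/scsi_host/hostN/lpfc_xlane_lun
 *
 * Reading lpfc_xlane_lun afterwards iterates the OAS-enabled LUNs, and
 * lpfc_xlane_lun_status then reports the status of the LUN just returned.
 */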
3594
3595 int lpfc_enable_nvmet_cnt;
3596 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
3597 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3598 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3599 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
3600 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
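
/*
 * Illustrative usage sketch: lpfc_enable_nvmet is a load-time array
 * parameter (mode 0444), so it is normally supplied on the modprobe
 * command line or via modprobe.d; each entry names a local port WWPN to
 * run in NVME Target mode.  The WWPNs below are placeholders.
 *
 *   modprobe lpfc lpfc_enable_nvmet=0x10000090fa942779,0x10000090fa94277a
 */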
3601
3602 static int lpfc_poll = 0;
3603 module_param(lpfc_poll, int, S_IRUGO);
3604 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
3605 " 0 - none,"
3606 " 1 - poll with interrupts enabled,"
3607 " 3 - poll and disable FCP ring interrupts");
3608
3609 static DEVICE_ATTR_RW(lpfc_poll);
3610
3611 int lpfc_no_hba_reset_cnt;
3612 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3613 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3614 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3615 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
3616
3617 LPFC_ATTR(sli_mode, 3, 3, 3,
3618 "SLI mode selector: 3 - select SLI-3");
3619
3620 LPFC_ATTR_R(enable_npiv, 1, 0, 1,
3621 "Enable NPIV functionality");
3622
3623 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
3624 "FCF Fast failover=1 Priority failover=2");
3625
3626 /*
3627 * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of
3628 * aborted IO.
3629 * The range is [0,1]. Default value is 0
3630 * 0, IO completes after ABTS issued (default).
3631 * 1, IO completes after receipt of ABTS response or timeout.
3632 */
3633 LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion");
3634
3635 /*
3636 # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
3637 # 0x0 = disabled, XRI/OXID use not tracked.
3638 # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
3639 # 0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
3640 */
3641 LPFC_ATTR_R(enable_rrq, 2, 0, 2,
3642 "Enable RRQ functionality");
3643
3644 /*
3645 # lpfc_suppress_link_up: Bring link up at initialization
3646 # 0x0 = bring link up (issue MBX_INIT_LINK)
3647 # 0x1 = do NOT bring link up at initialization (MBX_INIT_LINK)
3648 # 0x2 = never bring up link
3649 # Default value is 0.
3650 */
3651 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
3652 LPFC_DELAY_INIT_LINK_INDEFINITELY,
3653 "Suppress Link Up at initialization");
3654
3655 static ssize_t
3656 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
3657 {
3658 struct Scsi_Host *shost = class_to_shost(dev);
3659 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3660
3661 return scnprintf(buf, PAGE_SIZE, "%d\n",
3662 phba->sli4_hba.pc_sli4_params.pls);
3663 }
3664 static DEVICE_ATTR(pls, 0444,
3665 lpfc_pls_show, NULL);
3666
3667 static ssize_t
3668 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
3669 {
3670 struct Scsi_Host *shost = class_to_shost(dev);
3671 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3672
3673 return scnprintf(buf, PAGE_SIZE, "%d\n",
3674 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
3675 }
3676 static DEVICE_ATTR(pt, 0444,
3677 lpfc_pt_show, NULL);
3678
3679 /*
3680 # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS
3681 # 1 - (1024)
3682 # 2 - (2048)
3683 # 3 - (3072)
3684 # 4 - (4096)
3685 # 5 - (5120)
3686 */
3687 static ssize_t
3688 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3689 {
3690 struct Scsi_Host *shost = class_to_shost(dev);
3691 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3692
3693 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
3694 }
3695
3696 static DEVICE_ATTR(iocb_hw, S_IRUGO,
3697 lpfc_iocb_hw_show, NULL);
3698 static ssize_t
3699 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3700 {
3701 struct Scsi_Host *shost = class_to_shost(dev);
3702 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3703 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3704
3705 return scnprintf(buf, PAGE_SIZE, "%d\n",
3706 pring ? pring->txq_max : 0);
3707 }
3708
3709 static DEVICE_ATTR(txq_hw, S_IRUGO,
3710 lpfc_txq_hw_show, NULL);
3711 static ssize_t
3712 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
3713 char *buf)
3714 {
3715 struct Scsi_Host *shost = class_to_shost(dev);
3716 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3717 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3718
3719 return scnprintf(buf, PAGE_SIZE, "%d\n",
3720 pring ? pring->txcmplq_max : 0);
3721 }
3722
3723 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
3724 lpfc_txcmplq_hw_show, NULL);
3725
3726 /*
3727 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
3728 # until the timer expires. Value range is [0,255]. Default value is 30.
3729 */
3730 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3731 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
3732 module_param(lpfc_nodev_tmo, int, 0);
3733 MODULE_PARM_DESC(lpfc_nodev_tmo,
3734 "Seconds driver will hold I/O waiting "
3735 "for a device to come back");
3736
3737 /**
3738 * lpfc_nodev_tmo_show - Return the hba dev loss timeout value
3739 * @dev: class converted to a Scsi_host structure.
3740 * @attr: device attribute, not used.
3741 * @buf: on return contains the dev loss timeout in decimal.
3742 *
3743 * Returns: size of formatted string.
3744 **/
3745 static ssize_t
3746 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
3747 char *buf)
3748 {
3749 struct Scsi_Host *shost = class_to_shost(dev);
3750 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3751
3752 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
3753 }
3754
3755 /**
3756 * lpfc_nodev_tmo_init - Set the hba nodev timeout value
3757 * @vport: lpfc vport structure pointer.
3758 * @val: contains the nodev timeout value.
3759 *
3760 * Description:
3761 * If the devloss tmo is already set then nodev tmo is set to devloss tmo,
3762 * a kernel error message is printed and zero is returned.
3763 * Else if val is in range then nodev tmo and devloss tmo are set to val.
3764 * Otherwise nodev tmo is set to the default value.
3765 *
3766 * Returns:
3767 * zero if already set or if val is in range
3768 * -EINVAL val out of range
3769 **/
3770 static int
3771 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
3772 {
3773 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
3774 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
3775 if (val != LPFC_DEF_DEVLOSS_TMO)
3776 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3777 "0407 Ignoring lpfc_nodev_tmo module "
3778 "parameter because lpfc_devloss_tmo "
3779 "is set.\n");
3780 return 0;
3781 }
3782
3783 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3784 vport->cfg_nodev_tmo = val;
3785 vport->cfg_devloss_tmo = val;
3786 return 0;
3787 }
3788 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3789 "0400 lpfc_nodev_tmo attribute cannot be set to"
3790 " %d, allowed range is [%d, %d]\n",
3791 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3792 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3793 return -EINVAL;
3794 }
3795
3796 /**
3797 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value
3798 * @vport: lpfc vport structure pointer.
3799 *
3800 * Description:
3801 * Update all the ndlp's dev loss tmo with the vport devloss tmo value.
3802 **/
3803 static void
3804 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3805 {
3806 struct Scsi_Host *shost;
3807 struct lpfc_nodelist *ndlp;
3808 #if (IS_ENABLED(CONFIG_NVME_FC))
3809 struct lpfc_nvme_rport *rport;
3810 struct nvme_fc_remote_port *remoteport = NULL;
3811 #endif
3812
3813 shost = lpfc_shost_from_vport(vport);
3814 spin_lock_irq(shost->host_lock);
3815 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3816 if (ndlp->rport)
3817 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3818 #if (IS_ENABLED(CONFIG_NVME_FC))
3819 spin_lock(&ndlp->lock);
3820 rport = lpfc_ndlp_get_nrport(ndlp);
3821 if (rport)
3822 remoteport = rport->remoteport;
3823 spin_unlock(&ndlp->lock);
3824 if (rport && remoteport)
3825 nvme_fc_set_remoteport_devloss(remoteport,
3826 vport->cfg_devloss_tmo);
3827 #endif
3828 }
3829 spin_unlock_irq(shost->host_lock);
3830 }
3831
3832 /**
3833 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
3834 * @vport: lpfc vport structure pointer.
3835 * @val: contains the tmo value.
3836 *
3837 * Description:
3838 * If the devloss tmo is already set or the vport dev loss tmo has changed
3839 * then a kernel error message is printed and zero is returned.
3840 * Else if val is in range then nodev tmo and devloss tmo are set to val.
3841 * Otherwise nodev tmo is set to the default value.
3842 *
3843 * Returns:
3844 * zero if already set or if val is in range
3845 * -EINVAL val out of range
3846 **/
3847 static int
3848 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
3849 {
3850 if (vport->dev_loss_tmo_changed ||
3851 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
3852 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3853 "0401 Ignoring change to lpfc_nodev_tmo "
3854 "because lpfc_devloss_tmo is set.\n");
3855 return 0;
3856 }
3857 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3858 vport->cfg_nodev_tmo = val;
3859 vport->cfg_devloss_tmo = val;
3860 /*
3861 * For compat: set the fc_host dev loss so new rports
3862 * will get the value.
3863 */
3864 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3865 lpfc_update_rport_devloss_tmo(vport);
3866 return 0;
3867 }
3868 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3869 "0403 lpfc_nodev_tmo attribute cannot be set to "
3870 "%d, allowed range is [%d, %d]\n",
3871 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3872 return -EINVAL;
3873 }
3874
3875 lpfc_vport_param_store(nodev_tmo)
3876
3877 static DEVICE_ATTR_RW(lpfc_nodev_tmo);
3878
3879 /*
3880 # lpfc_devloss_tmo: If set, it will hold all I/O errors on devices that
3881 # disappear until the timer expires. Value range is [0,255]. Default
3882 # value is 30.
3883 */
3884 module_param(lpfc_devloss_tmo, int, S_IRUGO);
3885 MODULE_PARM_DESC(lpfc_devloss_tmo,
3886 "Seconds driver will hold I/O waiting "
3887 "for a device to come back");
3888 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
3889 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
3890 lpfc_vport_param_show(devloss_tmo)
3891
3892 /**
3893 * lpfc_devloss_tmo_set - Sets vport nodev tmo, devloss tmo values, changed bit
3894 * @vport: lpfc vport structure pointer.
3895 * @val: contains the tmo value.
3896 *
3897 * Description:
3898 * If val is in a valid range then set the vport nodev tmo,
3899 * devloss tmo, also set the vport dev loss tmo changed flag.
3900 * Else a kernel error message is printed.
3901 *
3902 * Returns:
3903 * zero if val is in range
3904 * -EINVAL val out of range
3905 **/
3906 static int
3907 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3908 {
3909 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3910 vport->cfg_nodev_tmo = val;
3911 vport->cfg_devloss_tmo = val;
3912 vport->dev_loss_tmo_changed = 1;
3913 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3914 lpfc_update_rport_devloss_tmo(vport);
3915 return 0;
3916 }
3917
3918 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3919 "0404 lpfc_devloss_tmo attribute cannot be set to "
3920 "%d, allowed range is [%d, %d]\n",
3921 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3922 return -EINVAL;
3923 }
3924
3925 lpfc_vport_param_store(devloss_tmo)
3926 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
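
/*
 * Illustrative usage sketch: writing lpfc_devloss_tmo updates both timers
 * and sets dev_loss_tmo_changed, after which later writes to
 * lpfc_nodev_tmo are ignored (see lpfc_nodev_tmo_set() above).  "hostN"
 * is a placeholder.
 *
 *   echo 60 > /sys/class/scsi_host/hostN/lpfc_devloss_tmo
 *   cat /sys/class/scsi_host/hostN/lpfc_nodev_tmo    # now also reports 60
 */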
3927
3928 /*
3929 * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
3930 * lpfc_suppress_rsp = 0 Disable
3931 * lpfc_suppress_rsp = 1 Enable (default)
3932 *
3933 */
3934 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3935 "Enable suppress rsp feature if firmware supports it");
3936
3937 /*
3938 * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
3939 * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
3940 * lpfc_nvmet_mrq = 1 use a single RQ pair
3941 * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
3942 *
3943 */
3944 LPFC_ATTR_R(nvmet_mrq,
3945 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3946 "Specify number of RQ pairs for processing NVMET cmds");
3947
3948 /*
3949 * lpfc_nvmet_mrq_post: Specify number of RQ buffer to initially post
3950 * to each NVMET RQ. Range 64 to 2048, default is 512.
3951 */
3952 LPFC_ATTR_R(nvmet_mrq_post,
3953 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3954 LPFC_NVMET_RQE_DEF_COUNT,
3955 "Specify number of RQ buffers to initially post");
3956
3957 /*
3958 * lpfc_enable_fc4_type: Defines what FC4 types are supported.
3959 * Supported Values: 1 - register just FCP
3960 * 3 - register both FCP and NVME
3961 * Supported values are [1,3]. Default value is 3
3962 */
3963 LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
3964 LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
3965 "Enable FC4 Protocol support - FCP / NVME");
3966
3967 /*
3968 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
3969 # deluged with LOTS of information.
3970 # You can set a bit mask to record specific types of verbose messages:
3971 # See lpfc_logmsg.h for definitions.
3972 */
3973 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3974 "Verbose logging bit-mask");
3975
3976 /*
3977 # lpfc_enable_da_id: This turns on the DA_ID CT command that deregisters
3978 # objects that have been registered with the nameserver after login.
3979 */
3980 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3981 "Deregister nameserver objects before LOGO");
3982
3983 /*
3984 # lun_queue_depth: This parameter is used to limit the number of outstanding
3985 # commands per FCP LUN.
3986 */
3987 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3988 "Max number of FCP commands we can queue to a specific LUN");
3989
3990 /*
3991 # tgt_queue_depth: This parameter is used to limit the number of outstanding
3992 # commands per target port. Value range is [10,65535]. Default value is 65535.
3993 */
3994 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
3995 module_param(lpfc_tgt_queue_depth, uint, 0444);
3996 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
3997 lpfc_vport_param_show(tgt_queue_depth);
3998 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
3999 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
4000
4001 /**
4002 * lpfc_tgt_queue_depth_set: Sets an attribute value.
4003 * @vport: lpfc vport structure pointer.
4004 * @val: integer attribute value.
4005 *
4006 * Description: Sets the parameter to the new value.
4007 *
4008 * Returns:
4009 * zero on success
4010 * -EINVAL if val is invalid
4011 */
4012 static int
4013 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
4014 {
4015 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4016 struct lpfc_nodelist *ndlp;
4017
4018 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
4019 return -EINVAL;
4020
4021 if (val == vport->cfg_tgt_queue_depth)
4022 return 0;
4023
4024 spin_lock_irq(shost->host_lock);
4025 vport->cfg_tgt_queue_depth = val;
4026
4027 /* Next loop thru nodelist and change cmd_qdepth */
4028 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
4029 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4030
4031 spin_unlock_irq(shost->host_lock);
4032 return 0;
4033 }
4034
4035 lpfc_vport_param_store(tgt_queue_depth);
4036 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
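
/*
 * Illustrative usage sketch: a runtime write walks the vport node list and
 * updates each ndlp->cmd_qdepth, so the new depth also applies to targets
 * that are already discovered.  "hostN" and the value are placeholders;
 * the value must lie in [LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH].
 *
 *   echo 256 > /sys/class/scsi_host/hostN/lpfc_tgt_queue_depth
 */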
4037
4038 /*
4039 # hba_queue_depth: This parameter is used to limit the number of outstanding
4040 # commands per lpfc HBA. Value range is [32,8192]. If this parameter
4041 # value is greater than the maximum number of exchanges supported by the HBA,
4042 # then the maximum number of exchanges supported by the HBA is used to determine
4043 # the hba_queue_depth.
4044 */
4045 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
4046 "Max number of FCP commands we can queue to a lpfc HBA");
4047
4048 /*
4049 # peer_port_login: This parameter allows/prevents logins
4050 # between peer ports hosted on the same physical port.
4051 # When this parameter is set 0 peer ports of same physical port
4052 # are not allowed to login to each other.
4053 # When this parameter is set 1 peer ports of same physical port
4054 # are allowed to login to each other.
4055 # Default value of this parameter is 0.
4056 */
4057 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
4058 "Allow peer ports on the same physical port to login to each "
4059 "other.");
4060
4061 /*
4062 # restrict_login: This parameter allows/prevents logins
4063 # between Virtual Ports and remote initiators.
4064 # When this parameter is not set (0) Virtual Ports will accept PLOGIs from
4065 # other initiators and will attempt to PLOGI all remote ports.
4066 # When this parameter is set (1) Virtual Ports will reject PLOGIs from
4067 # remote ports and will not attempt to PLOGI to other initiators.
4068 # This parameter does not restrict to the physical port.
4069 # This parameter does not restrict logins to Fabric resident remote ports.
4070 # Default value of this parameter is 1.
4071 */
4072 static int lpfc_restrict_login = 1;
4073 module_param(lpfc_restrict_login, int, S_IRUGO);
4074 MODULE_PARM_DESC(lpfc_restrict_login,
4075 "Restrict virtual ports login to remote initiators.");
4076 lpfc_vport_param_show(restrict_login);
4077
4078 /**
4079 * lpfc_restrict_login_init - Set the vport restrict login flag
4080 * @vport: lpfc vport structure pointer.
4081 * @val: contains the restrict login value.
4082 *
4083 * Description:
4084 * If val is not in a valid range then log a kernel error message and set
4085 * the vport restrict login to one.
4086 * If the port type is physical clear the restrict login flag and return.
4087 * Else set the restrict login flag to val.
4088 *
4089 * Returns:
4090 * zero if val is in range
4091 * -EINVAL val out of range
4092 **/
4093 static int
4094 lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
4095 {
4096 if (val < 0 || val > 1) {
4097 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4098 "0422 lpfc_restrict_login attribute cannot "
4099 "be set to %d, allowed range is [0, 1]\n",
4100 val);
4101 vport->cfg_restrict_login = 1;
4102 return -EINVAL;
4103 }
4104 if (vport->port_type == LPFC_PHYSICAL_PORT) {
4105 vport->cfg_restrict_login = 0;
4106 return 0;
4107 }
4108 vport->cfg_restrict_login = val;
4109 return 0;
4110 }
4111
4112 /**
4113 * lpfc_restrict_login_set - Set the vport restrict login flag
4114 * @vport: lpfc vport structure pointer.
4115 * @val: contains the restrict login value.
4116 *
4117 * Description:
4118 * If val is not in a valid range then log a kernel error message and set
4119 * the vport restrict login to one.
4120 * If the port type is physical and the val is not zero log a kernel
4121 * error message, clear the restrict login flag and return zero.
4122 * Else set the restrict login flag to val.
4123 *
4124 * Returns:
4125 * zero if val is in range
4126 * -EINVAL val out of range
4127 **/
4128 static int
4129 lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
4130 {
4131 if (val < 0 || val > 1) {
4132 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4133 "0425 lpfc_restrict_login attribute cannot "
4134 "be set to %d, allowed range is [0, 1]\n",
4135 val);
4136 vport->cfg_restrict_login = 1;
4137 return -EINVAL;
4138 }
4139 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
4140 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4141 "0468 lpfc_restrict_login must be 0 for "
4142 "Physical ports.\n");
4143 vport->cfg_restrict_login = 0;
4144 return 0;
4145 }
4146 vport->cfg_restrict_login = val;
4147 return 0;
4148 }
4149 lpfc_vport_param_store(restrict_login);
4150 static DEVICE_ATTR_RW(lpfc_restrict_login);
4151
4152 /*
4153 # Some disk devices have a "select ID" or "select Target" capability.
4154 # From a protocol standpoint "select ID" usually means select the
4155 # Fibre channel "ALPA". In the FC-AL Profile there is an "informative
4156 # annex" which contains a table that maps a "select ID" (a number
4157 # between 0 and 7F) to an ALPA. By default, for compatibility with
4158 # older drivers, the lpfc driver scans this table from low ALPA to high
4159 # ALPA.
4160 #
4161 # Turning on the scan-down variable (on = 1, off = 0) will
4162 # cause the lpfc driver to use an inverted table, effectively
4163 # scanning ALPAs from high to low. Value range is [0,1]. Default value is 1.
4164 #
4165 # (Note: This "select ID" functionality is a LOOP ONLY characteristic
4166 # and will not work across a fabric. Also this parameter will take
4167 # effect only in the case when ALPA map is not available.)
4168 */
4169 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
4170 "Start scanning for devices from highest ALPA to lowest");
4171
4172 /*
4173 # lpfc_topology: link topology for init link
4174 # 0x0 = attempt loop mode then point-to-point
4175 # 0x01 = internal loopback mode
4176 # 0x02 = attempt point-to-point mode only
4177 # 0x04 = attempt loop mode only
4178 # 0x06 = attempt point-to-point mode then loop
4179 # Set point-to-point mode if you want to run as an N_Port.
4180 # Set loop mode if you want to run as an NL_Port. Value range is [0,0x6].
4181 # Default value is 0.
4182 */
4183 LPFC_ATTR(topology, 0, 0, 6,
4184 "Select Fibre Channel topology");
4185
4186 /**
4187 * lpfc_topology_store - Set the adapters topology field
4188 * @dev: class device that is converted into a scsi_host.
4189 * @attr:device attribute, not used.
4190 * @buf: buffer for passing information.
4191 * @count: size of the data buffer.
4192 *
4193 * Description:
4194 * If val is in a valid range then set the adapter's topology field and
4195 * issue a lip; if the lip fails reset the topology to the old value.
4196 *
4197 * If the value is not in range log a kernel error message and return an error.
4198 *
4199 * Returns:
4200 * zero if val is in range and lip okay
4201 * non-zero return value from lpfc_issue_lip()
4202 * -EINVAL val out of range
4203 **/
4204 static ssize_t
4205 lpfc_topology_store(struct device *dev, struct device_attribute *attr,
4206 const char *buf, size_t count)
4207 {
4208 struct Scsi_Host *shost = class_to_shost(dev);
4209 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4210 struct lpfc_hba *phba = vport->phba;
4211 int val = 0;
4212 int nolip = 0;
4213 const char *val_buf = buf;
4214 int err;
4215 uint32_t prev_val;
4216 u8 sli_family, if_type;
4217
4218 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4219 nolip = 1;
4220 val_buf = &buf[strlen("nolip ")];
4221 }
4222
4223 if (!isdigit(val_buf[0]))
4224 return -EINVAL;
4225 if (sscanf(val_buf, "%i", &val) != 1)
4226 return -EINVAL;
4227
4228 if (val >= 0 && val <= 6) {
4229 prev_val = phba->cfg_topology;
4230 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
4231 val == 4) {
4232 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4233 "3113 Loop mode not supported at speed %d\n",
4234 val);
4235 return -EINVAL;
4236 }
4237 /*
4238 * The 'topology' is not a configurable parameter if :
4239 * - persistent topology enabled
4240 * - ASIC_GEN_NUM >= 0xC, with no private loop support
4241 */
4242 sli_family = bf_get(lpfc_sli_intf_sli_family,
4243 &phba->sli4_hba.sli_intf);
4244 if_type = bf_get(lpfc_sli_intf_if_type,
4245 &phba->sli4_hba.sli_intf);
4246 if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
4247 (!phba->sli4_hba.pc_sli4_params.pls &&
4248 (sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
4249 if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
4250 val == 4) {
4251 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4252 "3114 Loop mode not supported\n");
4253 return -EINVAL;
4254 }
4255 phba->cfg_topology = val;
4256 if (nolip)
4257 return strlen(buf);
4258
4259 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4260 "3054 lpfc_topology changed from %d to %d\n",
4261 prev_val, val);
4262 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
4263 phba->fc_topology_changed = 1;
4264 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4265 if (err) {
4266 phba->cfg_topology = prev_val;
4267 return -EINVAL;
4268 } else
4269 return strlen(buf);
4270 }
4271 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4272 "%d:0467 lpfc_topology attribute cannot be set to %d, "
4273 "allowed range is [0, 6]\n",
4274 phba->brd_no, val);
4275 return -EINVAL;
4276 }
4277
4278 lpfc_param_show(topology)
4279 static DEVICE_ATTR_RW(lpfc_topology);
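
/*
 * Illustrative usage sketch: lpfc_topology_store() accepts an optional
 * "nolip " prefix that records the new topology without issuing a LIP.
 * "hostN" is a placeholder.
 *
 *   echo 2 > /sys/class/scsi_host/hostN/lpfc_topology          # P2P, with LIP
 *   echo "nolip 2" > /sys/class/scsi_host/hostN/lpfc_topology  # no LIP issued
 */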
4280
4281 /**
4282 * lpfc_static_vport_show: Read callback function for
4283 * lpfc_static_vport sysfs file.
4284 * @dev: Pointer to class device object.
4285 * @attr: device attribute structure.
4286 * @buf: Data buffer.
4287 *
4288 * This function is the read call back function for
4289 * lpfc_static_vport sysfs file. The lpfc_static_vport
4290 * sysfs file reports the manageability of the vport.
4291 **/
4292 static ssize_t
4293 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
4294 char *buf)
4295 {
4296 struct Scsi_Host *shost = class_to_shost(dev);
4297 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4298 if (vport->vport_flag & STATIC_VPORT)
4299 sprintf(buf, "1\n");
4300 else
4301 sprintf(buf, "0\n");
4302
4303 return strlen(buf);
4304 }
4305
4306 /*
4307 * Sysfs attribute to control the statistical data collection.
4308 */
4309 static DEVICE_ATTR_RO(lpfc_static_vport);
4310
4311 /**
4312 * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
4313 * @dev: Pointer to class device.
4314 * @attr: Unused.
4315 * @buf: Data buffer.
4316 * @count: Size of the data buffer.
4317 *
4318 * This function gets called when a user writes to the lpfc_stat_data_ctrl
4319 * sysfs file. It parses the command written to the sysfs file
4320 * and takes the appropriate action. These commands are used for
4321 * controlling driver statistical data collection.
4322 * The following are the commands this function handles.
4323 *
4324 * setbucket <bucket_type> <base> <step>
4325 * = Set the latency buckets.
4326 * destroybucket = destroy all the buckets.
4327 * start = start data collection
4328 * stop = stop data collection
4329 * reset = reset the collected data
4330 **/
4331 static ssize_t
4332 lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
4333 const char *buf, size_t count)
4334 {
4335 struct Scsi_Host *shost = class_to_shost(dev);
4336 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4337 struct lpfc_hba *phba = vport->phba;
4338 #define LPFC_MAX_DATA_CTRL_LEN 1024
4339 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
4340 unsigned long i;
4341 char *str_ptr, *token;
4342 struct lpfc_vport **vports;
4343 struct Scsi_Host *v_shost;
4344 char *bucket_type_str, *base_str, *step_str;
4345 unsigned long base, step, bucket_type;
4346
4347 if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
4348 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
4349 return -EINVAL;
4350
4351 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
4352 str_ptr = &bucket_data[0];
4353 /* Ignore this token - this is command token */
4354 token = strsep(&str_ptr, "\t ");
4355 if (!token)
4356 return -EINVAL;
4357
4358 bucket_type_str = strsep(&str_ptr, "\t ");
4359 if (!bucket_type_str)
4360 return -EINVAL;
4361
4362 if (!strncmp(bucket_type_str, "linear", strlen("linear")))
4363 bucket_type = LPFC_LINEAR_BUCKET;
4364 else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
4365 bucket_type = LPFC_POWER2_BUCKET;
4366 else
4367 return -EINVAL;
4368
4369 base_str = strsep(&str_ptr, "\t ");
4370 if (!base_str)
4371 return -EINVAL;
4372 base = simple_strtoul(base_str, NULL, 0);
4373
4374 step_str = strsep(&str_ptr, "\t ");
4375 if (!step_str)
4376 return -EINVAL;
4377 step = simple_strtoul(step_str, NULL, 0);
4378 if (!step)
4379 return -EINVAL;
4380
4381 /* Block the data collection for every vport */
4382 vports = lpfc_create_vport_work_array(phba);
4383 if (vports == NULL)
4384 return -ENOMEM;
4385
4386 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4387 v_shost = lpfc_shost_from_vport(vports[i]);
4388 spin_lock_irq(v_shost->host_lock);
4389 /* Block and reset data collection */
4390 vports[i]->stat_data_blocked = 1;
4391 if (vports[i]->stat_data_enabled)
4392 lpfc_vport_reset_stat_data(vports[i]);
4393 spin_unlock_irq(v_shost->host_lock);
4394 }
4395
4396 /* Set the bucket attributes */
4397 phba->bucket_type = bucket_type;
4398 phba->bucket_base = base;
4399 phba->bucket_step = step;
4400
4401 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4402 v_shost = lpfc_shost_from_vport(vports[i]);
4403
4404 /* Unblock data collection */
4405 spin_lock_irq(v_shost->host_lock);
4406 vports[i]->stat_data_blocked = 0;
4407 spin_unlock_irq(v_shost->host_lock);
4408 }
4409 lpfc_destroy_vport_work_array(phba, vports);
4410 return strlen(buf);
4411 }
4412
4413 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
4414 vports = lpfc_create_vport_work_array(phba);
4415 if (vports == NULL)
4416 return -ENOMEM;
4417
4418 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4419 v_shost = lpfc_shost_from_vport(vports[i]);
4420 spin_lock_irq(v_shost->host_lock);
4421 vports[i]->stat_data_blocked = 1;
4422 lpfc_free_bucket(vports[i]);
4423 vports[i]->stat_data_enabled = 0;
4424 vports[i]->stat_data_blocked = 0;
4425 spin_unlock_irq(v_shost->host_lock);
4426 }
4427 lpfc_destroy_vport_work_array(phba, vports);
4428 phba->bucket_type = LPFC_NO_BUCKET;
4429 phba->bucket_base = 0;
4430 phba->bucket_step = 0;
4431 return strlen(buf);
4432 }
4433
4434 if (!strncmp(buf, "start", strlen("start"))) {
4435 /* If no buckets configured return error */
4436 if (phba->bucket_type == LPFC_NO_BUCKET)
4437 return -EINVAL;
4438 spin_lock_irq(shost->host_lock);
4439 if (vport->stat_data_enabled) {
4440 spin_unlock_irq(shost->host_lock);
4441 return strlen(buf);
4442 }
4443 lpfc_alloc_bucket(vport);
4444 vport->stat_data_enabled = 1;
4445 spin_unlock_irq(shost->host_lock);
4446 return strlen(buf);
4447 }
4448
4449 if (!strncmp(buf, "stop", strlen("stop"))) {
4450 spin_lock_irq(shost->host_lock);
4451 if (vport->stat_data_enabled == 0) {
4452 spin_unlock_irq(shost->host_lock);
4453 return strlen(buf);
4454 }
4455 lpfc_free_bucket(vport);
4456 vport->stat_data_enabled = 0;
4457 spin_unlock_irq(shost->host_lock);
4458 return strlen(buf);
4459 }
4460
4461 if (!strncmp(buf, "reset", strlen("reset"))) {
4462 if ((phba->bucket_type == LPFC_NO_BUCKET)
4463 || !vport->stat_data_enabled)
4464 return strlen(buf);
4465 spin_lock_irq(shost->host_lock);
4466 vport->stat_data_blocked = 1;
4467 lpfc_vport_reset_stat_data(vport);
4468 vport->stat_data_blocked = 0;
4469 spin_unlock_irq(shost->host_lock);
4470 return strlen(buf);
4471 }
4472 return -EINVAL;
4473 }
4474
4475
4476 /**
4477 * lpfc_stat_data_ctrl_show - Read function for lpfc_stat_data_ctrl sysfs file
4478 * @dev: Pointer to class device.
4479 * @attr: Unused.
4480 * @buf: Data buffer.
4481 *
4482 * This function is the read call back function for
4483 * lpfc_stat_data_ctrl sysfs file. This function reports the
4484 * current statistical data collection state.
4485 **/
4486 static ssize_t
4487 lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
4488 char *buf)
4489 {
4490 struct Scsi_Host *shost = class_to_shost(dev);
4491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4492 struct lpfc_hba *phba = vport->phba;
4493 int index = 0;
4494 int i;
4495 char *bucket_type;
4496 unsigned long bucket_value;
4497
4498 switch (phba->bucket_type) {
4499 case LPFC_LINEAR_BUCKET:
4500 bucket_type = "linear";
4501 break;
4502 case LPFC_POWER2_BUCKET:
4503 bucket_type = "power2";
4504 break;
4505 default:
4506 bucket_type = "No Bucket";
4507 break;
4508 }
4509
4510 sprintf(&buf[index], "Statistical Data enabled :%d, "
4511 "blocked :%d, Bucket type :%s, Bucket base :%d,"
4512 " Bucket step :%d\nLatency Ranges :",
4513 vport->stat_data_enabled, vport->stat_data_blocked,
4514 bucket_type, phba->bucket_base, phba->bucket_step);
4515 index = strlen(buf);
4516 if (phba->bucket_type != LPFC_NO_BUCKET) {
4517 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4518 if (phba->bucket_type == LPFC_LINEAR_BUCKET)
4519 bucket_value = phba->bucket_base +
4520 phba->bucket_step * i;
4521 else
4522 bucket_value = phba->bucket_base +
4523 (1 << i) * phba->bucket_step;
4524
4525 if (index + 10 > PAGE_SIZE)
4526 break;
4527 sprintf(&buf[index], "%08ld ", bucket_value);
4528 index = strlen(buf);
4529 }
4530 }
4531 sprintf(&buf[index], "\n");
4532 return strlen(buf);
4533 }
4534
4535 /*
4536 * Sysfs attribute to control the statistical data collection.
4537 */
4538 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
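
/*
 * Illustrative usage sketch of the commands parsed by
 * lpfc_stat_data_ctrl_store(); the bucket base and step values and "hostN"
 * are placeholders.
 *
 *   A=/sys/class/scsi_host/hostN/lpfc_stat_data_ctrl
 *   echo "setbucket linear 100 100" > $A
 *   echo "start" > $A
 *   cat $A
 *   echo "reset" > $A
 *   echo "stop" > $A
 *   echo "destroybucket" > $A
 */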
4539
4540 /*
4541 * lpfc_drvr_stat_data: sysfs attr to get driver statistical data.
4542 */
4543
4544 /*
4545 * Each Bucket takes 11 characters and 1 new line + 17 bytes WWN
4546 * for each target.
4547 */
4548 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
4549 #define MAX_STAT_DATA_SIZE_PER_TARGET \
4550 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
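
/*
 * Worked example of the sizing above: each bucket prints as "%010u," (11
 * characters), and each target record starts with a 16-hex-digit WWN plus
 * ':' (17 characters) and ends with '\n', giving NUM_BUCKETS * 11 + 18
 * bytes per target.
 */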
4551
4552
4553 /**
4554 * sysfs_drvr_stat_data_read - Read function for lpfc_drvr_stat_data attribute
4555 * @filp: sysfs file
4556 * @kobj: Pointer to the kernel object
4557 * @bin_attr: Attribute object
4558 * @buf: Buffer pointer
4559 * @off: File offset
4560 * @count: Buffer size
4561 *
4562 * This function is the read call back function for lpfc_drvr_stat_data
4563 * sysfs file. This function exports the statistical data to user
4564 * applications.
4565 **/
4566 static ssize_t
4567 sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
4568 struct bin_attribute *bin_attr,
4569 char *buf, loff_t off, size_t count)
4570 {
4571 struct device *dev = container_of(kobj, struct device,
4572 kobj);
4573 struct Scsi_Host *shost = class_to_shost(dev);
4574 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4575 struct lpfc_hba *phba = vport->phba;
4576 int i = 0, index = 0;
4577 unsigned long nport_index;
4578 struct lpfc_nodelist *ndlp = NULL;
4579 nport_index = (unsigned long)off /
4580 MAX_STAT_DATA_SIZE_PER_TARGET;
4581
4582 if (!vport->stat_data_enabled || vport->stat_data_blocked
4583 || (phba->bucket_type == LPFC_NO_BUCKET))
4584 return 0;
4585
4586 spin_lock_irq(shost->host_lock);
4587 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4588 if (!ndlp->lat_data)
4589 continue;
4590
4591 if (nport_index > 0) {
4592 nport_index--;
4593 continue;
4594 }
4595
4596 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
4597 > count)
4598 break;
4599
4600 if (!ndlp->lat_data)
4601 continue;
4602
4603 /* Print the WWN */
4604 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
4605 ndlp->nlp_portname.u.wwn[0],
4606 ndlp->nlp_portname.u.wwn[1],
4607 ndlp->nlp_portname.u.wwn[2],
4608 ndlp->nlp_portname.u.wwn[3],
4609 ndlp->nlp_portname.u.wwn[4],
4610 ndlp->nlp_portname.u.wwn[5],
4611 ndlp->nlp_portname.u.wwn[6],
4612 ndlp->nlp_portname.u.wwn[7]);
4613
4614 index = strlen(buf);
4615
4616 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4617 sprintf(&buf[index], "%010u,",
4618 ndlp->lat_data[i].cmd_count);
4619 index = strlen(buf);
4620 }
4621 sprintf(&buf[index], "\n");
4622 index = strlen(buf);
4623 }
4624 spin_unlock_irq(shost->host_lock);
4625 return index;
4626 }
4627
4628 static struct bin_attribute sysfs_drvr_stat_data_attr = {
4629 .attr = {
4630 .name = "lpfc_drvr_stat_data",
4631 .mode = S_IRUSR,
4632 },
4633 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
4634 .read = sysfs_drvr_stat_data_read,
4635 .write = NULL,
4636 };
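
/*
 * Illustrative read sketch: userspace reads this binary attribute in fixed
 * MAX_STAT_DATA_SIZE_PER_TARGET chunks; the chunk at offset
 * N * MAX_STAT_DATA_SIZE_PER_TARGET holds the record of the Nth node that
 * has latency data, as derived from "off" in sysfs_drvr_stat_data_read().
 */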
4637
4638 /*
4639 # lpfc_link_speed: Link speed selection for initializing the Fibre Channel
4640 # connection.
4641 # Value range is [0,64]. Default value is 0.
4642 */
4643 /**
4644 * lpfc_link_speed_store - Set the adapters link speed
4645 * @dev: Pointer to class device.
4646 * @attr: Unused.
4647 * @buf: Data buffer.
4648 * @count: Size of the data buffer.
4649 *
4650 * Description:
4651 * If val is in a valid range then set the adapter's link speed field and
4652 * issue a lip; if the lip fails reset the link speed to the old value.
4653 *
4654 * Notes:
4655 * If the value is not in range log a kernel error message and return an error.
4656 *
4657 * Returns:
4658 * zero if val is in range and lip okay.
4659 * non-zero return value from lpfc_issue_lip()
4660 * -EINVAL val out of range
4661 **/
4662 static ssize_t
4663 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
4664 const char *buf, size_t count)
4665 {
4666 struct Scsi_Host *shost = class_to_shost(dev);
4667 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4668 struct lpfc_hba *phba = vport->phba;
4669 int val = LPFC_USER_LINK_SPEED_AUTO;
4670 int nolip = 0;
4671 const char *val_buf = buf;
4672 int err;
4673 uint32_t prev_val, if_type;
4674
4675 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4676 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4677 phba->hba_flag & HBA_FORCED_LINK_SPEED)
4678 return -EPERM;
4679
4680 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4681 nolip = 1;
4682 val_buf = &buf[strlen("nolip ")];
4683 }
4684
4685 if (!isdigit(val_buf[0]))
4686 return -EINVAL;
4687 if (sscanf(val_buf, "%i", &val) != 1)
4688 return -EINVAL;
4689
4690 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4691 "3055 lpfc_link_speed changed from %d to %d %s\n",
4692 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
4693
4694 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
4695 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
4696 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
4697 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
4698 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
4699 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
4700 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
4701 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
4702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4703 "2879 lpfc_link_speed attribute cannot be set "
4704 "to %d. Speed is not supported by this port.\n",
4705 val);
4706 return -EINVAL;
4707 }
4708 if (val >= LPFC_USER_LINK_SPEED_16G &&
4709 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4711 "3112 lpfc_link_speed attribute cannot be set "
4712 "to %d. Speed is not supported in loop mode.\n",
4713 val);
4714 return -EINVAL;
4715 }
4716
4717 switch (val) {
4718 case LPFC_USER_LINK_SPEED_AUTO:
4719 case LPFC_USER_LINK_SPEED_1G:
4720 case LPFC_USER_LINK_SPEED_2G:
4721 case LPFC_USER_LINK_SPEED_4G:
4722 case LPFC_USER_LINK_SPEED_8G:
4723 case LPFC_USER_LINK_SPEED_16G:
4724 case LPFC_USER_LINK_SPEED_32G:
4725 case LPFC_USER_LINK_SPEED_64G:
4726 prev_val = phba->cfg_link_speed;
4727 phba->cfg_link_speed = val;
4728 if (nolip)
4729 return strlen(buf);
4730
4731 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4732 if (err) {
4733 phba->cfg_link_speed = prev_val;
4734 return -EINVAL;
4735 }
4736 return strlen(buf);
4737 default:
4738 break;
4739 }
4740
4741 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4742 "0469 lpfc_link_speed attribute cannot be set to %d, "
4743 "allowed values are [%s]\n",
4744 val, LPFC_LINK_SPEED_STRING);
4745 return -EINVAL;
4746
4747 }
4748
4749 static int lpfc_link_speed = 0;
4750 module_param(lpfc_link_speed, int, S_IRUGO);
4751 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
4752 lpfc_param_show(link_speed)
4753
4754 /**
4755 * lpfc_link_speed_init - Set the adapters link speed
4756 * @phba: lpfc_hba pointer.
4757 * @val: link speed value.
4758 *
4759 * Description:
4760 * If val is in a valid range then set the adapter's link speed field.
4761 *
4762 * Notes:
4763 * If the value is not in range log a kernel error message, clear the link
4764 * speed and return an error.
4765 *
4766 * Returns:
4767 * zero if val saved.
4768 * -EINVAL val out of range
4769 **/
4770 static int
4771 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
4772 {
4773 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
4774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4775 "3111 lpfc_link_speed of %d cannot "
4776 "support loop mode, setting topology to default.\n",
4777 val);
4778 phba->cfg_topology = 0;
4779 }
4780
4781 switch (val) {
4782 case LPFC_USER_LINK_SPEED_AUTO:
4783 case LPFC_USER_LINK_SPEED_1G:
4784 case LPFC_USER_LINK_SPEED_2G:
4785 case LPFC_USER_LINK_SPEED_4G:
4786 case LPFC_USER_LINK_SPEED_8G:
4787 case LPFC_USER_LINK_SPEED_16G:
4788 case LPFC_USER_LINK_SPEED_32G:
4789 case LPFC_USER_LINK_SPEED_64G:
4790 phba->cfg_link_speed = val;
4791 return 0;
4792 default:
4793 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4794 "0405 lpfc_link_speed attribute cannot "
4795 "be set to %d, allowed values are "
4796 "["LPFC_LINK_SPEED_STRING"]\n", val);
4797 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
4798 return -EINVAL;
4799 }
4800 }
4801
4802 static DEVICE_ATTR_RW(lpfc_link_speed);
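
/*
 * Illustrative usage sketch: like lpfc_topology, lpfc_link_speed_store()
 * accepts an optional "nolip " prefix so the new speed is only recorded
 * and no LIP is issued.  "hostN" is a placeholder, and the value is
 * assumed to be one of the LPFC_USER_LINK_SPEED_* selectors (which track
 * the speed in Gb) supported by the port.
 *
 *   echo 16 > /sys/class/scsi_host/hostN/lpfc_link_speed
 *   echo "nolip 32" > /sys/class/scsi_host/hostN/lpfc_link_speed
 */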
4803
4804 /*
4805 # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER)
4806 # 0 = aer disabled or not supported
4807 # 1 = aer supported and enabled (default)
4808 # Value range is [0,1]. Default value is 1.
4809 */
4810 LPFC_ATTR(aer_support, 1, 0, 1,
4811 "Enable PCIe device AER support");
4812 lpfc_param_show(aer_support)
4813
4814 /**
4815 * lpfc_aer_support_store - Set the adapter for aer support
4816 *
4817 * @dev: class device that is converted into a Scsi_host.
4818 * @attr: device attribute, not used.
4819 * @buf: containing enable or disable aer flag.
4820 * @count: unused variable.
4821 *
4822 * Description:
4823 * If the val is 1 and currently the device's AER capability was not
4824 * enabled, invoke the kernel's enable AER helper routine, trying to
4825 * enable the device's AER capability. If the helper routine enabling
4826 * AER returns success, update the device's cfg_aer_support flag to
4827 * indicate AER is supported by the device; otherwise, if the device
4828 * AER capability is already enabled to support AER, then do nothing.
4829 *
4830 * If the val is 0 and currently the device's AER support was enabled,
4831 * invoke the kernel's disable AER helper routine. After that, update
4832 * the device's cfg_aer_support flag to indicate AER is not supported
4833 * by the device; otherwise, if the device AER capability is already
4834 * disabled from supporting AER, then do nothing.
4835 *
4836 * Returns:
4837 * length of the buf on success if val is in range and the intended mode
4838 * is supported.
4839 * -EINVAL if val out of range or intended mode is not supported.
4840 **/
4841 static ssize_t
4842 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
4843 const char *buf, size_t count)
4844 {
4845 struct Scsi_Host *shost = class_to_shost(dev);
4846 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4847 struct lpfc_hba *phba = vport->phba;
4848 int val = 0, rc = -EINVAL;
4849
4850 if (!isdigit(buf[0]))
4851 return -EINVAL;
4852 if (sscanf(buf, "%i", &val) != 1)
4853 return -EINVAL;
4854
4855 switch (val) {
4856 case 0:
4857 if (phba->hba_flag & HBA_AER_ENABLED) {
4858 rc = pci_disable_pcie_error_reporting(phba->pcidev);
4859 if (!rc) {
4860 spin_lock_irq(&phba->hbalock);
4861 phba->hba_flag &= ~HBA_AER_ENABLED;
4862 spin_unlock_irq(&phba->hbalock);
4863 phba->cfg_aer_support = 0;
4864 rc = strlen(buf);
4865 } else
4866 rc = -EPERM;
4867 } else {
4868 phba->cfg_aer_support = 0;
4869 rc = strlen(buf);
4870 }
4871 break;
4872 case 1:
4873 if (!(phba->hba_flag & HBA_AER_ENABLED)) {
4874 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4875 if (!rc) {
4876 spin_lock_irq(&phba->hbalock);
4877 phba->hba_flag |= HBA_AER_ENABLED;
4878 spin_unlock_irq(&phba->hbalock);
4879 phba->cfg_aer_support = 1;
4880 rc = strlen(buf);
4881 } else
4882 rc = -EPERM;
4883 } else {
4884 phba->cfg_aer_support = 1;
4885 rc = strlen(buf);
4886 }
4887 break;
4888 default:
4889 rc = -EINVAL;
4890 break;
4891 }
4892 return rc;
4893 }
4894
4895 static DEVICE_ATTR_RW(lpfc_aer_support);
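
/*
 * Illustrative usage sketch: writing 1 asks the kernel to enable PCIe AER
 * reporting for this device and writing 0 disables it; the result is also
 * mirrored in cfg_aer_support.  "hostN" is a placeholder.
 *
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_aer_support
 *   cat /sys/class/scsi_host/hostN/lpfc_aer_support
 */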
4896
4897 /**
4898 * lpfc_aer_cleanup_state - Clean up aer state to the aer enabled device
4899 * @dev: class device that is converted into a Scsi_host.
4900 * @attr: device attribute, not used.
4901 * @buf: containing flag 1 for aer cleanup state.
4902 * @count: unused variable.
4903 *
4904 * Description:
4905 * If the @buf contains 1 and the device currently has the AER support
4906 * enabled, then invokes the kernel AER helper routine
4907 * pci_aer_clear_nonfatal_status() to clean up the uncorrectable
4908 * error status register.
4909 *
4910 * Notes:
4911 *
4912 * Returns:
4913 * -EINVAL if the buf does not contain 1 or the device does not currently
4914 * have AER support enabled.
4915 **/
4916 static ssize_t
4917 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
4918 const char *buf, size_t count)
4919 {
4920 struct Scsi_Host *shost = class_to_shost(dev);
4921 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4922 struct lpfc_hba *phba = vport->phba;
4923 int val, rc = -1;
4924
4925 if (!isdigit(buf[0]))
4926 return -EINVAL;
4927 if (sscanf(buf, "%i", &val) != 1)
4928 return -EINVAL;
4929 if (val != 1)
4930 return -EINVAL;
4931
4932 if (phba->hba_flag & HBA_AER_ENABLED)
4933 rc = pci_aer_clear_nonfatal_status(phba->pcidev);
4934
4935 if (rc == 0)
4936 return strlen(buf);
4937 else
4938 return -EPERM;
4939 }
4940
4941 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
4942 lpfc_aer_cleanup_state);
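
/*
 * Illustrative usage sketch: the attribute is write-only (S_IWUSR) and only
 * the value 1 is accepted; it clears the device's non-fatal AER status
 * while AER is enabled.  "hostN" is a placeholder.
 *
 *   echo 1 > /sys/class/scsi_host/hostN/lpfc_aer_state_cleanup
 */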
4943
4944 /**
4945 * lpfc_sriov_nr_virtfn_store - Enable the adapter for sr-iov virtual functions
4946 *
4947 * @dev: class device that is converted into a Scsi_host.
4948 * @attr: device attribute, not used.
4949 * @buf: containing the string the number of vfs to be enabled.
4950 * @count: unused variable.
4951 *
4952 * Description:
4953  * When this API is called through user sysfs, the driver shall
4954 * try to enable or disable SR-IOV virtual functions according to the
4955 * following:
4956 *
4957  * If no virtual functions have been enabled on the physical function,
4958  * the driver shall invoke the pci enable virtual function api trying
4959  * to enable the virtual functions. If the nr_vfn provided is greater
4960  * than the maximum supported, -EINVAL shall be returned; otherwise,
4961  * the nr_vfn provided shall be used for invoking the api. If the api
4962  * call returns success, the actual number of virtual functions enabled
4963  * will be set in the driver's cfg_sriov_nr_virtfn; otherwise, -EPERM
4964  * shall be returned and the driver's cfg_sriov_nr_virtfn remains
4965  * zero.
4966 *
4967  * If nonzero virtual functions have already been enabled on the
4968  * physical function, as reflected by the driver's cfg_sriov_nr_virtfn,
4969  * -EEXIST will be returned and the driver does nothing;
4970  *
4971  * If the nr_vfn provided is zero and nonzero virtual functions have
4972  * been enabled, as indicated by the driver's cfg_sriov_nr_virtfn, the
4973  * disabling virtual function api shall be invoked to disable all the
4974  * virtual functions and the driver's cfg_sriov_nr_virtfn shall be set to
4975  * zero. Otherwise, if no virtual functions have been enabled, do
4976  * nothing.
4977 *
4978 * Returns:
4979  * length of the buf on success if val is in range and the intended mode
4980  * is supported.
4981  * -EINVAL if val is out of range or the intended mode is not supported.
4982 **/
4983 static ssize_t
4984 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4985 const char *buf, size_t count)
4986 {
4987 struct Scsi_Host *shost = class_to_shost(dev);
4988 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4989 struct lpfc_hba *phba = vport->phba;
4990 struct pci_dev *pdev = phba->pcidev;
4991 int val = 0, rc = -EINVAL;
4992
4993 /* Sanity check on user data */
4994 if (!isdigit(buf[0]))
4995 return -EINVAL;
4996 if (sscanf(buf, "%i", &val) != 1)
4997 return -EINVAL;
4998 if (val < 0)
4999 return -EINVAL;
5000
5001 /* Request disabling virtual functions */
5002 if (val == 0) {
5003 if (phba->cfg_sriov_nr_virtfn > 0) {
5004 pci_disable_sriov(pdev);
5005 phba->cfg_sriov_nr_virtfn = 0;
5006 }
5007 return strlen(buf);
5008 }
5009
5010 /* Request enabling virtual functions */
5011 if (phba->cfg_sriov_nr_virtfn > 0) {
5012 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5013 "3018 There are %d virtual functions "
5014 "enabled on physical function.\n",
5015 phba->cfg_sriov_nr_virtfn);
5016 return -EEXIST;
5017 }
5018
5019 if (val <= LPFC_MAX_VFN_PER_PFN)
5020 phba->cfg_sriov_nr_virtfn = val;
5021 else {
5022 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5023 "3019 Enabling %d virtual functions is not "
5024 "allowed.\n", val);
5025 return -EINVAL;
5026 }
5027
5028 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
5029 if (rc) {
5030 phba->cfg_sriov_nr_virtfn = 0;
5031 rc = -EPERM;
5032 } else
5033 rc = strlen(buf);
5034
5035 return rc;
5036 }
5037
5038 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
5039 "Enable PCIe device SR-IOV virtual fn");
5040
5041 lpfc_param_show(sriov_nr_virtfn)
5042 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
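/*
 * Hedged usage sketch (illustrative, host number and VF count assumed):
 * enable SR-IOV virtual functions, then disable them again. A write fails
 * with -EEXIST if VFs are already enabled and with -EINVAL if the count
 * exceeds LPFC_MAX_VFN_PER_PFN.
 *
 *   echo 2 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn   # enable 2 VFs
 *   echo 0 > /sys/class/scsi_host/host0/lpfc_sriov_nr_virtfn   # disable VFs
 */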
5043
5044 /**
5045 * lpfc_request_firmware_upgrade_store - Request for Linux generic firmware upgrade
5046 *
5047 * @dev: class device that is converted into a Scsi_host.
5048 * @attr: device attribute, not used.
5049  * @buf: string containing the flag 1 to request a firmware upgrade.
5050 * @count: unused variable.
5051 *
5052  * Description:
5053  * If the @buf contains 1, request a Linux generic firmware upgrade.
5054  *
5055  * Returns:
5056  * length of the buf on success if the upgrade request was accepted.
5057  * -EINVAL if the input is invalid; -EPERM if the request failed.
5058 **/
5059 static ssize_t
5060 lpfc_request_firmware_upgrade_store(struct device *dev,
5061 struct device_attribute *attr,
5062 const char *buf, size_t count)
5063 {
5064 struct Scsi_Host *shost = class_to_shost(dev);
5065 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5066 struct lpfc_hba *phba = vport->phba;
5067 int val = 0, rc;
5068
5069 /* Sanity check on user data */
5070 if (!isdigit(buf[0]))
5071 return -EINVAL;
5072 if (sscanf(buf, "%i", &val) != 1)
5073 return -EINVAL;
5074 if (val != 1)
5075 return -EINVAL;
5076
5077 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
5078 if (rc)
5079 rc = -EPERM;
5080 else
5081 rc = strlen(buf);
5082 return rc;
5083 }
5084
5085 static int lpfc_req_fw_upgrade;
5086 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
5087 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
5088 lpfc_param_show(request_firmware_upgrade)
5089
5090 /**
5091 * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
5092 * @phba: lpfc_hba pointer.
5093 * @val: 0 or 1.
5094 *
5095 * Description:
5096 * Set the initial Linux generic firmware upgrade enable or disable flag.
5097 *
5098 * Returns:
5099 * zero if val saved.
5100 * -EINVAL val out of range
5101 **/
5102 static int
5103 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
5104 {
5105 if (val >= 0 && val <= 1) {
5106 phba->cfg_request_firmware_upgrade = val;
5107 return 0;
5108 }
5109 return -EINVAL;
5110 }
5111 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
5112 lpfc_request_firmware_upgrade_show,
5113 lpfc_request_firmware_upgrade_store);
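/*
 * Hedged usage sketch (illustrative, host number assumed): writing 1 asks the
 * driver to run the Linux generic firmware upgrade on an SLI4 adapter; any
 * other value is rejected with -EINVAL and a failed request returns -EPERM.
 *
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_req_fw_upgrade
 */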
5114
5115 /**
5116 * lpfc_force_rscn_store
5117 *
5118 * @dev: class device that is converted into a Scsi_host.
5119 * @attr: device attribute, not used.
5120 * @buf: unused string
5121 * @count: unused variable.
5122 *
5123 * Description:
5124  * Force the switch to send an RSCN to all other NPorts in our zone.
5125  * If we are direct connect pt2pt, build the RSCN command ourselves
5126 * and send to the other NPort. Not supported for private loop.
5127 *
5128 * Returns:
5129 * 0 - on success
5130 * -EIO - if command is not sent
5131 **/
5132 static ssize_t
5133 lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
5134 const char *buf, size_t count)
5135 {
5136 struct Scsi_Host *shost = class_to_shost(dev);
5137 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5138 int i;
5139
5140 i = lpfc_issue_els_rscn(vport, 0);
5141 if (i)
5142 return -EIO;
5143 return strlen(buf);
5144 }
5145
5146 /*
5147 * lpfc_force_rscn: Force an RSCN to be sent to all remote NPorts
5148 * connected to the HBA.
5149 *
5150 * Value range is any ascii value
5151 */
5152 static int lpfc_force_rscn;
5153 module_param(lpfc_force_rscn, int, 0644);
5154 MODULE_PARM_DESC(lpfc_force_rscn,
5155 "Force an RSCN to be sent to all remote NPorts");
5156 lpfc_param_show(force_rscn)
5157
5158 /**
5159 * lpfc_force_rscn_init - Force an RSCN to be sent to all remote NPorts
5160 * @phba: lpfc_hba pointer.
5161 * @val: unused value.
5162 *
5163 * Returns:
5164 * zero if val saved.
5165 **/
5166 static int
5167 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
5168 {
5169 return 0;
5170 }
5171 static DEVICE_ATTR_RW(lpfc_force_rscn);
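/*
 * Hedged usage sketch (illustrative, host number assumed): the value written
 * is not interpreted by lpfc_force_rscn_store(); any write triggers the RSCN
 * (or, for direct connect pt2pt, the driver builds and sends it itself).
 *
 *   echo 1 > /sys/class/scsi_host/host0/lpfc_force_rscn
 */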
5172
5173 /**
5174 * lpfc_fcp_imax_store
5175 *
5176 * @dev: class device that is converted into a Scsi_host.
5177 * @attr: device attribute, not used.
5178 * @buf: string with the number of fast-path FCP interrupts per second.
5179 * @count: unused variable.
5180 *
5181 * Description:
5182  * If val is 0 or in the valid range [5000,5000000], then set the adapter's
5183 * maximum number of fast-path FCP interrupts per second.
5184 *
5185 * Returns:
5186  * length of the buf on success if val is in range and the intended mode
5187  * is supported.
5188  * -EINVAL if val is out of range or the intended mode is not supported.
5189 **/
5190 static ssize_t
5191 lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
5192 const char *buf, size_t count)
5193 {
5194 struct Scsi_Host *shost = class_to_shost(dev);
5195 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5196 struct lpfc_hba *phba = vport->phba;
5197 struct lpfc_eq_intr_info *eqi;
5198 uint32_t usdelay;
5199 int val = 0, i;
5200
5201 /* fcp_imax is only valid for SLI4 */
5202 if (phba->sli_rev != LPFC_SLI_REV4)
5203 return -EINVAL;
5204
5205 /* Sanity check on user data */
5206 if (!isdigit(buf[0]))
5207 return -EINVAL;
5208 if (sscanf(buf, "%i", &val) != 1)
5209 return -EINVAL;
5210
5211 /*
5212 * Value range for the HBA is [5000,5000000]
5213 * The value for each EQ depends on how many EQs are configured.
5214 * Allow value == 0
5215 */
5216 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5217 return -EINVAL;
5218
5219 phba->cfg_auto_imax = (val) ? 0 : 1;
5220 if (phba->cfg_fcp_imax && !val) {
5221 queue_delayed_work(phba->wq, &phba->eq_delay_work,
5222 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5223
5224 for_each_present_cpu(i) {
5225 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5226 eqi->icnt = 0;
5227 }
5228 }
5229
5230 phba->cfg_fcp_imax = (uint32_t)val;
5231
5232 if (phba->cfg_fcp_imax)
5233 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5234 else
5235 usdelay = 0;
5236
5237 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5238 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5239 usdelay);
5240
5241 return strlen(buf);
5242 }
5243
5244 /*
5245 # lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
5246 # for the HBA.
5247 #
5248 # Value range is [5,000 to 5,000,000]. Default value is 50,000.
5249 */
5250 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5251 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5252 MODULE_PARM_DESC(lpfc_fcp_imax,
5253 "Set the maximum number of FCP interrupts per second per HBA");
5254 lpfc_param_show(fcp_imax)
5255
5256 /**
5257  * lpfc_fcp_imax_init - Set the initial fcp_imax value
5258  * @phba: lpfc_hba pointer.
5259  * @val: fast-path FCP interrupts per second.
5260  *
5261  * Description:
5262  * If val is 0 or in the valid range [5000,5000000], then initialize the
5263  * adapter's maximum number of fast-path FCP interrupts per second.
5264 *
5265 * Returns:
5266 * zero if val saved.
5267 * -EINVAL val out of range
5268 **/
5269 static int
5270 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
5271 {
5272 if (phba->sli_rev != LPFC_SLI_REV4) {
5273 phba->cfg_fcp_imax = 0;
5274 return 0;
5275 }
5276
5277 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
5278 (val == 0)) {
5279 phba->cfg_fcp_imax = val;
5280 return 0;
5281 }
5282
5283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5284 "3016 lpfc_fcp_imax: %d out of range, using default\n",
5285 val);
5286 phba->cfg_fcp_imax = LPFC_DEF_IMAX;
5287
5288 return 0;
5289 }
5290
5291 static DEVICE_ATTR_RW(lpfc_fcp_imax);
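/*
 * Hedged usage sketch (illustrative, host number assumed, SLI4 only): write 0
 * to return to automatic EQ delay management, or a value in [5000,5000000]
 * interrupts per second to fix the EQ coalescing rate.
 *
 *   echo 0     > /sys/class/scsi_host/host0/lpfc_fcp_imax   # automatic mode
 *   echo 50000 > /sys/class/scsi_host/host0/lpfc_fcp_imax   # ~50k ints/sec
 */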
5292
5293 /**
5294 * lpfc_cq_max_proc_limit_store
5295 *
5296 * @dev: class device that is converted into a Scsi_host.
5297 * @attr: device attribute, not used.
5298 * @buf: string with the cq max processing limit of cqes
5299 * @count: unused variable.
5300 *
5301 * Description:
5302 * If val is in a valid range, then set value on each cq
5303 *
5304 * Returns:
5305 * The length of the buf: if successful
5306 * -ERANGE: if val is not in the valid range
5307 * -EINVAL: if bad value format or intended mode is not supported.
5308 **/
5309 static ssize_t
5310 lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
5311 const char *buf, size_t count)
5312 {
5313 struct Scsi_Host *shost = class_to_shost(dev);
5314 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5315 struct lpfc_hba *phba = vport->phba;
5316 struct lpfc_queue *eq, *cq;
5317 unsigned long val;
5318 int i;
5319
5320 /* cq_max_proc_limit is only valid for SLI4 */
5321 if (phba->sli_rev != LPFC_SLI_REV4)
5322 return -EINVAL;
5323
5324 /* Sanity check on user data */
5325 if (!isdigit(buf[0]))
5326 return -EINVAL;
5327 if (kstrtoul(buf, 0, &val))
5328 return -EINVAL;
5329
5330 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
5331 return -ERANGE;
5332
5333 phba->cfg_cq_max_proc_limit = (uint32_t)val;
5334
5335 /* set the values on the cq's */
5336 for (i = 0; i < phba->cfg_irq_chann; i++) {
5337 /* Get the EQ corresponding to the IRQ vector */
5338 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
5339 if (!eq)
5340 continue;
5341
5342 list_for_each_entry(cq, &eq->child_list, list)
5343 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
5344 cq->entry_count);
5345 }
5346
5347 return strlen(buf);
5348 }
5349
5350 /*
5351  * lpfc_cq_max_proc_limit: The maximum number of CQE entries processed in an
5352  * iteration of CQ processing.
5353 */
5354 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5355 module_param(lpfc_cq_max_proc_limit, int, 0644);
5356 MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
5357 "Set the maximum number CQEs processed in an iteration of "
5358 "CQ processing");
5359 lpfc_param_show(cq_max_proc_limit)
5360
5361 /*
5362 * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a
5363 * single handler call which should request a polled completion rather
5364 * than re-enabling interrupts.
5365 */
5366 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
5367 LPFC_CQ_MIN_THRESHOLD_TO_POLL,
5368 LPFC_CQ_MAX_THRESHOLD_TO_POLL,
5369 "CQE Processing Threshold to enable Polling");
5370
5371 /**
5372 * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit
5373 * @phba: lpfc_hba pointer.
5374 * @val: entry limit
5375 *
5376 * Description:
5377 * If val is in a valid range, then initialize the adapter's maximum
5378 * value.
5379 *
5380 * Returns:
5381  * Always returns 0 for success, even if the value was not set to the
5382  * requested value. If the value is out of range or not supported, the
5383  * driver falls back to the default.
5384 **/
5385 static int
5386 lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
5387 {
5388 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5389
5390 if (phba->sli_rev != LPFC_SLI_REV4)
5391 return 0;
5392
5393 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
5394 phba->cfg_cq_max_proc_limit = val;
5395 return 0;
5396 }
5397
5398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5399 "0371 lpfc_cq_max_proc_limit: %d out of range, using "
5400 "default\n",
5401 			val);
5402
5403 return 0;
5404 }
5405
5406 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
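/*
 * Hedged usage sketch (illustrative, host number and value assumed, SLI4
 * only): adjust how many CQEs each CQ is allowed to process per invocation;
 * the new limit is applied to every child CQ of every EQ.
 *
 *   echo 128 > /sys/class/scsi_host/host0/lpfc_cq_max_proc_limit
 */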
5407
5408 /**
5409 * lpfc_fcp_cpu_map_show - Display current driver CPU affinity
5410 * @dev: class converted to a Scsi_host structure.
5411 * @attr: device attribute, not used.
5412 * @buf: on return contains text describing the state of the link.
5413 *
5414 * Returns: size of formatted string.
5415 **/
5416 static ssize_t
5417 lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
5418 char *buf)
5419 {
5420 struct Scsi_Host *shost = class_to_shost(dev);
5421 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5422 struct lpfc_hba *phba = vport->phba;
5423 struct lpfc_vector_map_info *cpup;
5424 int len = 0;
5425
5426 if ((phba->sli_rev != LPFC_SLI_REV4) ||
5427 (phba->intr_type != MSIX))
5428 return len;
5429
5430 switch (phba->cfg_fcp_cpu_map) {
5431 case 0:
5432 len += scnprintf(buf + len, PAGE_SIZE-len,
5433 "fcp_cpu_map: No mapping (%d)\n",
5434 phba->cfg_fcp_cpu_map);
5435 return len;
5436 case 1:
5437 len += scnprintf(buf + len, PAGE_SIZE-len,
5438 "fcp_cpu_map: HBA centric mapping (%d): "
5439 "%d of %d CPUs online from %d possible CPUs\n",
5440 phba->cfg_fcp_cpu_map, num_online_cpus(),
5441 num_present_cpus(),
5442 phba->sli4_hba.num_possible_cpu);
5443 break;
5444 }
5445
5446 while (phba->sli4_hba.curr_disp_cpu <
5447 phba->sli4_hba.num_possible_cpu) {
5448 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
5449
5450 if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5451 len += scnprintf(buf + len, PAGE_SIZE - len,
5452 "CPU %02d not present\n",
5453 phba->sli4_hba.curr_disp_cpu);
5454 else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
5455 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5456 len += scnprintf(
5457 buf + len, PAGE_SIZE - len,
5458 "CPU %02d hdwq None "
5459 "physid %d coreid %d ht %d ua %d\n",
5460 phba->sli4_hba.curr_disp_cpu,
5461 cpup->phys_id, cpup->core_id,
5462 (cpup->flag & LPFC_CPU_MAP_HYPER),
5463 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5464 else
5465 len += scnprintf(
5466 buf + len, PAGE_SIZE - len,
5467 "CPU %02d EQ None hdwq %04d "
5468 "physid %d coreid %d ht %d ua %d\n",
5469 phba->sli4_hba.curr_disp_cpu,
5470 cpup->hdwq, cpup->phys_id,
5471 cpup->core_id,
5472 (cpup->flag & LPFC_CPU_MAP_HYPER),
5473 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5474 } else {
5475 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5476 len += scnprintf(
5477 buf + len, PAGE_SIZE - len,
5478 "CPU %02d hdwq None "
5479 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5480 phba->sli4_hba.curr_disp_cpu,
5481 cpup->phys_id,
5482 cpup->core_id,
5483 (cpup->flag & LPFC_CPU_MAP_HYPER),
5484 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5485 lpfc_get_irq(cpup->eq));
5486 else
5487 len += scnprintf(
5488 buf + len, PAGE_SIZE - len,
5489 "CPU %02d EQ %04d hdwq %04d "
5490 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5491 phba->sli4_hba.curr_disp_cpu,
5492 cpup->eq, cpup->hdwq, cpup->phys_id,
5493 cpup->core_id,
5494 (cpup->flag & LPFC_CPU_MAP_HYPER),
5495 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5496 lpfc_get_irq(cpup->eq));
5497 }
5498
5499 phba->sli4_hba.curr_disp_cpu++;
5500
5501 /* display max number of CPUs keeping some margin */
5502 if (phba->sli4_hba.curr_disp_cpu <
5503 phba->sli4_hba.num_possible_cpu &&
5504 (len >= (PAGE_SIZE - 64))) {
5505 len += scnprintf(buf + len,
5506 PAGE_SIZE - len, "more...\n");
5507 break;
5508 }
5509 }
5510
5511 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5512 phba->sli4_hba.curr_disp_cpu = 0;
5513
5514 return len;
5515 }
5516
5517 /**
5518 * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors
5519 * @dev: class device that is converted into a Scsi_host.
5520 * @attr: device attribute, not used.
5521 * @buf: one or more lpfc_polling_flags values.
5522 * @count: not used.
5523 *
5524 * Returns:
5525 * -EINVAL - Not implemented yet.
5526 **/
5527 static ssize_t
5528 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5529 const char *buf, size_t count)
5530 {
5531 return -EINVAL;
5532 }
5533
5534 /*
5535 # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors
5536 # for the HBA.
5537 #
5538 # Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1).
5539 #       0 - Do not affinitize IRQ vectors
5540 #       1 - Affinitize HBA vectors with respect to each HBA
5541 # (start with CPU0 for each HBA)
5542 # This also defines how Hardware Queues are mapped to specific CPUs.
5543 */
5544 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5545 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5546 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5547 "Defines how to map CPUs to IRQ vectors per HBA");
5548
5549 /**
5550  * lpfc_fcp_cpu_map_init - Set the initial CPU mapping mode
5551  * @phba: lpfc_hba pointer.
5552  * @val: CPU mapping mode value.
5553  *
5554  * Description:
5555  * If val is in a valid range [0-1], then affinitize the adapter's
5556  * MSIX vectors.
5557 *
5558 * Returns:
5559 * zero if val saved.
5560 * -EINVAL val out of range
5561 **/
5562 static int
5563 lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5564 {
5565 if (phba->sli_rev != LPFC_SLI_REV4) {
5566 phba->cfg_fcp_cpu_map = 0;
5567 return 0;
5568 }
5569
5570 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5571 phba->cfg_fcp_cpu_map = val;
5572 return 0;
5573 }
5574
5575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5576 "3326 lpfc_fcp_cpu_map: %d out of range, using "
5577 "default\n", val);
5578 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5579
5580 return 0;
5581 }
5582
5583 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
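/*
 * Hedged usage sketch (illustrative, host number assumed): the attribute is
 * effectively read-only at runtime (the store routine returns -EINVAL);
 * reading it dumps the per-CPU EQ / hardware queue / IRQ mapping.
 *
 *   cat /sys/class/scsi_host/host0/lpfc_fcp_cpu_map
 */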
5584
5585 /*
5586 # lpfc_fcp_class: Determines FC class to use for the FCP protocol.
5587 # Value range is [2,3]. Default value is 3.
5588 */
5589 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5590 "Select Fibre Channel class of service for FCP sequences");
5591
5592 /*
5593 # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range
5594 # is [0,1]. Default value is 1.
5595 */
5596 LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
5597 "Use ADISC on rediscovery to authenticate FCP devices");
5598
5599 /*
5600 # lpfc_first_burst_size: First burst size to use on the NPorts
5601 # that support first burst.
5602 # Value range is [0,65536]. Default value is 0.
5603 */
5604 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5605 "First burst size for Targets that support first burst");
5606
5607 /*
5608 * lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
5609 * When the driver is configured as an NVME target, this value is
5610 * communicated to the NVME initiator in the PRLI response. It is
5611 * used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
5612 * parameters are set and the target is sending the PRLI RSP.
5613 * Parameter supported on physical port only - no NPIV support.
5614 * Value range is [0,65536]. Default value is 0.
5615 */
5616 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
5617 "NVME Target mode first burst size in 512B increments.");
5618
5619 /*
5620 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
5621 * For the Initiator (I), enabling this parameter means that an NVMET
5622 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
5623 * processed by the initiator for subsequent NVME FCP IO.
5624  * Currently, this feature is not supported on the NVME target.
5625 * Value range is [0,1]. Default value is 0 (disabled).
5626 */
5627 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
5628 "Enable First Burst feature for NVME Initiator.");
5629
5630 /*
5631 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
5632 # depth. Default value is 0. When the value of this parameter is zero the
5633 # SCSI command completion time is not used for controlling I/O queue depth. When
5634 # the parameter is set to a non-zero value, the I/O queue depth is controlled
5635 # to limit the I/O completion time to the parameter value.
5636 # The value is set in milliseconds.
5637 */
5638 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
5639 "Use command completion time to control queue depth");
5640
5641 lpfc_vport_param_show(max_scsicmpl_time);
5642 static int
5643 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
5644 {
5645 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5646 struct lpfc_nodelist *ndlp, *next_ndlp;
5647
5648 if (val == vport->cfg_max_scsicmpl_time)
5649 return 0;
5650 if ((val < 0) || (val > 60000))
5651 return -EINVAL;
5652 vport->cfg_max_scsicmpl_time = val;
5653
5654 spin_lock_irq(shost->host_lock);
5655 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5656 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
5657 continue;
5658 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
5659 }
5660 spin_unlock_irq(shost->host_lock);
5661 return 0;
5662 }
5663 lpfc_vport_param_store(max_scsicmpl_time);
5664 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
5665
5666 /*
5667 # lpfc_ack0: Use ACK0, instead of ACK1 for class 2 acknowledgement. Value
5668 # range is [0,1]. Default value is 0.
5669 */
5670 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
5671
5672 /*
5673 # lpfc_xri_rebalancing: enable or disable XRI rebalancing feature
5674 # range is [0,1]. Default value is 1.
5675 */
5676 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
5677
5678 /*
5679  * lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
5680 * range is [0,1]. Default value is 0.
5681 * For [0], FCP commands are issued to Work Queues based on upper layer
5682 * hardware queue index.
5683 * For [1], FCP commands are issued to a Work Queue associated with the
5684 * current CPU.
5685 *
5686 * LPFC_FCP_SCHED_BY_HDWQ == 0
5687 * LPFC_FCP_SCHED_BY_CPU == 1
5688 *
5689 * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
5690 * affinity for FCP/NVME I/Os through Work Queues associated with the current
5691 * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
5692 * through WQs will be used.
5693 */
5694 LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5695 LPFC_FCP_SCHED_BY_HDWQ,
5696 LPFC_FCP_SCHED_BY_CPU,
5697 "Determine scheduling algorithm for "
5698 "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5699
5700 /*
5701  * lpfc_ns_query: Determine algorithm for NameServer queries after RSCN
5702 * range is [0,1]. Default value is 0.
5703 * For [0], GID_FT is used for NameServer queries after RSCN (default)
5704 * For [1], GID_PT is used for NameServer queries after RSCN
5705 *
5706 */
5707 LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5708 LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5709 "Determine algorithm NameServer queries after RSCN "
5710 "[0] - GID_FT, [1] - GID_PT");
5711
5712 /*
5713 # lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
5714 # range is [0,1]. Default value is 0.
5715 # For [0], bus reset issues target reset to ALL devices
5716 # For [1], bus reset issues target reset to non-FCP2 devices
5717 */
5718 LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5719 "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5720
5721
5722 /*
5723 # lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
5724 # cr_delay (msec) or cr_count outstanding commands. cr_delay can take
5725 # value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
5726 # is 0. Default value of cr_count is 1. The cr_count feature is disabled if
5727 # cr_delay is set to 0.
5728 */
5729 LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5730 "interrupt response is generated");
5731
5732 LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5733 "interrupt response is generated");
5734
5735 /*
5736 # lpfc_multi_ring_support: Determines how many rings to spread available
5737 # cmd/rsp IOCB entries across.
5738 # Value range is [1,2]. Default value is 1.
5739 */
5740 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5741 "SLI rings to spread IOCB entries across");
5742
5743 /*
5744 # lpfc_multi_ring_rctl: If lpfc_multi_ring_support is enabled, this
5745 # identifies what rctl value to configure the additional ring for.
5746 # Value range is [1,0xff]. Default value is 4 (Unsolicited Data).
5747 */
5748 LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5749 255, "Identifies RCTL for additional ring configuration");
5750
5751 /*
5752 # lpfc_multi_ring_type: If lpfc_multi_ring_support is enabled, this
5753 # identifies what type value to configure the additional ring for.
5754 # Value range is [1,0xff]. Default value is 5 (LLC/SNAP).
5755 */
5756 LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5757 255, "Identifies TYPE for additional ring configuration");
5758
5759 /*
5760 # lpfc_enable_SmartSAN: Sets up FDMI support for SmartSAN
5761 # 0 = SmartSAN functionality disabled (default)
5762 # 1 = SmartSAN functionality enabled
5763 # This parameter will override the value of lpfc_fdmi_on module parameter.
5764 # Value range is [0,1]. Default value is 0.
5765 */
5766 LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5767
5768 /*
5769 # lpfc_fdmi_on: Controls FDMI support.
5770 # 0 No FDMI support
5771 # 1 Traditional FDMI support (default)
5772 # Traditional FDMI support means the driver will assume FDMI-2 support;
5773 # however, if that fails, it will fall back to FDMI-1.
5774 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
5775 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
5776 # lpfc_fdmi_on.
5777 # Value range [0,1]. Default value is 1.
5778 */
5779 LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5780
5781 /*
5782 # Specifies the maximum number of ELS cmds we can have outstanding (for
5783 # discovery). Value range is [1,64]. Default value = 32.
5784 */
5785 LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
5786 "during discovery");
5787
5788 /*
5789 # lpfc_max_luns: maximum allowed LUN ID. This is the highest LUN ID that
5790 # will be scanned by the SCSI midlayer when sequential scanning is
5791 # used; and is also the highest LUN ID allowed when the SCSI midlayer
5792 # parses REPORT_LUN responses. The lpfc driver has no LUN count or
5793 # LUN ID limit, but the SCSI midlayer requires this field for the uses
5794 # above. The lpfc driver limits the default value to 255 for two reasons.
5795 # As it bounds the sequential scan loop, scanning for thousands of luns
5796 # on a target can take minutes of wall clock time. Additionally,
5797 # there are FC targets, such as JBODs, that only recognize 8-bits of
5798 # LUN ID. When they receive a value greater than 8 bits, they chop off
5799 # the high order bits. In other words, they see LUN IDs 0, 256, 512,
5800 # and so on all as LUN ID 0. This causes the linux kernel, which sees
5801 # valid responses at each of the LUN IDs, to believe there are multiple
5802 # devices present, when in fact, there is only 1.
5803 # A customer that is aware of their target behaviors, and the results as
5804 # indicated above, is welcome to increase the lpfc_max_luns value.
5805 # As mentioned, this value is not used by the lpfc driver, only the
5806 # SCSI midlayer.
5807 # Value range is [0,65535]. Default value is 255.
5808 # NOTE: The SCSI layer might probe all allowed LUNs on some old targets.
5809 */
5810 LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
5811
5812 /*
5813 # lpfc_poll_tmo: Milliseconds driver will wait between polling FCP ring.
5814 # Value range is [1,255], default value is 10.
5815 */
5816 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
5817 "Milliseconds driver will wait between polling FCP ring");
5818
5819 /*
5820 # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
5821 # to complete in seconds. Value range is [5,180], default value is 60.
5822 */
5823 LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
5824 "Maximum time to wait for task management commands to complete");
5825 /*
5826 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
5827 # support this feature
5828 # 0 = MSI disabled
5829 # 1 = MSI enabled
5830 # 2 = MSI-X enabled (default)
5831 # Value range is [0,2]. Default value is 2.
5832 */
5833 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
5834 "MSI-X (2), if possible");
5835
5836 /*
5837 * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
5838 *
5839 * 0 = NVME OAS disabled
5840 * 1 = NVME OAS enabled
5841 *
5842 * Value range is [0,1]. Default value is 0.
5843 */
5844 LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
5845 "Use OAS bit on NVME IOs");
5846
5847 /*
5848  * lpfc_nvme_embed_cmd: Embed the NVME Command in the WQE when sending NVME/NVMET IOs
5849 *
5850 * 0 = Put NVME Command in SGL
5851 * 1 = Embed NVME Command in WQE (unless G7)
5852 * 2 = Embed NVME Command in WQE (force)
5853 *
5854 * Value range is [0,2]. Default value is 1.
5855 */
5856 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5857 "Embed NVME Command in WQE");
5858
5859 /*
5860 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5861 * the driver will advertise it supports to the SCSI layer.
5862 *
5863 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
5864  * 1,256 = Manually specify nr_hw_queue value to be advertised.
5865 *
5866 * Value range is [0,256]. Default value is 8.
5867 */
5868 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5869 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5870 "Set the number of SCSI Queues advertised");
5871
5872 /*
5873 * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5874 * will advertise it supports to the NVME and SCSI layers. This also
5875 * will map to the number of CQ/WQ pairs the driver will create.
5876 *
5877 * The NVME Layer will try to create this many, plus 1 administrative
5878  * hardware queue. The administrative queue will always map to WQ 0.
5879 * A hardware IO queue maps (qidx) to a specific driver CQ/WQ.
5880 *
5881 * 0 = Configure the number of hdw queues to the number of active CPUs.
5882 * 1,256 = Manually specify how many hdw queues to use.
5883 *
5884 * Value range is [0,256]. Default value is 0.
5885 */
5886 LPFC_ATTR_R(hdw_queue,
5887 LPFC_HBA_HDWQ_DEF,
5888 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5889 "Set the number of I/O Hardware Queues");
5890
5891 #if IS_ENABLED(CONFIG_X86)
5892 /**
5893  * lpfc_cpumask_irq_mode_init - initializes cpumask of phba based on
5894 * irq_chann_mode
5895 * @phba: Pointer to HBA context object.
5896 **/
5897 static void
5898 lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
5899 {
5900 unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
5901 const struct cpumask *sibling_mask;
5902 struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
5903
5904 cpumask_clear(aff_mask);
5905
5906 if (phba->irq_chann_mode == NUMA_MODE) {
5907 /* Check if we're a NUMA architecture */
5908 numa_node = dev_to_node(&phba->pcidev->dev);
5909 if (numa_node == NUMA_NO_NODE) {
5910 phba->irq_chann_mode = NORMAL_MODE;
5911 return;
5912 }
5913 }
5914
5915 for_each_possible_cpu(cpu) {
5916 switch (phba->irq_chann_mode) {
5917 case NUMA_MODE:
5918 if (cpu_to_node(cpu) == numa_node)
5919 cpumask_set_cpu(cpu, aff_mask);
5920 break;
5921 case NHT_MODE:
5922 sibling_mask = topology_sibling_cpumask(cpu);
5923 first_cpu = cpumask_first(sibling_mask);
5924 if (first_cpu < nr_cpu_ids)
5925 cpumask_set_cpu(first_cpu, aff_mask);
5926 break;
5927 default:
5928 break;
5929 }
5930 }
5931 }
5932 #endif
5933
5934 static void
5935 lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
5936 {
5937 #if IS_ENABLED(CONFIG_X86)
5938 switch (boot_cpu_data.x86_vendor) {
5939 case X86_VENDOR_AMD:
5940 /* If AMD architecture, then default is NUMA_MODE */
5941 phba->irq_chann_mode = NUMA_MODE;
5942 break;
5943 case X86_VENDOR_INTEL:
5944 /* If Intel architecture, then default is no hyperthread mode */
5945 phba->irq_chann_mode = NHT_MODE;
5946 break;
5947 default:
5948 phba->irq_chann_mode = NORMAL_MODE;
5949 break;
5950 }
5951 lpfc_cpumask_irq_mode_init(phba);
5952 #else
5953 phba->irq_chann_mode = NORMAL_MODE;
5954 #endif
5955 }
5956
5957 /*
5958 * lpfc_irq_chann: Set the number of IRQ vectors that are available
5959 * for Hardware Queues to utilize. This also will map to the number
5960 * of EQ / MSI-X vectors the driver will create. This should never be
5961 * more than the number of Hardware Queues
5962 *
5963 * 0 = Configure number of IRQ Channels to:
5964 * if AMD architecture, number of CPUs on HBA's NUMA node
5965 * if Intel architecture, number of physical CPUs.
5966 * otherwise, number of active CPUs.
5967 * [1,256] = Manually specify how many IRQ Channels to use.
5968 *
5969 * Value range is [0,256]. Default value is [0].
5970 */
5971 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
5972 module_param(lpfc_irq_chann, uint, 0444);
5973 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
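/*
 * Hedged usage sketch (illustrative values): lpfc_irq_chann is a 0444 module
 * parameter, so it can only be chosen at load time, e.g. on the modprobe
 * command line or in a modprobe.d configuration file.
 *
 *   modprobe lpfc lpfc_irq_chann=16
 *   # or persistently:
 *   echo "options lpfc lpfc_irq_chann=16" > /etc/modprobe.d/lpfc.conf
 */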
5974
5975 /* lpfc_irq_chann_init - Set the hba irq_chann initial value
5976 * @phba: lpfc_hba pointer.
5977 * @val: contains the initial value
5978 *
5979 * Description:
5980 * Validates the initial value is within range and assigns it to the
5981 * adapter. If not in range, an error message is posted and the
5982 * default value is assigned.
5983 *
5984 * Returns:
5985 * zero if value is in range and is set
5986 * -EINVAL if value was out of range
5987 **/
5988 static int
5989 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
5990 {
5991 const struct cpumask *aff_mask;
5992
5993 if (phba->cfg_use_msi != 2) {
5994 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5995 "8532 use_msi = %u ignoring cfg_irq_numa\n",
5996 phba->cfg_use_msi);
5997 phba->irq_chann_mode = NORMAL_MODE;
5998 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5999 return 0;
6000 }
6001
6002 /* Check if default setting was passed */
6003 if (val == LPFC_IRQ_CHANN_DEF &&
6004 phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF &&
6005 phba->sli_rev == LPFC_SLI_REV4)
6006 lpfc_assign_default_irq_chann(phba);
6007
6008 if (phba->irq_chann_mode != NORMAL_MODE) {
6009 aff_mask = &phba->sli4_hba.irq_aff_mask;
6010
6011 if (cpumask_empty(aff_mask)) {
6012 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6013 "8533 Could not identify CPUS for "
6014 "mode %d, ignoring\n",
6015 phba->irq_chann_mode);
6016 phba->irq_chann_mode = NORMAL_MODE;
6017 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
6018 } else {
6019 phba->cfg_irq_chann = cpumask_weight(aff_mask);
6020
6021 /* If no hyperthread mode, then set hdwq count to
6022 * aff_mask weight as well
6023 */
6024 if (phba->irq_chann_mode == NHT_MODE)
6025 phba->cfg_hdw_queue = phba->cfg_irq_chann;
6026
6027 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6028 "8543 lpfc_irq_chann set to %u "
6029 "(mode: %d)\n", phba->cfg_irq_chann,
6030 phba->irq_chann_mode);
6031 }
6032 } else {
6033 if (val > LPFC_IRQ_CHANN_MAX) {
6034 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6035 "8545 lpfc_irq_chann attribute cannot "
6036 "be set to %u, allowed range is "
6037 "[%u,%u]\n",
6038 val,
6039 LPFC_IRQ_CHANN_MIN,
6040 LPFC_IRQ_CHANN_MAX);
6041 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
6042 return -EINVAL;
6043 }
6044 if (phba->sli_rev == LPFC_SLI_REV4) {
6045 phba->cfg_irq_chann = val;
6046 } else {
6047 phba->cfg_irq_chann = 2;
6048 phba->cfg_hdw_queue = 1;
6049 }
6050 }
6051
6052 return 0;
6053 }
6054
6055 /**
6056 * lpfc_irq_chann_show - Display value of irq_chann
6057 * @dev: class converted to a Scsi_host structure.
6058 * @attr: device attribute, not used.
6059 * @buf: on return contains a string with the list sizes
6060 *
6061 * Returns: size of formatted string.
6062 **/
6063 static ssize_t
6064 lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
6065 char *buf)
6066 {
6067 struct Scsi_Host *shost = class_to_shost(dev);
6068 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6069 struct lpfc_hba *phba = vport->phba;
6070
6071 return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
6072 }
6073
6074 static DEVICE_ATTR_RO(lpfc_irq_chann);
6075
6076 /*
6077 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
6078 # 0 = HBA resets disabled
6079 # 1 = HBA resets enabled (default)
6080 # 2 = HBA reset via PCI bus reset enabled
6081 # Value range is [0,2]. Default value is 1.
6082 */
6083 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
6084
6085 /*
6086 # lpfc_enable_hba_heartbeat: Enable the HBA heartbeat timer.
6087 # 0 = HBA Heartbeat disabled (default)
6088 # 1 = HBA Heartbeat enabled
6089 # Value range is [0,1]. Default value is 0.
6090 */
6091 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
6092
6093 /*
6094 # lpfc_EnableXLane: Enable Express Lane Feature
6095 # 0x0 Express Lane Feature disabled
6096 # 0x1 Express Lane Feature enabled
6097 # Value range is [0,1]. Default value is 0.
6098 */
6099 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
6100
6101 /*
6102 # lpfc_XLanePriority: Define CS_CTL priority for Express Lane Feature
6103 # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits)
6104 # Value range is [0x0,0x7f]. Default value is 0
6105 */
6106 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
6107
6108 /*
6109 # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)
6110 # 0 = BlockGuard disabled (default)
6111 # 1 = BlockGuard enabled
6112 # Value range is [0,1]. Default value is 0.
6113 */
6114 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
6115
6116 /*
6117 # lpfc_prot_mask:
6118 # - Bit mask of host protection capabilities used to register with the
6119 # SCSI mid-layer
6120 # - Only meaningful if BG is turned on (lpfc_enable_bg=1).
6121 # - Allows you to ultimately specify which profiles to use
6122 # - Default will result in registering capabilities for all profiles.
6123 # - SHOST_DIF_TYPE1_PROTECTION 1
6124 # HBA supports T10 DIF Type 1: HBA to Target Type 1 Protection
6125 # - SHOST_DIX_TYPE0_PROTECTION 8
6126 # HBA supports DIX Type 0: Host to HBA protection only
6127 # - SHOST_DIX_TYPE1_PROTECTION 16
6128 # HBA supports DIX Type 1: Host to HBA Type 1 protection
6129 #
6130 */
6131 LPFC_ATTR(prot_mask,
6132 (SHOST_DIF_TYPE1_PROTECTION |
6133 SHOST_DIX_TYPE0_PROTECTION |
6134 SHOST_DIX_TYPE1_PROTECTION),
6135 0,
6136 (SHOST_DIF_TYPE1_PROTECTION |
6137 SHOST_DIX_TYPE0_PROTECTION |
6138 SHOST_DIX_TYPE1_PROTECTION),
6139 "T10-DIF host protection capabilities mask");
6140
6141 /*
6142 # lpfc_prot_guard:
6143 # - Bit mask of protection guard types to register with the SCSI mid-layer
6144 # - Guard types are currently either 1) T10-DIF CRC 2) IP checksum
6145 # - Allows you to ultimately specify which profiles to use
6146 # - Default will result in registering capabilities for all guard types
6147 #
6148 */
6149 LPFC_ATTR(prot_guard,
6150 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
6151 "T10-DIF host protection guard type");
6152
6153 /*
6154 * Delay initial NPort discovery when Clean Address bit is cleared in
6155 * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
6156 * This parameter can have value 0 or 1.
6157 * When this parameter is set to 0, no delay is added to the initial
6158 * discovery.
6159  * When this parameter is set to a non-zero value, initial Nport discovery is
6160  * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
6161  * accept and FCID/Fabric name/Fabric portname is changed.
6162  * The driver always delays Nport discovery for subsequent FLOGI/FDISC completions
6163 * when Clean Address bit is cleared in FLOGI/FDISC
6164 * accept and FCID/Fabric name/Fabric portname is changed.
6165 * Default value is 0.
6166 */
6167 LPFC_ATTR(delay_discovery, 0, 0, 1,
6168 "Delay NPort discovery when Clean Address bit is cleared.");
6169
6170 /*
6171 * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
6172 * This value can be set to values between 64 and 4096. The default value
6173 * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
6174 * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
6175 * Because of the additional overhead involved in setting up T10-DIF,
6176 * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
6177 * and will be limited to 512 if BlockGuard is enabled under SLI3.
6178 */
6179 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6180 module_param(lpfc_sg_seg_cnt, uint, 0444);
6181 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
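/*
 * Hedged usage sketch (illustrative value): lpfc_sg_seg_cnt is a 0444 module
 * parameter, so it must be set when the driver loads; the effective values
 * can be read back later through the sysfs attribute defined below.
 *
 *   modprobe lpfc lpfc_sg_seg_cnt=128
 */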
6182
6183 /**
6184 * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
6185 * configured for the adapter
6186 * @dev: class converted to a Scsi_host structure.
6187 * @attr: device attribute, not used.
6188 * @buf: on return contains a string with the list sizes
6189 *
6190 * Returns: size of formatted string.
6191 **/
6192 static ssize_t
6193 lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
6194 char *buf)
6195 {
6196 struct Scsi_Host *shost = class_to_shost(dev);
6197 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6198 struct lpfc_hba *phba = vport->phba;
6199 int len;
6200
6201 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
6202 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
6203
6204 len += scnprintf(buf + len, PAGE_SIZE - len,
6205 "Cfg: %d SCSI: %d NVME: %d\n",
6206 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
6207 phba->cfg_nvme_seg_cnt);
6208 return len;
6209 }
6210
6211 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
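/*
 * Hedged read sketch (illustrative output, host number and numbers assumed):
 * the show routine above reports the DMA buffer size, the total SGE count,
 * and the configured SCSI/NVME segment counts, roughly in this form:
 *
 *   $ cat /sys/class/scsi_host/host0/lpfc_sg_seg_cnt
 *   SGL sz: 4096 total SGEs: 256
 *   Cfg: 64 SCSI: 64 NVME: 64
 */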
6212
6213 /**
6214 * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
6215 * @phba: lpfc_hba pointer.
6216 * @val: contains the initial value
6217 *
6218 * Description:
6219 * Validates the initial value is within range and assigns it to the
6220 * adapter. If not in range, an error message is posted and the
6221 * default value is assigned.
6222 *
6223 * Returns:
6224 * zero if value is in range and is set
6225 * -EINVAL if value was out of range
6226 **/
6227 static int
6228 lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
6229 {
6230 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
6231 phba->cfg_sg_seg_cnt = val;
6232 return 0;
6233 }
6234 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6235 "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, "
6236 "allowed range is [%d, %d]\n",
6237 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
6238 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6239 return -EINVAL;
6240 }
6241
6242 /*
6243 * lpfc_enable_mds_diags: Enable MDS Diagnostics
6244 * 0 = MDS Diagnostics disabled (default)
6245 * 1 = MDS Diagnostics enabled
6246 * Value range is [0,1]. Default value is 0.
6247 */
6248 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6249
6250 /*
6251 * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
6252 * 0 = Disable firmware logging (default)
6253  * [1-4] = Multiple of 1/4th MB of host memory for FW logging
6254 * Value range [0..4]. Default value is 0
6255 */
6256 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
6257 lpfc_param_show(ras_fwlog_buffsize);
6258
6259 static ssize_t
6260 lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
6261 {
6262 int ret = 0;
6263 enum ras_state state;
6264
6265 if (!lpfc_rangecheck(val, 0, 4))
6266 return -EINVAL;
6267
6268 if (phba->cfg_ras_fwlog_buffsize == val)
6269 return 0;
6270
6271 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
6272 return -EINVAL;
6273
6274 spin_lock_irq(&phba->hbalock);
6275 state = phba->ras_fwlog.state;
6276 spin_unlock_irq(&phba->hbalock);
6277
6278 if (state == REG_INPROGRESS) {
6279 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
6280 "registration is in progress\n");
6281 return -EBUSY;
6282 }
6283
6284 /* For disable logging: stop the logs and free the DMA.
6285 * For ras_fwlog_buffsize size change we still need to free and
6286 * reallocate the DMA in lpfc_sli4_ras_fwlog_init.
6287 */
6288 phba->cfg_ras_fwlog_buffsize = val;
6289 if (state == ACTIVE) {
6290 lpfc_ras_stop_fwlog(phba);
6291 lpfc_sli4_ras_dma_free(phba);
6292 }
6293
6294 lpfc_sli4_ras_init(phba);
6295 if (phba->ras_fwlog.ras_enabled)
6296 ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6297 LPFC_RAS_ENABLE_LOGGING);
6298 return ret;
6299 }
6300
6301 lpfc_param_store(ras_fwlog_buffsize);
6302 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
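/*
 * Hedged usage sketch (illustrative, host number assumed): write the number
 * of 1/4 MB units of host memory to use for firmware logging (0 disables it).
 * The write returns -EBUSY while RAS registration is in progress.
 *
 *   echo 2 > /sys/class/scsi_host/host0/lpfc_ras_fwlog_buffsize   # 512 KB
 */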
6303
6304 /*
6305 * lpfc_ras_fwlog_level: Firmware logging verbosity level
6306 * Valid only if firmware logging is enabled
6307  * 0 (least verbosity) to 4 (most verbosity)
6308 * Value range is [0..4]. Default value is 0
6309 */
6310 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
6311
6312 /*
6313 * lpfc_ras_fwlog_func: Firmware logging enabled on function number
6314 * Default function which has RAS support : 0
6315 * Value Range is [0..7].
6316 * FW logging is a global action and enablement is via a specific
6317 * port.
6318 */
6319 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
6320
6321 /*
6322 * lpfc_enable_bbcr: Enable BB Credit Recovery
6323 * 0 = BB Credit Recovery disabled
6324 * 1 = BB Credit Recovery enabled (default)
6325 * Value range is [0,1]. Default value is 1.
6326 */
6327 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
6328
6329 /* Signaling module parameters */
6330 int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
6331 module_param(lpfc_fabric_cgn_frequency, int, 0444);
6332 MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
6333
6334 int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */
6335 module_param(lpfc_acqe_cgn_frequency, int, 0444);
6336 MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
6337
6338 int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */
6339 module_param(lpfc_use_cgn_signal, int, 0444);
6340 MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available");
6341
6342 /*
6343 * lpfc_enable_dpp: Enable DPP on G7
6344 * 0 = DPP on G7 disabled
6345 * 1 = DPP on G7 enabled (default)
6346 * Value range is [0,1]. Default value is 1.
6347 */
6348 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
6349
6350 /*
6351 * lpfc_enable_mi: Enable FDMI MIB
6352 * 0 = disabled
6353 * 1 = enabled (default)
6354 * Value range is [0,1].
6355 */
6356 LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
6357
6358 /*
6359 * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if
6360 * either vmid_app_header or vmid_priority_tagging is enabled.
6361 * 4 - 255 = vmid support enabled for 4-255 VMs
6362 * Value range is [4,255].
6363 */
6364 LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
6365 "Maximum number of VMs supported");
6366
6367 /*
6368 * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours
6369 * 0 = Timeout is disabled
6370 * Value range is [0,24].
6371 */
6372 LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24,
6373 "Inactivity timeout in hours");
6374
6375 /*
6376 * lpfc_vmid_app_header: Enable App Header VMID support
6377 * 0 = Support is disabled (default)
6378 * 1 = Support is enabled
6379 * Value range is [0,1].
6380 */
6381 LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
6382 LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE,
6383 "Enable App Header VMID support");
6384
6385 /*
6386 * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support
6387 * 0 = Support is disabled (default)
6388 * 1 = Allow supported targets only
6389 * 2 = Allow all targets
6390 * Value range is [0,2].
6391 */
6392 LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
6393 LPFC_VMID_PRIO_TAG_DISABLE,
6394 LPFC_VMID_PRIO_TAG_ALL_TARGETS,
6395 "Enable Priority Tagging VMID support");
6396
6397 struct device_attribute *lpfc_hba_attrs[] = {
6398 &dev_attr_nvme_info,
6399 &dev_attr_scsi_stat,
6400 &dev_attr_bg_info,
6401 &dev_attr_bg_guard_err,
6402 &dev_attr_bg_apptag_err,
6403 &dev_attr_bg_reftag_err,
6404 &dev_attr_info,
6405 &dev_attr_serialnum,
6406 &dev_attr_modeldesc,
6407 &dev_attr_modelname,
6408 &dev_attr_programtype,
6409 &dev_attr_portnum,
6410 &dev_attr_fwrev,
6411 &dev_attr_hdw,
6412 &dev_attr_option_rom_version,
6413 &dev_attr_link_state,
6414 &dev_attr_num_discovered_ports,
6415 &dev_attr_menlo_mgmt_mode,
6416 &dev_attr_lpfc_drvr_version,
6417 &dev_attr_lpfc_enable_fip,
6418 &dev_attr_lpfc_temp_sensor,
6419 &dev_attr_lpfc_log_verbose,
6420 &dev_attr_lpfc_lun_queue_depth,
6421 &dev_attr_lpfc_tgt_queue_depth,
6422 &dev_attr_lpfc_hba_queue_depth,
6423 &dev_attr_lpfc_peer_port_login,
6424 &dev_attr_lpfc_nodev_tmo,
6425 &dev_attr_lpfc_devloss_tmo,
6426 &dev_attr_lpfc_enable_fc4_type,
6427 &dev_attr_lpfc_fcp_class,
6428 &dev_attr_lpfc_use_adisc,
6429 &dev_attr_lpfc_first_burst_size,
6430 &dev_attr_lpfc_ack0,
6431 &dev_attr_lpfc_xri_rebalancing,
6432 &dev_attr_lpfc_topology,
6433 &dev_attr_lpfc_scan_down,
6434 &dev_attr_lpfc_link_speed,
6435 &dev_attr_lpfc_fcp_io_sched,
6436 &dev_attr_lpfc_ns_query,
6437 &dev_attr_lpfc_fcp2_no_tgt_reset,
6438 &dev_attr_lpfc_cr_delay,
6439 &dev_attr_lpfc_cr_count,
6440 &dev_attr_lpfc_multi_ring_support,
6441 &dev_attr_lpfc_multi_ring_rctl,
6442 &dev_attr_lpfc_multi_ring_type,
6443 &dev_attr_lpfc_fdmi_on,
6444 &dev_attr_lpfc_enable_SmartSAN,
6445 &dev_attr_lpfc_max_luns,
6446 &dev_attr_lpfc_enable_npiv,
6447 &dev_attr_lpfc_fcf_failover_policy,
6448 &dev_attr_lpfc_enable_rrq,
6449 &dev_attr_lpfc_fcp_wait_abts_rsp,
6450 &dev_attr_nport_evt_cnt,
6451 &dev_attr_board_mode,
6452 &dev_attr_max_vpi,
6453 &dev_attr_used_vpi,
6454 &dev_attr_max_rpi,
6455 &dev_attr_used_rpi,
6456 &dev_attr_max_xri,
6457 &dev_attr_used_xri,
6458 &dev_attr_npiv_info,
6459 &dev_attr_issue_reset,
6460 &dev_attr_lpfc_poll,
6461 &dev_attr_lpfc_poll_tmo,
6462 &dev_attr_lpfc_task_mgmt_tmo,
6463 &dev_attr_lpfc_use_msi,
6464 &dev_attr_lpfc_nvme_oas,
6465 &dev_attr_lpfc_nvme_embed_cmd,
6466 &dev_attr_lpfc_fcp_imax,
6467 &dev_attr_lpfc_force_rscn,
6468 &dev_attr_lpfc_cq_poll_threshold,
6469 &dev_attr_lpfc_cq_max_proc_limit,
6470 &dev_attr_lpfc_fcp_cpu_map,
6471 &dev_attr_lpfc_fcp_mq_threshold,
6472 &dev_attr_lpfc_hdw_queue,
6473 &dev_attr_lpfc_irq_chann,
6474 &dev_attr_lpfc_suppress_rsp,
6475 &dev_attr_lpfc_nvmet_mrq,
6476 &dev_attr_lpfc_nvmet_mrq_post,
6477 &dev_attr_lpfc_nvme_enable_fb,
6478 &dev_attr_lpfc_nvmet_fb_size,
6479 &dev_attr_lpfc_enable_bg,
6480 &dev_attr_lpfc_soft_wwnn,
6481 &dev_attr_lpfc_soft_wwpn,
6482 &dev_attr_lpfc_soft_wwn_enable,
6483 &dev_attr_lpfc_enable_hba_reset,
6484 &dev_attr_lpfc_enable_hba_heartbeat,
6485 &dev_attr_lpfc_EnableXLane,
6486 &dev_attr_lpfc_XLanePriority,
6487 &dev_attr_lpfc_xlane_lun,
6488 &dev_attr_lpfc_xlane_tgt,
6489 &dev_attr_lpfc_xlane_vpt,
6490 &dev_attr_lpfc_xlane_lun_state,
6491 &dev_attr_lpfc_xlane_lun_status,
6492 &dev_attr_lpfc_xlane_priority,
6493 &dev_attr_lpfc_sg_seg_cnt,
6494 &dev_attr_lpfc_max_scsicmpl_time,
6495 &dev_attr_lpfc_stat_data_ctrl,
6496 &dev_attr_lpfc_aer_support,
6497 &dev_attr_lpfc_aer_state_cleanup,
6498 &dev_attr_lpfc_sriov_nr_virtfn,
6499 &dev_attr_lpfc_req_fw_upgrade,
6500 &dev_attr_lpfc_suppress_link_up,
6501 &dev_attr_iocb_hw,
6502 &dev_attr_pls,
6503 &dev_attr_pt,
6504 &dev_attr_txq_hw,
6505 &dev_attr_txcmplq_hw,
6506 &dev_attr_lpfc_sriov_hw_max_virtfn,
6507 &dev_attr_protocol,
6508 &dev_attr_lpfc_xlane_supported,
6509 &dev_attr_lpfc_enable_mds_diags,
6510 &dev_attr_lpfc_ras_fwlog_buffsize,
6511 &dev_attr_lpfc_ras_fwlog_level,
6512 &dev_attr_lpfc_ras_fwlog_func,
6513 &dev_attr_lpfc_enable_bbcr,
6514 &dev_attr_lpfc_enable_dpp,
6515 &dev_attr_lpfc_enable_mi,
6516 &dev_attr_cmf_info,
6517 &dev_attr_lpfc_max_vmid,
6518 &dev_attr_lpfc_vmid_inactivity_timeout,
6519 &dev_attr_lpfc_vmid_app_header,
6520 &dev_attr_lpfc_vmid_priority_tagging,
6521 NULL,
6522 };
6523
6524 struct device_attribute *lpfc_vport_attrs[] = {
6525 &dev_attr_info,
6526 &dev_attr_link_state,
6527 &dev_attr_num_discovered_ports,
6528 &dev_attr_lpfc_drvr_version,
6529 &dev_attr_lpfc_log_verbose,
6530 &dev_attr_lpfc_lun_queue_depth,
6531 &dev_attr_lpfc_tgt_queue_depth,
6532 &dev_attr_lpfc_nodev_tmo,
6533 &dev_attr_lpfc_devloss_tmo,
6534 &dev_attr_lpfc_hba_queue_depth,
6535 &dev_attr_lpfc_peer_port_login,
6536 &dev_attr_lpfc_restrict_login,
6537 &dev_attr_lpfc_fcp_class,
6538 &dev_attr_lpfc_use_adisc,
6539 &dev_attr_lpfc_first_burst_size,
6540 &dev_attr_lpfc_max_luns,
6541 &dev_attr_nport_evt_cnt,
6542 &dev_attr_npiv_info,
6543 &dev_attr_lpfc_enable_da_id,
6544 &dev_attr_lpfc_max_scsicmpl_time,
6545 &dev_attr_lpfc_stat_data_ctrl,
6546 &dev_attr_lpfc_static_vport,
6547 &dev_attr_cmf_info,
6548 NULL,
6549 };
6550
6551 /**
6552 * sysfs_ctlreg_write - Write method for writing to ctlreg
6553 * @filp: open sysfs file
6554 * @kobj: kernel kobject that contains the kernel class device.
6555 * @bin_attr: kernel attributes passed to us.
6556 * @buf: contains the data to be written to the adapter IOREG space.
6557 * @off: offset into buffer to beginning of data.
6558 * @count: bytes to transfer.
6559 *
6560 * Description:
6561 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6562 * Uses the adapter io control registers to send buf contents to the adapter.
6563 *
6564 * Returns:
6565 * -ERANGE off and count combo out of range
6566 * -EINVAL off, count or buff address invalid
6567  * -EPERM adapter is not offline, or is an SLI-4 port
6568 * value of count, buf contents written
6569 **/
6570 static ssize_t
6571 sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
6572 struct bin_attribute *bin_attr,
6573 char *buf, loff_t off, size_t count)
6574 {
6575 size_t buf_off;
6576 struct device *dev = container_of(kobj, struct device, kobj);
6577 struct Scsi_Host *shost = class_to_shost(dev);
6578 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6579 struct lpfc_hba *phba = vport->phba;
6580
6581 if (phba->sli_rev >= LPFC_SLI_REV4)
6582 return -EPERM;
6583
6584 if ((off + count) > FF_REG_AREA_SIZE)
6585 return -ERANGE;
6586
6587 if (count <= LPFC_REG_WRITE_KEY_SIZE)
6588 return 0;
6589
6590 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6591 return -EINVAL;
6592
6593 /* This is to protect HBA registers from accidental writes. */
6594 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
6595 return -EINVAL;
6596
6597 if (!(vport->fc_flag & FC_OFFLINE_MODE))
6598 return -EPERM;
6599
6600 spin_lock_irq(&phba->hbalock);
6601 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
6602 buf_off += sizeof(uint32_t))
6603 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
6604 phba->ctrl_regs_memmap_p + off + buf_off);
6605
6606 spin_unlock_irq(&phba->hbalock);
6607
6608 return count;
6609 }
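/*
 * Illustrative sketch (not part of the driver): a user-space tool could
 * exercise the ctlreg write path roughly as follows, assuming the host is
 * offline and that the sysfs path and host number used below exist on the
 * target system. The buffer must begin with the "EMLX" key, the total
 * length must be a multiple of 4, and the offset selects the register
 * within the control register area.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int write_ctlreg(const char *path, off_t reg_off, uint32_t val)
 *	{
 *		unsigned char buf[8] = { 'E', 'M', 'L', 'X' };
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		memcpy(buf + 4, &val, sizeof(val));
 *		// key (4 bytes) + one 32-bit value; reg_off must be 4-aligned
 *		if (pwrite(fd, buf, sizeof(buf), reg_off) != sizeof(buf)) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 *
 *	// e.g. write_ctlreg("/sys/class/scsi_host/host0/ctlreg", 0x0, 0x1);
 *	// the host number is hypothetical and system dependent.
 */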
6610
6611 /**
6612 * sysfs_ctlreg_read - Read method for reading from ctlreg
6613 * @filp: open sysfs file
6614 * @kobj: kernel kobject that contains the kernel class device.
6615 * @bin_attr: kernel attributes passed to us.
6616 * @buf: if successful contains the data from the adapter IOREG space.
6617 * @off: offset into buffer to beginning of data.
6618 * @count: bytes to transfer.
6619 *
6620 * Description:
6621 * Accessed via /sys/class/scsi_host/hostxxx/ctlreg.
6622 * Uses the adapter io control registers to read data into buf.
6623 *
6624  * Returns:
6625  * -ERANGE off and count combo out of range
6626  * -EINVAL off, count or buff address invalid; -EPERM on SLI-4 ports
6627  * value of count, buf contents read
6628 **/
6629 static ssize_t
6630 sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
6631 struct bin_attribute *bin_attr,
6632 char *buf, loff_t off, size_t count)
6633 {
6634 size_t buf_off;
6635 	uint32_t *tmp_ptr;
6636 struct device *dev = container_of(kobj, struct device, kobj);
6637 struct Scsi_Host *shost = class_to_shost(dev);
6638 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6639 struct lpfc_hba *phba = vport->phba;
6640
6641 if (phba->sli_rev >= LPFC_SLI_REV4)
6642 return -EPERM;
6643
6644 if (off > FF_REG_AREA_SIZE)
6645 return -ERANGE;
6646
6647 if ((off + count) > FF_REG_AREA_SIZE)
6648 count = FF_REG_AREA_SIZE - off;
6649
6650 if (count == 0) return 0;
6651
6652 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6653 return -EINVAL;
6654
6655 spin_lock_irq(&phba->hbalock);
6656
6657 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
6658 tmp_ptr = (uint32_t *)(buf + buf_off);
6659 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
6660 }
6661
6662 spin_unlock_irq(&phba->hbalock);
6663
6664 return count;
6665 }
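/*
 * Illustrative sketch (not part of the driver): reading a 32-bit register
 * back through this interface from user space, assuming a hypothetical
 * sysfs path. Offset and length must be 4-byte aligned and fall within the
 * 256-byte window exposed by sysfs_ctlreg_attr below.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int read_ctlreg(const char *path, off_t reg_off, uint32_t *val)
 *	{
 *		int fd = open(path, O_RDONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = pread(fd, val, sizeof(*val), reg_off);
 *		close(fd);
 *		return (n == sizeof(*val)) ? 0 : -1;
 *	}
 */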
6666
6667 static struct bin_attribute sysfs_ctlreg_attr = {
6668 .attr = {
6669 .name = "ctlreg",
6670 .mode = S_IRUSR | S_IWUSR,
6671 },
6672 .size = 256,
6673 .read = sysfs_ctlreg_read,
6674 .write = sysfs_ctlreg_write,
6675 };
6676
6677 /**
6678 * sysfs_mbox_write - Write method for writing information via mbox
6679 * @filp: open sysfs file
6680 * @kobj: kernel kobject that contains the kernel class device.
6681 * @bin_attr: kernel attributes passed to us.
6682 * @buf: contains the data to be written to sysfs mbox.
6683 * @off: offset into buffer to beginning of data.
6684 * @count: bytes to transfer.
6685 *
6686 * Description:
6687 * Deprecated function. All mailbox access from user space is performed via the
6688 * bsg interface.
6689 *
6690 * Returns:
6691 * -EPERM operation not permitted
6692 **/
6693 static ssize_t
6694 sysfs_mbox_write(struct file *filp, struct kobject *kobj,
6695 struct bin_attribute *bin_attr,
6696 char *buf, loff_t off, size_t count)
6697 {
6698 return -EPERM;
6699 }
6700
6701 /**
6702 * sysfs_mbox_read - Read method for reading information via mbox
6703 * @filp: open sysfs file
6704 * @kobj: kernel kobject that contains the kernel class device.
6705 * @bin_attr: kernel attributes passed to us.
6706 * @buf: contains the data to be read from sysfs mbox.
6707 * @off: offset into buffer to beginning of data.
6708 * @count: bytes to transfer.
6709 *
6710 * Description:
6711 * Deprecated function. All mailbox access from user space is performed via the
6712 * bsg interface.
6713 *
6714 * Returns:
6715 * -EPERM operation not permitted
6716 **/
6717 static ssize_t
6718 sysfs_mbox_read(struct file *filp, struct kobject *kobj,
6719 struct bin_attribute *bin_attr,
6720 char *buf, loff_t off, size_t count)
6721 {
6722 return -EPERM;
6723 }
6724
6725 static struct bin_attribute sysfs_mbox_attr = {
6726 .attr = {
6727 .name = "mbox",
6728 .mode = S_IRUSR | S_IWUSR,
6729 },
6730 .size = MAILBOX_SYSFS_MAX,
6731 .read = sysfs_mbox_read,
6732 .write = sysfs_mbox_write,
6733 };
6734
6735 /**
6736 * lpfc_alloc_sysfs_attr - Creates the ctlreg and mbox entries
6737 * @vport: address of lpfc vport structure.
6738 *
6739 * Return codes:
6740 * zero on success
6741 * error return code from sysfs_create_bin_file()
6742 **/
6743 int
6744 lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
6745 {
6746 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6747 int error;
6748
6749 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6750 &sysfs_drvr_stat_data_attr);
6751
6752 /* Virtual ports do not need ctrl_reg and mbox */
6753 if (error || vport->port_type == LPFC_NPIV_PORT)
6754 goto out;
6755
6756 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6757 &sysfs_ctlreg_attr);
6758 if (error)
6759 goto out_remove_stat_attr;
6760
6761 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6762 &sysfs_mbox_attr);
6763 if (error)
6764 goto out_remove_ctlreg_attr;
6765
6766 return 0;
6767 out_remove_ctlreg_attr:
6768 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6769 out_remove_stat_attr:
6770 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6771 &sysfs_drvr_stat_data_attr);
6772 out:
6773 return error;
6774 }
6775
6776 /**
6777 * lpfc_free_sysfs_attr - Removes the ctlreg and mbox entries
6778 * @vport: address of lpfc vport structure.
6779 **/
6780 void
6781 lpfc_free_sysfs_attr(struct lpfc_vport *vport)
6782 {
6783 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6784 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6785 &sysfs_drvr_stat_data_attr);
6786 /* Virtual ports do not need ctrl_reg and mbox */
6787 if (vport->port_type == LPFC_NPIV_PORT)
6788 return;
6789 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
6790 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6791 }
6792
6793 /*
6794 * Dynamic FC Host Attributes Support
6795 */
6796
6797 /**
6798 * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
6799 * @shost: kernel scsi host pointer.
6800 **/
6801 static void
6802 lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
6803 {
6804 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6805
6806 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
6807 sizeof fc_host_symbolic_name(shost));
6808 }
6809
6810 /**
6811 * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
6812 * @shost: kernel scsi host pointer.
6813 **/
6814 static void
6815 lpfc_get_host_port_id(struct Scsi_Host *shost)
6816 {
6817 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6818
6819 /* note: fc_myDID already in cpu endianness */
6820 fc_host_port_id(shost) = vport->fc_myDID;
6821 }
6822
6823 /**
6824 * lpfc_get_host_port_type - Set the value of the scsi host port type
6825 * @shost: kernel scsi host pointer.
6826 **/
6827 static void
6828 lpfc_get_host_port_type(struct Scsi_Host *shost)
6829 {
6830 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6831 struct lpfc_hba *phba = vport->phba;
6832
6833 spin_lock_irq(shost->host_lock);
6834
6835 if (vport->port_type == LPFC_NPIV_PORT) {
6836 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
6837 } else if (lpfc_is_link_up(phba)) {
6838 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6839 if (vport->fc_flag & FC_PUBLIC_LOOP)
6840 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
6841 else
6842 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
6843 } else {
6844 if (vport->fc_flag & FC_FABRIC)
6845 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
6846 else
6847 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
6848 }
6849 } else
6850 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
6851
6852 spin_unlock_irq(shost->host_lock);
6853 }
6854
6855 /**
6856 * lpfc_get_host_port_state - Set the value of the scsi host port state
6857 * @shost: kernel scsi host pointer.
6858 **/
6859 static void
6860 lpfc_get_host_port_state(struct Scsi_Host *shost)
6861 {
6862 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6863 struct lpfc_hba *phba = vport->phba;
6864
6865 spin_lock_irq(shost->host_lock);
6866
6867 if (vport->fc_flag & FC_OFFLINE_MODE)
6868 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
6869 else {
6870 switch (phba->link_state) {
6871 case LPFC_LINK_UNKNOWN:
6872 case LPFC_LINK_DOWN:
6873 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
6874 break;
6875 case LPFC_LINK_UP:
6876 case LPFC_CLEAR_LA:
6877 case LPFC_HBA_READY:
6878 			/* Link is up; report port state accordingly */
6879 if (vport->port_state < LPFC_VPORT_READY)
6880 fc_host_port_state(shost) =
6881 FC_PORTSTATE_BYPASSED;
6882 else
6883 fc_host_port_state(shost) =
6884 FC_PORTSTATE_ONLINE;
6885 break;
6886 case LPFC_HBA_ERROR:
6887 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
6888 break;
6889 default:
6890 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
6891 break;
6892 }
6893 }
6894
6895 spin_unlock_irq(shost->host_lock);
6896 }
6897
6898 /**
6899 * lpfc_get_host_speed - Set the value of the scsi host speed
6900 * @shost: kernel scsi host pointer.
6901 **/
6902 static void
6903 lpfc_get_host_speed(struct Scsi_Host *shost)
6904 {
6905 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6906 struct lpfc_hba *phba = vport->phba;
6907
6908 spin_lock_irq(shost->host_lock);
6909
6910 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
6911 switch(phba->fc_linkspeed) {
6912 case LPFC_LINK_SPEED_1GHZ:
6913 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6914 break;
6915 case LPFC_LINK_SPEED_2GHZ:
6916 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
6917 break;
6918 case LPFC_LINK_SPEED_4GHZ:
6919 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
6920 break;
6921 case LPFC_LINK_SPEED_8GHZ:
6922 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
6923 break;
6924 case LPFC_LINK_SPEED_10GHZ:
6925 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6926 break;
6927 case LPFC_LINK_SPEED_16GHZ:
6928 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
6929 break;
6930 case LPFC_LINK_SPEED_32GHZ:
6931 fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
6932 break;
6933 case LPFC_LINK_SPEED_64GHZ:
6934 fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6935 break;
6936 case LPFC_LINK_SPEED_128GHZ:
6937 fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6938 break;
6939 case LPFC_LINK_SPEED_256GHZ:
6940 fc_host_speed(shost) = FC_PORTSPEED_256GBIT;
6941 break;
6942 default:
6943 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6944 break;
6945 }
6946 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
6947 switch (phba->fc_linkspeed) {
6948 case LPFC_ASYNC_LINK_SPEED_1GBPS:
6949 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6950 break;
6951 case LPFC_ASYNC_LINK_SPEED_10GBPS:
6952 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6953 break;
6954 case LPFC_ASYNC_LINK_SPEED_20GBPS:
6955 fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
6956 break;
6957 case LPFC_ASYNC_LINK_SPEED_25GBPS:
6958 fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
6959 break;
6960 case LPFC_ASYNC_LINK_SPEED_40GBPS:
6961 fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
6962 break;
6963 case LPFC_ASYNC_LINK_SPEED_100GBPS:
6964 fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
6965 break;
6966 default:
6967 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6968 break;
6969 }
6970 } else
6971 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6972
6973 spin_unlock_irq(shost->host_lock);
6974 }
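/*
 * Illustrative sketch (not part of the driver): the dynamic host
 * attributes above (port type, port state, link speed) are surfaced by the
 * FC transport class under /sys/class/fc_host/hostN/. A minimal user-space
 * reader might look like the following; the path and host number are
 * assumptions for the example only.
 *
 *	#include <stdio.h>
 *
 *	static int show_fc_attr(const char *attr)
 *	{
 *		char path[128], line[64];
 *		FILE *f;
 *
 *		// host0 is hypothetical; pick the host for this adapter
 *		snprintf(path, sizeof(path),
 *			 "/sys/class/fc_host/host0/%s", attr);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return -1;
 *		if (fgets(line, sizeof(line), f))
 *			printf("%s: %s", attr, line);
 *		fclose(f);
 *		return 0;
 *	}
 *
 *	// e.g. show_fc_attr("speed"); show_fc_attr("port_state");
 */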
6975
6976 /**
6977 * lpfc_get_host_fabric_name - Set the value of the scsi host fabric name
6978 * @shost: kernel scsi host pointer.
6979 **/
6980 static void
6981 lpfc_get_host_fabric_name(struct Scsi_Host *shost)
6982 {
6983 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6984 struct lpfc_hba *phba = vport->phba;
6985 u64 node_name;
6986
6987 spin_lock_irq(shost->host_lock);
6988
6989 if ((vport->port_state > LPFC_FLOGI) &&
6990 ((vport->fc_flag & FC_FABRIC) ||
6991 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
6992 (vport->fc_flag & FC_PUBLIC_LOOP))))
6993 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
6994 else
6995 /* fabric is local port if there is no F/FL_Port */
6996 node_name = 0;
6997
6998 spin_unlock_irq(shost->host_lock);
6999
7000 fc_host_fabric_name(shost) = node_name;
7001 }
7002
7003 /**
7004 * lpfc_get_stats - Return statistical information about the adapter
7005 * @shost: kernel scsi host pointer.
7006 *
7007 * Notes:
7008  * NULL on error for link down, no mbox pool, SLI layer not active,
7009 * management not allowed, memory allocation error, or mbox error.
7010 *
7011 * Returns:
7012 * NULL for error
7013 * address of the adapter host statistics
7014 **/
7015 static struct fc_host_statistics *
7016 lpfc_get_stats(struct Scsi_Host *shost)
7017 {
7018 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7019 struct lpfc_hba *phba = vport->phba;
7020 struct lpfc_sli *psli = &phba->sli;
7021 struct fc_host_statistics *hs = &phba->link_stats;
7022 	struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
7023 LPFC_MBOXQ_t *pmboxq;
7024 MAILBOX_t *pmb;
7025 int rc = 0;
7026
7027 /*
7028 * prevent udev from issuing mailbox commands until the port is
7029 * configured.
7030 */
7031 if (phba->link_state < LPFC_LINK_DOWN ||
7032 !phba->mbox_mem_pool ||
7033 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
7034 return NULL;
7035
7036 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
7037 return NULL;
7038
7039 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7040 if (!pmboxq)
7041 return NULL;
7042 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
7043
7044 pmb = &pmboxq->u.mb;
7045 pmb->mbxCommand = MBX_READ_STATUS;
7046 pmb->mbxOwner = OWN_HOST;
7047 pmboxq->ctx_buf = NULL;
7048 pmboxq->vport = vport;
7049
7050 if (vport->fc_flag & FC_OFFLINE_MODE) {
7051 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
7052 if (rc != MBX_SUCCESS) {
7053 mempool_free(pmboxq, phba->mbox_mem_pool);
7054 return NULL;
7055 }
7056 } else {
7057 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7058 if (rc != MBX_SUCCESS) {
7059 if (rc != MBX_TIMEOUT)
7060 mempool_free(pmboxq, phba->mbox_mem_pool);
7061 return NULL;
7062 }
7063 }
7064
7065 memset(hs, 0, sizeof (struct fc_host_statistics));
7066
7067 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
7068 	/*
7069 	 * MBX_READ_STATUS returns the byte counts in kilobytes, which have
7070 	 * to be converted to 32-bit words (1 KB = 1024 bytes = 256 words,
7071 	 * hence the multiply by 256).
	 */
7072 hs->tx_words = (uint64_t)
7073 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
7074 * (uint64_t)256);
7075 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
7076 hs->rx_words = (uint64_t)
7077 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
7078 * (uint64_t)256);
7079
7080 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
7081 pmb->mbxCommand = MBX_READ_LNK_STAT;
7082 pmb->mbxOwner = OWN_HOST;
7083 pmboxq->ctx_buf = NULL;
7084 pmboxq->vport = vport;
7085
7086 if (vport->fc_flag & FC_OFFLINE_MODE) {
7087 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
7088 if (rc != MBX_SUCCESS) {
7089 mempool_free(pmboxq, phba->mbox_mem_pool);
7090 return NULL;
7091 }
7092 } else {
7093 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7094 if (rc != MBX_SUCCESS) {
7095 if (rc != MBX_TIMEOUT)
7096 mempool_free(pmboxq, phba->mbox_mem_pool);
7097 return NULL;
7098 }
7099 }
7100
7101 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
7102 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
7103 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
7104 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
7105 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
7106 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
7107 hs->error_frames = pmb->un.varRdLnk.crcCnt;
7108
7109 hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn);
7110 hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm);
7111
7112 hs->link_failure_count -= lso->link_failure_count;
7113 hs->loss_of_sync_count -= lso->loss_of_sync_count;
7114 hs->loss_of_signal_count -= lso->loss_of_signal_count;
7115 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
7116 hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
7117 hs->invalid_crc_count -= lso->invalid_crc_count;
7118 hs->error_frames -= lso->error_frames;
7119
7120 if (phba->hba_flag & HBA_FCOE_MODE) {
7121 hs->lip_count = -1;
7122 hs->nos_count = (phba->link_events >> 1);
7123 hs->nos_count -= lso->link_events;
7124 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7125 hs->lip_count = (phba->fc_eventTag >> 1);
7126 hs->lip_count -= lso->link_events;
7127 hs->nos_count = -1;
7128 } else {
7129 hs->lip_count = -1;
7130 hs->nos_count = (phba->fc_eventTag >> 1);
7131 hs->nos_count -= lso->link_events;
7132 }
7133
7134 hs->dumped_frames = -1;
7135
7136 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
7137
7138 mempool_free(pmboxq, phba->mbox_mem_pool);
7139
7140 return hs;
7141 }
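/*
 * Illustrative sketch (not part of the driver): the counters filled in by
 * lpfc_get_stats() are exported by the FC transport as individual files
 * under /sys/class/fc_host/hostN/statistics/, printed as hexadecimal
 * values. The path and host number below are assumptions for the example.
 *
 *	#include <inttypes.h>
 *	#include <stdio.h>
 *
 *	static uint64_t read_fc_stat(const char *name)
 *	{
 *		char path[160];
 *		uint64_t val = 0;
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/class/fc_host/host0/statistics/%s", name);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return 0;
 *		if (fscanf(f, "%" SCNx64, &val) != 1)
 *			val = 0;
 *		fclose(f);
 *		return val;
 *	}
 *
 *	// e.g. read_fc_stat("link_failure_count");
 */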
7142
7143 /**
7144 * lpfc_reset_stats - Copy the adapter link stats information
7145 * @shost: kernel scsi host pointer.
7146 **/
7147 static void
7148 lpfc_reset_stats(struct Scsi_Host *shost)
7149 {
7150 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7151 struct lpfc_hba *phba = vport->phba;
7152 struct lpfc_sli *psli = &phba->sli;
7153 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
7154 LPFC_MBOXQ_t *pmboxq;
7155 MAILBOX_t *pmb;
7156 int rc = 0;
7157
7158 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
7159 return;
7160
7161 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7162 if (!pmboxq)
7163 return;
7164 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
7165
7166 pmb = &pmboxq->u.mb;
7167 pmb->mbxCommand = MBX_READ_STATUS;
7168 pmb->mbxOwner = OWN_HOST;
7169 pmb->un.varWords[0] = 0x1; /* reset request */
7170 pmboxq->ctx_buf = NULL;
7171 pmboxq->vport = vport;
7172
7173 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
7174 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
7175 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
7176 if (rc != MBX_SUCCESS) {
7177 mempool_free(pmboxq, phba->mbox_mem_pool);
7178 return;
7179 }
7180 } else {
7181 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7182 if (rc != MBX_SUCCESS) {
7183 if (rc != MBX_TIMEOUT)
7184 mempool_free(pmboxq, phba->mbox_mem_pool);
7185 return;
7186 }
7187 }
7188
7189 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
7190 pmb->mbxCommand = MBX_READ_LNK_STAT;
7191 pmb->mbxOwner = OWN_HOST;
7192 pmboxq->ctx_buf = NULL;
7193 pmboxq->vport = vport;
7194
7195 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
7196 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
7197 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
7198 if (rc != MBX_SUCCESS) {
7199 mempool_free(pmboxq, phba->mbox_mem_pool);
7200 return;
7201 }
7202 } else {
7203 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7204 if (rc != MBX_SUCCESS) {
7205 if (rc != MBX_TIMEOUT)
7206 mempool_free(pmboxq, phba->mbox_mem_pool);
7207 return;
7208 }
7209 }
7210
7211 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
7212 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
7213 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
7214 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
7215 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
7216 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
7217 lso->error_frames = pmb->un.varRdLnk.crcCnt;
7218 if (phba->hba_flag & HBA_FCOE_MODE)
7219 lso->link_events = (phba->link_events >> 1);
7220 else
7221 lso->link_events = (phba->fc_eventTag >> 1);
7222
7223 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
7224 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
7225
7226 memset(&shost_to_fc_host(shost)->fpin_stats, 0,
7227 sizeof(shost_to_fc_host(shost)->fpin_stats));
7228
7229 psli->stats_start = ktime_get_seconds();
7230
7231 mempool_free(pmboxq, phba->mbox_mem_pool);
7232
7233 return;
7234 }
7235
7236 /*
7237 * The LPFC driver treats linkdown handling as target loss events so there
7238 * are no sysfs handlers for link_down_tmo.
7239 */
7240
7241 /**
7242 * lpfc_get_node_by_target - Return the nodelist for a target
7243 * @starget: kernel scsi target pointer.
7244 *
7245 * Returns:
7246 * address of the node list if found
7247 * NULL target not found
7248 **/
7249 static struct lpfc_nodelist *
7250 lpfc_get_node_by_target(struct scsi_target *starget)
7251 {
7252 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
7253 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7254 struct lpfc_nodelist *ndlp;
7255
7256 spin_lock_irq(shost->host_lock);
7257 	/* Search for the mapped target ID */
7258 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
7259 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
7260 starget->id == ndlp->nlp_sid) {
7261 spin_unlock_irq(shost->host_lock);
7262 return ndlp;
7263 }
7264 }
7265 spin_unlock_irq(shost->host_lock);
7266 return NULL;
7267 }
7268
7269 /**
7270 * lpfc_get_starget_port_id - Set the target port id to the ndlp DID or -1
7271 * @starget: kernel scsi target pointer.
7272 **/
7273 static void
7274 lpfc_get_starget_port_id(struct scsi_target *starget)
7275 {
7276 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7277
7278 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
7279 }
7280
7281 /**
7282 * lpfc_get_starget_node_name - Set the target node name
7283 * @starget: kernel scsi target pointer.
7284 *
7285 * Description: Set the target node name to the ndlp node name wwn or zero.
7286 **/
7287 static void
7288 lpfc_get_starget_node_name(struct scsi_target *starget)
7289 {
7290 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7291
7292 fc_starget_node_name(starget) =
7293 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
7294 }
7295
7296 /**
7297 * lpfc_get_starget_port_name - Set the target port name
7298 * @starget: kernel scsi target pointer.
7299 *
7300 * Description: set the target port name to the ndlp port name wwn or zero.
7301 **/
7302 static void
7303 lpfc_get_starget_port_name(struct scsi_target *starget)
7304 {
7305 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7306
7307 fc_starget_port_name(starget) =
7308 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
7309 }
7310
7311 /**
7312 * lpfc_set_rport_loss_tmo - Set the rport dev loss tmo
7313 * @rport: fc rport address.
7314 * @timeout: new value for dev loss tmo.
7315 *
7316 * Description:
7317  * If timeout is non-zero, set dev_loss_tmo to timeout; otherwise set
7318  * dev_loss_tmo to one.
7319 **/
7320 static void
7321 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
7322 {
7323 struct lpfc_rport_data *rdata = rport->dd_data;
7324 struct lpfc_nodelist *ndlp = rdata->pnode;
7325 #if (IS_ENABLED(CONFIG_NVME_FC))
7326 struct lpfc_nvme_rport *nrport = NULL;
7327 #endif
7328
7329 if (timeout)
7330 rport->dev_loss_tmo = timeout;
7331 else
7332 rport->dev_loss_tmo = 1;
7333
7334 if (!ndlp) {
7335 dev_info(&rport->dev, "Cannot find remote node to "
7336 "set rport dev loss tmo, port_id x%x\n",
7337 rport->port_id);
7338 return;
7339 }
7340
7341 #if (IS_ENABLED(CONFIG_NVME_FC))
7342 nrport = lpfc_ndlp_get_nrport(ndlp);
7343
7344 if (nrport && nrport->remoteport)
7345 nvme_fc_set_remoteport_devloss(nrport->remoteport,
7346 rport->dev_loss_tmo);
7347 #endif
7348 }
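/*
 * Illustrative sketch (not part of the driver): dev_loss_tmo can be tuned
 * per remote port through the FC transport, which in turn calls
 * lpfc_set_rport_loss_tmo() above. The rport name below is hypothetical;
 * real names follow the rport-H:B-R pattern assigned by the transport.
 *
 *	#include <stdio.h>
 *
 *	static int set_dev_loss_tmo(const char *rport, unsigned int secs)
 *	{
 *		char path[160];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/class/fc_remote_ports/%s/dev_loss_tmo", rport);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fprintf(f, "%u\n", secs);
 *		return fclose(f);
 *	}
 *
 *	// e.g. set_dev_loss_tmo("rport-0:0-1", 60);
 */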
7349
7350 /*
7351 * lpfc_rport_show_function - Return rport target information
7352 *
7353 * Description:
7354  * Macro that uses @field to generate a show function named lpfc_show_rport_##field.
7355 *
7356 * lpfc_show_rport_##field: returns the bytes formatted in buf
7357 * @cdev: class converted to an fc_rport.
7358 * @buf: on return contains the target_field or zero.
7359 *
7360 * Returns: size of formatted string.
7361 **/
7362 #define lpfc_rport_show_function(field, format_string, sz, cast) \
7363 static ssize_t \
7364 lpfc_show_rport_##field (struct device *dev, \
7365 struct device_attribute *attr, \
7366 char *buf) \
7367 { \
7368 struct fc_rport *rport = transport_class_to_rport(dev); \
7369 struct lpfc_rport_data *rdata = rport->hostdata; \
7370 return scnprintf(buf, sz, format_string, \
7371 (rdata->target) ? cast rdata->target->field : 0); \
7372 }
7373
7374 #define lpfc_rport_rd_attr(field, format_string, sz) \
7375 lpfc_rport_show_function(field, format_string, sz, ) \
7376 static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
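/*
 * For reference, an invocation such as lpfc_rport_rd_attr(field_x, "%d\n",
 * 20) would expand to roughly the following; field_x is a hypothetical
 * member name used only to illustrate the generated shape:
 *
 *	static ssize_t
 *	lpfc_show_rport_field_x(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		struct fc_rport *rport = transport_class_to_rport(dev);
 *		struct lpfc_rport_data *rdata = rport->hostdata;
 *
 *		return scnprintf(buf, 20, "%d\n",
 *				 (rdata->target) ? rdata->target->field_x : 0);
 *	}
 *	static FC_RPORT_ATTR(field_x, S_IRUGO,
 *			     lpfc_show_rport_field_x, NULL)
 */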
7377
7378 /**
7379 * lpfc_set_vport_symbolic_name - Set the vport's symbolic name
7380  * @fc_vport: The fc_vport whose symbolic name has been changed.
7381 *
7382 * Description:
7383 * This function is called by the transport after the @fc_vport's symbolic name
7384 * has been changed. This function re-registers the symbolic name with the
7385 * switch to propagate the change into the fabric if the vport is active.
7386 **/
7387 static void
7388 lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
7389 {
7390 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
7391
7392 if (vport->port_state == LPFC_VPORT_READY)
7393 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
7394 }
7395
7396 /**
7397 * lpfc_hba_log_verbose_init - Set hba's log verbose level
7398 * @phba: Pointer to lpfc_hba struct.
7399 * @verbose: Verbose level to set.
7400 *
7401  * This function is called by the lpfc_get_cfgparam() routine to copy the
7402  * module parameter lpfc_log_verbose into the @phba cfg_log_verbose field,
7403  * so that log messages honor the module's lpfc_log_verbose setting before
7404  * any HBA port or vport is created.
7405 **/
7406 static void
7407 lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
7408 {
7409 phba->cfg_log_verbose = verbose;
7410 }
7411
7412 struct fc_function_template lpfc_transport_functions = {
7413 /* fixed attributes the driver supports */
7414 .show_host_node_name = 1,
7415 .show_host_port_name = 1,
7416 .show_host_supported_classes = 1,
7417 .show_host_supported_fc4s = 1,
7418 .show_host_supported_speeds = 1,
7419 .show_host_maxframe_size = 1,
7420
7421 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7422 .show_host_symbolic_name = 1,
7423
7424 /* dynamic attributes the driver supports */
7425 .get_host_port_id = lpfc_get_host_port_id,
7426 .show_host_port_id = 1,
7427
7428 .get_host_port_type = lpfc_get_host_port_type,
7429 .show_host_port_type = 1,
7430
7431 .get_host_port_state = lpfc_get_host_port_state,
7432 .show_host_port_state = 1,
7433
7434 /* active_fc4s is shown but doesn't change (thus no get function) */
7435 .show_host_active_fc4s = 1,
7436
7437 .get_host_speed = lpfc_get_host_speed,
7438 .show_host_speed = 1,
7439
7440 .get_host_fabric_name = lpfc_get_host_fabric_name,
7441 .show_host_fabric_name = 1,
7442
7443 /*
7444 * The LPFC driver treats linkdown handling as target loss events
7445 * so there are no sysfs handlers for link_down_tmo.
7446 */
7447
7448 .get_fc_host_stats = lpfc_get_stats,
7449 .reset_fc_host_stats = lpfc_reset_stats,
7450
7451 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7452 .show_rport_maxframe_size = 1,
7453 .show_rport_supported_classes = 1,
7454
7455 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7456 .show_rport_dev_loss_tmo = 1,
7457
7458 .get_starget_port_id = lpfc_get_starget_port_id,
7459 .show_starget_port_id = 1,
7460
7461 .get_starget_node_name = lpfc_get_starget_node_name,
7462 .show_starget_node_name = 1,
7463
7464 .get_starget_port_name = lpfc_get_starget_port_name,
7465 .show_starget_port_name = 1,
7466
7467 .issue_fc_host_lip = lpfc_issue_lip,
7468 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7469 .terminate_rport_io = lpfc_terminate_rport_io,
7470
7471 .dd_fcvport_size = sizeof(struct lpfc_vport *),
7472
7473 .vport_disable = lpfc_vport_disable,
7474
7475 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7476
7477 .bsg_request = lpfc_bsg_request,
7478 .bsg_timeout = lpfc_bsg_timeout,
7479 };
7480
7481 struct fc_function_template lpfc_vport_transport_functions = {
7482 /* fixed attributes the driver supports */
7483 .show_host_node_name = 1,
7484 .show_host_port_name = 1,
7485 .show_host_supported_classes = 1,
7486 .show_host_supported_fc4s = 1,
7487 .show_host_supported_speeds = 1,
7488 .show_host_maxframe_size = 1,
7489
7490 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7491 .show_host_symbolic_name = 1,
7492
7493 /* dynamic attributes the driver supports */
7494 .get_host_port_id = lpfc_get_host_port_id,
7495 .show_host_port_id = 1,
7496
7497 .get_host_port_type = lpfc_get_host_port_type,
7498 .show_host_port_type = 1,
7499
7500 .get_host_port_state = lpfc_get_host_port_state,
7501 .show_host_port_state = 1,
7502
7503 /* active_fc4s is shown but doesn't change (thus no get function) */
7504 .show_host_active_fc4s = 1,
7505
7506 .get_host_speed = lpfc_get_host_speed,
7507 .show_host_speed = 1,
7508
7509 .get_host_fabric_name = lpfc_get_host_fabric_name,
7510 .show_host_fabric_name = 1,
7511
7512 /*
7513 * The LPFC driver treats linkdown handling as target loss events
7514 * so there are no sysfs handlers for link_down_tmo.
7515 */
7516
7517 .get_fc_host_stats = lpfc_get_stats,
7518 .reset_fc_host_stats = lpfc_reset_stats,
7519
7520 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7521 .show_rport_maxframe_size = 1,
7522 .show_rport_supported_classes = 1,
7523
7524 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7525 .show_rport_dev_loss_tmo = 1,
7526
7527 .get_starget_port_id = lpfc_get_starget_port_id,
7528 .show_starget_port_id = 1,
7529
7530 .get_starget_node_name = lpfc_get_starget_node_name,
7531 .show_starget_node_name = 1,
7532
7533 .get_starget_port_name = lpfc_get_starget_port_name,
7534 .show_starget_port_name = 1,
7535
7536 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7537 .terminate_rport_io = lpfc_terminate_rport_io,
7538
7539 .vport_disable = lpfc_vport_disable,
7540
7541 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7542 };
7543
7544 /**
7545 * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
7546 * Mode
7547 * @phba: lpfc_hba pointer.
7548 **/
7549 static void
7550 lpfc_get_hba_function_mode(struct lpfc_hba *phba)
7551 {
7552 /* If the adapter supports FCoE mode */
7553 switch (phba->pcidev->device) {
7554 case PCI_DEVICE_ID_SKYHAWK:
7555 case PCI_DEVICE_ID_SKYHAWK_VF:
7556 case PCI_DEVICE_ID_LANCER_FCOE:
7557 case PCI_DEVICE_ID_LANCER_FCOE_VF:
7558 case PCI_DEVICE_ID_ZEPHYR_DCSP:
7559 case PCI_DEVICE_ID_HORNET:
7560 case PCI_DEVICE_ID_TIGERSHARK:
7561 case PCI_DEVICE_ID_TOMCAT:
7562 phba->hba_flag |= HBA_FCOE_MODE;
7563 break;
7564 default:
7565 /* for others, clear the flag */
7566 phba->hba_flag &= ~HBA_FCOE_MODE;
7567 }
7568 }
7569
7570 /**
7571 * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
7572 * @phba: lpfc_hba pointer.
7573 **/
7574 void
7575 lpfc_get_cfgparam(struct lpfc_hba *phba)
7576 {
7577 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
7578 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7579 lpfc_ns_query_init(phba, lpfc_ns_query);
7580 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
7581 lpfc_cr_delay_init(phba, lpfc_cr_delay);
7582 lpfc_cr_count_init(phba, lpfc_cr_count);
7583 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
7584 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
7585 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
7586 lpfc_ack0_init(phba, lpfc_ack0);
7587 lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
7588 lpfc_topology_init(phba, lpfc_topology);
7589 lpfc_link_speed_init(phba, lpfc_link_speed);
7590 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
7591 lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
7592 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
7593 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
7594 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
7595 lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp);
7596 lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
7597 lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
7598 lpfc_use_msi_init(phba, lpfc_use_msi);
7599 lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
7600 lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
7601 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7602 lpfc_force_rscn_init(phba, lpfc_force_rscn);
7603 lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7604 lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
7605 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
7606 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
7607 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
7608
7609 lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
7610 /* VMID Inits */
7611 lpfc_max_vmid_init(phba, lpfc_max_vmid);
7612 lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout);
7613 lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header);
7614 lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging);
7615 if (phba->sli_rev != LPFC_SLI_REV4)
7616 phba->cfg_EnableXLane = 0;
7617 lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
7618
7619 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
7620 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
7621 phba->cfg_oas_lun_state = 0;
7622 phba->cfg_oas_lun_status = 0;
7623 phba->cfg_oas_flags = 0;
7624 phba->cfg_oas_priority = 0;
7625 lpfc_enable_bg_init(phba, lpfc_enable_bg);
7626 lpfc_prot_mask_init(phba, lpfc_prot_mask);
7627 lpfc_prot_guard_init(phba, lpfc_prot_guard);
7628 if (phba->sli_rev == LPFC_SLI_REV4)
7629 phba->cfg_poll = 0;
7630 else
7631 phba->cfg_poll = lpfc_poll;
7632
7633 /* Get the function mode */
7634 lpfc_get_hba_function_mode(phba);
7635
7636 /* BlockGuard allowed for FC only. */
7637 if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
7638 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7639 "0581 BlockGuard feature not supported\n");
7640 /* If set, clear the BlockGuard support param */
7641 phba->cfg_enable_bg = 0;
7642 } else if (phba->cfg_enable_bg) {
7643 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7644 }
7645
7646 lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
7647
7648 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
7649 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
7650 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
7651
7652 /* Initialize first burst. Target vs Initiator are different. */
7653 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7654 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7655 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7656 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7657 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7658 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
7659 lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
7660 lpfc_enable_mi_init(phba, lpfc_enable_mi);
7661
7662 phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF;
7663 phba->cmf_active_mode = LPFC_CFG_OFF;
7664 if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
7665 lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
7666 lpfc_fabric_cgn_frequency = 100; /* 100 ms default */
7667
7668 if (phba->sli_rev != LPFC_SLI_REV4) {
7669 /* NVME only supported on SLI4 */
7670 phba->nvmet_support = 0;
7671 phba->cfg_nvmet_mrq = 0;
7672 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
7673 phba->cfg_enable_bbcr = 0;
7674 phba->cfg_xri_rebalancing = 0;
7675 } else {
7676 /* We MUST have FCP support */
7677 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
7678 phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
7679 }
7680
7681 phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
7682
7683 phba->cfg_enable_pbde = 0;
7684
7685 /* A value of 0 means use the number of CPUs found in the system */
7686 if (phba->cfg_hdw_queue == 0)
7687 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7688 if (phba->cfg_irq_chann == 0)
7689 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7690 if (phba->cfg_irq_chann > phba->cfg_hdw_queue &&
7691 phba->sli_rev == LPFC_SLI_REV4)
7692 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7693
7694 phba->cfg_soft_wwnn = 0L;
7695 phba->cfg_soft_wwpn = 0L;
7696 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
7697 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
7698 lpfc_aer_support_init(phba, lpfc_aer_support);
7699 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
7700 lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
7701 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
7702 lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
7703 lpfc_sli_mode_init(phba, lpfc_sli_mode);
7704 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7705 lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7706 lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7707 lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7708
7709 return;
7710 }
7711
7712 /**
7713 * lpfc_nvme_mod_param_dep - Adjust module parameter value based on
7714 * dependencies between protocols and roles.
7715 * @phba: lpfc_hba pointer.
7716 **/
7717 void
7718 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
7719 {
7720 int logit = 0;
7721
7722 if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
7723 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7724 logit = 1;
7725 }
7726 if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
7727 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7728 logit = 1;
7729 }
7730 if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
7731 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7732 logit = 1;
7733 }
7734 if (logit)
7735 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7736 "2006 Reducing Queues - CPU limitation: "
7737 "IRQ %d HDWQ %d\n",
7738 phba->cfg_irq_chann,
7739 phba->cfg_hdw_queue);
7740
7741 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
7742 phba->nvmet_support) {
7743 phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
7744
7745 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
7746 "6013 %s x%x fb_size x%x, fb_max x%x\n",
7747 "NVME Target PRLI ACC enable_fb ",
7748 phba->cfg_nvme_enable_fb,
7749 phba->cfg_nvmet_fb_size,
7750 LPFC_NVMET_FB_SZ_MAX);
7751
7752 if (phba->cfg_nvme_enable_fb == 0)
7753 phba->cfg_nvmet_fb_size = 0;
7754 else {
7755 if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
7756 phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
7757 }
7758
7759 if (!phba->cfg_nvmet_mrq)
7760 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7761
7762 /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
7763 if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
7764 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7765 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
7766 "6018 Adjust lpfc_nvmet_mrq to %d\n",
7767 phba->cfg_nvmet_mrq);
7768 }
7769 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7770 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7771
7772 } else {
7773 /* Not NVME Target mode. Turn off Target parameters. */
7774 phba->nvmet_support = 0;
7775 phba->cfg_nvmet_mrq = 0;
7776 phba->cfg_nvmet_fb_size = 0;
7777 }
7778 }
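/*
 * Worked example of the clamping above (values are hypothetical): on a
 * system with 64 present CPUs, lpfc_hdw_queue=128 and lpfc_irq_chann=96
 * would each first be reduced to 64 by the CPU limit, and lpfc_irq_chann
 * is then additionally capped so it never exceeds lpfc_hdw_queue.
 */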
7779
7780 /**
7781 * lpfc_get_vport_cfgparam - Used during port create, init the vport structure
7782 * @vport: lpfc_vport pointer.
7783 **/
7784 void
7785 lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
7786 {
7787 lpfc_log_verbose_init(vport, lpfc_log_verbose);
7788 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
7789 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
7790 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
7791 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
7792 lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
7793 lpfc_restrict_login_init(vport, lpfc_restrict_login);
7794 lpfc_fcp_class_init(vport, lpfc_fcp_class);
7795 lpfc_use_adisc_init(vport, lpfc_use_adisc);
7796 lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
7797 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
7798 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
7799 lpfc_max_luns_init(vport, lpfc_max_luns);
7800 lpfc_scan_down_init(vport, lpfc_scan_down);
7801 lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
7802 return;
7803 }
7804