1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/kthread.h>
11 #include <linux/vmalloc.h>
12 #include <linux/slab.h>
13 #include <linux/delay.h>
14
15 static int qla24xx_vport_disable(struct fc_vport *, bool);
16
17 /* SYSFS attributes --------------------------------------------------------- */
18
19 static ssize_t
20 qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
21 struct bin_attribute *bin_attr,
22 char *buf, loff_t off, size_t count)
23 {
24 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
25 struct device, kobj)));
26 struct qla_hw_data *ha = vha->hw;
27 int rval = 0;
28
29 if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
30 return 0;
31
32 mutex_lock(&ha->optrom_mutex);
33 if (IS_P3P_TYPE(ha)) {
34 if (off < ha->md_template_size) {
35 rval = memory_read_from_buffer(buf, count,
36 &off, ha->md_tmplt_hdr, ha->md_template_size);
37 } else {
38 off -= ha->md_template_size;
39 rval = memory_read_from_buffer(buf, count,
40 &off, ha->md_dump, ha->md_dump_size);
41 }
42 } else if (ha->mctp_dumped && ha->mctp_dump_reading) {
43 rval = memory_read_from_buffer(buf, count, &off, ha->mctp_dump,
44 MCTP_DUMP_SIZE);
45 } else if (ha->fw_dump_reading) {
46 rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump,
47 ha->fw_dump_len);
48 } else {
49 rval = 0;
50 }
51 mutex_unlock(&ha->optrom_mutex);
52 return rval;
53 }
54
55 static ssize_t
56 qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
57 struct bin_attribute *bin_attr,
58 char *buf, loff_t off, size_t count)
59 {
60 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
61 struct device, kobj)));
62 struct qla_hw_data *ha = vha->hw;
63 int reading;
64
65 if (off != 0)
66 return (0);
67
68 reading = simple_strtol(buf, NULL, 10);
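/*
 * Control codes accepted here (mirrors the switch below):
 *   0 - clear the firmware dump (and re-arm minidump on P3P parts)
 *   1 - expose an existing raw firmware dump for reading
 *   2 - (re)allocate the firmware dump buffer
 *   3 - force a system error / take reset ownership
 *   4 - report whether MiniDump is supported by this firmware (P3P)
 *   5 - request an ISP abort (P3P)
 *   6 - clear the MCTP dump
 *   7 - expose an existing MCTP dump for reading
 */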
69 switch (reading) {
70 case 0:
71 if (!ha->fw_dump_reading)
72 break;
73
74 ql_log(ql_log_info, vha, 0x705d,
75 "Firmware dump cleared on (%ld).\n", vha->host_no);
76
77 if (IS_P3P_TYPE(ha)) {
78 qla82xx_md_free(vha);
79 qla82xx_md_prep(vha);
80 }
81 ha->fw_dump_reading = 0;
82 ha->fw_dumped = 0;
83 break;
84 case 1:
85 if (ha->fw_dumped && !ha->fw_dump_reading) {
86 ha->fw_dump_reading = 1;
87
88 ql_log(ql_log_info, vha, 0x705e,
89 "Raw firmware dump ready for read on (%ld).\n",
90 vha->host_no);
91 }
92 break;
93 case 2:
94 qla2x00_alloc_fw_dump(vha);
95 break;
96 case 3:
97 if (IS_QLA82XX(ha)) {
98 qla82xx_idc_lock(ha);
99 qla82xx_set_reset_owner(vha);
100 qla82xx_idc_unlock(ha);
101 } else if (IS_QLA8044(ha)) {
102 qla8044_idc_lock(ha);
103 qla82xx_set_reset_owner(vha);
104 qla8044_idc_unlock(ha);
105 } else
106 qla2x00_system_error(vha);
107 break;
108 case 4:
109 if (IS_P3P_TYPE(ha)) {
110 if (ha->md_tmplt_hdr)
111 ql_dbg(ql_dbg_user, vha, 0x705b,
112 "MiniDump supported with this firmware.\n");
113 else
114 ql_dbg(ql_dbg_user, vha, 0x709d,
115 "MiniDump not supported with this firmware.\n");
116 }
117 break;
118 case 5:
119 if (IS_P3P_TYPE(ha))
120 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
121 break;
122 case 6:
123 if (!ha->mctp_dump_reading)
124 break;
125 ql_log(ql_log_info, vha, 0x70c1,
126 "MCTP dump cleared on (%ld).\n", vha->host_no);
127 ha->mctp_dump_reading = 0;
128 ha->mctp_dumped = 0;
129 break;
130 case 7:
131 if (ha->mctp_dumped && !ha->mctp_dump_reading) {
132 ha->mctp_dump_reading = 1;
133 ql_log(ql_log_info, vha, 0x70c2,
134 "Raw mctp dump ready for read on (%ld).\n",
135 vha->host_no);
136 }
137 break;
138 }
139 return count;
140 }
141
142 static struct bin_attribute sysfs_fw_dump_attr = {
143 .attr = {
144 .name = "fw_dump",
145 .mode = S_IRUSR | S_IWUSR,
146 },
147 .size = 0,
148 .read = qla2x00_sysfs_read_fw_dump,
149 .write = qla2x00_sysfs_write_fw_dump,
150 };
151
152 static ssize_t
153 qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
154 struct bin_attribute *bin_attr,
155 char *buf, loff_t off, size_t count)
156 {
157 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
158 struct device, kobj)));
159 struct qla_hw_data *ha = vha->hw;
160 uint32_t faddr;
161 struct active_regions active_regions = { };
162
163 if (!capable(CAP_SYS_ADMIN))
164 return 0;
165
166 mutex_lock(&ha->optrom_mutex);
167 if (qla2x00_chip_is_down(vha)) {
168 mutex_unlock(&ha->optrom_mutex);
169 return -EAGAIN;
170 }
171
172 if (!IS_NOCACHE_VPD_TYPE(ha)) {
173 mutex_unlock(&ha->optrom_mutex);
174 goto skip;
175 }
176
177 faddr = ha->flt_region_nvram;
178 if (IS_QLA28XX(ha)) {
qla28xx_get_aux_images(vha, &active_regions);
179 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
180 faddr = ha->flt_region_nvram_sec;
181 }
182 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
183
184 mutex_unlock(&ha->optrom_mutex);
185
186 skip:
187 return memory_read_from_buffer(buf, count, &off, ha->nvram,
188 ha->nvram_size);
189 }
190
191 static ssize_t
192 qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
193 struct bin_attribute *bin_attr,
194 char *buf, loff_t off, size_t count)
195 {
196 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
197 struct device, kobj)));
198 struct qla_hw_data *ha = vha->hw;
199 uint16_t cnt;
200
201 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
202 !ha->isp_ops->write_nvram)
203 return -EINVAL;
204
205 /* Checksum NVRAM. */
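/*
 * The final 32-bit word (FWI2-capable) or byte (legacy) is rewritten
 * with the two's complement of the running sum so the whole NVRAM
 * image checksums to zero.
 */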
206 if (IS_FWI2_CAPABLE(ha)) {
207 uint32_t *iter;
208 uint32_t chksum;
209
210 iter = (uint32_t *)buf;
211 chksum = 0;
212 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++)
213 chksum += le32_to_cpu(*iter);
214 chksum = ~chksum + 1;
215 *iter = cpu_to_le32(chksum);
216 } else {
217 uint8_t *iter;
218 uint8_t chksum;
219
220 iter = (uint8_t *)buf;
221 chksum = 0;
222 for (cnt = 0; cnt < count - 1; cnt++)
223 chksum += *iter++;
224 chksum = ~chksum + 1;
225 *iter = chksum;
226 }
227
228 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
229 ql_log(ql_log_warn, vha, 0x705f,
230 "HBA not online, failing NVRAM update.\n");
231 return -EAGAIN;
232 }
233
234 mutex_lock(&ha->optrom_mutex);
235 if (qla2x00_chip_is_down(vha)) {
236 mutex_unlock(&ha->optrom_mutex);
237 return -EAGAIN;
238 }
239
240 /* Write NVRAM. */
241 ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count);
242 ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base,
243 count);
244 mutex_unlock(&ha->optrom_mutex);
245
246 ql_dbg(ql_dbg_user, vha, 0x7060,
247 "Setting ISP_ABORT_NEEDED\n");
248 /* NVRAM settings take effect immediately. */
249 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
250 qla2xxx_wake_dpc(vha);
251 qla2x00_wait_for_chip_reset(vha);
252
253 return count;
254 }
255
256 static struct bin_attribute sysfs_nvram_attr = {
257 .attr = {
258 .name = "nvram",
259 .mode = S_IRUSR | S_IWUSR,
260 },
261 .size = 512,
262 .read = qla2x00_sysfs_read_nvram,
263 .write = qla2x00_sysfs_write_nvram,
264 };
265
266 static ssize_t
267 qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
268 struct bin_attribute *bin_attr,
269 char *buf, loff_t off, size_t count)
270 {
271 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
272 struct device, kobj)));
273 struct qla_hw_data *ha = vha->hw;
274 ssize_t rval = 0;
275
276 mutex_lock(&ha->optrom_mutex);
277
278 if (ha->optrom_state != QLA_SREADING)
279 goto out;
280
281 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
282 ha->optrom_region_size);
283
284 out:
285 mutex_unlock(&ha->optrom_mutex);
286
287 return rval;
288 }
289
290 static ssize_t
291 qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
292 struct bin_attribute *bin_attr,
293 char *buf, loff_t off, size_t count)
294 {
295 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
296 struct device, kobj)));
297 struct qla_hw_data *ha = vha->hw;
298
299 mutex_lock(&ha->optrom_mutex);
300
301 if (ha->optrom_state != QLA_SWRITING) {
302 mutex_unlock(&ha->optrom_mutex);
303 return -EINVAL;
304 }
305 if (off > ha->optrom_region_size) {
306 mutex_unlock(&ha->optrom_mutex);
307 return -ERANGE;
308 }
309 if (off + count > ha->optrom_region_size)
310 count = ha->optrom_region_size - off;
311
312 memcpy(&ha->optrom_buffer[off], buf, count);
313 mutex_unlock(&ha->optrom_mutex);
314
315 return count;
316 }
317
318 static struct bin_attribute sysfs_optrom_attr = {
319 .attr = {
320 .name = "optrom",
321 .mode = S_IRUSR | S_IWUSR,
322 },
323 .size = 0,
324 .read = qla2x00_sysfs_read_optrom,
325 .write = qla2x00_sysfs_write_optrom,
326 };
327
328 static ssize_t
329 qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
330 struct bin_attribute *bin_attr,
331 char *buf, loff_t off, size_t count)
332 {
333 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
334 struct device, kobj)));
335 struct qla_hw_data *ha = vha->hw;
336 uint32_t start = 0;
337 uint32_t size = ha->optrom_size;
338 int val, valid;
339 ssize_t rval = count;
340
341 if (off)
342 return -EINVAL;
343
344 if (unlikely(pci_channel_offline(ha->pdev)))
345 return -EAGAIN;
346
347 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
348 return -EINVAL;
349 if (start > ha->optrom_size)
350 return -EINVAL;
351 if (size > ha->optrom_size - start)
352 size = ha->optrom_size - start;
353
354 mutex_lock(&ha->optrom_mutex);
355 if (qla2x00_chip_is_down(vha)) {
356 mutex_unlock(&ha->optrom_mutex);
357 return -EAGAIN;
358 }
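/*
 * optrom_ctl protocol (see the cases below):
 *   0            - release the staging buffer
 *   1:start:size - allocate a buffer and read a flash region into it
 *   2:start:size - allocate a buffer to stage a flash region write
 *   3            - commit the staged buffer to flash
 */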
359 switch (val) {
360 case 0:
361 if (ha->optrom_state != QLA_SREADING &&
362 ha->optrom_state != QLA_SWRITING) {
363 rval = -EINVAL;
364 goto out;
365 }
366 ha->optrom_state = QLA_SWAITING;
367
368 ql_dbg(ql_dbg_user, vha, 0x7061,
369 "Freeing flash region allocation -- 0x%x bytes.\n",
370 ha->optrom_region_size);
371
372 vfree(ha->optrom_buffer);
373 ha->optrom_buffer = NULL;
374 break;
375 case 1:
376 if (ha->optrom_state != QLA_SWAITING) {
377 rval = -EINVAL;
378 goto out;
379 }
380
381 ha->optrom_region_start = start;
382 ha->optrom_region_size = size;
383
384 ha->optrom_state = QLA_SREADING;
385 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
386 if (ha->optrom_buffer == NULL) {
387 ql_log(ql_log_warn, vha, 0x7062,
388 "Unable to allocate memory for optrom retrieval "
389 "(%x).\n", ha->optrom_region_size);
390
391 ha->optrom_state = QLA_SWAITING;
392 rval = -ENOMEM;
393 goto out;
394 }
395
396 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
397 ql_log(ql_log_warn, vha, 0x7063,
398 "HBA not online, failing NVRAM update.\n");
399 rval = -EAGAIN;
400 goto out;
401 }
402
403 ql_dbg(ql_dbg_user, vha, 0x7064,
404 "Reading flash region -- 0x%x/0x%x.\n",
405 ha->optrom_region_start, ha->optrom_region_size);
406
407 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
408 ha->optrom_region_start, ha->optrom_region_size);
409 break;
410 case 2:
411 if (ha->optrom_state != QLA_SWAITING) {
412 rval = -EINVAL;
413 goto out;
414 }
415
416 /*
417 * We need to be more restrictive on which FLASH regions are
418 * allowed to be updated via user-space. Regions accessible
419 * via this method include:
420 *
421 * ISP21xx/ISP22xx/ISP23xx type boards:
422 *
423 * 0x000000 -> 0x020000 -- Boot code.
424 *
425 * ISP2322/ISP24xx type boards:
426 *
427 * 0x000000 -> 0x07ffff -- Boot code.
428 * 0x080000 -> 0x0fffff -- Firmware.
429 *
430 * ISP25xx type boards:
431 *
432 * 0x000000 -> 0x07ffff -- Boot code.
433 * 0x080000 -> 0x0fffff -- Firmware.
434 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
435 *
436 * > ISP25xx type boards:
437 *
438 * None -- should go through BSG.
439 */
440 valid = 0;
441 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
442 valid = 1;
443 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
444 valid = 1;
445 if (!valid) {
446 ql_log(ql_log_warn, vha, 0x7065,
447 "Invalid start region 0x%x/0x%x.\n", start, size);
448 rval = -EINVAL;
449 goto out;
450 }
451
452 ha->optrom_region_start = start;
453 ha->optrom_region_size = size;
454
455 ha->optrom_state = QLA_SWRITING;
456 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
457 if (ha->optrom_buffer == NULL) {
458 ql_log(ql_log_warn, vha, 0x7066,
459 "Unable to allocate memory for optrom update "
460 "(%x)\n", ha->optrom_region_size);
461
462 ha->optrom_state = QLA_SWAITING;
463 rval = -ENOMEM;
464 goto out;
465 }
466
467 ql_dbg(ql_dbg_user, vha, 0x7067,
468 "Staging flash region write -- 0x%x/0x%x.\n",
469 ha->optrom_region_start, ha->optrom_region_size);
470
471 break;
472 case 3:
473 if (ha->optrom_state != QLA_SWRITING) {
474 rval = -EINVAL;
475 goto out;
476 }
477
478 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
479 ql_log(ql_log_warn, vha, 0x7068,
480 "HBA not online, failing flash update.\n");
481 rval = -EAGAIN;
482 goto out;
483 }
484
485 ql_dbg(ql_dbg_user, vha, 0x7069,
486 "Writing flash region -- 0x%x/0x%x.\n",
487 ha->optrom_region_start, ha->optrom_region_size);
488
489 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
490 ha->optrom_region_start, ha->optrom_region_size);
491 if (rval)
492 rval = -EIO;
493 break;
494 default:
495 rval = -EINVAL;
496 }
497
498 out:
499 mutex_unlock(&ha->optrom_mutex);
500 return rval;
501 }
502
503 static struct bin_attribute sysfs_optrom_ctl_attr = {
504 .attr = {
505 .name = "optrom_ctl",
506 .mode = S_IWUSR,
507 },
508 .size = 0,
509 .write = qla2x00_sysfs_write_optrom_ctl,
510 };
511
512 static ssize_t
513 qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
514 struct bin_attribute *bin_attr,
515 char *buf, loff_t off, size_t count)
516 {
517 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
518 struct device, kobj)));
519 struct qla_hw_data *ha = vha->hw;
520 uint32_t faddr;
521 struct active_regions active_regions = { };
522
523 if (unlikely(pci_channel_offline(ha->pdev)))
524 return -EAGAIN;
525
526 if (!capable(CAP_SYS_ADMIN))
527 return -EINVAL;
528
529 if (IS_NOCACHE_VPD_TYPE(ha))
530 goto skip;
531
532 faddr = ha->flt_region_vpd << 2;
533
534 if (IS_QLA28XX(ha)) {
535 qla28xx_get_aux_images(vha, &active_regions);
536 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
537 faddr = ha->flt_region_vpd_sec << 2;
538
539 ql_dbg(ql_dbg_init, vha, 0x7070,
540 "Loading %s nvram image.\n",
541 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
542 "primary" : "secondary");
543 }
544
545 mutex_lock(&ha->optrom_mutex);
546 if (qla2x00_chip_is_down(vha)) {
547 mutex_unlock(&ha->optrom_mutex);
548 return -EAGAIN;
549 }
550
551 ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size);
552 mutex_unlock(&ha->optrom_mutex);
553
555 skip:
556 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
557 }
558
559 static ssize_t
560 qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
561 struct bin_attribute *bin_attr,
562 char *buf, loff_t off, size_t count)
563 {
564 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
565 struct device, kobj)));
566 struct qla_hw_data *ha = vha->hw;
567 uint8_t *tmp_data;
568
569 if (unlikely(pci_channel_offline(ha->pdev)))
570 return 0;
571
572 if (qla2x00_chip_is_down(vha))
573 return 0;
574
575 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
576 !ha->isp_ops->write_nvram)
577 return 0;
578
579 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
580 ql_log(ql_log_warn, vha, 0x706a,
581 "HBA not online, failing VPD update.\n");
582 return -EAGAIN;
583 }
584
585 mutex_lock(&ha->optrom_mutex);
586 if (qla2x00_chip_is_down(vha)) {
587 mutex_unlock(&ha->optrom_mutex);
588 return -EAGAIN;
589 }
590
591 /* Write NVRAM. */
592 ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count);
593 ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count);
594
595 /* Update flash version information for 4Gb & above. */
596 if (!IS_FWI2_CAPABLE(ha)) {
597 mutex_unlock(&ha->optrom_mutex);
598 return -EINVAL;
599 }
600
601 tmp_data = vmalloc(256);
602 if (!tmp_data) {
603 mutex_unlock(&ha->optrom_mutex);
604 ql_log(ql_log_warn, vha, 0x706b,
605 "Unable to allocate memory for VPD information update.\n");
606 return -ENOMEM;
607 }
608 ha->isp_ops->get_flash_version(vha, tmp_data);
609 vfree(tmp_data);
610
611 mutex_unlock(&ha->optrom_mutex);
612
613 return count;
614 }
615
616 static struct bin_attribute sysfs_vpd_attr = {
617 .attr = {
618 .name = "vpd",
619 .mode = S_IRUSR | S_IWUSR,
620 },
621 .size = 0,
622 .read = qla2x00_sysfs_read_vpd,
623 .write = qla2x00_sysfs_write_vpd,
624 };
625
626 static ssize_t
627 qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
628 struct bin_attribute *bin_attr,
629 char *buf, loff_t off, size_t count)
630 {
631 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
632 struct device, kobj)));
633 int rval;
634
635 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
636 return 0;
637
638 mutex_lock(&vha->hw->optrom_mutex);
639 if (qla2x00_chip_is_down(vha)) {
640 mutex_unlock(&vha->hw->optrom_mutex);
641 return 0;
642 }
643
644 rval = qla2x00_read_sfp_dev(vha, buf, count);
645 mutex_unlock(&vha->hw->optrom_mutex);
646
647 if (rval)
648 return -EIO;
649
650 return count;
651 }
652
653 static struct bin_attribute sysfs_sfp_attr = {
654 .attr = {
655 .name = "sfp",
656 .mode = S_IRUSR | S_IWUSR,
657 },
658 .size = SFP_DEV_SIZE,
659 .read = qla2x00_sysfs_read_sfp,
660 };
661
662 static ssize_t
663 qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
664 struct bin_attribute *bin_attr,
665 char *buf, loff_t off, size_t count)
666 {
667 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
668 struct device, kobj)));
669 struct qla_hw_data *ha = vha->hw;
670 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
671 int type;
672 uint32_t idc_control;
673 uint8_t *tmp_data = NULL;
674
675 if (off != 0)
676 return -EINVAL;
677
678 type = simple_strtol(buf, NULL, 10);
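/*
 * Reset codes handled below:
 *   0x2025c - ISP reset
 *   0x2025d - MPI reset (ISP81xx/83xx class parts)
 *   0x2025e - FCoE context reset (P3P base port only)
 *   0x2025f - disable reset via IDC control (ISP8031)
 *   0x20260 - re-enable reset via IDC control (ISP8031)
 *   0x20261 - refresh cached flash version info without a reset
 */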
679 switch (type) {
680 case 0x2025c:
681 ql_log(ql_log_info, vha, 0x706e,
682 "Issuing ISP reset.\n");
683
684 scsi_block_requests(vha->host);
685 if (IS_QLA82XX(ha)) {
686 ha->flags.isp82xx_no_md_cap = 1;
687 qla82xx_idc_lock(ha);
688 qla82xx_set_reset_owner(vha);
689 qla82xx_idc_unlock(ha);
690 } else if (IS_QLA8044(ha)) {
691 qla8044_idc_lock(ha);
692 idc_control = qla8044_rd_reg(ha,
693 QLA8044_IDC_DRV_CTRL);
694 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
695 (idc_control | GRACEFUL_RESET_BIT1));
696 qla82xx_set_reset_owner(vha);
697 qla8044_idc_unlock(ha);
698 } else {
699 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
700 qla2xxx_wake_dpc(vha);
701 }
702 qla2x00_wait_for_chip_reset(vha);
703 scsi_unblock_requests(vha->host);
704 break;
705 case 0x2025d:
706 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
707 return -EPERM;
708
709 ql_log(ql_log_info, vha, 0x706f,
710 "Issuing MPI reset.\n");
711
712 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
713 uint32_t idc_control;
714
715 qla83xx_idc_lock(vha, 0);
716 __qla83xx_get_idc_control(vha, &idc_control);
717 idc_control |= QLA83XX_IDC_GRACEFUL_RESET;
718 __qla83xx_set_idc_control(vha, idc_control);
719 qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
720 QLA8XXX_DEV_NEED_RESET);
721 qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
722 qla83xx_idc_unlock(vha, 0);
723 break;
724 } else {
725 /* Make sure FC side is not in reset */
726 WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
727 QLA_SUCCESS);
728
729 /* Issue MPI reset */
730 scsi_block_requests(vha->host);
731 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
732 ql_log(ql_log_warn, vha, 0x7070,
733 "MPI reset failed.\n");
734 scsi_unblock_requests(vha->host);
735 break;
736 }
737 case 0x2025e:
738 if (!IS_P3P_TYPE(ha) || vha != base_vha) {
739 ql_log(ql_log_info, vha, 0x7071,
740 "FCoE ctx reset not supported.\n");
741 return -EPERM;
742 }
743
744 ql_log(ql_log_info, vha, 0x7072,
745 "Issuing FCoE ctx reset.\n");
746 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
747 qla2xxx_wake_dpc(vha);
748 qla2x00_wait_for_fcoe_ctx_reset(vha);
749 break;
750 case 0x2025f:
751 if (!IS_QLA8031(ha))
752 return -EPERM;
753 ql_log(ql_log_info, vha, 0x70bc,
754 "Disabling Reset by IDC control\n");
755 qla83xx_idc_lock(vha, 0);
756 __qla83xx_get_idc_control(vha, &idc_control);
757 idc_control |= QLA83XX_IDC_RESET_DISABLED;
758 __qla83xx_set_idc_control(vha, idc_control);
759 qla83xx_idc_unlock(vha, 0);
760 break;
761 case 0x20260:
762 if (!IS_QLA8031(ha))
763 return -EPERM;
764 ql_log(ql_log_info, vha, 0x70bd,
765 "Enabling Reset by IDC control\n");
766 qla83xx_idc_lock(vha, 0);
767 __qla83xx_get_idc_control(vha, &idc_control);
768 idc_control &= ~QLA83XX_IDC_RESET_DISABLED;
769 __qla83xx_set_idc_control(vha, idc_control);
770 qla83xx_idc_unlock(vha, 0);
771 break;
772 case 0x20261:
773 ql_dbg(ql_dbg_user, vha, 0x70e0,
774 "Updating cache versions without reset ");
775
776 tmp_data = vmalloc(256);
777 if (!tmp_data) {
778 ql_log(ql_log_warn, vha, 0x70e1,
779 "Unable to allocate memory for VPD information update.\n");
780 return -ENOMEM;
781 }
782 ha->isp_ops->get_flash_version(vha, tmp_data);
783 vfree(tmp_data);
784 break;
785 }
786 return count;
787 }
788
789 static struct bin_attribute sysfs_reset_attr = {
790 .attr = {
791 .name = "reset",
792 .mode = S_IWUSR,
793 },
794 .size = 0,
795 .write = qla2x00_sysfs_write_reset,
796 };
797
798 static ssize_t
799 qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
800 struct bin_attribute *bin_attr,
801 char *buf, loff_t off, size_t count)
802 {
803 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
804 struct device, kobj)));
805 int type;
806 port_id_t did;
807
808 if (!capable(CAP_SYS_ADMIN))
809 return 0;
810
811 if (unlikely(pci_channel_offline(vha->hw->pdev)))
812 return 0;
813
814 if (qla2x00_chip_is_down(vha))
815 return 0;
816
817 type = simple_strtol(buf, NULL, 10);
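/*
 * The written value carries the 24-bit destination port ID:
 * bits 23-16 domain, bits 15-8 area, bits 7-0 AL_PA.
 */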
818
819 did.b.domain = (type & 0x00ff0000) >> 16;
820 did.b.area = (type & 0x0000ff00) >> 8;
821 did.b.al_pa = (type & 0x000000ff);
822
823 ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
824 did.b.domain, did.b.area, did.b.al_pa);
825
826 ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
827
828 qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
829 return count;
830 }
831
832 static struct bin_attribute sysfs_issue_logo_attr = {
833 .attr = {
834 .name = "issue_logo",
835 .mode = S_IWUSR,
836 },
837 .size = 0,
838 .write = qla2x00_issue_logo,
839 };
840
841 static ssize_t
842 qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
843 struct bin_attribute *bin_attr,
844 char *buf, loff_t off, size_t count)
845 {
846 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
847 struct device, kobj)));
848 struct qla_hw_data *ha = vha->hw;
849 int rval;
850 uint16_t actual_size;
851
852 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
853 return 0;
854
855 if (unlikely(pci_channel_offline(ha->pdev)))
856 return 0;
857 mutex_lock(&vha->hw->optrom_mutex);
858 if (qla2x00_chip_is_down(vha)) {
859 mutex_unlock(&vha->hw->optrom_mutex);
860 return 0;
861 }
862
863 if (ha->xgmac_data)
864 goto do_read;
865
866 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
867 &ha->xgmac_data_dma, GFP_KERNEL);
868 if (!ha->xgmac_data) {
869 mutex_unlock(&vha->hw->optrom_mutex);
870 ql_log(ql_log_warn, vha, 0x7076,
871 "Unable to allocate memory for XGMAC read-data.\n");
872 return 0;
873 }
874
875 do_read:
876 actual_size = 0;
877 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
878
879 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
880 XGMAC_DATA_SIZE, &actual_size);
881
882 mutex_unlock(&vha->hw->optrom_mutex);
883 if (rval != QLA_SUCCESS) {
884 ql_log(ql_log_warn, vha, 0x7077,
885 "Unable to read XGMAC data (%x).\n", rval);
886 count = 0;
887 }
888
889 count = actual_size > count ? count : actual_size;
890 memcpy(buf, ha->xgmac_data, count);
891
892 return count;
893 }
894
895 static struct bin_attribute sysfs_xgmac_stats_attr = {
896 .attr = {
897 .name = "xgmac_stats",
898 .mode = S_IRUSR,
899 },
900 .size = 0,
901 .read = qla2x00_sysfs_read_xgmac_stats,
902 };
903
904 static ssize_t
905 qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
906 struct bin_attribute *bin_attr,
907 char *buf, loff_t off, size_t count)
908 {
909 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
910 struct device, kobj)));
911 struct qla_hw_data *ha = vha->hw;
912 int rval;
913
914 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
915 return 0;
916
917 if (ha->dcbx_tlv)
918 goto do_read;
919 mutex_lock(&vha->hw->optrom_mutex);
920 if (qla2x00_chip_is_down(vha)) {
921 mutex_unlock(&vha->hw->optrom_mutex);
922 return 0;
923 }
924
925 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
926 &ha->dcbx_tlv_dma, GFP_KERNEL);
927 if (!ha->dcbx_tlv) {
928 mutex_unlock(&vha->hw->optrom_mutex);
929 ql_log(ql_log_warn, vha, 0x7078,
930 "Unable to allocate memory for DCBX TLV read-data.\n");
931 return -ENOMEM;
932 }
933
934 do_read:
935 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
936
937 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
938 DCBX_TLV_DATA_SIZE);
939
940 mutex_unlock(&vha->hw->optrom_mutex);
941
942 if (rval != QLA_SUCCESS) {
943 ql_log(ql_log_warn, vha, 0x7079,
944 "Unable to read DCBX TLV (%x).\n", rval);
945 return -EIO;
946 }
947
948 memcpy(buf, ha->dcbx_tlv, count);
949
950 return count;
951 }
952
953 static struct bin_attribute sysfs_dcbx_tlv_attr = {
954 .attr = {
955 .name = "dcbx_tlv",
956 .mode = S_IRUSR,
957 },
958 .size = 0,
959 .read = qla2x00_sysfs_read_dcbx_tlv,
960 };
961
962 static struct sysfs_entry {
963 char *name;
964 struct bin_attribute *attr;
965 int type;
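/*
 * type gates attribute creation/removal below: any non-zero value
 * requires an FWI2-capable ISP, 2 additionally requires ISP25xx,
 * and 3 requires a CNA-capable adapter.
 */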
966 } bin_file_entries[] = {
967 { "fw_dump", &sysfs_fw_dump_attr, },
968 { "nvram", &sysfs_nvram_attr, },
969 { "optrom", &sysfs_optrom_attr, },
970 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
971 { "vpd", &sysfs_vpd_attr, 1 },
972 { "sfp", &sysfs_sfp_attr, 1 },
973 { "reset", &sysfs_reset_attr, },
974 { "issue_logo", &sysfs_issue_logo_attr, },
975 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
976 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
977 { NULL },
978 };
979
980 void
981 qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
982 {
983 struct Scsi_Host *host = vha->host;
984 struct sysfs_entry *iter;
985 int ret;
986
987 for (iter = bin_file_entries; iter->name; iter++) {
988 if (iter->type && !IS_FWI2_CAPABLE(vha->hw))
989 continue;
990 if (iter->type == 2 && !IS_QLA25XX(vha->hw))
991 continue;
992 if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw)))
993 continue;
994
995 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
996 iter->attr);
997 if (ret)
998 ql_log(ql_log_warn, vha, 0x00f3,
999 "Unable to create sysfs %s binary attribute (%d).\n",
1000 iter->name, ret);
1001 else
1002 ql_dbg(ql_dbg_init, vha, 0x00f4,
1003 "Successfully created sysfs %s binary attribute.\n",
1004 iter->name);
1005 }
1006 }
1007
1008 void
1009 qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
1010 {
1011 struct Scsi_Host *host = vha->host;
1012 struct sysfs_entry *iter;
1013 struct qla_hw_data *ha = vha->hw;
1014
1015 for (iter = bin_file_entries; iter->name; iter++) {
1016 if (iter->type && !IS_FWI2_CAPABLE(ha))
1017 continue;
1018 if (iter->type == 2 && !IS_QLA25XX(ha))
1019 continue;
1020 if (iter->type == 3 && !(IS_CNA_CAPABLE(ha)))
1021 continue;
1022 if (iter->type == 0x27 &&
1023 (!IS_QLA27XX(ha) || !IS_QLA28XX(ha)))
1024 continue;
1025
1026 sysfs_remove_bin_file(&host->shost_gendev.kobj,
1027 iter->attr);
1028 }
1029
1030 if (stop_beacon && ha->beacon_blink_led == 1)
1031 ha->isp_ops->beacon_off(vha);
1032 }
1033
1034 /* Scsi_Host attributes. */
1035
1036 static ssize_t
1037 qla2x00_driver_version_show(struct device *dev,
1038 struct device_attribute *attr, char *buf)
1039 {
1040 return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
1041 }
1042
1043 static ssize_t
1044 qla2x00_fw_version_show(struct device *dev,
1045 struct device_attribute *attr, char *buf)
1046 {
1047 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1048 struct qla_hw_data *ha = vha->hw;
1049 char fw_str[128];
1050
1051 return scnprintf(buf, PAGE_SIZE, "%s\n",
1052 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1053 }
1054
1055 static ssize_t
1056 qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
1057 char *buf)
1058 {
1059 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1060 struct qla_hw_data *ha = vha->hw;
1061 uint32_t sn;
1062
1063 if (IS_QLAFX00(vha->hw)) {
1064 return scnprintf(buf, PAGE_SIZE, "%s\n",
1065 vha->hw->mr.serial_num);
1066 } else if (IS_FWI2_CAPABLE(ha)) {
1067 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
1068 return strlen(strcat(buf, "\n"));
1069 }
1070
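/*
 * Legacy adapters: decode the serial number words into a letter
 * prefix ('A' + sn / 100000) followed by a five-digit suffix.
 */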
1071 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
1072 return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
1073 sn % 100000);
1074 }
1075
1076 static ssize_t
1077 qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
1078 char *buf)
1079 {
1080 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1081
1082 return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
1083 }
1084
1085 static ssize_t
1086 qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
1087 char *buf)
1088 {
1089 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1090 struct qla_hw_data *ha = vha->hw;
1091
1092 if (IS_QLAFX00(vha->hw))
1093 return scnprintf(buf, PAGE_SIZE, "%s\n",
1094 vha->hw->mr.hw_version);
1095
1096 return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
1097 ha->product_id[0], ha->product_id[1], ha->product_id[2],
1098 ha->product_id[3]);
1099 }
1100
1101 static ssize_t
1102 qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1103 char *buf)
1104 {
1105 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1106
1107 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1108 }
1109
1110 static ssize_t
1111 qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1112 char *buf)
1113 {
1114 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1115
1116 return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
1117 }
1118
1119 static ssize_t
1120 qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1121 char *buf)
1122 {
1123 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1124 char pci_info[30];
1125
1126 return scnprintf(buf, PAGE_SIZE, "%s\n",
1127 vha->hw->isp_ops->pci_info_str(vha, pci_info,
1128 sizeof(pci_info)));
1129 }
1130
1131 static ssize_t
1132 qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1133 char *buf)
1134 {
1135 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1136 struct qla_hw_data *ha = vha->hw;
1137 int len = 0;
1138
1139 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1140 atomic_read(&vha->loop_state) == LOOP_DEAD ||
1141 vha->device_flags & DFLG_NO_CABLE)
1142 len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
1143 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1144 qla2x00_chip_is_down(vha))
1145 len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1146 else {
1147 len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
1148
1149 switch (ha->current_topology) {
1150 case ISP_CFG_NL:
1151 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1152 break;
1153 case ISP_CFG_FL:
1154 len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1155 break;
1156 case ISP_CFG_N:
1157 len += scnprintf(buf + len, PAGE_SIZE-len,
1158 "N_Port to N_Port\n");
1159 break;
1160 case ISP_CFG_F:
1161 len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1162 break;
1163 default:
1164 len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1165 break;
1166 }
1167 }
1168 return len;
1169 }
1170
1171 static ssize_t
1172 qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1173 char *buf)
1174 {
1175 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1176 int len = 0;
1177
1178 switch (vha->hw->zio_mode) {
1179 case QLA_ZIO_MODE_6:
1180 len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1181 break;
1182 case QLA_ZIO_DISABLED:
1183 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1184 break;
1185 }
1186 return len;
1187 }
1188
1189 static ssize_t
1190 qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1191 const char *buf, size_t count)
1192 {
1193 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1194 struct qla_hw_data *ha = vha->hw;
1195 int val = 0;
1196 uint16_t zio_mode;
1197
1198 if (!IS_ZIO_SUPPORTED(ha))
1199 return -ENOTSUPP;
1200
1201 if (sscanf(buf, "%d", &val) != 1)
1202 return -EINVAL;
1203
1204 if (val)
1205 zio_mode = QLA_ZIO_MODE_6;
1206 else
1207 zio_mode = QLA_ZIO_DISABLED;
1208
1209 /* Update per-hba values and queue a reset. */
1210 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1211 ha->zio_mode = zio_mode;
1212 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1213 }
1214 return strlen(buf);
1215 }
1216
1217 static ssize_t
1218 qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1219 char *buf)
1220 {
1221 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1222
1223 return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1224 }
1225
1226 static ssize_t
1227 qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1228 const char *buf, size_t count)
1229 {
1230 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1231 int val = 0;
1232 uint16_t zio_timer;
1233
1234 if (sscanf(buf, "%d", &val) != 1)
1235 return -EINVAL;
1236 if (val > 25500 || val < 100)
1237 return -ERANGE;
1238
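/*
 * The ZIO timer is stored in 100-microsecond units; the _show side
 * multiplies by 100 when reporting.
 */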
1239 zio_timer = (uint16_t)(val / 100);
1240 vha->hw->zio_timer = zio_timer;
1241
1242 return strlen(buf);
1243 }
1244
1245 static ssize_t
1246 qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
1247 char *buf)
1248 {
1249 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1250
1251 return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
1252 vha->hw->last_zio_threshold);
1253 }
1254
1255 static ssize_t
1256 qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
1257 const char *buf, size_t count)
1258 {
1259 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1260 int val = 0;
1261
1262 if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
1263 return -EINVAL;
1264 if (sscanf(buf, "%d", &val) != 1)
1265 return -EINVAL;
1266 if (val < 0 || val > 256)
1267 return -ERANGE;
1268
1269 atomic_set(&vha->hw->zio_threshold, val);
1270 return strlen(buf);
1271 }
1272
1273 static ssize_t
1274 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1275 char *buf)
1276 {
1277 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1278 int len = 0;
1279
1280 if (vha->hw->beacon_blink_led)
1281 len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1282 else
1283 len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1284 return len;
1285 }
1286
1287 static ssize_t
1288 qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1289 const char *buf, size_t count)
1290 {
1291 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1292 struct qla_hw_data *ha = vha->hw;
1293 int val = 0;
1294 int rval;
1295
1296 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1297 return -EPERM;
1298
1299 if (sscanf(buf, "%d", &val) != 1)
1300 return -EINVAL;
1301
1302 mutex_lock(&vha->hw->optrom_mutex);
1303 if (qla2x00_chip_is_down(vha)) {
1304 mutex_unlock(&vha->hw->optrom_mutex);
1305 ql_log(ql_log_warn, vha, 0x707a,
1306 "Abort ISP active -- ignoring beacon request.\n");
1307 return -EBUSY;
1308 }
1309
1310 if (val)
1311 rval = ha->isp_ops->beacon_on(vha);
1312 else
1313 rval = ha->isp_ops->beacon_off(vha);
1314
1315 if (rval != QLA_SUCCESS)
1316 count = 0;
1317
1318 mutex_unlock(&vha->hw->optrom_mutex);
1319
1320 return count;
1321 }
1322
1323 static ssize_t
1324 qla2x00_optrom_bios_version_show(struct device *dev,
1325 struct device_attribute *attr, char *buf)
1326 {
1327 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1328 struct qla_hw_data *ha = vha->hw;
1329
1330 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1331 ha->bios_revision[0]);
1332 }
1333
1334 static ssize_t
1335 qla2x00_optrom_efi_version_show(struct device *dev,
1336 struct device_attribute *attr, char *buf)
1337 {
1338 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1339 struct qla_hw_data *ha = vha->hw;
1340
1341 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1342 ha->efi_revision[0]);
1343 }
1344
1345 static ssize_t
1346 qla2x00_optrom_fcode_version_show(struct device *dev,
1347 struct device_attribute *attr, char *buf)
1348 {
1349 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1350 struct qla_hw_data *ha = vha->hw;
1351
1352 return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1353 ha->fcode_revision[0]);
1354 }
1355
1356 static ssize_t
1357 qla2x00_optrom_fw_version_show(struct device *dev,
1358 struct device_attribute *attr, char *buf)
1359 {
1360 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1361 struct qla_hw_data *ha = vha->hw;
1362
1363 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1364 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1365 ha->fw_revision[3]);
1366 }
1367
1368 static ssize_t
1369 qla2x00_optrom_gold_fw_version_show(struct device *dev,
1370 struct device_attribute *attr, char *buf)
1371 {
1372 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1373 struct qla_hw_data *ha = vha->hw;
1374
1375 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1376 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1377 return scnprintf(buf, PAGE_SIZE, "\n");
1378
1379 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1380 ha->gold_fw_version[0], ha->gold_fw_version[1],
1381 ha->gold_fw_version[2], ha->gold_fw_version[3]);
1382 }
1383
1384 static ssize_t
1385 qla2x00_total_isp_aborts_show(struct device *dev,
1386 struct device_attribute *attr, char *buf)
1387 {
1388 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1389
1390 return scnprintf(buf, PAGE_SIZE, "%d\n",
1391 vha->qla_stats.total_isp_aborts);
1392 }
1393
1394 static ssize_t
1395 qla24xx_84xx_fw_version_show(struct device *dev,
1396 struct device_attribute *attr, char *buf)
1397 {
1398 int rval = QLA_SUCCESS;
1399 uint16_t status[2] = { 0 };
1400 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1401 struct qla_hw_data *ha = vha->hw;
1402
1403 if (!IS_QLA84XX(ha))
1404 return scnprintf(buf, PAGE_SIZE, "\n");
1405
1406 if (!ha->cs84xx->op_fw_version) {
1407 rval = qla84xx_verify_chip(vha, status);
1408
1409 if (!rval && !status[0])
1410 return scnprintf(buf, PAGE_SIZE, "%u\n",
1411 (uint32_t)ha->cs84xx->op_fw_version);
1412 }
1413
1414 return scnprintf(buf, PAGE_SIZE, "\n");
1415 }
1416
1417 static ssize_t
1418 qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
1419 char *buf)
1420 {
1421 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1422 struct qla_hw_data *ha = vha->hw;
1423
1424 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1425 return scnprintf(buf, PAGE_SIZE, "\n");
1426
1427 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1428 ha->serdes_version[0], ha->serdes_version[1],
1429 ha->serdes_version[2]);
1430 }
1431
1432 static ssize_t
1433 qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1434 char *buf)
1435 {
1436 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1437 struct qla_hw_data *ha = vha->hw;
1438
1439 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
1440 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1441 return scnprintf(buf, PAGE_SIZE, "\n");
1442
1443 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1444 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1445 ha->mpi_capabilities);
1446 }
1447
1448 static ssize_t
1449 qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1450 char *buf)
1451 {
1452 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1453 struct qla_hw_data *ha = vha->hw;
1454
1455 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1456 return scnprintf(buf, PAGE_SIZE, "\n");
1457
1458 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1459 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1460 }
1461
1462 static ssize_t
1463 qla2x00_flash_block_size_show(struct device *dev,
1464 struct device_attribute *attr, char *buf)
1465 {
1466 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1467 struct qla_hw_data *ha = vha->hw;
1468
1469 return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1470 }
1471
1472 static ssize_t
1473 qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1474 char *buf)
1475 {
1476 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1477
1478 if (!IS_CNA_CAPABLE(vha->hw))
1479 return scnprintf(buf, PAGE_SIZE, "\n");
1480
1481 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1482 }
1483
1484 static ssize_t
1485 qla2x00_vn_port_mac_address_show(struct device *dev,
1486 struct device_attribute *attr, char *buf)
1487 {
1488 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1489
1490 if (!IS_CNA_CAPABLE(vha->hw))
1491 return scnprintf(buf, PAGE_SIZE, "\n");
1492
1493 return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
1494 }
1495
1496 static ssize_t
1497 qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1498 char *buf)
1499 {
1500 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1501
1502 return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1503 }
1504
1505 static ssize_t
1506 qla2x00_thermal_temp_show(struct device *dev,
1507 struct device_attribute *attr, char *buf)
1508 {
1509 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1510 uint16_t temp = 0;
1511 int rc;
1512
1513 mutex_lock(&vha->hw->optrom_mutex);
1514 if (qla2x00_chip_is_down(vha)) {
1515 mutex_unlock(&vha->hw->optrom_mutex);
1516 ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
1517 goto done;
1518 }
1519
1520 if (vha->hw->flags.eeh_busy) {
1521 mutex_unlock(&vha->hw->optrom_mutex);
1522 ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
1523 goto done;
1524 }
1525
1526 rc = qla2x00_get_thermal_temp(vha, &temp);
1527 mutex_unlock(&vha->hw->optrom_mutex);
1528 if (rc == QLA_SUCCESS)
1529 return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
1530
1531 done:
1532 return scnprintf(buf, PAGE_SIZE, "\n");
1533 }
1534
1535 static ssize_t
1536 qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1537 char *buf)
1538 {
1539 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1540 int rval = QLA_FUNCTION_FAILED;
1541 uint16_t state[6];
1542 uint32_t pstate;
1543
1544 if (IS_QLAFX00(vha->hw)) {
1545 pstate = qlafx00_fw_state_show(dev, attr, buf);
1546 return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
1547 }
1548
1549 mutex_lock(&vha->hw->optrom_mutex);
1550 if (qla2x00_chip_is_down(vha)) {
1551 mutex_unlock(&vha->hw->optrom_mutex);
1552 ql_log(ql_log_warn, vha, 0x707c,
1553 "ISP reset active.\n");
1554 goto out;
1555 } else if (vha->hw->flags.eeh_busy) {
1556 mutex_unlock(&vha->hw->optrom_mutex);
1557 goto out;
1558 }
1559
1560 rval = qla2x00_get_firmware_state(vha, state);
1561 mutex_unlock(&vha->hw->optrom_mutex);
1562 out:
1563 if (rval != QLA_SUCCESS) {
1564 memset(state, -1, sizeof(state));
1565 rval = qla2x00_get_firmware_state(vha, state);
1566 }
1567
1568 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1569 state[0], state[1], state[2], state[3], state[4], state[5]);
1570 }
1571
1572 static ssize_t
1573 qla2x00_diag_requests_show(struct device *dev,
1574 struct device_attribute *attr, char *buf)
1575 {
1576 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1577
1578 if (!IS_BIDI_CAPABLE(vha->hw))
1579 return scnprintf(buf, PAGE_SIZE, "\n");
1580
1581 return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
1582 }
1583
1584 static ssize_t
1585 qla2x00_diag_megabytes_show(struct device *dev,
1586 struct device_attribute *attr, char *buf)
1587 {
1588 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1589
1590 if (!IS_BIDI_CAPABLE(vha->hw))
1591 return scnprintf(buf, PAGE_SIZE, "\n");
1592
1593 return scnprintf(buf, PAGE_SIZE, "%llu\n",
1594 vha->bidi_stats.transfer_bytes >> 20);
1595 }
1596
1597 static ssize_t
1598 qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1599 char *buf)
1600 {
1601 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1602 struct qla_hw_data *ha = vha->hw;
1603 uint32_t size;
1604
1605 if (!ha->fw_dumped)
1606 size = 0;
1607 else if (IS_P3P_TYPE(ha))
1608 size = ha->md_template_size + ha->md_dump_size;
1609 else
1610 size = ha->fw_dump_len;
1611
1612 return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1613 }
1614
1615 static ssize_t
1616 qla2x00_allow_cna_fw_dump_show(struct device *dev,
1617 struct device_attribute *attr, char *buf)
1618 {
1619 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1620
1621 if (!IS_P3P_TYPE(vha->hw))
1622 return scnprintf(buf, PAGE_SIZE, "\n");
1623 else
1624 return scnprintf(buf, PAGE_SIZE, "%s\n",
1625 vha->hw->allow_cna_fw_dump ? "true" : "false");
1626 }
1627
1628 static ssize_t
1629 qla2x00_allow_cna_fw_dump_store(struct device *dev,
1630 struct device_attribute *attr, const char *buf, size_t count)
1631 {
1632 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1633 int val = 0;
1634
1635 if (!IS_P3P_TYPE(vha->hw))
1636 return -EINVAL;
1637
1638 if (sscanf(buf, "%d", &val) != 1)
1639 return -EINVAL;
1640
1641 vha->hw->allow_cna_fw_dump = val != 0;
1642
1643 return strlen(buf);
1644 }
1645
1646 static ssize_t
1647 qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
1648 char *buf)
1649 {
1650 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1651 struct qla_hw_data *ha = vha->hw;
1652
1653 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1654 return scnprintf(buf, PAGE_SIZE, "\n");
1655
1656 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1657 ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
1658 }
1659
1660 static ssize_t
1661 qla2x00_min_supported_speed_show(struct device *dev,
1662 struct device_attribute *attr, char *buf)
1663 {
1664 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1665 struct qla_hw_data *ha = vha->hw;
1666
1667 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1668 return scnprintf(buf, PAGE_SIZE, "\n");
1669
1670 return scnprintf(buf, PAGE_SIZE, "%s\n",
1671 ha->min_supported_speed == 6 ? "64Gps" :
1672 ha->min_supported_speed == 5 ? "32Gps" :
1673 ha->min_supported_speed == 4 ? "16Gps" :
1674 ha->min_supported_speed == 3 ? "8Gps" :
1675 ha->min_supported_speed == 2 ? "4Gps" :
1676 ha->min_supported_speed != 0 ? "unknown" : "");
1677 }
1678
1679 static ssize_t
1680 qla2x00_max_supported_speed_show(struct device *dev,
1681 struct device_attribute *attr, char *buf)
1682 {
1683 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1684 struct qla_hw_data *ha = vha->hw;
1685
1686 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
1687 return scnprintf(buf, PAGE_SIZE, "\n");
1688
1689 return scnprintf(buf, PAGE_SIZE, "%s\n",
1690 ha->max_supported_speed == 2 ? "64Gps" :
1691 ha->max_supported_speed == 1 ? "32Gps" :
1692 ha->max_supported_speed == 0 ? "16Gps" : "unknown");
1693 }
1694
1695 static ssize_t
1696 qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1697 const char *buf, size_t count)
1698 {
1699 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1700 ulong type, speed;
1701 int oldspeed, rval;
1702 int mode = QLA_SET_DATA_RATE_LR;
1703 struct qla_hw_data *ha = vha->hw;
1704
1705 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
1706 ql_log(ql_log_warn, vha, 0x70d8,
1707 "Speed setting not supported \n");
1708 return -EINVAL;
1709 }
1710
1711 rval = kstrtol(buf, 10, &type);
1712 if (rval)
1713 return rval;
1714 speed = type;
1715 if (type == 40 || type == 80 || type == 160 ||
1716 type == 320) {
1717 ql_dbg(ql_dbg_user, vha, 0x70d9,
1718 "Setting will be affected after a loss of sync\n");
1719 type = type/10;
1720 mode = QLA_SET_DATA_RATE_NOLR;
1721 }
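/*
 * Values of 40/80/160/320 select 4/8/16/32 Gbps but defer the change
 * until the next loss of sync (QLA_SET_DATA_RATE_NOLR); the plain
 * values request an immediate change (QLA_SET_DATA_RATE_LR).
 */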
1722
1723 oldspeed = ha->set_data_rate;
1724
1725 switch (type) {
1726 case 0:
1727 ha->set_data_rate = PORT_SPEED_AUTO;
1728 break;
1729 case 4:
1730 ha->set_data_rate = PORT_SPEED_4GB;
1731 break;
1732 case 8:
1733 ha->set_data_rate = PORT_SPEED_8GB;
1734 break;
1735 case 16:
1736 ha->set_data_rate = PORT_SPEED_16GB;
1737 break;
1738 case 32:
1739 ha->set_data_rate = PORT_SPEED_32GB;
1740 break;
1741 default:
1742 ql_log(ql_log_warn, vha, 0x1199,
1743 "Unrecognized speed setting:%lx. Setting Autoneg\n",
1744 speed);
1745 ha->set_data_rate = PORT_SPEED_AUTO;
1746 }
1747
1748 if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate))
1749 return -EINVAL;
1750
1751 ql_log(ql_log_info, vha, 0x70da,
1752 "Setting speed to %lx Gbps \n", type);
1753
1754 rval = qla2x00_set_data_rate(vha, mode);
1755 if (rval != QLA_SUCCESS)
1756 return -EIO;
1757
1758 return strlen(buf);
1759 }
1760
1761 static ssize_t
1762 qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr,
1763 char *buf)
1764 {
1765 struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev));
1766 struct qla_hw_data *ha = vha->hw;
1767 ssize_t rval;
1768 char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"};
1769
1770 rval = qla2x00_get_data_rate(vha);
1771 if (rval != QLA_SUCCESS) {
1772 ql_log(ql_log_warn, vha, 0x70db,
1773 "Unable to get port speed rval:%zd\n", rval);
1774 return -EINVAL;
1775 }
1776
1777 ql_log(ql_log_info, vha, 0x70d6,
1778 "port speed:%d\n", ha->link_data_rate);
1779
1780 return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]);
1781 }
1782
1783 /* ----- */
1784
1785 static ssize_t
1786 qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1787 {
1788 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1789 int len = 0;
1790
1791 len += scnprintf(buf + len, PAGE_SIZE-len,
1792 "Supported options: enabled | disabled | dual | exclusive\n");
1793
1794 /* --- */
1795 len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
1796
1797 switch (vha->qlini_mode) {
1798 case QLA2XXX_INI_MODE_EXCLUSIVE:
1799 len += scnprintf(buf + len, PAGE_SIZE-len,
1800 QLA2XXX_INI_MODE_STR_EXCLUSIVE);
1801 break;
1802 case QLA2XXX_INI_MODE_DISABLED:
1803 len += scnprintf(buf + len, PAGE_SIZE-len,
1804 QLA2XXX_INI_MODE_STR_DISABLED);
1805 break;
1806 case QLA2XXX_INI_MODE_ENABLED:
1807 len += scnprintf(buf + len, PAGE_SIZE-len,
1808 QLA2XXX_INI_MODE_STR_ENABLED);
1809 break;
1810 case QLA2XXX_INI_MODE_DUAL:
1811 len += scnprintf(buf + len, PAGE_SIZE-len,
1812 QLA2XXX_INI_MODE_STR_DUAL);
1813 break;
1814 }
1815 len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
1816
1817 return len;
1818 }
1819
1820 static char *mode_to_str[] = {
1821 "exclusive",
1822 "disabled",
1823 "enabled",
1824 "dual",
1825 };
1826
1827 #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
1828 static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
1829 {
1830 int rc = 0;
1831 enum {
1832 NO_ACTION,
1833 MODE_CHANGE_ACCEPT,
1834 MODE_CHANGE_NO_ACTION,
1835 TARGET_STILL_ACTIVE,
1836 };
1837 int action = NO_ACTION;
1838 int set_mode = 0;
1839 u8 eo_toggle = 0; /* exchange offload flipped */
1840
1841 switch (vha->qlini_mode) {
1842 case QLA2XXX_INI_MODE_DISABLED:
1843 switch (op) {
1844 case QLA2XXX_INI_MODE_DISABLED:
1845 if (qla_tgt_mode_enabled(vha)) {
1846 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1847 vha->hw->flags.exchoffld_enabled)
1848 eo_toggle = 1;
1849 if (((vha->ql2xexchoffld !=
1850 vha->u_ql2xexchoffld) &&
1851 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1852 eo_toggle) {
1853 /*
1854 					 * The number of exchanges to be
1855 					 * offloaded was changed, or the
1856 					 * offload option was flipped
1857 */
1858 action = MODE_CHANGE_ACCEPT;
1859 } else {
1860 action = MODE_CHANGE_NO_ACTION;
1861 }
1862 } else {
1863 action = MODE_CHANGE_NO_ACTION;
1864 }
1865 break;
1866 case QLA2XXX_INI_MODE_EXCLUSIVE:
1867 if (qla_tgt_mode_enabled(vha)) {
1868 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1869 vha->hw->flags.exchoffld_enabled)
1870 eo_toggle = 1;
1871 if (((vha->ql2xexchoffld !=
1872 vha->u_ql2xexchoffld) &&
1873 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1874 eo_toggle) {
1875 /*
1876 					 * The number of exchanges to be
1877 					 * offloaded was changed, or the
1878 					 * offload option was flipped
1879 */
1880 action = MODE_CHANGE_ACCEPT;
1881 } else {
1882 action = MODE_CHANGE_NO_ACTION;
1883 }
1884 } else {
1885 action = MODE_CHANGE_ACCEPT;
1886 }
1887 break;
1888 case QLA2XXX_INI_MODE_DUAL:
1889 action = MODE_CHANGE_ACCEPT;
1890 /* active_mode is target only, reset it to dual */
1891 if (qla_tgt_mode_enabled(vha)) {
1892 set_mode = 1;
1893 action = MODE_CHANGE_ACCEPT;
1894 } else {
1895 action = MODE_CHANGE_NO_ACTION;
1896 }
1897 break;
1898
1899 case QLA2XXX_INI_MODE_ENABLED:
1900 if (qla_tgt_mode_enabled(vha))
1901 action = TARGET_STILL_ACTIVE;
1902 else {
1903 action = MODE_CHANGE_ACCEPT;
1904 set_mode = 1;
1905 }
1906 break;
1907 }
1908 break;
1909
1910 case QLA2XXX_INI_MODE_EXCLUSIVE:
1911 switch (op) {
1912 case QLA2XXX_INI_MODE_EXCLUSIVE:
1913 if (qla_tgt_mode_enabled(vha)) {
1914 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1915 vha->hw->flags.exchoffld_enabled)
1916 eo_toggle = 1;
1917 if (((vha->ql2xexchoffld !=
1918 vha->u_ql2xexchoffld) &&
1919 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1920 eo_toggle)
1921 /*
1922 					 * The number of exchanges to be
1923 					 * offloaded was changed, or the
1924 					 * offload option was flipped
1925 */
1926 action = MODE_CHANGE_ACCEPT;
1927 else
1928 action = NO_ACTION;
1929 } else
1930 action = NO_ACTION;
1931
1932 break;
1933
1934 case QLA2XXX_INI_MODE_DISABLED:
1935 if (qla_tgt_mode_enabled(vha)) {
1936 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
1937 vha->hw->flags.exchoffld_enabled)
1938 eo_toggle = 1;
1939 if (((vha->ql2xexchoffld !=
1940 vha->u_ql2xexchoffld) &&
1941 NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
1942 eo_toggle)
1943 action = MODE_CHANGE_ACCEPT;
1944 else
1945 action = MODE_CHANGE_NO_ACTION;
1946 } else
1947 action = MODE_CHANGE_NO_ACTION;
1948 break;
1949
1950 case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
1951 if (qla_tgt_mode_enabled(vha)) {
1952 action = MODE_CHANGE_ACCEPT;
1953 set_mode = 1;
1954 } else
1955 action = MODE_CHANGE_ACCEPT;
1956 break;
1957
1958 case QLA2XXX_INI_MODE_ENABLED:
1959 if (qla_tgt_mode_enabled(vha))
1960 action = TARGET_STILL_ACTIVE;
1961 else {
1962 if (vha->hw->flags.fw_started)
1963 action = MODE_CHANGE_NO_ACTION;
1964 else
1965 action = MODE_CHANGE_ACCEPT;
1966 }
1967 break;
1968 }
1969 break;
1970
1971 case QLA2XXX_INI_MODE_ENABLED:
1972 switch (op) {
1973 case QLA2XXX_INI_MODE_ENABLED:
1974 if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
1975 vha->hw->flags.exchoffld_enabled)
1976 eo_toggle = 1;
1977 if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
1978 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
1979 eo_toggle)
1980 action = MODE_CHANGE_ACCEPT;
1981 else
1982 action = NO_ACTION;
1983 break;
1984 case QLA2XXX_INI_MODE_DUAL:
1985 case QLA2XXX_INI_MODE_DISABLED:
1986 action = MODE_CHANGE_ACCEPT;
1987 break;
1988 default:
1989 action = MODE_CHANGE_NO_ACTION;
1990 break;
1991 }
1992 break;
1993
1994 case QLA2XXX_INI_MODE_DUAL:
1995 switch (op) {
1996 case QLA2XXX_INI_MODE_DUAL:
1997 if (qla_tgt_mode_enabled(vha) ||
1998 qla_dual_mode_enabled(vha)) {
1999 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2000 vha->u_ql2xiniexchg) !=
2001 vha->hw->flags.exchoffld_enabled)
2002 eo_toggle = 1;
2003
2004 if ((((vha->ql2xexchoffld +
2005 vha->ql2xiniexchg) !=
2006 (vha->u_ql2xiniexchg +
2007 vha->u_ql2xexchoffld)) &&
2008 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2009 vha->u_ql2xexchoffld)) || eo_toggle)
2010 action = MODE_CHANGE_ACCEPT;
2011 else
2012 action = NO_ACTION;
2013 } else {
2014 if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
2015 vha->u_ql2xiniexchg) !=
2016 vha->hw->flags.exchoffld_enabled)
2017 eo_toggle = 1;
2018
2019 if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
2020 != (vha->u_ql2xiniexchg +
2021 vha->u_ql2xexchoffld)) &&
2022 NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
2023 vha->u_ql2xexchoffld)) || eo_toggle)
2024 action = MODE_CHANGE_NO_ACTION;
2025 else
2026 action = NO_ACTION;
2027 }
2028 break;
2029
2030 case QLA2XXX_INI_MODE_DISABLED:
2031 if (qla_tgt_mode_enabled(vha) ||
2032 qla_dual_mode_enabled(vha)) {
2033 /* turning off initiator mode */
2034 set_mode = 1;
2035 action = MODE_CHANGE_ACCEPT;
2036 } else {
2037 action = MODE_CHANGE_NO_ACTION;
2038 }
2039 break;
2040
2041 case QLA2XXX_INI_MODE_EXCLUSIVE:
2042 if (qla_tgt_mode_enabled(vha) ||
2043 qla_dual_mode_enabled(vha)) {
2044 set_mode = 1;
2045 action = MODE_CHANGE_ACCEPT;
2046 } else {
2047 action = MODE_CHANGE_ACCEPT;
2048 }
2049 break;
2050
2051 case QLA2XXX_INI_MODE_ENABLED:
2052 if (qla_tgt_mode_enabled(vha) ||
2053 qla_dual_mode_enabled(vha)) {
2054 action = TARGET_STILL_ACTIVE;
2055 } else {
2056 action = MODE_CHANGE_ACCEPT;
2057 }
2058 }
2059 break;
2060 }
2061
2062 switch (action) {
2063 case MODE_CHANGE_ACCEPT:
2064 ql_log(ql_log_warn, vha, 0xffff,
2065 "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2066 mode_to_str[vha->qlini_mode], mode_to_str[op],
2067 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2068 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2069
2070 vha->qlini_mode = op;
2071 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2072 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2073 if (set_mode)
2074 qlt_set_mode(vha);
2075 vha->flags.online = 1;
2076 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2077 break;
2078
2079 case MODE_CHANGE_NO_ACTION:
2080 ql_log(ql_log_warn, vha, 0xffff,
2081 "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
2082 mode_to_str[vha->qlini_mode], mode_to_str[op],
2083 vha->ql2xexchoffld, vha->u_ql2xexchoffld,
2084 vha->ql2xiniexchg, vha->u_ql2xiniexchg);
2085 vha->qlini_mode = op;
2086 vha->ql2xexchoffld = vha->u_ql2xexchoffld;
2087 vha->ql2xiniexchg = vha->u_ql2xiniexchg;
2088 break;
2089
2090 case TARGET_STILL_ACTIVE:
2091 ql_log(ql_log_warn, vha, 0xffff,
2092 "Target Mode is active. Unable to change Mode.\n");
2093 break;
2094
2095 case NO_ACTION:
2096 default:
2097 ql_log(ql_log_warn, vha, 0xffff,
2098 "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
2099 vha->qlini_mode, op,
2100 vha->ql2xexchoffld, vha->u_ql2xexchoffld);
2101 break;
2102 }
2103
2104 return rc;
2105 }
2106
2107 static ssize_t
2108 qlini_mode_store(struct device *dev, struct device_attribute *attr,
2109 const char *buf, size_t count)
2110 {
2111 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2112 int ini;
2113
2114 if (!buf)
2115 return -EINVAL;
2116
2117 if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
2118 strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
2119 ini = QLA2XXX_INI_MODE_EXCLUSIVE;
2120 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
2121 strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
2122 ini = QLA2XXX_INI_MODE_DISABLED;
2123 else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
2124 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
2125 ini = QLA2XXX_INI_MODE_ENABLED;
2126 else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
2127 strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
2128 ini = QLA2XXX_INI_MODE_DUAL;
2129 else
2130 return -EINVAL;
2131
2132 qla_set_ini_mode(vha, ini);
2133 return strlen(buf);
2134 }
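
/*
 * Example (illustrative only): switching the host personality from
 * userspace; the accepted strings match mode_to_str[] above and the host
 * number is arbitrary.
 *
 *   cat /sys/class/scsi_host/host2/qlini_mode
 *   echo "dual" > /sys/class/scsi_host/host2/qlini_mode
 *
 * An accepted change sets ISP_ABORT_NEEDED so the firmware is re-initialized
 * with the new mode and exchange counts.
 */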
2135
2136 static ssize_t
2137 ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
2138 char *buf)
2139 {
2140 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2141 int len = 0;
2142
2143 len += scnprintf(buf + len, PAGE_SIZE-len,
2144 "target exchange: new %d : current: %d\n\n",
2145 vha->u_ql2xexchoffld, vha->ql2xexchoffld);
2146
2147 len += scnprintf(buf + len, PAGE_SIZE-len,
2148 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2149 vha->host_no);
2150
2151 return len;
2152 }
2153
2154 static ssize_t
2155 ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
2156 const char *buf, size_t count)
2157 {
2158 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2159 int val = 0;
2160
2161 if (sscanf(buf, "%d", &val) != 1)
2162 return -EINVAL;
2163
2164 if (val > FW_MAX_EXCHANGES_CNT)
2165 val = FW_MAX_EXCHANGES_CNT;
2166 else if (val < 0)
2167 val = 0;
2168
2169 vha->u_ql2xexchoffld = val;
2170 return strlen(buf);
2171 }
2172
2173 static ssize_t
2174 ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
2175 char *buf)
2176 {
2177 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2178 int len = 0;
2179
2180 len += scnprintf(buf + len, PAGE_SIZE-len,
2181 "target exchange: new %d : current: %d\n\n",
2182 vha->u_ql2xiniexchg, vha->ql2xiniexchg);
2183
2184 len += scnprintf(buf + len, PAGE_SIZE-len,
2185 "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
2186 vha->host_no);
2187
2188 return len;
2189 }
2190
2191 static ssize_t
2192 ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
2193 const char *buf, size_t count)
2194 {
2195 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2196 int val = 0;
2197
2198 if (sscanf(buf, "%d", &val) != 1)
2199 return -EINVAL;
2200
2201 if (val > FW_MAX_EXCHANGES_CNT)
2202 val = FW_MAX_EXCHANGES_CNT;
2203 else if (val < 0)
2204 val = 0;
2205
2206 vha->u_ql2xiniexchg = val;
2207 return strlen(buf);
2208 }
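
/*
 * Illustrative tuning sequence (not part of the driver): values written to
 * ql2xexchoffld/ql2xiniexchg are staged in u_ql2xexchoffld/u_ql2xiniexchg
 * and only take effect once qlini_mode is (re)written, as the _show handlers
 * above point out.
 *
 *   echo 2048 > /sys/class/scsi_host/host2/ql2xexchoffld
 *   echo 2048 > /sys/class/scsi_host/host2/ql2xiniexchg
 *   echo "dual" > /sys/class/scsi_host/host2/qlini_mode
 */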
2209
2210 static ssize_t
2211 qla2x00_dif_bundle_statistics_show(struct device *dev,
2212 struct device_attribute *attr, char *buf)
2213 {
2214 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2215 struct qla_hw_data *ha = vha->hw;
2216
2217 return scnprintf(buf, PAGE_SIZE,
2218 "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n",
2219 ha->dif_bundle_crossed_pages, ha->dif_bundle_reads,
2220 ha->dif_bundle_writes, ha->dif_bundle_kallocs,
2221 ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
2222 }
2223
2224 static ssize_t
2225 qla2x00_fw_attr_show(struct device *dev,
2226 struct device_attribute *attr, char *buf)
2227 {
2228 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2229 struct qla_hw_data *ha = vha->hw;
2230
2231 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2232 return scnprintf(buf, PAGE_SIZE, "\n");
2233
2234 return scnprintf(buf, PAGE_SIZE, "%llx\n",
2235 (uint64_t)ha->fw_attributes_ext[1] << 48 |
2236 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2237 (uint64_t)ha->fw_attributes_h << 16 |
2238 (uint64_t)ha->fw_attributes);
2239 }
2240
2241 static ssize_t
2242 qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
2243 char *buf)
2244 {
2245 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2246
2247 return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
2248 }
2249
2250 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
2251 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
2252 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
2253 static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
2254 static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
2255 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
2256 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
2257 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
2258 static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
2259 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
2260 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
2261 qla2x00_zio_timer_store);
2262 static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
2263 qla2x00_beacon_store);
2264 static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
2265 qla2x00_optrom_bios_version_show, NULL);
2266 static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
2267 qla2x00_optrom_efi_version_show, NULL);
2268 static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
2269 qla2x00_optrom_fcode_version_show, NULL);
2270 static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
2271 NULL);
2272 static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
2273 qla2x00_optrom_gold_fw_version_show, NULL);
2274 static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
2275 NULL);
2276 static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
2277 NULL);
2278 static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
2279 static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
2280 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
2281 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
2282 NULL);
2283 static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
2284 static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
2285 qla2x00_vn_port_mac_address_show, NULL);
2286 static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
2287 static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
2288 static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
2289 static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
2290 static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
2291 static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
2292 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
2293 qla2x00_allow_cna_fw_dump_show,
2294 qla2x00_allow_cna_fw_dump_store);
2295 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
2296 static DEVICE_ATTR(min_supported_speed, 0444,
2297 qla2x00_min_supported_speed_show, NULL);
2298 static DEVICE_ATTR(max_supported_speed, 0444,
2299 qla2x00_max_supported_speed_show, NULL);
2300 static DEVICE_ATTR(zio_threshold, 0644,
2301 qla_zio_threshold_show,
2302 qla_zio_threshold_store);
2303 static DEVICE_ATTR_RW(qlini_mode);
2304 static DEVICE_ATTR_RW(ql2xexchoffld);
2305 static DEVICE_ATTR_RW(ql2xiniexchg);
2306 static DEVICE_ATTR(dif_bundle_statistics, 0444,
2307 qla2x00_dif_bundle_statistics_show, NULL);
2308 static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
2309 qla2x00_port_speed_store);
2310 static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
2311 static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
2312
2313
2314 struct device_attribute *qla2x00_host_attrs[] = {
2315 &dev_attr_driver_version,
2316 &dev_attr_fw_version,
2317 &dev_attr_serial_num,
2318 &dev_attr_isp_name,
2319 &dev_attr_isp_id,
2320 &dev_attr_model_name,
2321 &dev_attr_model_desc,
2322 &dev_attr_pci_info,
2323 &dev_attr_link_state,
2324 &dev_attr_zio,
2325 &dev_attr_zio_timer,
2326 &dev_attr_beacon,
2327 &dev_attr_optrom_bios_version,
2328 &dev_attr_optrom_efi_version,
2329 &dev_attr_optrom_fcode_version,
2330 &dev_attr_optrom_fw_version,
2331 &dev_attr_84xx_fw_version,
2332 &dev_attr_total_isp_aborts,
2333 &dev_attr_serdes_version,
2334 &dev_attr_mpi_version,
2335 &dev_attr_phy_version,
2336 &dev_attr_flash_block_size,
2337 &dev_attr_vlan_id,
2338 &dev_attr_vn_port_mac_address,
2339 &dev_attr_fabric_param,
2340 &dev_attr_fw_state,
2341 &dev_attr_optrom_gold_fw_version,
2342 &dev_attr_thermal_temp,
2343 &dev_attr_diag_requests,
2344 &dev_attr_diag_megabytes,
2345 &dev_attr_fw_dump_size,
2346 &dev_attr_allow_cna_fw_dump,
2347 &dev_attr_pep_version,
2348 &dev_attr_min_supported_speed,
2349 &dev_attr_max_supported_speed,
2350 &dev_attr_zio_threshold,
2351 &dev_attr_dif_bundle_statistics,
2352 &dev_attr_port_speed,
2353 &dev_attr_port_no,
2354 &dev_attr_fw_attr,
2355 NULL, /* reserve for qlini_mode */
2356 NULL, /* reserve for ql2xiniexchg */
2357 NULL, /* reserve for ql2xexchoffld */
2358 NULL,
2359 };
2360
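/*
 * Note (informational): the three reserved NULL slots at the end of
 * qla2x00_host_attrs[] above are filled in at runtime by
 * qla_insert_tgt_attrs() below, which is expected to be called once during
 * driver initialization when target/dual-mode support is configured, so the
 * qlini_mode/ql2xiniexchg/ql2xexchoffld attributes only appear in that case.
 * The final NULL terminates the list for the SCSI midlayer.
 */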
2361 void qla_insert_tgt_attrs(void)
2362 {
2363 struct device_attribute **attr;
2364
2365 /* advance to empty slot */
2366 for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
2367 continue;
2368
2369 *attr = &dev_attr_qlini_mode;
2370 attr++;
2371 *attr = &dev_attr_ql2xiniexchg;
2372 attr++;
2373 *attr = &dev_attr_ql2xexchoffld;
2374 }
2375
2376 /* Host attributes. */
2377
2378 static void
2379 qla2x00_get_host_port_id(struct Scsi_Host *shost)
2380 {
2381 scsi_qla_host_t *vha = shost_priv(shost);
2382
2383 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
2384 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
2385 }
2386
2387 static void
2388 qla2x00_get_host_speed(struct Scsi_Host *shost)
2389 {
2390 scsi_qla_host_t *vha = shost_priv(shost);
2391 u32 speed;
2392
2393 if (IS_QLAFX00(vha->hw)) {
2394 qlafx00_get_host_speed(shost);
2395 return;
2396 }
2397
2398 switch (vha->hw->link_data_rate) {
2399 case PORT_SPEED_1GB:
2400 speed = FC_PORTSPEED_1GBIT;
2401 break;
2402 case PORT_SPEED_2GB:
2403 speed = FC_PORTSPEED_2GBIT;
2404 break;
2405 case PORT_SPEED_4GB:
2406 speed = FC_PORTSPEED_4GBIT;
2407 break;
2408 case PORT_SPEED_8GB:
2409 speed = FC_PORTSPEED_8GBIT;
2410 break;
2411 case PORT_SPEED_10GB:
2412 speed = FC_PORTSPEED_10GBIT;
2413 break;
2414 case PORT_SPEED_16GB:
2415 speed = FC_PORTSPEED_16GBIT;
2416 break;
2417 case PORT_SPEED_32GB:
2418 speed = FC_PORTSPEED_32GBIT;
2419 break;
2420 case PORT_SPEED_64GB:
2421 speed = FC_PORTSPEED_64GBIT;
2422 break;
2423 default:
2424 speed = FC_PORTSPEED_UNKNOWN;
2425 break;
2426 }
2427
2428 fc_host_speed(shost) = speed;
2429 }
2430
2431 static void
2432 qla2x00_get_host_port_type(struct Scsi_Host *shost)
2433 {
2434 scsi_qla_host_t *vha = shost_priv(shost);
2435 uint32_t port_type;
2436
2437 if (vha->vp_idx) {
2438 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2439 return;
2440 }
2441 switch (vha->hw->current_topology) {
2442 case ISP_CFG_NL:
2443 port_type = FC_PORTTYPE_LPORT;
2444 break;
2445 case ISP_CFG_FL:
2446 port_type = FC_PORTTYPE_NLPORT;
2447 break;
2448 case ISP_CFG_N:
2449 port_type = FC_PORTTYPE_PTP;
2450 break;
2451 case ISP_CFG_F:
2452 port_type = FC_PORTTYPE_NPORT;
2453 break;
2454 default:
2455 port_type = FC_PORTTYPE_UNKNOWN;
2456 break;
2457 }
2458
2459 fc_host_port_type(shost) = port_type;
2460 }
2461
2462 static void
2463 qla2x00_get_starget_node_name(struct scsi_target *starget)
2464 {
2465 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2466 scsi_qla_host_t *vha = shost_priv(host);
2467 fc_port_t *fcport;
2468 u64 node_name = 0;
2469
2470 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2471 if (fcport->rport &&
2472 starget->id == fcport->rport->scsi_target_id) {
2473 node_name = wwn_to_u64(fcport->node_name);
2474 break;
2475 }
2476 }
2477
2478 fc_starget_node_name(starget) = node_name;
2479 }
2480
2481 static void
2482 qla2x00_get_starget_port_name(struct scsi_target *starget)
2483 {
2484 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2485 scsi_qla_host_t *vha = shost_priv(host);
2486 fc_port_t *fcport;
2487 u64 port_name = 0;
2488
2489 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2490 if (fcport->rport &&
2491 starget->id == fcport->rport->scsi_target_id) {
2492 port_name = wwn_to_u64(fcport->port_name);
2493 break;
2494 }
2495 }
2496
2497 fc_starget_port_name(starget) = port_name;
2498 }
2499
2500 static void
2501 qla2x00_get_starget_port_id(struct scsi_target *starget)
2502 {
2503 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
2504 scsi_qla_host_t *vha = shost_priv(host);
2505 fc_port_t *fcport;
2506 uint32_t port_id = ~0U;
2507
2508 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2509 if (fcport->rport &&
2510 starget->id == fcport->rport->scsi_target_id) {
2511 port_id = fcport->d_id.b.domain << 16 |
2512 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2513 break;
2514 }
2515 }
2516
2517 fc_starget_port_id(starget) = port_id;
2518 }
2519
2520 static inline void
2521 qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
2522 {
2523 rport->dev_loss_tmo = timeout ? timeout : 1;
2524 }
2525
2526 static void
2527 qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
2528 {
2529 struct Scsi_Host *host = rport_to_shost(rport);
2530 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2531 unsigned long flags;
2532
2533 if (!fcport)
2534 return;
2535
2536 /* Now that the rport has been deleted, set the fcport state to
2537  * FCS_DEVICE_DEAD. */
2538 qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
2539
2540 /*
2541 * Transport has effectively 'deleted' the rport, clear
2542 * all local references.
2543 */
2544 spin_lock_irqsave(host->host_lock, flags);
2545 fcport->rport = fcport->drport = NULL;
2546 *((fc_port_t **)rport->dd_data) = NULL;
2547 spin_unlock_irqrestore(host->host_lock, flags);
2548
2549 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2550 return;
2551
2552 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2553 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2554 return;
2555 }
2556 }
2557
2558 static void
2559 qla2x00_terminate_rport_io(struct fc_rport *rport)
2560 {
2561 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
2562
2563 if (!fcport)
2564 return;
2565
2566 if (test_bit(UNLOADING, &fcport->vha->dpc_flags))
2567 return;
2568
2569 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
2570 return;
2571
2572 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
2573 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
2574 return;
2575 }
2576 /*
2577 * At this point all fcport's software-states are cleared. Perform any
2578 * final cleanup of firmware resources (PCBs and XCBs).
2579 */
2580 if (fcport->loop_id != FC_NO_LOOP_ID) {
2581 if (IS_FWI2_CAPABLE(fcport->vha->hw))
2582 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
2583 fcport->loop_id, fcport->d_id.b.domain,
2584 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2585 else
2586 qla2x00_port_logout(fcport->vha, fcport);
2587 }
2588 }
2589
2590 static int
2591 qla2x00_issue_lip(struct Scsi_Host *shost)
2592 {
2593 scsi_qla_host_t *vha = shost_priv(shost);
2594
2595 if (IS_QLAFX00(vha->hw))
2596 return 0;
2597
2598 qla2x00_loop_reset(vha);
2599 return 0;
2600 }
2601
2602 static struct fc_host_statistics *
2603 qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2604 {
2605 scsi_qla_host_t *vha = shost_priv(shost);
2606 struct qla_hw_data *ha = vha->hw;
2607 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2608 int rval;
2609 struct link_statistics *stats;
2610 dma_addr_t stats_dma;
2611 struct fc_host_statistics *p = &vha->fc_host_stat;
2612
2613 memset(p, -1, sizeof(*p));
2614
2615 if (IS_QLAFX00(vha->hw))
2616 goto done;
2617
2618 if (test_bit(UNLOADING, &vha->dpc_flags))
2619 goto done;
2620
2621 if (unlikely(pci_channel_offline(ha->pdev)))
2622 goto done;
2623
2624 if (qla2x00_chip_is_down(vha))
2625 goto done;
2626
2627 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2628 GFP_KERNEL);
2629 if (!stats) {
2630 ql_log(ql_log_warn, vha, 0x707d,
2631 "Failed to allocate memory for stats.\n");
2632 goto done;
2633 }
2634
2635 rval = QLA_FUNCTION_FAILED;
2636 if (IS_FWI2_CAPABLE(ha)) {
2637 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, 0);
2638 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
2639 !ha->dpc_active) {
2640 /* Must be in a 'READY' state for statistics retrieval. */
2641 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
2642 stats, stats_dma);
2643 }
2644
2645 if (rval != QLA_SUCCESS)
2646 goto done_free;
2647
2648 p->link_failure_count = stats->link_fail_cnt;
2649 p->loss_of_sync_count = stats->loss_sync_cnt;
2650 p->loss_of_signal_count = stats->loss_sig_cnt;
2651 p->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
2652 p->invalid_tx_word_count = stats->inval_xmit_word_cnt;
2653 p->invalid_crc_count = stats->inval_crc_cnt;
2654 if (IS_FWI2_CAPABLE(ha)) {
2655 p->lip_count = stats->lip_cnt;
2656 p->tx_frames = stats->tx_frames;
2657 p->rx_frames = stats->rx_frames;
2658 p->dumped_frames = stats->discarded_frames;
2659 p->nos_count = stats->nos_rcvd;
2660 p->error_frames =
2661 stats->dropped_frames + stats->discarded_frames;
2662 p->rx_words = vha->qla_stats.input_bytes;
2663 p->tx_words = vha->qla_stats.output_bytes;
2664 }
2665 p->fcp_control_requests = vha->qla_stats.control_requests;
2666 p->fcp_input_requests = vha->qla_stats.input_requests;
2667 p->fcp_output_requests = vha->qla_stats.output_requests;
2668 p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
2669 p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
2670 p->seconds_since_last_reset =
2671 get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
2672 do_div(p->seconds_since_last_reset, HZ);
2673
2674 done_free:
2675 dma_free_coherent(&ha->pdev->dev, sizeof(struct link_statistics),
2676 stats, stats_dma);
2677 done:
2678 return p;
2679 }
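
/*
 * Illustrative only: the counters filled in above are exported by the FC
 * transport class, typically readable as, e.g.:
 *
 *   cat /sys/class/fc_host/host2/statistics/link_failure_count
 *   cat /sys/class/fc_host/host2/statistics/invalid_crc_count
 *   echo 1 > /sys/class/fc_host/host2/statistics/reset_statistics
 *
 * where the reset write ends up in qla2x00_reset_host_stats() below. The
 * host number is an example.
 */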
2680
2681 static void
2682 qla2x00_reset_host_stats(struct Scsi_Host *shost)
2683 {
2684 scsi_qla_host_t *vha = shost_priv(shost);
2685 struct qla_hw_data *ha = vha->hw;
2686 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2687 struct link_statistics *stats;
2688 dma_addr_t stats_dma;
2689
2690 memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2691 memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2692
2693 vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2694
2695 if (IS_FWI2_CAPABLE(ha)) {
2696 stats = dma_alloc_coherent(&ha->pdev->dev,
2697 sizeof(*stats), &stats_dma, GFP_KERNEL);
2698 if (!stats) {
2699 ql_log(ql_log_warn, vha, 0x70d7,
2700 "Failed to allocate memory for stats.\n");
2701 return;
2702 }
2703
2704 /* reset firmware statistics */
2705 qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0);
2706
2707 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2708 stats, stats_dma);
2709 }
2710 }
2711
2712 static void
2713 qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
2714 {
2715 scsi_qla_host_t *vha = shost_priv(shost);
2716
2717 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
2718 sizeof(fc_host_symbolic_name(shost)));
2719 }
2720
2721 static void
2722 qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
2723 {
2724 scsi_qla_host_t *vha = shost_priv(shost);
2725
2726 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
2727 }
2728
2729 static void
2730 qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
2731 {
2732 scsi_qla_host_t *vha = shost_priv(shost);
2733 static const uint8_t node_name[WWN_SIZE] = {
2734 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
2735 };
2736 u64 fabric_name = wwn_to_u64(node_name);
2737
2738 if (vha->device_flags & SWITCH_FOUND)
2739 fabric_name = wwn_to_u64(vha->fabric_node_name);
2740
2741 fc_host_fabric_name(shost) = fabric_name;
2742 }
2743
2744 static void
2745 qla2x00_get_host_port_state(struct Scsi_Host *shost)
2746 {
2747 scsi_qla_host_t *vha = shost_priv(shost);
2748 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2749
2750 if (!base_vha->flags.online) {
2751 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
2752 return;
2753 }
2754
2755 switch (atomic_read(&base_vha->loop_state)) {
2756 case LOOP_UPDATE:
2757 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2758 break;
2759 case LOOP_DOWN:
2760 if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
2761 fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
2762 else
2763 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2764 break;
2765 case LOOP_DEAD:
2766 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
2767 break;
2768 case LOOP_READY:
2769 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
2770 break;
2771 default:
2772 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
2773 break;
2774 }
2775 }
2776
2777 static int
2778 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
2779 {
2780 int ret = 0;
2781 uint8_t qos = 0;
2782 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
2783 scsi_qla_host_t *vha = NULL;
2784 struct qla_hw_data *ha = base_vha->hw;
2785 int cnt;
2786 struct req_que *req = ha->req_q_map[0];
2787 struct qla_qpair *qpair;
2788
2789 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
2790 if (ret) {
2791 ql_log(ql_log_warn, vha, 0x707e,
2792 "Vport sanity check failed, status %x\n", ret);
2793 return (ret);
2794 }
2795
2796 vha = qla24xx_create_vhost(fc_vport);
2797 if (vha == NULL) {
2798 ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
2799 return FC_VPORT_FAILED;
2800 }
2801 if (disable) {
2802 atomic_set(&vha->vp_state, VP_OFFLINE);
2803 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
2804 } else
2805 atomic_set(&vha->vp_state, VP_FAILED);
2806
2807 /* ready to create vport */
2808 ql_log(ql_log_info, vha, 0x7080,
2809 "VP entry id %d assigned.\n", vha->vp_idx);
2810
2811 /* initialize vport states */
2812 atomic_set(&vha->loop_state, LOOP_DOWN);
2813 vha->vp_err_state = VP_ERR_PORTDWN;
2814 vha->vp_prev_err_state = VP_ERR_UNKWN;
2815 /* Check if physical ha port is Up */
2816 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
2817 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2818 /* Don't retry or attempt login of this virtual port */
2819 ql_dbg(ql_dbg_user, vha, 0x7081,
2820 "Vport loop state is not UP.\n");
2821 atomic_set(&vha->loop_state, LOOP_DEAD);
2822 if (!disable)
2823 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
2824 }
2825
2826 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
2827 if (ha->fw_attributes & BIT_4) {
2828 int prot = 0, guard;
2829
2830 vha->flags.difdix_supported = 1;
2831 ql_dbg(ql_dbg_user, vha, 0x7082,
2832 "Registered for DIF/DIX type 1 and 3 protection.\n");
2833 if (ql2xenabledif == 1)
2834 prot = SHOST_DIX_TYPE0_PROTECTION;
2835 scsi_host_set_prot(vha->host,
2836 prot | SHOST_DIF_TYPE1_PROTECTION
2837 | SHOST_DIF_TYPE2_PROTECTION
2838 | SHOST_DIF_TYPE3_PROTECTION
2839 | SHOST_DIX_TYPE1_PROTECTION
2840 | SHOST_DIX_TYPE2_PROTECTION
2841 | SHOST_DIX_TYPE3_PROTECTION);
2842
2843 guard = SHOST_DIX_GUARD_CRC;
2844
2845 if (IS_PI_IPGUARD_CAPABLE(ha) &&
2846 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
2847 guard |= SHOST_DIX_GUARD_IP;
2848
2849 scsi_host_set_guard(vha->host, guard);
2850 } else
2851 vha->flags.difdix_supported = 0;
2852 }
2853
2854 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
2855 &ha->pdev->dev)) {
2856 ql_dbg(ql_dbg_user, vha, 0x7083,
2857 "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
2858 goto vport_create_failed_2;
2859 }
2860
2861 /* initialize attributes */
2862 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2863 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2864 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2865 fc_host_supported_classes(vha->host) =
2866 fc_host_supported_classes(base_vha->host);
2867 fc_host_supported_speeds(vha->host) =
2868 fc_host_supported_speeds(base_vha->host);
2869
2870 qlt_vport_create(vha, ha);
2871 qla24xx_vport_disable(fc_vport, disable);
2872
2873 if (!ql2xmqsupport || !ha->npiv_info)
2874 goto vport_queue;
2875
2876 /* Create a request queue in QoS mode for the vport */
2877 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
2878 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
2879 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
2880 8) == 0) {
2881 qos = ha->npiv_info[cnt].q_qos;
2882 break;
2883 }
2884 }
2885
2886 if (qos) {
2887 qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
2888 if (!qpair)
2889 ql_log(ql_log_warn, vha, 0x7084,
2890 "Can't create qpair for VP[%d]\n",
2891 vha->vp_idx);
2892 else {
2893 ql_dbg(ql_dbg_multiq, vha, 0xc001,
2894 "Queue pair: %d Qos: %d) created for VP[%d]\n",
2895 qpair->id, qos, vha->vp_idx);
2896 ql_dbg(ql_dbg_user, vha, 0x7085,
2897 "Queue Pair: %d Qos: %d) created for VP[%d]\n",
2898 qpair->id, qos, vha->vp_idx);
2899 req = qpair->req;
2900 vha->qpair = qpair;
2901 }
2902 }
2903
2904 vport_queue:
2905 vha->req = req;
2906 return 0;
2907
2908 vport_create_failed_2:
2909 qla24xx_disable_vp(vha);
2910 qla24xx_deallocate_vp_id(vha);
2911 scsi_host_put(vha->host);
2912 return FC_VPORT_FAILED;
2913 }
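
/*
 * Illustrative only: NPIV virtual ports are normally created through the FC
 * transport sysfs interface, which calls back into qla24xx_vport_create()
 * above. A typical invocation (WWPN/WWNN are placeholders) looks like:
 *
 *   echo "<wwpn>:<wwnn>" > /sys/class/fc_host/host2/vport_create
 *
 * and the matching delete request is dispatched to qla24xx_vport_delete()
 * below.
 */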
2914
2915 static int
2916 qla24xx_vport_delete(struct fc_vport *fc_vport)
2917 {
2918 scsi_qla_host_t *vha = fc_vport->dd_data;
2919 struct qla_hw_data *ha = vha->hw;
2920 uint16_t id = vha->vp_idx;
2921
2922 set_bit(VPORT_DELETE, &vha->dpc_flags);
2923
2924 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
2925 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
2926 msleep(1000);
2927
2928 qla_nvme_delete(vha);
2929
2930 qla24xx_disable_vp(vha);
2931 qla2x00_wait_for_sess_deletion(vha);
2932
2933 vha->flags.delete_progress = 1;
2934
2935 qlt_remove_target(ha, vha);
2936
2937 fc_remove_host(vha->host);
2938
2939 scsi_remove_host(vha->host);
2940
2941 /* Allow timer to run to drain queued items, when removing vp */
2942 qla24xx_deallocate_vp_id(vha);
2943
2944 if (vha->timer_active) {
2945 qla2x00_vp_stop_timer(vha);
2946 ql_dbg(ql_dbg_user, vha, 0x7086,
2947 "Timer for the VP[%d] has stopped\n", vha->vp_idx);
2948 }
2949
2950 qla2x00_free_fcports(vha);
2951
2952 mutex_lock(&ha->vport_lock);
2953 ha->cur_vport_count--;
2954 clear_bit(vha->vp_idx, ha->vp_idx_map);
2955 mutex_unlock(&ha->vport_lock);
2956
2957 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
2958 vha->gnl.ldma);
2959
2960 vha->gnl.l = NULL;
2961
2962 vfree(vha->scan.l);
2963
2964 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
2965 if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
2966 ql_log(ql_log_warn, vha, 0x7087,
2967 "Queue Pair delete failed.\n");
2968 }
2969
2970 ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
2971 scsi_host_put(vha->host);
2972 return 0;
2973 }
2974
2975 static int
2976 qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
2977 {
2978 scsi_qla_host_t *vha = fc_vport->dd_data;
2979
2980 if (disable)
2981 qla24xx_disable_vp(vha);
2982 else
2983 qla24xx_enable_vp(vha);
2984
2985 return 0;
2986 }
2987
2988 struct fc_function_template qla2xxx_transport_functions = {
2989
2990 .show_host_node_name = 1,
2991 .show_host_port_name = 1,
2992 .show_host_supported_classes = 1,
2993 .show_host_supported_speeds = 1,
2994
2995 .get_host_port_id = qla2x00_get_host_port_id,
2996 .show_host_port_id = 1,
2997 .get_host_speed = qla2x00_get_host_speed,
2998 .show_host_speed = 1,
2999 .get_host_port_type = qla2x00_get_host_port_type,
3000 .show_host_port_type = 1,
3001 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3002 .show_host_symbolic_name = 1,
3003 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3004 .show_host_system_hostname = 1,
3005 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3006 .show_host_fabric_name = 1,
3007 .get_host_port_state = qla2x00_get_host_port_state,
3008 .show_host_port_state = 1,
3009
3010 .dd_fcrport_size = sizeof(struct fc_port *),
3011 .show_rport_supported_classes = 1,
3012
3013 .get_starget_node_name = qla2x00_get_starget_node_name,
3014 .show_starget_node_name = 1,
3015 .get_starget_port_name = qla2x00_get_starget_port_name,
3016 .show_starget_port_name = 1,
3017 .get_starget_port_id = qla2x00_get_starget_port_id,
3018 .show_starget_port_id = 1,
3019
3020 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3021 .show_rport_dev_loss_tmo = 1,
3022
3023 .issue_fc_host_lip = qla2x00_issue_lip,
3024 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3025 .terminate_rport_io = qla2x00_terminate_rport_io,
3026 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3027 .reset_fc_host_stats = qla2x00_reset_host_stats,
3028
3029 .vport_create = qla24xx_vport_create,
3030 .vport_disable = qla24xx_vport_disable,
3031 .vport_delete = qla24xx_vport_delete,
3032 .bsg_request = qla24xx_bsg_request,
3033 .bsg_timeout = qla24xx_bsg_timeout,
3034 };
3035
3036 struct fc_function_template qla2xxx_transport_vport_functions = {
3037
3038 .show_host_node_name = 1,
3039 .show_host_port_name = 1,
3040 .show_host_supported_classes = 1,
3041
3042 .get_host_port_id = qla2x00_get_host_port_id,
3043 .show_host_port_id = 1,
3044 .get_host_speed = qla2x00_get_host_speed,
3045 .show_host_speed = 1,
3046 .get_host_port_type = qla2x00_get_host_port_type,
3047 .show_host_port_type = 1,
3048 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
3049 .show_host_symbolic_name = 1,
3050 .set_host_system_hostname = qla2x00_set_host_system_hostname,
3051 .show_host_system_hostname = 1,
3052 .get_host_fabric_name = qla2x00_get_host_fabric_name,
3053 .show_host_fabric_name = 1,
3054 .get_host_port_state = qla2x00_get_host_port_state,
3055 .show_host_port_state = 1,
3056
3057 .dd_fcrport_size = sizeof(struct fc_port *),
3058 .show_rport_supported_classes = 1,
3059
3060 .get_starget_node_name = qla2x00_get_starget_node_name,
3061 .show_starget_node_name = 1,
3062 .get_starget_port_name = qla2x00_get_starget_port_name,
3063 .show_starget_port_name = 1,
3064 .get_starget_port_id = qla2x00_get_starget_port_id,
3065 .show_starget_port_id = 1,
3066
3067 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
3068 .show_rport_dev_loss_tmo = 1,
3069
3070 .issue_fc_host_lip = qla2x00_issue_lip,
3071 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
3072 .terminate_rport_io = qla2x00_terminate_rport_io,
3073 .get_fc_host_stats = qla2x00_get_fc_host_stats,
3074 .reset_fc_host_stats = qla2x00_reset_host_stats,
3075
3076 .bsg_request = qla24xx_bsg_request,
3077 .bsg_timeout = qla24xx_bsg_timeout,
3078 };
3079
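/*
 * Note (informational): for 27xx/28xx adapters, qla2x00_init_host_attr()
 * below treats ha->max_supported_speed and ha->min_supported_speed as small
 * firmware-reported codes (max 0/1/2 capping the mask at 16/32/64 Gbps, min
 * 2..6 corresponding to a 4..64 Gbps floor) and translates that range into
 * the FC_PORTSPEED_* capability mask advertised to the FC transport.
 */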
3080 void
3081 qla2x00_init_host_attr(scsi_qla_host_t *vha)
3082 {
3083 struct qla_hw_data *ha = vha->hw;
3084 u32 speeds = FC_PORTSPEED_UNKNOWN;
3085
3086 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
3087 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
3088 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
3089 fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
3090 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
3091 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
3092 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
3093
3094 if (IS_CNA_CAPABLE(ha))
3095 speeds = FC_PORTSPEED_10GBIT;
3096 else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
3097 if (ha->max_supported_speed == 2) {
3098 if (ha->min_supported_speed <= 6)
3099 speeds |= FC_PORTSPEED_64GBIT;
3100 }
3101 if (ha->max_supported_speed == 2 ||
3102 ha->max_supported_speed == 1) {
3103 if (ha->min_supported_speed <= 5)
3104 speeds |= FC_PORTSPEED_32GBIT;
3105 }
3106 if (ha->max_supported_speed == 2 ||
3107 ha->max_supported_speed == 1 ||
3108 ha->max_supported_speed == 0) {
3109 if (ha->min_supported_speed <= 4)
3110 speeds |= FC_PORTSPEED_16GBIT;
3111 }
3112 if (ha->max_supported_speed == 1 ||
3113 ha->max_supported_speed == 0) {
3114 if (ha->min_supported_speed <= 3)
3115 speeds |= FC_PORTSPEED_8GBIT;
3116 }
3117 if (ha->max_supported_speed == 0) {
3118 if (ha->min_supported_speed <= 2)
3119 speeds |= FC_PORTSPEED_4GBIT;
3120 }
3121 } else if (IS_QLA2031(ha))
3122 speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
3123 FC_PORTSPEED_4GBIT;
3124 else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
3125 speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
3126 FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3127 else if (IS_QLA24XX_TYPE(ha))
3128 speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
3129 FC_PORTSPEED_1GBIT;
3130 else if (IS_QLA23XX(ha))
3131 speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
3132 else
3133 speeds = FC_PORTSPEED_1GBIT;
3134
3135 fc_host_supported_speeds(vha->host) = speeds;
3136 }
3137