1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_target.h"
8
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
11
12 static struct mb_cmd_name {
13 uint16_t cmd;
14 const char *str;
15 } mb_str[] = {
16 {MBC_GET_PORT_DATABASE, "GPDB"},
17 {MBC_GET_ID_LIST, "GIDList"},
18 {MBC_GET_LINK_PRIV_STATS, "Stats"},
19 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
20 };
21
22 static const char *mb_to_str(uint16_t cmd)
23 {
24 int i;
25 struct mb_cmd_name *e;
26
27 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
28 e = mb_str + i;
29 if (cmd == e->cmd)
30 return e->str;
31 }
32 return "unknown";
33 }
34
35 static struct rom_cmd {
36 uint16_t cmd;
37 } rom_cmds[] = {
38 { MBC_LOAD_RAM },
39 { MBC_EXECUTE_FIRMWARE },
40 { MBC_READ_RAM_WORD },
41 { MBC_MAILBOX_REGISTER_TEST },
42 { MBC_VERIFY_CHECKSUM },
43 { MBC_GET_FIRMWARE_VERSION },
44 { MBC_LOAD_RISC_RAM },
45 { MBC_DUMP_RISC_RAM },
46 { MBC_LOAD_RISC_RAM_EXTENDED },
47 { MBC_DUMP_RISC_RAM_EXTENDED },
48 { MBC_WRITE_RAM_WORD_EXTENDED },
49 { MBC_READ_RAM_EXTENDED },
50 { MBC_GET_RESOURCE_COUNTS },
51 { MBC_SET_FIRMWARE_OPTION },
52 { MBC_MID_INITIALIZE_FIRMWARE },
53 { MBC_GET_FIRMWARE_STATE },
54 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
55 { MBC_GET_RETRY_COUNT },
56 { MBC_TRACE_CONTROL },
57 { MBC_INITIALIZE_MULTIQ },
58 { MBC_IOCB_COMMAND_A64 },
59 { MBC_GET_ADAPTER_LOOP_ID },
60 { MBC_READ_SFP },
61 { MBC_SET_RNID_PARAMS },
62 { MBC_GET_RNID_PARAMS },
63 { MBC_GET_SET_ZIO_THRESHOLD },
64 };
65
66 static int is_rom_cmd(uint16_t cmd)
67 {
68 int i;
69 struct rom_cmd *wc;
70
71 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
72 wc = rom_cmds + i;
73 if (wc->cmd == cmd)
74 return 1;
75 }
76
77 return 0;
78 }
79
80 /*
81 * qla2x00_mailbox_command
82 * Issue a mailbox command and wait for completion.
83 *
84 * Input:
85 * ha = adapter block pointer.
86 * mcp = driver internal mbx struct pointer.
87 *
88 * Output:
89 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
90 *
91 * Returns:
92 * 0 : QLA_SUCCESS = cmd performed successfully
93 * 1 : QLA_FUNCTION_FAILED (error encountered)
94 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
95 *
96 * Context:
97 * Kernel context.
98 */
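/*
 * Illustrative caller sketch (not driver code; the register selections below
 * are an assumption for documentation only). Callers build an mbx_cmd_t on
 * the stack, pick which mailbox words to write and read back via the
 * out_mb/in_mb bitmasks, and then invoke qla2x00_mailbox_command():
 *
 *	mbx_cmd_t mc;
 *	mbx_cmd_t *mcp = &mc;
 *	int rval;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 *	mcp->out_mb = MBX_0;		// write mb[0] only
 *	mcp->in_mb = MBX_1|MBX_0;	// read back mb[0] and mb[1]
 *	mcp->tov = MBX_TOV_SECONDS;
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);
 *	if (rval != QLA_SUCCESS)
 *		;	// QLA_FUNCTION_FAILED, QLA_FUNCTION_TIMEOUT or QLA_ABORTED
 */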
99 static int
100 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
101 {
102 int rval, i;
103 unsigned long flags = 0;
104 device_reg_t *reg;
105 uint8_t abort_active, eeh_delay;
106 uint8_t io_lock_on;
107 uint16_t command = 0;
108 uint16_t *iptr;
109 __le16 __iomem *optr;
110 uint32_t cnt;
111 uint32_t mboxes;
112 unsigned long wait_time;
113 struct qla_hw_data *ha = vha->hw;
114 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
115 u32 chip_reset;
116
117
118 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
119
120 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
121 ql_log(ql_log_warn, vha, 0x1001,
122 "PCI channel failed permanently, exiting.\n");
123 return QLA_FUNCTION_TIMEOUT;
124 }
125
126 if (vha->device_flags & DFLG_DEV_FAILED) {
127 ql_log(ql_log_warn, vha, 0x1002,
128 "Device in failed state, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
130 }
131
132 /* if PCI error, then avoid mbx processing.*/
133 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
134 test_bit(UNLOADING, &base_vha->dpc_flags)) {
135 ql_log(ql_log_warn, vha, 0xd04e,
136 "PCI error, exiting.\n");
137 return QLA_FUNCTION_TIMEOUT;
138 }
139 eeh_delay = 0;
140 reg = ha->iobase;
141 io_lock_on = base_vha->flags.init_done;
142
143 rval = QLA_SUCCESS;
144 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
145 chip_reset = ha->chip_reset;
146
147 if (ha->flags.pci_channel_io_perm_failure) {
148 ql_log(ql_log_warn, vha, 0x1003,
149 "Perm failure on EEH timeout MBX, exiting.\n");
150 return QLA_FUNCTION_TIMEOUT;
151 }
152
153 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
154 /* Setting Link-Down error */
155 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
156 ql_log(ql_log_warn, vha, 0x1004,
157 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
158 return QLA_FUNCTION_TIMEOUT;
159 }
160
161 /* check if ISP abort is active and return cmd with timeout */
162 if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
163 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
164 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
165 !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
166 ql_log(ql_log_info, vha, 0x1005,
167 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
168 mcp->mb[0]);
169 return QLA_FUNCTION_TIMEOUT;
170 }
171
172 atomic_inc(&ha->num_pend_mbx_stage1);
173 /*
174 * Wait for active mailbox commands to finish by waiting at most tov
175 * seconds. This is to serialize actual issuing of mailbox cmds during
176 * non ISP abort time.
177 */
178 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
179 /* Timeout occurred. Return error. */
180 ql_log(ql_log_warn, vha, 0xd035,
181 "Cmd access timeout, cmd=0x%x, Exiting.\n",
182 mcp->mb[0]);
183 vha->hw_err_cnt++;
184 atomic_dec(&ha->num_pend_mbx_stage1);
185 return QLA_FUNCTION_TIMEOUT;
186 }
187 atomic_dec(&ha->num_pend_mbx_stage1);
188 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
189 ha->flags.eeh_busy) {
190 ql_log(ql_log_warn, vha, 0xd035,
191 "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
192 ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
193 rval = QLA_ABORTED;
194 goto premature_exit;
195 }
196
197
198 /* Save mailbox command for debug */
199 ha->mcp = mcp;
200
201 ql_dbg(ql_dbg_mbx, vha, 0x1006,
202 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
203
204 spin_lock_irqsave(&ha->hardware_lock, flags);
205
206 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
207 ha->flags.mbox_busy) {
208 rval = QLA_ABORTED;
209 spin_unlock_irqrestore(&ha->hardware_lock, flags);
210 goto premature_exit;
211 }
212 ha->flags.mbox_busy = 1;
213
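/*
 * Note: mcp->out_mb and mcp->in_mb are bitmasks (MBX_0, MBX_1, ...) selecting
 * which mcp->mb[] words are written to, and later read back from, the chip's
 * mailbox registers; bit n corresponds to mb[n], as the copy loop below and
 * the one in the completion path show.
 */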
214 /* Load mailbox registers. */
215 if (IS_P3P_TYPE(ha))
216 optr = &reg->isp82.mailbox_in[0];
217 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
218 optr = &reg->isp24.mailbox0;
219 else
220 optr = MAILBOX_REG(ha, &reg->isp, 0);
221
222 iptr = mcp->mb;
223 command = mcp->mb[0];
224 mboxes = mcp->out_mb;
225
226 ql_dbg(ql_dbg_mbx, vha, 0x1111,
227 "Mailbox registers (OUT):\n");
228 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
229 if (IS_QLA2200(ha) && cnt == 8)
230 optr = MAILBOX_REG(ha, &reg->isp, 8);
231 if (mboxes & BIT_0) {
232 ql_dbg(ql_dbg_mbx, vha, 0x1112,
233 "mbox[%d]<-0x%04x\n", cnt, *iptr);
234 wrt_reg_word(optr, *iptr);
235 }
236
237 mboxes >>= 1;
238 optr++;
239 iptr++;
240 }
241
242 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
243 "I/O Address = %p.\n", optr);
244
245 /* Issue set host interrupt command to send cmd out. */
246 ha->flags.mbox_int = 0;
247 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
248
249 /* Unlock mbx registers and wait for interrupt */
250 ql_dbg(ql_dbg_mbx, vha, 0x100f,
251 "Going to unlock irq & waiting for interrupts. "
252 "jiffies=%lx.\n", jiffies);
253
254 /* Wait for mbx cmd completion until timeout */
255 atomic_inc(&ha->num_pend_mbx_stage2);
256 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
257 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
258
259 if (IS_P3P_TYPE(ha))
260 wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
261 else if (IS_FWI2_CAPABLE(ha))
262 wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
263 else
264 wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
265 spin_unlock_irqrestore(&ha->hardware_lock, flags);
266
267 wait_time = jiffies;
268 atomic_inc(&ha->num_pend_mbx_stage3);
269 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
270 mcp->tov * HZ)) {
271 if (chip_reset != ha->chip_reset) {
272 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
273
274 spin_lock_irqsave(&ha->hardware_lock, flags);
275 ha->flags.mbox_busy = 0;
276 spin_unlock_irqrestore(&ha->hardware_lock,
277 flags);
278 atomic_dec(&ha->num_pend_mbx_stage2);
279 atomic_dec(&ha->num_pend_mbx_stage3);
280 rval = QLA_ABORTED;
281 goto premature_exit;
282 }
283 ql_dbg(ql_dbg_mbx, vha, 0x117a,
284 "cmd=%x Timeout.\n", command);
285 spin_lock_irqsave(&ha->hardware_lock, flags);
286 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
288
289 } else if (ha->flags.purge_mbox ||
290 chip_reset != ha->chip_reset) {
291 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
292
293 spin_lock_irqsave(&ha->hardware_lock, flags);
294 ha->flags.mbox_busy = 0;
295 spin_unlock_irqrestore(&ha->hardware_lock, flags);
296 atomic_dec(&ha->num_pend_mbx_stage2);
297 atomic_dec(&ha->num_pend_mbx_stage3);
298 rval = QLA_ABORTED;
299 goto premature_exit;
300 }
301 atomic_dec(&ha->num_pend_mbx_stage3);
302
303 if (time_after(jiffies, wait_time + 5 * HZ))
304 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
305 command, jiffies_to_msecs(jiffies - wait_time));
306 } else {
307 ql_dbg(ql_dbg_mbx, vha, 0x1011,
308 "Cmd=%x Polling Mode.\n", command);
309
310 if (IS_P3P_TYPE(ha)) {
311 if (rd_reg_dword(&reg->isp82.hint) &
312 HINT_MBX_INT_PENDING) {
313 ha->flags.mbox_busy = 0;
314 spin_unlock_irqrestore(&ha->hardware_lock,
315 flags);
316 atomic_dec(&ha->num_pend_mbx_stage2);
317 ql_dbg(ql_dbg_mbx, vha, 0x1012,
318 "Pending mailbox timeout, exiting.\n");
319 vha->hw_err_cnt++;
320 rval = QLA_FUNCTION_TIMEOUT;
321 goto premature_exit;
322 }
323 wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
324 } else if (IS_FWI2_CAPABLE(ha))
325 wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
326 else
327 wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
328 spin_unlock_irqrestore(&ha->hardware_lock, flags);
329
330 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
331 while (!ha->flags.mbox_int) {
332 if (ha->flags.purge_mbox ||
333 chip_reset != ha->chip_reset) {
334 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
335
336 spin_lock_irqsave(&ha->hardware_lock, flags);
337 ha->flags.mbox_busy = 0;
338 spin_unlock_irqrestore(&ha->hardware_lock,
339 flags);
340 atomic_dec(&ha->num_pend_mbx_stage2);
341 rval = QLA_ABORTED;
342 goto premature_exit;
343 }
344
345 if (time_after(jiffies, wait_time))
346 break;
347
348 /* Check for pending interrupts. */
349 qla2x00_poll(ha->rsp_q_map[0]);
350
351 if (!ha->flags.mbox_int &&
352 !(IS_QLA2200(ha) &&
353 command == MBC_LOAD_RISC_RAM_EXTENDED))
354 msleep(10);
355 } /* while */
356 ql_dbg(ql_dbg_mbx, vha, 0x1013,
357 "Waited %d sec.\n",
358 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
359 }
360 atomic_dec(&ha->num_pend_mbx_stage2);
361
362 /* Check whether we timed out */
363 if (ha->flags.mbox_int) {
364 uint16_t *iptr2;
365
366 ql_dbg(ql_dbg_mbx, vha, 0x1014,
367 "Cmd=%x completed.\n", command);
368
369 /* Got interrupt. Clear the flag. */
370 ha->flags.mbox_int = 0;
371 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
372
373 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
374 spin_lock_irqsave(&ha->hardware_lock, flags);
375 ha->flags.mbox_busy = 0;
376 spin_unlock_irqrestore(&ha->hardware_lock, flags);
377
378 /* Setting Link-Down error */
379 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
380 ha->mcp = NULL;
381 rval = QLA_FUNCTION_FAILED;
382 ql_log(ql_log_warn, vha, 0xd048,
383 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
384 goto premature_exit;
385 }
386
387 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
388 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
389 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
390 MBS_COMMAND_COMPLETE);
391 rval = QLA_FUNCTION_FAILED;
392 }
393
394 /* Load return mailbox registers. */
395 iptr2 = mcp->mb;
396 iptr = (uint16_t *)&ha->mailbox_out[0];
397 mboxes = mcp->in_mb;
398
399 ql_dbg(ql_dbg_mbx, vha, 0x1113,
400 "Mailbox registers (IN):\n");
401 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
402 if (mboxes & BIT_0) {
403 *iptr2 = *iptr;
404 ql_dbg(ql_dbg_mbx, vha, 0x1114,
405 "mbox[%d]->0x%04x\n", cnt, *iptr2);
406 }
407
408 mboxes >>= 1;
409 iptr2++;
410 iptr++;
411 }
412 } else {
413
414 uint16_t mb[8];
415 uint32_t ictrl, host_status, hccr;
416 uint16_t w;
417
418 if (IS_FWI2_CAPABLE(ha)) {
419 mb[0] = rd_reg_word(&reg->isp24.mailbox0);
420 mb[1] = rd_reg_word(&reg->isp24.mailbox1);
421 mb[2] = rd_reg_word(&reg->isp24.mailbox2);
422 mb[3] = rd_reg_word(&reg->isp24.mailbox3);
423 mb[7] = rd_reg_word(&reg->isp24.mailbox7);
424 ictrl = rd_reg_dword(&reg->isp24.ictrl);
425 host_status = rd_reg_dword(&reg->isp24.host_status);
426 hccr = rd_reg_dword(&reg->isp24.hccr);
427
428 ql_log(ql_log_warn, vha, 0xd04c,
429 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
430 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
431 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
432 mb[7], host_status, hccr);
433 vha->hw_err_cnt++;
434
435 } else {
436 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
437 ictrl = rd_reg_word(&reg->isp.ictrl);
438 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
439 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
440 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
441 vha->hw_err_cnt++;
442 }
443 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
444
445 /* Capture FW dump only if the PCI device is active */
446 if (!pci_channel_offline(vha->hw->pdev)) {
447 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
448 if (w == 0xffff || ictrl == 0xffffffff ||
449 (chip_reset != ha->chip_reset)) {
450 /* This is a special case: if a driver unload
451 * is in progress and the PCI device goes into a
452 * bad state due to a PCI error condition, then
453 * only the PCI ERR flag would be set. Exit
454 * prematurely in that case.
455 */
456 spin_lock_irqsave(&ha->hardware_lock, flags);
457 ha->flags.mbox_busy = 0;
458 spin_unlock_irqrestore(&ha->hardware_lock,
459 flags);
460 rval = QLA_FUNCTION_TIMEOUT;
461 goto premature_exit;
462 }
463
464 /* Attempt to capture a firmware dump for further
465 * analysis of the current firmware state. We do not
466 * need to do this if we are intentionally generating
467 * a dump.
468 */
469 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
470 qla2xxx_dump_fw(vha);
471 rval = QLA_FUNCTION_TIMEOUT;
472 }
473 }
474 spin_lock_irqsave(&ha->hardware_lock, flags);
475 ha->flags.mbox_busy = 0;
476 spin_unlock_irqrestore(&ha->hardware_lock, flags);
477
478 /* Clean up */
479 ha->mcp = NULL;
480
481 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
482 ql_dbg(ql_dbg_mbx, vha, 0x101a,
483 "Checking for additional resp interrupt.\n");
484
485 /* polling mode for non isp_abort commands. */
486 qla2x00_poll(ha->rsp_q_map[0]);
487 }
488
489 if (rval == QLA_FUNCTION_TIMEOUT &&
490 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
491 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
492 ha->flags.eeh_busy) {
493 /* not in dpc. schedule it for dpc to take over. */
494 ql_dbg(ql_dbg_mbx, vha, 0x101b,
495 "Timeout, schedule isp_abort_needed.\n");
496
497 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
498 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
499 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
500 if (IS_QLA82XX(ha)) {
501 ql_dbg(ql_dbg_mbx, vha, 0x112a,
502 "disabling pause transmit on port "
503 "0 & 1.\n");
504 qla82xx_wr_32(ha,
505 QLA82XX_CRB_NIU + 0x98,
506 CRB_NIU_XG_PAUSE_CTL_P0|
507 CRB_NIU_XG_PAUSE_CTL_P1);
508 }
509 ql_log(ql_log_info, base_vha, 0x101c,
510 "Mailbox cmd timeout occurred, cmd=0x%x, "
511 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
512 "abort.\n", command, mcp->mb[0],
513 ha->flags.eeh_busy);
514 vha->hw_err_cnt++;
515 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
516 qla2xxx_wake_dpc(vha);
517 }
518 } else if (current == ha->dpc_thread) {
519 /* call abort directly since we are in the DPC thread */
520 ql_dbg(ql_dbg_mbx, vha, 0x101d,
521 "Timeout, calling abort_isp.\n");
522
523 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
524 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
525 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
526 if (IS_QLA82XX(ha)) {
527 ql_dbg(ql_dbg_mbx, vha, 0x112b,
528 "disabling pause transmit on port "
529 "0 & 1.\n");
530 qla82xx_wr_32(ha,
531 QLA82XX_CRB_NIU + 0x98,
532 CRB_NIU_XG_PAUSE_CTL_P0|
533 CRB_NIU_XG_PAUSE_CTL_P1);
534 }
535 ql_log(ql_log_info, base_vha, 0x101e,
536 "Mailbox cmd timeout occurred, cmd=0x%x, "
537 "mb[0]=0x%x. Scheduling ISP abort ",
538 command, mcp->mb[0]);
539 vha->hw_err_cnt++;
540 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
541 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
542 /* Allow next mbx cmd to come in. */
543 complete(&ha->mbx_cmd_comp);
544 if (ha->isp_ops->abort_isp(vha) &&
545 !ha->flags.eeh_busy) {
546 /* Failed. retry later. */
547 set_bit(ISP_ABORT_NEEDED,
548 &vha->dpc_flags);
549 }
550 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
551 ql_dbg(ql_dbg_mbx, vha, 0x101f,
552 "Finished abort_isp.\n");
553 goto mbx_done;
554 }
555 }
556 }
557
558 premature_exit:
559 /* Allow next mbx cmd to come in. */
560 complete(&ha->mbx_cmd_comp);
561
562 mbx_done:
563 if (rval == QLA_ABORTED) {
564 ql_log(ql_log_info, vha, 0xd035,
565 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
566 mcp->mb[0]);
567 } else if (rval) {
568 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
569 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
570 dev_name(&ha->pdev->dev), 0x1020+0x800,
571 vha->host_no, rval);
572 mboxes = mcp->in_mb;
573 cnt = 4;
574 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
575 if (mboxes & BIT_0) {
576 printk(" mb[%u]=%x", i, mcp->mb[i]);
577 cnt--;
578 }
579 pr_warn(" cmd=%x ****\n", command);
580 }
581 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
582 ql_dbg(ql_dbg_mbx, vha, 0x1198,
583 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
584 rd_reg_dword(&reg->isp24.host_status),
585 rd_reg_dword(&reg->isp24.ictrl),
586 rd_reg_dword(&reg->isp24.istatus));
587 } else {
588 ql_dbg(ql_dbg_mbx, vha, 0x1206,
589 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
590 rd_reg_word(&reg->isp.ctrl_status),
591 rd_reg_word(&reg->isp.ictrl),
592 rd_reg_word(&reg->isp.istatus));
593 }
594 } else {
595 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
596 }
597
598 i = 500;
599 while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
600 /*
601 * The caller of this mailbox encountered a PCI error.
602 * Hold the thread until the PCIe link reset completes to make
603 * sure the caller does not unmap DMA while recovery is
604 * in progress.
605 */
606 msleep(1);
607 i--;
608 }
609 return rval;
610 }
611
612 int
613 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
614 uint32_t risc_code_size)
615 {
616 int rval;
617 struct qla_hw_data *ha = vha->hw;
618 mbx_cmd_t mc;
619 mbx_cmd_t *mcp = &mc;
620
621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
622 "Entered %s.\n", __func__);
623
624 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
625 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
626 mcp->mb[8] = MSW(risc_addr);
627 mcp->out_mb = MBX_8|MBX_0;
628 } else {
629 mcp->mb[0] = MBC_LOAD_RISC_RAM;
630 mcp->out_mb = MBX_0;
631 }
632 mcp->mb[1] = LSW(risc_addr);
633 mcp->mb[2] = MSW(req_dma);
634 mcp->mb[3] = LSW(req_dma);
635 mcp->mb[6] = MSW(MSD(req_dma));
636 mcp->mb[7] = LSW(MSD(req_dma));
637 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
638 if (IS_FWI2_CAPABLE(ha)) {
639 mcp->mb[4] = MSW(risc_code_size);
640 mcp->mb[5] = LSW(risc_code_size);
641 mcp->out_mb |= MBX_5|MBX_4;
642 } else {
643 mcp->mb[4] = LSW(risc_code_size);
644 mcp->out_mb |= MBX_4;
645 }
646
647 mcp->in_mb = MBX_1|MBX_0;
648 mcp->tov = MBX_TOV_SECONDS;
649 mcp->flags = 0;
650 rval = qla2x00_mailbox_command(vha, mcp);
651
652 if (rval != QLA_SUCCESS) {
653 ql_dbg(ql_dbg_mbx, vha, 0x1023,
654 "Failed=%x mb[0]=%x mb[1]=%x.\n",
655 rval, mcp->mb[0], mcp->mb[1]);
656 vha->hw_err_cnt++;
657 } else {
658 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
659 "Done %s.\n", __func__);
660 }
661
662 return rval;
663 }
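/*
 * Descriptive note (not driver code): the 64-bit DMA address above is split
 * across mailbox words as mb[3]=LSW/mb[2]=MSW of the low 32 bits and
 * mb[7]=LSW/mb[6]=MSW of the high 32 bits, with the RISC address low word in
 * mb[1] (and its high word in mb[8] for the extended command).
 */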
664
665 #define NVME_ENABLE_FLAG BIT_3
666 #define EDIF_HW_SUPPORT BIT_10
667
668 /*
669 * qla2x00_execute_fw
670 * Start adapter firmware.
671 *
672 * Input:
673 * ha = adapter block pointer.
674 * TARGET_QUEUE_LOCK must be released.
675 * ADAPTER_STATE_LOCK must be released.
676 *
677 * Returns:
678 * qla2x00 local function return status code.
679 *
680 * Context:
681 * Kernel context.
682 */
683 int
684 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
685 {
686 int rval;
687 struct qla_hw_data *ha = vha->hw;
688 mbx_cmd_t mc;
689 mbx_cmd_t *mcp = &mc;
690 u8 semaphore = 0;
691 #define EXE_FW_FORCE_SEMAPHORE BIT_7
692 u8 retry = 3;
693
694 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
695 "Entered %s.\n", __func__);
696
697 again:
698 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
699 mcp->out_mb = MBX_0;
700 mcp->in_mb = MBX_0;
701 if (IS_FWI2_CAPABLE(ha)) {
702 mcp->mb[1] = MSW(risc_addr);
703 mcp->mb[2] = LSW(risc_addr);
704 mcp->mb[3] = 0;
705 mcp->mb[4] = 0;
706 mcp->mb[11] = 0;
707
708 /* Enable BPM? */
709 if (ha->flags.lr_detected) {
710 mcp->mb[4] = BIT_0;
711 if (IS_BPM_RANGE_CAPABLE(ha))
712 mcp->mb[4] |=
713 ha->lr_distance << LR_DIST_FW_POS;
714 }
715
716 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
717 mcp->mb[4] |= NVME_ENABLE_FLAG;
718
719 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
720 struct nvram_81xx *nv = ha->nvram;
721 /* set minimum speed if specified in nvram */
722 if (nv->min_supported_speed >= 2 &&
723 nv->min_supported_speed <= 5) {
724 mcp->mb[4] |= BIT_4;
725 mcp->mb[11] |= nv->min_supported_speed & 0xF;
726 mcp->out_mb |= MBX_11;
727 mcp->in_mb |= BIT_5;
728 vha->min_supported_speed =
729 nv->min_supported_speed;
730 }
731 }
732
733 if (ha->flags.exlogins_enabled)
734 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
735
736 if (ha->flags.exchoffld_enabled)
737 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
738
739 if (semaphore)
740 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
741
742 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
743 mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1;
744 } else {
745 mcp->mb[1] = LSW(risc_addr);
746 mcp->out_mb |= MBX_1;
747 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
748 mcp->mb[2] = 0;
749 mcp->out_mb |= MBX_2;
750 }
751 }
752
753 mcp->tov = MBX_TOV_SECONDS;
754 mcp->flags = 0;
755 rval = qla2x00_mailbox_command(vha, mcp);
756
757 if (rval != QLA_SUCCESS) {
758 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
759 mcp->mb[1] == 0x27 && retry) {
760 semaphore = 1;
761 retry--;
762 ql_dbg(ql_dbg_async, vha, 0x1026,
763 "Exe FW: force semaphore.\n");
764 goto again;
765 }
766
767 ql_dbg(ql_dbg_mbx, vha, 0x1026,
768 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
769 vha->hw_err_cnt++;
770 return rval;
771 }
772
773 if (!IS_FWI2_CAPABLE(ha))
774 goto done;
775
776 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
777 ql_dbg(ql_dbg_mbx, vha, 0x119a,
778 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
779 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
780 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
781 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
782 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
783 ha->max_supported_speed == 0 ? "16Gps" :
784 ha->max_supported_speed == 1 ? "32Gps" :
785 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
786 if (vha->min_supported_speed) {
787 ha->min_supported_speed = mcp->mb[5] &
788 (BIT_0 | BIT_1 | BIT_2);
789 ql_dbg(ql_dbg_mbx, vha, 0x119c,
790 "min_supported_speed=%s.\n",
791 ha->min_supported_speed == 6 ? "64Gps" :
792 ha->min_supported_speed == 5 ? "32Gps" :
793 ha->min_supported_speed == 4 ? "16Gps" :
794 ha->min_supported_speed == 3 ? "8Gps" :
795 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
796 }
797 }
798
799 if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
800 ha->flags.edif_hw = 1;
801 ql_log(ql_log_info, vha, 0xffff,
802 "%s: edif HW\n", __func__);
803 }
804
805 done:
806 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
807 "Done %s.\n", __func__);
808
809 return rval;
810 }
811
812 /*
813 * qla_get_exlogin_status
814 * Get extended login status
815 * uses the memory offload control/status Mailbox
816 *
817 * Input:
818 * ha: adapter state pointer.
819 * fwopt: firmware options
820 *
821 * Returns:
822 * qla2x00 local function status
823 *
824 * Context:
825 * Kernel context.
826 */
827 #define FETCH_XLOGINS_STAT 0x8
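/*
 * Illustrative usage sketch (assumption, for documentation only):
 *
 *	uint16_t sz, cnt;
 *
 *	if (qla_get_exlogin_status(vha, &sz, &cnt) == QLA_SUCCESS)
 *		;	// sz = buffer size, cnt = extended login count from FW
 */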
828 int
829 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
830 uint16_t *ex_logins_cnt)
831 {
832 int rval;
833 mbx_cmd_t mc;
834 mbx_cmd_t *mcp = &mc;
835
836 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
837 "Entered %s\n", __func__);
838
839 memset(mcp->mb, 0 , sizeof(mcp->mb));
840 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
841 mcp->mb[1] = FETCH_XLOGINS_STAT;
842 mcp->out_mb = MBX_1|MBX_0;
843 mcp->in_mb = MBX_10|MBX_4|MBX_0;
844 mcp->tov = MBX_TOV_SECONDS;
845 mcp->flags = 0;
846
847 rval = qla2x00_mailbox_command(vha, mcp);
848 if (rval != QLA_SUCCESS) {
849 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
850 } else {
851 *buf_sz = mcp->mb[4];
852 *ex_logins_cnt = mcp->mb[10];
853
854 ql_log(ql_log_info, vha, 0x1190,
855 "buffer size 0x%x, exchange login count=%d\n",
856 mcp->mb[4], mcp->mb[10]);
857
858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
859 "Done %s.\n", __func__);
860 }
861
862 return rval;
863 }
864
865 /*
866 * qla_set_exlogin_mem_cfg
867 * Set extended login memory configuration
868 * Mbx needs to be issued before init_cb is set
869 *
870 * Input:
871 * ha: adapter state pointer.
872 * buffer: buffer pointer
873 * phys_addr: physical address of buffer
874 * size: size of buffer
875 * TARGET_QUEUE_LOCK must be released
876 * ADAPTER_STATE_LOCK must be released
877 *
878 * Returns:
879 * qla2x00 local function status code.
880 *
881 * Context:
882 * Kernel context.
883 */
884 #define CONFIG_XLOGINS_MEM 0x9
885 int
886 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
887 {
888 int rval;
889 mbx_cmd_t mc;
890 mbx_cmd_t *mcp = &mc;
891 struct qla_hw_data *ha = vha->hw;
892
893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
894 "Entered %s.\n", __func__);
895
896 memset(mcp->mb, 0 , sizeof(mcp->mb));
897 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
898 mcp->mb[1] = CONFIG_XLOGINS_MEM;
899 mcp->mb[2] = MSW(phys_addr);
900 mcp->mb[3] = LSW(phys_addr);
901 mcp->mb[6] = MSW(MSD(phys_addr));
902 mcp->mb[7] = LSW(MSD(phys_addr));
903 mcp->mb[8] = MSW(ha->exlogin_size);
904 mcp->mb[9] = LSW(ha->exlogin_size);
905 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
906 mcp->in_mb = MBX_11|MBX_0;
907 mcp->tov = MBX_TOV_SECONDS;
908 mcp->flags = 0;
909 rval = qla2x00_mailbox_command(vha, mcp);
910 if (rval != QLA_SUCCESS) {
911 ql_dbg(ql_dbg_mbx, vha, 0x111b,
912 "EXlogin Failed=%x. MB0=%x MB11=%x\n",
913 rval, mcp->mb[0], mcp->mb[11]);
914 } else {
915 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
916 "Done %s.\n", __func__);
917 }
918
919 return rval;
920 }
921
922 /*
923 * qla_get_exchoffld_status
924 * Get exchange offload status
925 * uses the memory offload control/status Mailbox
926 *
927 * Input:
928 * ha: adapter state pointer.
929 * fwopt: firmware options
930 *
931 * Returns:
932 * qla2x00 local function status
933 *
934 * Context:
935 * Kernel context.
936 */
937 #define FETCH_XCHOFFLD_STAT 0x2
938 int
939 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
940 uint16_t *ex_logins_cnt)
941 {
942 int rval;
943 mbx_cmd_t mc;
944 mbx_cmd_t *mcp = &mc;
945
946 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
947 "Entered %s\n", __func__);
948
949 memset(mcp->mb, 0 , sizeof(mcp->mb));
950 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
951 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
952 mcp->out_mb = MBX_1|MBX_0;
953 mcp->in_mb = MBX_10|MBX_4|MBX_0;
954 mcp->tov = MBX_TOV_SECONDS;
955 mcp->flags = 0;
956
957 rval = qla2x00_mailbox_command(vha, mcp);
958 if (rval != QLA_SUCCESS) {
959 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
960 } else {
961 *buf_sz = mcp->mb[4];
962 *ex_logins_cnt = mcp->mb[10];
963
964 ql_log(ql_log_info, vha, 0x118e,
965 "buffer size 0x%x, exchange offload count=%d\n",
966 mcp->mb[4], mcp->mb[10]);
967
968 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
969 "Done %s.\n", __func__);
970 }
971
972 return rval;
973 }
974
975 /*
976 * qla_set_exchoffld_mem_cfg
977 * Set exchange offload memory configuration
978 * Mbx needs to be issued before init_cb is set
979 *
980 * Input:
981 * ha: adapter state pointer.
982 * buffer: buffer pointer
983 * phys_addr: physical address of buffer
984 * size: size of buffer
985 * TARGET_QUEUE_LOCK must be released
986 * ADAPTER_STATE_LOCK must be released
987 *
988 * Returns:
989 * qla2x00 local function status code.
990 *
991 * Context:
992 * Kernel context.
993 */
994 #define CONFIG_XCHOFFLD_MEM 0x3
995 int
996 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
997 {
998 int rval;
999 mbx_cmd_t mc;
1000 mbx_cmd_t *mcp = &mc;
1001 struct qla_hw_data *ha = vha->hw;
1002
1003 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
1004 "Entered %s.\n", __func__);
1005
1006 memset(mcp->mb, 0 , sizeof(mcp->mb));
1007 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1008 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1009 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1010 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1011 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1012 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1013 mcp->mb[8] = MSW(ha->exchoffld_size);
1014 mcp->mb[9] = LSW(ha->exchoffld_size);
1015 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1016 mcp->in_mb = MBX_11|MBX_0;
1017 mcp->tov = MBX_TOV_SECONDS;
1018 mcp->flags = 0;
1019 rval = qla2x00_mailbox_command(vha, mcp);
1020 if (rval != QLA_SUCCESS) {
1021 /*EMPTY*/
1022 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1023 } else {
1024 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1025 "Done %s.\n", __func__);
1026 }
1027
1028 return rval;
1029 }
1030
1031 /*
1032 * qla2x00_get_fw_version
1033 * Get firmware version.
1034 *
1035 * Input:
1036 * ha: adapter state pointer.
1037 * major: pointer for major number.
1038 * minor: pointer for minor number.
1039 * subminor: pointer for subminor number.
1040 *
1041 * Returns:
1042 * qla2x00 local function return status code.
1043 *
1044 * Context:
1045 * Kernel context.
1046 */
1047 int
1048 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1049 {
1050 int rval;
1051 mbx_cmd_t mc;
1052 mbx_cmd_t *mcp = &mc;
1053 struct qla_hw_data *ha = vha->hw;
1054
1055 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1056 "Entered %s.\n", __func__);
1057
1058 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1059 mcp->out_mb = MBX_0;
1060 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1061 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1062 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1063 if (IS_FWI2_CAPABLE(ha))
1064 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1065 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1066 mcp->in_mb |=
1067 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1068 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1069
1070 mcp->flags = 0;
1071 mcp->tov = MBX_TOV_SECONDS;
1072 rval = qla2x00_mailbox_command(vha, mcp);
1073 if (rval != QLA_SUCCESS)
1074 goto failed;
1075
1076 /* Return mailbox data. */
1077 ha->fw_major_version = mcp->mb[1];
1078 ha->fw_minor_version = mcp->mb[2];
1079 ha->fw_subminor_version = mcp->mb[3];
1080 ha->fw_attributes = mcp->mb[6];
1081 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1082 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1083 else
1084 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1085
1086 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1087 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1088 ha->mpi_version[1] = mcp->mb[11] >> 8;
1089 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1090 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1091 ha->phy_version[0] = mcp->mb[8] & 0xff;
1092 ha->phy_version[1] = mcp->mb[9] >> 8;
1093 ha->phy_version[2] = mcp->mb[9] & 0xff;
1094 }
1095
1096 if (IS_FWI2_CAPABLE(ha)) {
1097 ha->fw_attributes_h = mcp->mb[15];
1098 ha->fw_attributes_ext[0] = mcp->mb[16];
1099 ha->fw_attributes_ext[1] = mcp->mb[17];
1100 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1101 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1102 __func__, mcp->mb[15], mcp->mb[6]);
1103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1104 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1105 __func__, mcp->mb[17], mcp->mb[16]);
1106
1107 if (ha->fw_attributes_h & 0x4)
1108 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1109 "%s: Firmware supports Extended Login 0x%x\n",
1110 __func__, ha->fw_attributes_h);
1111
1112 if (ha->fw_attributes_h & 0x8)
1113 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1114 "%s: Firmware supports Exchange Offload 0x%x\n",
1115 __func__, ha->fw_attributes_h);
1116
1117 /*
1118 * FW supports nvme and driver load parameter requested nvme.
1119 * BIT 26 of fw_attributes indicates NVMe support.
1120 */
1121 if ((ha->fw_attributes_h &
1122 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1123 ql2xnvmeenable) {
1124 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1125 vha->flags.nvme_first_burst = 1;
1126
1127 vha->flags.nvme_enabled = 1;
1128 ql_log(ql_log_info, vha, 0xd302,
1129 "%s: FC-NVMe is Enabled (0x%x)\n",
1130 __func__, ha->fw_attributes_h);
1131 }
1132
1133 /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1134 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1135 ql_log(ql_log_info, vha, 0xd302,
1136 "Firmware supports NVMe2 0x%x\n",
1137 ha->fw_attributes_ext[0]);
1138 vha->flags.nvme2_enabled = 1;
1139 }
1140
1141 if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
1142 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
1143 ha->flags.edif_enabled = 1;
1144 ql_log(ql_log_info, vha, 0xffff,
1145 "%s: edif is enabled\n", __func__);
1146 }
1147 }
1148
1149 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1150 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1151 ha->serdes_version[1] = mcp->mb[8] >> 8;
1152 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1153 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1154 ha->mpi_version[1] = mcp->mb[11] >> 8;
1155 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1156 ha->pep_version[0] = mcp->mb[13] & 0xff;
1157 ha->pep_version[1] = mcp->mb[14] >> 8;
1158 ha->pep_version[2] = mcp->mb[14] & 0xff;
1159 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1160 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1161 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1162 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1163 if (IS_QLA28XX(ha)) {
1164 if (mcp->mb[16] & BIT_10)
1165 ha->flags.secure_fw = 1;
1166
1167 ql_log(ql_log_info, vha, 0xffff,
1168 "Secure Flash Update in FW: %s\n",
1169 (ha->flags.secure_fw) ? "Supported" :
1170 "Not Supported");
1171 }
1172
1173 if (ha->flags.scm_supported_a &&
1174 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1175 ha->flags.scm_supported_f = 1;
1176 ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1177 }
1178 ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
1179 (ha->flags.scm_supported_f) ? "Supported" :
1180 "Not Supported");
1181
1182 if (vha->flags.nvme2_enabled) {
1183 /* set BIT_15 of special feature control block for SLER */
1184 ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1185 /* set BIT_14 of special feature control block for PI CTRL*/
1186 ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1187 }
1188 }
1189
1190 failed:
1191 if (rval != QLA_SUCCESS) {
1192 /*EMPTY*/
1193 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1194 } else {
1195 /*EMPTY*/
1196 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1197 "Done %s.\n", __func__);
1198 }
1199 return rval;
1200 }
1201
1202 /*
1203 * qla2x00_get_fw_options
1204 * Get firmware options.
1205 *
1206 * Input:
1207 * ha = adapter block pointer.
1208 * fwopt = pointer for firmware options.
1209 *
1210 * Returns:
1211 * qla2x00 local function return status code.
1212 *
1213 * Context:
1214 * Kernel context.
1215 */
1216 int
1217 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1218 {
1219 int rval;
1220 mbx_cmd_t mc;
1221 mbx_cmd_t *mcp = &mc;
1222
1223 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1224 "Entered %s.\n", __func__);
1225
1226 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1227 mcp->out_mb = MBX_0;
1228 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1229 mcp->tov = MBX_TOV_SECONDS;
1230 mcp->flags = 0;
1231 rval = qla2x00_mailbox_command(vha, mcp);
1232
1233 if (rval != QLA_SUCCESS) {
1234 /*EMPTY*/
1235 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1236 } else {
1237 fwopts[0] = mcp->mb[0];
1238 fwopts[1] = mcp->mb[1];
1239 fwopts[2] = mcp->mb[2];
1240 fwopts[3] = mcp->mb[3];
1241
1242 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1243 "Done %s.\n", __func__);
1244 }
1245
1246 return rval;
1247 }
1248
1249
1250 /*
1251 * qla2x00_set_fw_options
1252 * Set firmware options.
1253 *
1254 * Input:
1255 * ha = adapter block pointer.
1256 * fwopt = pointer for firmware options.
1257 *
1258 * Returns:
1259 * qla2x00 local function return status code.
1260 *
1261 * Context:
1262 * Kernel context.
1263 */
1264 int
1265 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1266 {
1267 int rval;
1268 mbx_cmd_t mc;
1269 mbx_cmd_t *mcp = &mc;
1270
1271 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1272 "Entered %s.\n", __func__);
1273
1274 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1275 mcp->mb[1] = fwopts[1];
1276 mcp->mb[2] = fwopts[2];
1277 mcp->mb[3] = fwopts[3];
1278 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1279 mcp->in_mb = MBX_0;
1280 if (IS_FWI2_CAPABLE(vha->hw)) {
1281 mcp->in_mb |= MBX_1;
1282 mcp->mb[10] = fwopts[10];
1283 mcp->out_mb |= MBX_10;
1284 } else {
1285 mcp->mb[10] = fwopts[10];
1286 mcp->mb[11] = fwopts[11];
1287 mcp->mb[12] = 0; /* Undocumented, but used */
1288 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1289 }
1290 mcp->tov = MBX_TOV_SECONDS;
1291 mcp->flags = 0;
1292 rval = qla2x00_mailbox_command(vha, mcp);
1293
1294 fwopts[0] = mcp->mb[0];
1295
1296 if (rval != QLA_SUCCESS) {
1297 /*EMPTY*/
1298 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1299 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1300 } else {
1301 /*EMPTY*/
1302 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1303 "Done %s.\n", __func__);
1304 }
1305
1306 return rval;
1307 }
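/*
 * Illustrative sketch (not driver code): firmware options are typically read,
 * modified and written back as an array of 16-bit words; the option bit
 * toggled below is a hypothetical placeholder.
 *
 *	uint16_t fwopts[16] = { 0 };
 *
 *	if (qla2x00_get_fw_options(vha, fwopts) == QLA_SUCCESS) {
 *		fwopts[2] |= BIT_0;	// hypothetical option bit
 *		qla2x00_set_fw_options(vha, fwopts);
 *	}
 */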
1308
1309 /*
1310 * qla2x00_mbx_reg_test
1311 * Mailbox register wrap test.
1312 *
1313 * Input:
1314 * ha = adapter block pointer.
1315 * TARGET_QUEUE_LOCK must be released.
1316 * ADAPTER_STATE_LOCK must be released.
1317 *
1318 * Returns:
1319 * qla2x00 local function return status code.
1320 *
1321 * Context:
1322 * Kernel context.
1323 */
1324 int
1325 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1326 {
1327 int rval;
1328 mbx_cmd_t mc;
1329 mbx_cmd_t *mcp = &mc;
1330
1331 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1332 "Entered %s.\n", __func__);
1333
1334 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1335 mcp->mb[1] = 0xAAAA;
1336 mcp->mb[2] = 0x5555;
1337 mcp->mb[3] = 0xAA55;
1338 mcp->mb[4] = 0x55AA;
1339 mcp->mb[5] = 0xA5A5;
1340 mcp->mb[6] = 0x5A5A;
1341 mcp->mb[7] = 0x2525;
1342 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1343 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1344 mcp->tov = MBX_TOV_SECONDS;
1345 mcp->flags = 0;
1346 rval = qla2x00_mailbox_command(vha, mcp);
1347
1348 if (rval == QLA_SUCCESS) {
1349 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1350 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1351 rval = QLA_FUNCTION_FAILED;
1352 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1353 mcp->mb[7] != 0x2525)
1354 rval = QLA_FUNCTION_FAILED;
1355 }
1356
1357 if (rval != QLA_SUCCESS) {
1358 /*EMPTY*/
1359 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1360 vha->hw_err_cnt++;
1361 } else {
1362 /*EMPTY*/
1363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1364 "Done %s.\n", __func__);
1365 }
1366
1367 return rval;
1368 }
1369
1370 /*
1371 * qla2x00_verify_checksum
1372 * Verify firmware checksum.
1373 *
1374 * Input:
1375 * ha = adapter block pointer.
1376 * TARGET_QUEUE_LOCK must be released.
1377 * ADAPTER_STATE_LOCK must be released.
1378 *
1379 * Returns:
1380 * qla2x00 local function return status code.
1381 *
1382 * Context:
1383 * Kernel context.
1384 */
1385 int
1386 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1387 {
1388 int rval;
1389 mbx_cmd_t mc;
1390 mbx_cmd_t *mcp = &mc;
1391
1392 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1393 "Entered %s.\n", __func__);
1394
1395 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1396 mcp->out_mb = MBX_0;
1397 mcp->in_mb = MBX_0;
1398 if (IS_FWI2_CAPABLE(vha->hw)) {
1399 mcp->mb[1] = MSW(risc_addr);
1400 mcp->mb[2] = LSW(risc_addr);
1401 mcp->out_mb |= MBX_2|MBX_1;
1402 mcp->in_mb |= MBX_2|MBX_1;
1403 } else {
1404 mcp->mb[1] = LSW(risc_addr);
1405 mcp->out_mb |= MBX_1;
1406 mcp->in_mb |= MBX_1;
1407 }
1408
1409 mcp->tov = MBX_TOV_SECONDS;
1410 mcp->flags = 0;
1411 rval = qla2x00_mailbox_command(vha, mcp);
1412
1413 if (rval != QLA_SUCCESS) {
1414 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1415 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1416 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1417 } else {
1418 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1419 "Done %s.\n", __func__);
1420 }
1421
1422 return rval;
1423 }
1424
1425 /*
1426 * qla2x00_issue_iocb
1427 * Issue IOCB using mailbox command
1428 *
1429 * Input:
1430 * ha = adapter state pointer.
1431 * buffer = buffer pointer.
1432 * phys_addr = physical address of buffer.
1433 * size = size of buffer.
1434 * TARGET_QUEUE_LOCK must be released.
1435 * ADAPTER_STATE_LOCK must be released.
1436 *
1437 * Returns:
1438 * qla2x00 local function return status code.
1439 *
1440 * Context:
1441 * Kernel context.
1442 */
1443 int
1444 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1445 dma_addr_t phys_addr, size_t size, uint32_t tov)
1446 {
1447 int rval;
1448 mbx_cmd_t mc;
1449 mbx_cmd_t *mcp = &mc;
1450
1451 if (!vha->hw->flags.fw_started)
1452 return QLA_INVALID_COMMAND;
1453
1454 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1455 "Entered %s.\n", __func__);
1456
1457 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1458 mcp->mb[1] = 0;
1459 mcp->mb[2] = MSW(LSD(phys_addr));
1460 mcp->mb[3] = LSW(LSD(phys_addr));
1461 mcp->mb[6] = MSW(MSD(phys_addr));
1462 mcp->mb[7] = LSW(MSD(phys_addr));
1463 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1464 mcp->in_mb = MBX_1|MBX_0;
1465 mcp->tov = tov;
1466 mcp->flags = 0;
1467 rval = qla2x00_mailbox_command(vha, mcp);
1468
1469 if (rval != QLA_SUCCESS) {
1470 /*EMPTY*/
1471 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1472 } else {
1473 sts_entry_t *sts_entry = buffer;
1474
1475 /* Mask reserved bits. */
1476 sts_entry->entry_status &=
1477 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1478 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1479 "Done %s (status=%x).\n", __func__,
1480 sts_entry->entry_status);
1481 }
1482
1483 return rval;
1484 }
1485
1486 int
1487 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1488 size_t size)
1489 {
1490 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1491 MBX_TOV_SECONDS);
1492 }
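/*
 * Illustrative usage sketch (assumption, for documentation only): the caller
 * supplies a DMA-coherent buffer that holds the IOCB on entry and receives
 * the completion entry on return; entry_status is masked before "Done".
 *
 *	void *pkt = dma_alloc_coherent(&ha->pdev->dev, size, &pkt_dma,
 *	    GFP_KERNEL);
 *	...
 *	rval = qla2x00_issue_iocb(vha, pkt, pkt_dma, size);
 *	...
 *	dma_free_coherent(&ha->pdev->dev, size, pkt, pkt_dma);
 */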
1493
1494 /*
1495 * qla2x00_abort_command
1496 * Abort command aborts a specified IOCB.
1497 *
1498 * Input:
1499 * ha = adapter block pointer.
1500 * sp = SRB structure pointer.
1501 *
1502 * Returns:
1503 * qla2x00 local function return status code.
1504 *
1505 * Context:
1506 * Kernel context.
1507 */
1508 int
1509 qla2x00_abort_command(srb_t *sp)
1510 {
1511 unsigned long flags = 0;
1512 int rval;
1513 uint32_t handle = 0;
1514 mbx_cmd_t mc;
1515 mbx_cmd_t *mcp = &mc;
1516 fc_port_t *fcport = sp->fcport;
1517 scsi_qla_host_t *vha = fcport->vha;
1518 struct qla_hw_data *ha = vha->hw;
1519 struct req_que *req;
1520 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1521
1522 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1523 "Entered %s.\n", __func__);
1524
1525 if (sp->qpair)
1526 req = sp->qpair->req;
1527 else
1528 req = vha->req;
1529
1530 spin_lock_irqsave(&ha->hardware_lock, flags);
1531 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1532 if (req->outstanding_cmds[handle] == sp)
1533 break;
1534 }
1535 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1536
1537 if (handle == req->num_outstanding_cmds) {
1538 /* command not found */
1539 return QLA_FUNCTION_FAILED;
1540 }
1541
1542 mcp->mb[0] = MBC_ABORT_COMMAND;
1543 if (HAS_EXTENDED_IDS(ha))
1544 mcp->mb[1] = fcport->loop_id;
1545 else
1546 mcp->mb[1] = fcport->loop_id << 8;
1547 mcp->mb[2] = (uint16_t)handle;
1548 mcp->mb[3] = (uint16_t)(handle >> 16);
1549 mcp->mb[6] = (uint16_t)cmd->device->lun;
1550 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1551 mcp->in_mb = MBX_0;
1552 mcp->tov = MBX_TOV_SECONDS;
1553 mcp->flags = 0;
1554 rval = qla2x00_mailbox_command(vha, mcp);
1555
1556 if (rval != QLA_SUCCESS) {
1557 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1558 } else {
1559 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1560 "Done %s.\n", __func__);
1561 }
1562
1563 return rval;
1564 }
1565
1566 int
1567 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1568 {
1569 int rval, rval2;
1570 mbx_cmd_t mc;
1571 mbx_cmd_t *mcp = &mc;
1572 scsi_qla_host_t *vha;
1573
1574 vha = fcport->vha;
1575
1576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1577 "Entered %s.\n", __func__);
1578
1579 mcp->mb[0] = MBC_ABORT_TARGET;
1580 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1581 if (HAS_EXTENDED_IDS(vha->hw)) {
1582 mcp->mb[1] = fcport->loop_id;
1583 mcp->mb[10] = 0;
1584 mcp->out_mb |= MBX_10;
1585 } else {
1586 mcp->mb[1] = fcport->loop_id << 8;
1587 }
1588 mcp->mb[2] = vha->hw->loop_reset_delay;
1589 mcp->mb[9] = vha->vp_idx;
1590
1591 mcp->in_mb = MBX_0;
1592 mcp->tov = MBX_TOV_SECONDS;
1593 mcp->flags = 0;
1594 rval = qla2x00_mailbox_command(vha, mcp);
1595 if (rval != QLA_SUCCESS) {
1596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1597 "Failed=%x.\n", rval);
1598 }
1599
1600 /* Issue marker IOCB. */
1601 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1602 MK_SYNC_ID);
1603 if (rval2 != QLA_SUCCESS) {
1604 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1605 "Failed to issue marker IOCB (%x).\n", rval2);
1606 } else {
1607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1608 "Done %s.\n", __func__);
1609 }
1610
1611 return rval;
1612 }
1613
1614 int
1615 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1616 {
1617 int rval, rval2;
1618 mbx_cmd_t mc;
1619 mbx_cmd_t *mcp = &mc;
1620 scsi_qla_host_t *vha;
1621
1622 vha = fcport->vha;
1623
1624 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1625 "Entered %s.\n", __func__);
1626
1627 mcp->mb[0] = MBC_LUN_RESET;
1628 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1629 if (HAS_EXTENDED_IDS(vha->hw))
1630 mcp->mb[1] = fcport->loop_id;
1631 else
1632 mcp->mb[1] = fcport->loop_id << 8;
1633 mcp->mb[2] = (u32)l;
1634 mcp->mb[3] = 0;
1635 mcp->mb[9] = vha->vp_idx;
1636
1637 mcp->in_mb = MBX_0;
1638 mcp->tov = MBX_TOV_SECONDS;
1639 mcp->flags = 0;
1640 rval = qla2x00_mailbox_command(vha, mcp);
1641 if (rval != QLA_SUCCESS) {
1642 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1643 }
1644
1645 /* Issue marker IOCB. */
1646 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1647 MK_SYNC_ID_LUN);
1648 if (rval2 != QLA_SUCCESS) {
1649 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1650 "Failed to issue marker IOCB (%x).\n", rval2);
1651 } else {
1652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1653 "Done %s.\n", __func__);
1654 }
1655
1656 return rval;
1657 }
1658
1659 /*
1660 * qla2x00_get_adapter_id
1661 * Get adapter ID and topology.
1662 *
1663 * Input:
1664 * ha = adapter block pointer.
1665 * id = pointer for loop ID.
1666 * al_pa = pointer for AL_PA.
1667 * area = pointer for area.
1668 * domain = pointer for domain.
1669 * top = pointer for topology.
1670 * TARGET_QUEUE_LOCK must be released.
1671 * ADAPTER_STATE_LOCK must be released.
1672 *
1673 * Returns:
1674 * qla2x00 local function return status code.
1675 *
1676 * Context:
1677 * Kernel context.
1678 */
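/*
 * Illustrative call sketch (assumption, for documentation only):
 *
 *	uint16_t loop_id, topo, sw_cap;
 *	uint8_t al_pa, area, domain;
 *
 *	rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, &area, &domain,
 *	    &topo, &sw_cap);
 */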
1679 int
1680 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1681 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1682 {
1683 int rval;
1684 mbx_cmd_t mc;
1685 mbx_cmd_t *mcp = &mc;
1686
1687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1688 "Entered %s.\n", __func__);
1689
1690 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1691 mcp->mb[9] = vha->vp_idx;
1692 mcp->out_mb = MBX_9|MBX_0;
1693 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1694 if (IS_CNA_CAPABLE(vha->hw))
1695 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1696 if (IS_FWI2_CAPABLE(vha->hw))
1697 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1698 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1699 mcp->in_mb |= MBX_15;
1700 mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
1701 }
1702
1703 mcp->tov = MBX_TOV_SECONDS;
1704 mcp->flags = 0;
1705 rval = qla2x00_mailbox_command(vha, mcp);
1706 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1707 rval = QLA_COMMAND_ERROR;
1708 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1709 rval = QLA_INVALID_COMMAND;
1710
1711 /* Return data. */
1712 *id = mcp->mb[1];
1713 *al_pa = LSB(mcp->mb[2]);
1714 *area = MSB(mcp->mb[2]);
1715 *domain = LSB(mcp->mb[3]);
1716 *top = mcp->mb[6];
1717 *sw_cap = mcp->mb[7];
1718
1719 if (rval != QLA_SUCCESS) {
1720 /*EMPTY*/
1721 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1722 } else {
1723 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1724 "Done %s.\n", __func__);
1725
1726 if (IS_CNA_CAPABLE(vha->hw)) {
1727 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1728 vha->fcoe_fcf_idx = mcp->mb[10];
1729 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1730 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1731 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1732 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1733 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1734 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1735 }
1736 /* If FA-WWN supported */
1737 if (IS_FAWWN_CAPABLE(vha->hw)) {
1738 if (mcp->mb[7] & BIT_14) {
1739 vha->port_name[0] = MSB(mcp->mb[16]);
1740 vha->port_name[1] = LSB(mcp->mb[16]);
1741 vha->port_name[2] = MSB(mcp->mb[17]);
1742 vha->port_name[3] = LSB(mcp->mb[17]);
1743 vha->port_name[4] = MSB(mcp->mb[18]);
1744 vha->port_name[5] = LSB(mcp->mb[18]);
1745 vha->port_name[6] = MSB(mcp->mb[19]);
1746 vha->port_name[7] = LSB(mcp->mb[19]);
1747 fc_host_port_name(vha->host) =
1748 wwn_to_u64(vha->port_name);
1749 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1750 "FA-WWN acquired %016llx\n",
1751 wwn_to_u64(vha->port_name));
1752 }
1753 }
1754
1755 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1756 vha->bbcr = mcp->mb[15];
1757 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1758 ql_log(ql_log_info, vha, 0x11a4,
1759 "SCM: EDC ELS completed, flags 0x%x\n",
1760 mcp->mb[21]);
1761 }
1762 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1763 vha->hw->flags.scm_enabled = 1;
1764 vha->scm_fabric_connection_flags |=
1765 SCM_FLAG_RDF_COMPLETED;
1766 ql_log(ql_log_info, vha, 0x11a5,
1767 "SCM: RDF ELS completed, flags 0x%x\n",
1768 mcp->mb[23]);
1769 }
1770 }
1771 }
1772
1773 return rval;
1774 }
1775
1776 /*
1777 * qla2x00_get_retry_cnt
1778 * Get current firmware login retry count and delay.
1779 *
1780 * Input:
1781 * ha = adapter block pointer.
1782 * retry_cnt = pointer to login retry count.
1783 * tov = pointer to login timeout value.
1784 *
1785 * Returns:
1786 * qla2x00 local function return status code.
1787 *
1788 * Context:
1789 * Kernel context.
1790 */
1791 int
1792 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1793 uint16_t *r_a_tov)
1794 {
1795 int rval;
1796 uint16_t ratov;
1797 mbx_cmd_t mc;
1798 mbx_cmd_t *mcp = &mc;
1799
1800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1801 "Entered %s.\n", __func__);
1802
1803 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1804 mcp->out_mb = MBX_0;
1805 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1806 mcp->tov = MBX_TOV_SECONDS;
1807 mcp->flags = 0;
1808 rval = qla2x00_mailbox_command(vha, mcp);
1809
1810 if (rval != QLA_SUCCESS) {
1811 /*EMPTY*/
1812 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1813 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1814 } else {
1815 /* Convert returned data and check our values. */
1816 *r_a_tov = mcp->mb[3] / 2;
1817 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1818 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1819 /* Update to the larger values */
1820 *retry_cnt = (uint8_t)mcp->mb[1];
1821 *tov = ratov;
1822 }
1823
1824 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1825 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1826 }
1827
1828 return rval;
1829 }
1830
1831 /*
1832 * qla2x00_init_firmware
1833 * Initialize adapter firmware.
1834 *
1835 * Input:
1836 * ha = adapter block pointer.
1837 * dptr = Initialization control block pointer.
1838 * size = size of initialization control block.
1839 * TARGET_QUEUE_LOCK must be released.
1840 * ADAPTER_STATE_LOCK must be released.
1841 *
1842 * Returns:
1843 * qla2x00 local function return status code.
1844 *
1845 * Context:
1846 * Kernel context.
1847 */
1848 int
1849 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1850 {
1851 int rval;
1852 mbx_cmd_t mc;
1853 mbx_cmd_t *mcp = &mc;
1854 struct qla_hw_data *ha = vha->hw;
1855
1856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1857 "Entered %s.\n", __func__);
1858
1859 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1860 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1861 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1862
1863 if (ha->flags.npiv_supported)
1864 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1865 else
1866 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1867
1868 mcp->mb[1] = 0;
1869 mcp->mb[2] = MSW(ha->init_cb_dma);
1870 mcp->mb[3] = LSW(ha->init_cb_dma);
1871 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1872 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1873 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1874 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1875 mcp->mb[1] = BIT_0;
1876 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1877 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1878 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1879 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1880 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1881 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1882 }
1883
1884 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1885 mcp->mb[1] |= BIT_1;
1886 mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1887 mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1888 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1889 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1890 mcp->mb[15] = sizeof(*ha->sf_init_cb);
1891 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1892 }
1893
1894 /* 1 and 2 should normally be captured. */
1895 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1896 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1897 /* mb3 is additional info about the installed SFP. */
1898 mcp->in_mb |= MBX_3;
1899 mcp->buf_size = size;
1900 mcp->flags = MBX_DMA_OUT;
1901 mcp->tov = MBX_TOV_SECONDS;
1902 rval = qla2x00_mailbox_command(vha, mcp);
1903
1904 if (rval != QLA_SUCCESS) {
1905 /*EMPTY*/
1906 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1907 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1908 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1909 if (ha->init_cb) {
1910 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1911 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1912 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1913 }
1914 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1915 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1916 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1917 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1918 }
1919 } else {
1920 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1921 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1922 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1923 "Invalid SFP/Validation Failed\n");
1924 }
1925 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1926 "Done %s.\n", __func__);
1927 }
1928
1929 return rval;
1930 }
1931
1932
1933 /*
1934 * qla2x00_get_port_database
1935 * Issue normal/enhanced get port database mailbox command
1936 * and copy device name as necessary.
1937 *
1938 * Input:
1939 * ha = adapter state pointer.
1940 * fcport = port structure pointer.
1941 * opt = enhanced cmd option byte.
1942 *
1943 * Returns:
1944 * qla2x00 local function return status code.
1945 *
1946 * Context:
1947 * Kernel context.
1948 */
1949 int
1950 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1951 {
1952 int rval;
1953 mbx_cmd_t mc;
1954 mbx_cmd_t *mcp = &mc;
1955 port_database_t *pd;
1956 struct port_database_24xx *pd24;
1957 dma_addr_t pd_dma;
1958 struct qla_hw_data *ha = vha->hw;
1959
1960 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1961 "Entered %s.\n", __func__);
1962
1963 pd24 = NULL;
1964 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1965 if (pd == NULL) {
1966 ql_log(ql_log_warn, vha, 0x1050,
1967 "Failed to allocate port database structure.\n");
1968 fcport->query = 0;
1969 return QLA_MEMORY_ALLOC_FAILED;
1970 }
1971
1972 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1973 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1974 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1975 mcp->mb[2] = MSW(pd_dma);
1976 mcp->mb[3] = LSW(pd_dma);
1977 mcp->mb[6] = MSW(MSD(pd_dma));
1978 mcp->mb[7] = LSW(MSD(pd_dma));
1979 mcp->mb[9] = vha->vp_idx;
1980 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1981 mcp->in_mb = MBX_0;
1982 if (IS_FWI2_CAPABLE(ha)) {
1983 mcp->mb[1] = fcport->loop_id;
1984 mcp->mb[10] = opt;
1985 mcp->out_mb |= MBX_10|MBX_1;
1986 mcp->in_mb |= MBX_1;
1987 } else if (HAS_EXTENDED_IDS(ha)) {
1988 mcp->mb[1] = fcport->loop_id;
1989 mcp->mb[10] = opt;
1990 mcp->out_mb |= MBX_10|MBX_1;
1991 } else {
1992 mcp->mb[1] = fcport->loop_id << 8 | opt;
1993 mcp->out_mb |= MBX_1;
1994 }
1995 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1996 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1997 mcp->flags = MBX_DMA_IN;
1998 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1999 rval = qla2x00_mailbox_command(vha, mcp);
2000 if (rval != QLA_SUCCESS)
2001 goto gpd_error_out;
2002
2003 if (IS_FWI2_CAPABLE(ha)) {
2004 uint64_t zero = 0;
2005 u8 current_login_state, last_login_state;
2006
2007 pd24 = (struct port_database_24xx *) pd;
2008
2009 /* Check for logged in state. */
2010 if (NVME_TARGET(ha, fcport)) {
2011 current_login_state = pd24->current_login_state >> 4;
2012 last_login_state = pd24->last_login_state >> 4;
2013 } else {
2014 current_login_state = pd24->current_login_state & 0xf;
2015 last_login_state = pd24->last_login_state & 0xf;
2016 }
2017 fcport->current_login_state = pd24->current_login_state;
2018 fcport->last_login_state = pd24->last_login_state;
2019
2020 /* Check for logged in state. */
2021 if (current_login_state != PDS_PRLI_COMPLETE &&
2022 last_login_state != PDS_PRLI_COMPLETE) {
2023 ql_dbg(ql_dbg_mbx, vha, 0x119a,
2024 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
2025 current_login_state, last_login_state,
2026 fcport->loop_id);
2027 rval = QLA_FUNCTION_FAILED;
2028
2029 if (!fcport->query)
2030 goto gpd_error_out;
2031 }
2032
2033 if (fcport->loop_id == FC_NO_LOOP_ID ||
2034 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2035 memcmp(fcport->port_name, pd24->port_name, 8))) {
2036 /* We lost the device midway. */
2037 rval = QLA_NOT_LOGGED_IN;
2038 goto gpd_error_out;
2039 }
2040
2041 /* Names are little-endian. */
2042 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
2043 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2044
2045 /* Get port_id of device. */
2046 fcport->d_id.b.domain = pd24->port_id[0];
2047 fcport->d_id.b.area = pd24->port_id[1];
2048 fcport->d_id.b.al_pa = pd24->port_id[2];
2049 fcport->d_id.b.rsvd_1 = 0;
2050
2051 /* If not target, must be initiator or unknown type. */
2052 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2053 fcport->port_type = FCT_INITIATOR;
2054 else
2055 fcport->port_type = FCT_TARGET;
2056
2057 /* Passback COS information. */
2058 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2059 FC_COS_CLASS2 : FC_COS_CLASS3;
2060
2061 if (pd24->prli_svc_param_word_3[0] & BIT_7)
2062 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2063 } else {
2064 uint64_t zero = 0;
2065
2066 /* Check for logged in state. */
2067 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2068 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2069 ql_dbg(ql_dbg_mbx, vha, 0x100a,
2070 "Unable to verify login-state (%x/%x) - "
2071 "portid=%02x%02x%02x.\n", pd->master_state,
2072 pd->slave_state, fcport->d_id.b.domain,
2073 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2074 rval = QLA_FUNCTION_FAILED;
2075 goto gpd_error_out;
2076 }
2077
2078 if (fcport->loop_id == FC_NO_LOOP_ID ||
2079 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2080 memcmp(fcport->port_name, pd->port_name, 8))) {
2081 /* We lost the device midway. */
2082 rval = QLA_NOT_LOGGED_IN;
2083 goto gpd_error_out;
2084 }
2085
2086 /* Names are little-endian. */
2087 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2088 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2089
2090 /* Get port_id of device. */
2091 fcport->d_id.b.domain = pd->port_id[0];
2092 fcport->d_id.b.area = pd->port_id[3];
2093 fcport->d_id.b.al_pa = pd->port_id[2];
2094 fcport->d_id.b.rsvd_1 = 0;
2095
2096 /* If not target, must be initiator or unknown type. */
2097 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2098 fcport->port_type = FCT_INITIATOR;
2099 else
2100 fcport->port_type = FCT_TARGET;
2101
2102 /* Passback COS information. */
2103 fcport->supported_classes = (pd->options & BIT_4) ?
2104 FC_COS_CLASS2 : FC_COS_CLASS3;
2105 }
2106
2107 gpd_error_out:
2108 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2109 fcport->query = 0;
2110
2111 if (rval != QLA_SUCCESS) {
2112 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2113 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2114 mcp->mb[0], mcp->mb[1]);
2115 } else {
2116 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2117 "Done %s.\n", __func__);
2118 }
2119
2120 return rval;
2121 }
2122
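/*
 * qla24xx_get_port_database
 *	Issue the get port database mailbox command for an N_Port handle and
 *	DMA the ISP24xx-format port database into the caller's buffer.
 *
 * Input:
 *	vha = adapter state pointer.
 *	nport_handle = N_Port handle of the device.
 *	pdb = buffer for the returned port database.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */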
2123 int
2124 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2125 struct port_database_24xx *pdb)
2126 {
2127 mbx_cmd_t mc;
2128 mbx_cmd_t *mcp = &mc;
2129 dma_addr_t pdb_dma;
2130 int rval;
2131
2132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2133 "Entered %s.\n", __func__);
2134
2135 memset(pdb, 0, sizeof(*pdb));
2136
2137 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2138 sizeof(*pdb), DMA_FROM_DEVICE);
2139 if (!pdb_dma) {
2140 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2141 return QLA_MEMORY_ALLOC_FAILED;
2142 }
2143
2144 mcp->mb[0] = MBC_GET_PORT_DATABASE;
2145 mcp->mb[1] = nport_handle;
2146 mcp->mb[2] = MSW(LSD(pdb_dma));
2147 mcp->mb[3] = LSW(LSD(pdb_dma));
2148 mcp->mb[6] = MSW(MSD(pdb_dma));
2149 mcp->mb[7] = LSW(MSD(pdb_dma));
2150 mcp->mb[9] = 0;
2151 mcp->mb[10] = 0;
2152 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2153 mcp->in_mb = MBX_1|MBX_0;
2154 mcp->buf_size = sizeof(*pdb);
2155 mcp->flags = MBX_DMA_IN;
2156 mcp->tov = vha->hw->login_timeout * 2;
2157 rval = qla2x00_mailbox_command(vha, mcp);
2158
2159 if (rval != QLA_SUCCESS) {
2160 ql_dbg(ql_dbg_mbx, vha, 0x111a,
2161 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2162 rval, mcp->mb[0], mcp->mb[1]);
2163 } else {
2164 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2165 "Done %s.\n", __func__);
2166 }
2167
2168 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2169 sizeof(*pdb), DMA_FROM_DEVICE);
2170
2171 return rval;
2172 }
2173
2174 /*
2175 * qla2x00_get_firmware_state
2176 * Get adapter firmware state.
2177 *
2178 * Input:
2179 * ha = adapter block pointer.
2180 * dptr = pointer for firmware state.
2181 * TARGET_QUEUE_LOCK must be released.
2182 * ADAPTER_STATE_LOCK must be released.
2183 *
2184 * Returns:
2185 * qla2x00 local function return status code.
2186 *
2187 * Context:
2188 * Kernel context.
2189 */
2190 int
2191 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2192 {
2193 int rval;
2194 mbx_cmd_t mc;
2195 mbx_cmd_t *mcp = &mc;
2196 struct qla_hw_data *ha = vha->hw;
2197
2198 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2199 "Entered %s.\n", __func__);
2200
2201 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2202 mcp->out_mb = MBX_0;
2203 if (IS_FWI2_CAPABLE(vha->hw))
2204 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2205 else
2206 mcp->in_mb = MBX_1|MBX_0;
2207 mcp->tov = MBX_TOV_SECONDS;
2208 mcp->flags = 0;
2209 rval = qla2x00_mailbox_command(vha, mcp);
2210
2211 /* Return firmware states. */
2212 states[0] = mcp->mb[1];
2213 if (IS_FWI2_CAPABLE(vha->hw)) {
2214 states[1] = mcp->mb[2];
2215 states[2] = mcp->mb[3]; /* SFP info */
2216 states[3] = mcp->mb[4];
2217 states[4] = mcp->mb[5];
2218 states[5] = mcp->mb[6]; /* DPORT status */
2219 }
2220
2221 if (rval != QLA_SUCCESS) {
2222 /*EMPTY*/
2223 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2224 } else {
2225 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2226 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2227 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2228 "Invalid SFP/Validation Failed\n");
2229 }
2230 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2231 "Done %s.\n", __func__);
2232 }
2233
2234 return rval;
2235 }
2236
2237 /*
2238 * qla2x00_get_port_name
2239 * Issue get port name mailbox command.
2240 * Returned name is in big endian format.
2241 *
2242 * Input:
2243 * ha = adapter block pointer.
2244 * loop_id = loop ID of device.
2245 * name = pointer for name.
2246 * TARGET_QUEUE_LOCK must be released.
2247 * ADAPTER_STATE_LOCK must be released.
2248 *
2249 * Returns:
2250 * qla2x00 local function return status code.
2251 *
2252 * Context:
2253 * Kernel context.
2254 */
2255 int
2256 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2257 uint8_t opt)
2258 {
2259 int rval;
2260 mbx_cmd_t mc;
2261 mbx_cmd_t *mcp = &mc;
2262
2263 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2264 "Entered %s.\n", __func__);
2265
2266 mcp->mb[0] = MBC_GET_PORT_NAME;
2267 mcp->mb[9] = vha->vp_idx;
2268 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2269 if (HAS_EXTENDED_IDS(vha->hw)) {
2270 mcp->mb[1] = loop_id;
2271 mcp->mb[10] = opt;
2272 mcp->out_mb |= MBX_10;
2273 } else {
2274 mcp->mb[1] = loop_id << 8 | opt;
2275 }
2276
2277 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2278 mcp->tov = MBX_TOV_SECONDS;
2279 mcp->flags = 0;
2280 rval = qla2x00_mailbox_command(vha, mcp);
2281
2282 if (rval != QLA_SUCCESS) {
2283 /*EMPTY*/
2284 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2285 } else {
2286 if (name != NULL) {
2287 /* This function returns name in big endian. */
2288 name[0] = MSB(mcp->mb[2]);
2289 name[1] = LSB(mcp->mb[2]);
2290 name[2] = MSB(mcp->mb[3]);
2291 name[3] = LSB(mcp->mb[3]);
2292 name[4] = MSB(mcp->mb[6]);
2293 name[5] = LSB(mcp->mb[6]);
2294 name[6] = MSB(mcp->mb[7]);
2295 name[7] = LSB(mcp->mb[7]);
2296 }
2297
2298 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2299 "Done %s.\n", __func__);
2300 }
2301
2302 return rval;
2303 }
2304
2305 /*
2306 * qla24xx_link_initialize
2307 * Issue link initialization mailbox command.
2308 *
2309 * Input:
2310 * ha = adapter block pointer.
2311 * TARGET_QUEUE_LOCK must be released.
2312 * ADAPTER_STATE_LOCK must be released.
2313 *
2314 * Returns:
2315 * qla2x00 local function return status code.
2316 *
2317 * Context:
2318 * Kernel context.
2319 */
2320 int
2321 qla24xx_link_initialize(scsi_qla_host_t *vha)
2322 {
2323 int rval;
2324 mbx_cmd_t mc;
2325 mbx_cmd_t *mcp = &mc;
2326
2327 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2328 "Entered %s.\n", __func__);
2329
2330 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2331 return QLA_FUNCTION_FAILED;
2332
2333 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2334 mcp->mb[1] = BIT_4;
2335 if (vha->hw->operating_mode == LOOP)
2336 mcp->mb[1] |= BIT_6;
2337 else
2338 mcp->mb[1] |= BIT_5;
2339 mcp->mb[2] = 0;
2340 mcp->mb[3] = 0;
2341 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2342 mcp->in_mb = MBX_0;
2343 mcp->tov = MBX_TOV_SECONDS;
2344 mcp->flags = 0;
2345 rval = qla2x00_mailbox_command(vha, mcp);
2346
2347 if (rval != QLA_SUCCESS) {
2348 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2349 } else {
2350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2351 "Done %s.\n", __func__);
2352 }
2353
2354 return rval;
2355 }
2356
2357 /*
2358 * qla2x00_lip_reset
2359 * Issue LIP reset mailbox command.
2360 *
2361 * Input:
2362 * ha = adapter block pointer.
2363 * TARGET_QUEUE_LOCK must be released.
2364 * ADAPTER_STATE_LOCK must be released.
2365 *
2366 * Returns:
2367 * qla2x00 local function return status code.
2368 *
2369 * Context:
2370 * Kernel context.
2371 */
2372 int
2373 qla2x00_lip_reset(scsi_qla_host_t *vha)
2374 {
2375 int rval;
2376 mbx_cmd_t mc;
2377 mbx_cmd_t *mcp = &mc;
2378
2379 ql_dbg(ql_dbg_disc, vha, 0x105a,
2380 "Entered %s.\n", __func__);
2381
2382 if (IS_CNA_CAPABLE(vha->hw)) {
2383 /* Logout across all FCFs. */
2384 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2385 mcp->mb[1] = BIT_1;
2386 mcp->mb[2] = 0;
2387 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2388 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2389 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2390 mcp->mb[1] = BIT_4;
2391 mcp->mb[2] = 0;
2392 mcp->mb[3] = vha->hw->loop_reset_delay;
2393 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2394 } else {
2395 mcp->mb[0] = MBC_LIP_RESET;
2396 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2397 if (HAS_EXTENDED_IDS(vha->hw)) {
2398 mcp->mb[1] = 0x00ff;
2399 mcp->mb[10] = 0;
2400 mcp->out_mb |= MBX_10;
2401 } else {
2402 mcp->mb[1] = 0xff00;
2403 }
2404 mcp->mb[2] = vha->hw->loop_reset_delay;
2405 mcp->mb[3] = 0;
2406 }
2407 mcp->in_mb = MBX_0;
2408 mcp->tov = MBX_TOV_SECONDS;
2409 mcp->flags = 0;
2410 rval = qla2x00_mailbox_command(vha, mcp);
2411
2412 if (rval != QLA_SUCCESS) {
2413 /*EMPTY*/
2414 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2415 } else {
2416 /*EMPTY*/
2417 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2418 "Done %s.\n", __func__);
2419 }
2420
2421 return rval;
2422 }
2423
2424 /*
2425 * qla2x00_send_sns
2426 * Send SNS command.
2427 *
2428 * Input:
2429 * ha = adapter block pointer.
2430 * sns_phys_address = DMA address of the SNS command/response buffer.
2431 * cmd_size = command size.
2432 * buf_size = response/command size.
2433 * TARGET_QUEUE_LOCK must be released.
2434 * ADAPTER_STATE_LOCK must be released.
2435 *
2436 * Returns:
2437 * qla2x00 local function return status code.
2438 *
2439 * Context:
2440 * Kernel context.
2441 */
2442 int
2443 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2444 uint16_t cmd_size, size_t buf_size)
2445 {
2446 int rval;
2447 mbx_cmd_t mc;
2448 mbx_cmd_t *mcp = &mc;
2449
2450 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2451 "Entered %s.\n", __func__);
2452
2453 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2454 "Retry cnt=%d ratov=%d total tov=%d.\n",
2455 vha->hw->retry_count, vha->hw->login_timeout, (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2));
2456
2457 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2458 mcp->mb[1] = cmd_size;
2459 mcp->mb[2] = MSW(sns_phys_address);
2460 mcp->mb[3] = LSW(sns_phys_address);
2461 mcp->mb[6] = MSW(MSD(sns_phys_address));
2462 mcp->mb[7] = LSW(MSD(sns_phys_address));
2463 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2464 mcp->in_mb = MBX_0|MBX_1;
2465 mcp->buf_size = buf_size;
2466 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2467 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2468 rval = qla2x00_mailbox_command(vha, mcp);
2469
2470 if (rval != QLA_SUCCESS) {
2471 /*EMPTY*/
2472 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2473 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2474 rval, mcp->mb[0], mcp->mb[1]);
2475 } else {
2476 /*EMPTY*/
2477 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2478 "Done %s.\n", __func__);
2479 }
2480
2481 return rval;
2482 }
2483
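/*
 * qla24xx_login_fabric
 *	Perform fabric login (PLOGI) through a Login/Logout Port IOCB and
 *	map the IOCB completion into mailbox-style status values in mb[].
 *
 * Input:
 *	vha = adapter block pointer.
 *	loop_id = device loop ID.
 *	domain/area/al_pa = device port ID.
 *	mb = pointer for returned status values.
 *	opt = command options (BIT_0 = conditional PLOGI, BIT_1 = skip PRLI).
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */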
2484 int
2485 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2486 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2487 {
2488 int rval;
2489
2490 struct logio_entry_24xx *lg;
2491 dma_addr_t lg_dma;
2492 uint32_t iop[2];
2493 struct qla_hw_data *ha = vha->hw;
2494 struct req_que *req;
2495
2496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2497 "Entered %s.\n", __func__);
2498
2499 if (vha->vp_idx && vha->qpair)
2500 req = vha->qpair->req;
2501 else
2502 req = ha->req_q_map[0];
2503
2504 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2505 if (lg == NULL) {
2506 ql_log(ql_log_warn, vha, 0x1062,
2507 "Failed to allocate login IOCB.\n");
2508 return QLA_MEMORY_ALLOC_FAILED;
2509 }
2510
2511 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2512 lg->entry_count = 1;
2513 lg->handle = make_handle(req->id, lg->handle);
2514 lg->nport_handle = cpu_to_le16(loop_id);
2515 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2516 if (opt & BIT_0)
2517 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2518 if (opt & BIT_1)
2519 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2520 lg->port_id[0] = al_pa;
2521 lg->port_id[1] = area;
2522 lg->port_id[2] = domain;
2523 lg->vp_index = vha->vp_idx;
2524 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2525 (ha->r_a_tov / 10 * 2) + 2);
2526 if (rval != QLA_SUCCESS) {
2527 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2528 "Failed to issue login IOCB (%x).\n", rval);
2529 } else if (lg->entry_status != 0) {
2530 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2531 "Failed to complete IOCB -- error status (%x).\n",
2532 lg->entry_status);
2533 rval = QLA_FUNCTION_FAILED;
2534 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2535 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2536 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2537
2538 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2539 "Failed to complete IOCB -- completion status (%x) "
2540 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2541 iop[0], iop[1]);
2542
2543 switch (iop[0]) {
2544 case LSC_SCODE_PORTID_USED:
2545 mb[0] = MBS_PORT_ID_USED;
2546 mb[1] = LSW(iop[1]);
2547 break;
2548 case LSC_SCODE_NPORT_USED:
2549 mb[0] = MBS_LOOP_ID_USED;
2550 break;
2551 case LSC_SCODE_NOLINK:
2552 case LSC_SCODE_NOIOCB:
2553 case LSC_SCODE_NOXCB:
2554 case LSC_SCODE_CMD_FAILED:
2555 case LSC_SCODE_NOFABRIC:
2556 case LSC_SCODE_FW_NOT_READY:
2557 case LSC_SCODE_NOT_LOGGED_IN:
2558 case LSC_SCODE_NOPCB:
2559 case LSC_SCODE_ELS_REJECT:
2560 case LSC_SCODE_CMD_PARAM_ERR:
2561 case LSC_SCODE_NONPORT:
2562 case LSC_SCODE_LOGGED_IN:
2563 case LSC_SCODE_NOFLOGI_ACC:
2564 default:
2565 mb[0] = MBS_COMMAND_ERROR;
2566 break;
2567 }
2568 } else {
2569 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2570 "Done %s.\n", __func__);
2571
2572 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2573
2574 mb[0] = MBS_COMMAND_COMPLETE;
2575 mb[1] = 0;
2576 if (iop[0] & BIT_4) {
2577 if (iop[0] & BIT_8)
2578 mb[1] |= BIT_1;
2579 } else
2580 mb[1] = BIT_0;
2581
2582 /* Passback COS information. */
2583 mb[10] = 0;
2584 if (lg->io_parameter[7] || lg->io_parameter[8])
2585 mb[10] |= BIT_0; /* Class 2. */
2586 if (lg->io_parameter[9] || lg->io_parameter[10])
2587 mb[10] |= BIT_1; /* Class 3. */
2588 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2589 mb[10] |= BIT_7; /* Confirmed Completion
2590 * Allowed
2591 */
2592 }
2593
2594 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2595
2596 return rval;
2597 }
2598
2599 /*
2600 * qla2x00_login_fabric
2601 * Issue login fabric port mailbox command.
2602 *
2603 * Input:
2604 * ha = adapter block pointer.
2605 * loop_id = device loop ID.
2606 * domain = device domain.
2607 * area = device area.
2608 * al_pa = device AL_PA.
2609 * mb = pointer for returned mailbox registers.
2610 * opt = command options.
2611 * TARGET_QUEUE_LOCK must be released.
2612 * ADAPTER_STATE_LOCK must be released.
2613 *
2614 * Returns:
2615 * qla2x00 local function return status code.
2616 *
2617 * Context:
2618 * Kernel context.
2619 */
2620 int
2621 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2622 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2623 {
2624 int rval;
2625 mbx_cmd_t mc;
2626 mbx_cmd_t *mcp = &mc;
2627 struct qla_hw_data *ha = vha->hw;
2628
2629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2630 "Entered %s.\n", __func__);
2631
2632 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2633 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2634 if (HAS_EXTENDED_IDS(ha)) {
2635 mcp->mb[1] = loop_id;
2636 mcp->mb[10] = opt;
2637 mcp->out_mb |= MBX_10;
2638 } else {
2639 mcp->mb[1] = (loop_id << 8) | opt;
2640 }
2641 mcp->mb[2] = domain;
2642 mcp->mb[3] = area << 8 | al_pa;
2643
2644 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2645 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2646 mcp->flags = 0;
2647 rval = qla2x00_mailbox_command(vha, mcp);
2648
2649 /* Return mailbox statuses. */
2650 if (mb != NULL) {
2651 mb[0] = mcp->mb[0];
2652 mb[1] = mcp->mb[1];
2653 mb[2] = mcp->mb[2];
2654 mb[6] = mcp->mb[6];
2655 mb[7] = mcp->mb[7];
2656 /* COS retrieved from Get-Port-Database mailbox command. */
2657 mb[10] = 0;
2658 }
2659
2660 if (rval != QLA_SUCCESS) {
2661 /* RLU tmp code: the main mailbox_command function would need to
2662 * return OK even when the mailbox completion value is not
2663 * SUCCESS. The caller is responsible for interpreting the
2664 * return values of this mailbox command if we are not to
2665 * change too much of the existing code.
2666 */
2667 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2668 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2669 mcp->mb[0] == 0x4006)
2670 rval = QLA_SUCCESS;
2671
2672 /*EMPTY*/
2673 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2674 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2675 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2676 } else {
2677 /*EMPTY*/
2678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2679 "Done %s.\n", __func__);
2680 }
2681
2682 return rval;
2683 }
2684
2685 /*
2686 * qla2x00_login_local_device
2687 * Issue login loop port mailbox command.
2688 *
2689 * Input:
2690 * ha = adapter block pointer.
2691 * loop_id = device loop ID.
2692 * opt = command options.
2693 *
2694 * Returns:
2695 * Return status code.
2696 *
2697 * Context:
2698 * Kernel context.
2699 *
2700 */
2701 int
2702 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2703 uint16_t *mb_ret, uint8_t opt)
2704 {
2705 int rval;
2706 mbx_cmd_t mc;
2707 mbx_cmd_t *mcp = &mc;
2708 struct qla_hw_data *ha = vha->hw;
2709
2710 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2711 "Entered %s.\n", __func__);
2712
2713 if (IS_FWI2_CAPABLE(ha))
2714 return qla24xx_login_fabric(vha, fcport->loop_id,
2715 fcport->d_id.b.domain, fcport->d_id.b.area,
2716 fcport->d_id.b.al_pa, mb_ret, opt);
2717
2718 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2719 if (HAS_EXTENDED_IDS(ha))
2720 mcp->mb[1] = fcport->loop_id;
2721 else
2722 mcp->mb[1] = fcport->loop_id << 8;
2723 mcp->mb[2] = opt;
2724 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2725 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2726 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2727 mcp->flags = 0;
2728 rval = qla2x00_mailbox_command(vha, mcp);
2729
2730 /* Return mailbox statuses. */
2731 if (mb_ret != NULL) {
2732 mb_ret[0] = mcp->mb[0];
2733 mb_ret[1] = mcp->mb[1];
2734 mb_ret[6] = mcp->mb[6];
2735 mb_ret[7] = mcp->mb[7];
2736 }
2737
2738 if (rval != QLA_SUCCESS) {
2739 /* AV tmp code: the main mailbox_command function would need to
2740 * return OK even when the mailbox completion value is not
2741 * SUCCESS. The caller is responsible for interpreting the
2742 * return values of this mailbox command if we are not to
2743 * change too much of the existing code.
2744 */
2745 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2746 rval = QLA_SUCCESS;
2747
2748 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2749 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2750 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2751 } else {
2752 /*EMPTY*/
2753 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2754 "Done %s.\n", __func__);
2755 }
2756
2757 return (rval);
2758 }
2759
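/*
 * qla24xx_fabric_logout
 *	Perform fabric logout through a Login/Logout Port IOCB (implicit
 *	LOGO, N_Port handle freed on completion).
 *
 * Input:
 *	vha = adapter block pointer.
 *	loop_id = device loop ID.
 *	domain/area/al_pa = device port ID.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */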
2760 int
2761 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2762 uint8_t area, uint8_t al_pa)
2763 {
2764 int rval;
2765 struct logio_entry_24xx *lg;
2766 dma_addr_t lg_dma;
2767 struct qla_hw_data *ha = vha->hw;
2768 struct req_que *req;
2769
2770 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2771 "Entered %s.\n", __func__);
2772
2773 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2774 if (lg == NULL) {
2775 ql_log(ql_log_warn, vha, 0x106e,
2776 "Failed to allocate logout IOCB.\n");
2777 return QLA_MEMORY_ALLOC_FAILED;
2778 }
2779
2780 req = vha->req;
2781 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2782 lg->entry_count = 1;
2783 lg->handle = make_handle(req->id, lg->handle);
2784 lg->nport_handle = cpu_to_le16(loop_id);
2785 lg->control_flags =
2786 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2787 LCF_FREE_NPORT);
2788 lg->port_id[0] = al_pa;
2789 lg->port_id[1] = area;
2790 lg->port_id[2] = domain;
2791 lg->vp_index = vha->vp_idx;
2792 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2793 (ha->r_a_tov / 10 * 2) + 2);
2794 if (rval != QLA_SUCCESS) {
2795 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2796 "Failed to issue logout IOCB (%x).\n", rval);
2797 } else if (lg->entry_status != 0) {
2798 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2799 "Failed to complete IOCB -- error status (%x).\n",
2800 lg->entry_status);
2801 rval = QLA_FUNCTION_FAILED;
2802 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2803 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2804 "Failed to complete IOCB -- completion status (%x) "
2805 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2806 le32_to_cpu(lg->io_parameter[0]),
2807 le32_to_cpu(lg->io_parameter[1]));
2808 } else {
2809 /*EMPTY*/
2810 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2811 "Done %s.\n", __func__);
2812 }
2813
2814 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2815
2816 return rval;
2817 }
2818
2819 /*
2820 * qla2x00_fabric_logout
2821 * Issue logout fabric port mailbox command.
2822 *
2823 * Input:
2824 * ha = adapter block pointer.
2825 * loop_id = device loop ID.
2826 * TARGET_QUEUE_LOCK must be released.
2827 * ADAPTER_STATE_LOCK must be released.
2828 *
2829 * Returns:
2830 * qla2x00 local function return status code.
2831 *
2832 * Context:
2833 * Kernel context.
2834 */
2835 int
2836 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2837 uint8_t area, uint8_t al_pa)
2838 {
2839 int rval;
2840 mbx_cmd_t mc;
2841 mbx_cmd_t *mcp = &mc;
2842
2843 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2844 "Entered %s.\n", __func__);
2845
2846 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2847 mcp->out_mb = MBX_1|MBX_0;
2848 if (HAS_EXTENDED_IDS(vha->hw)) {
2849 mcp->mb[1] = loop_id;
2850 mcp->mb[10] = 0;
2851 mcp->out_mb |= MBX_10;
2852 } else {
2853 mcp->mb[1] = loop_id << 8;
2854 }
2855
2856 mcp->in_mb = MBX_1|MBX_0;
2857 mcp->tov = MBX_TOV_SECONDS;
2858 mcp->flags = 0;
2859 rval = qla2x00_mailbox_command(vha, mcp);
2860
2861 if (rval != QLA_SUCCESS) {
2862 /*EMPTY*/
2863 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2864 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2865 } else {
2866 /*EMPTY*/
2867 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2868 "Done %s.\n", __func__);
2869 }
2870
2871 return rval;
2872 }
2873
2874 /*
2875 * qla2x00_full_login_lip
2876 * Issue full login LIP mailbox command.
2877 *
2878 * Input:
2879 * ha = adapter block pointer.
2880 * TARGET_QUEUE_LOCK must be released.
2881 * ADAPTER_STATE_LOCK must be released.
2882 *
2883 * Returns:
2884 * qla2x00 local function return status code.
2885 *
2886 * Context:
2887 * Kernel context.
2888 */
2889 int
2890 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2891 {
2892 int rval;
2893 mbx_cmd_t mc;
2894 mbx_cmd_t *mcp = &mc;
2895
2896 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2897 "Entered %s.\n", __func__);
2898
2899 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2900 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2901 mcp->mb[2] = 0;
2902 mcp->mb[3] = 0;
2903 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2904 mcp->in_mb = MBX_0;
2905 mcp->tov = MBX_TOV_SECONDS;
2906 mcp->flags = 0;
2907 rval = qla2x00_mailbox_command(vha, mcp);
2908
2909 if (rval != QLA_SUCCESS) {
2910 /*EMPTY*/
2911 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2912 } else {
2913 /*EMPTY*/
2914 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2915 "Done %s.\n", __func__);
2916 }
2917
2918 return rval;
2919 }
2920
2921 /*
2922 * qla2x00_get_id_list
 *	Get the ID list (loop IDs and port IDs of known ports) from firmware.
2923 *
2924 * Input:
2925 * ha = adapter block pointer.
2926 *
2927 * Returns:
2928 * qla2x00 local function return status code.
2929 *
2930 * Context:
2931 * Kernel context.
2932 */
2933 int
2934 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2935 uint16_t *entries)
2936 {
2937 int rval;
2938 mbx_cmd_t mc;
2939 mbx_cmd_t *mcp = &mc;
2940
2941 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2942 "Entered %s.\n", __func__);
2943
2944 if (id_list == NULL)
2945 return QLA_FUNCTION_FAILED;
2946
2947 mcp->mb[0] = MBC_GET_ID_LIST;
2948 mcp->out_mb = MBX_0;
2949 if (IS_FWI2_CAPABLE(vha->hw)) {
2950 mcp->mb[2] = MSW(id_list_dma);
2951 mcp->mb[3] = LSW(id_list_dma);
2952 mcp->mb[6] = MSW(MSD(id_list_dma));
2953 mcp->mb[7] = LSW(MSD(id_list_dma));
2954 mcp->mb[8] = 0;
2955 mcp->mb[9] = vha->vp_idx;
2956 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2957 } else {
2958 mcp->mb[1] = MSW(id_list_dma);
2959 mcp->mb[2] = LSW(id_list_dma);
2960 mcp->mb[3] = MSW(MSD(id_list_dma));
2961 mcp->mb[6] = LSW(MSD(id_list_dma));
2962 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2963 }
2964 mcp->in_mb = MBX_1|MBX_0;
2965 mcp->tov = MBX_TOV_SECONDS;
2966 mcp->flags = 0;
2967 rval = qla2x00_mailbox_command(vha, mcp);
2968
2969 if (rval != QLA_SUCCESS) {
2970 /*EMPTY*/
2971 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2972 } else {
2973 *entries = mcp->mb[1];
2974 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2975 "Done %s.\n", __func__);
2976 }
2977
2978 return rval;
2979 }
2980
2981 /*
2982 * qla2x00_get_resource_cnts
2983 * Get current firmware resource counts.
2984 *
2985 * Input:
2986 * ha = adapter block pointer.
2987 *
2988 * Returns:
2989 * qla2x00 local function return status code.
2990 *
2991 * Context:
2992 * Kernel context.
2993 */
2994 int
2995 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2996 {
2997 struct qla_hw_data *ha = vha->hw;
2998 int rval;
2999 mbx_cmd_t mc;
3000 mbx_cmd_t *mcp = &mc;
3001
3002 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
3003 "Entered %s.\n", __func__);
3004
3005 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
3006 mcp->out_mb = MBX_0;
3007 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3008 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
3009 IS_QLA27XX(ha) || IS_QLA28XX(ha))
3010 mcp->in_mb |= MBX_12;
3011 mcp->tov = MBX_TOV_SECONDS;
3012 mcp->flags = 0;
3013 rval = qla2x00_mailbox_command(vha, mcp);
3014
3015 if (rval != QLA_SUCCESS) {
3016 /*EMPTY*/
3017 ql_dbg(ql_dbg_mbx, vha, 0x107d,
3018 "Failed mb[0]=%x.\n", mcp->mb[0]);
3019 } else {
3020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
3021 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
3022 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
3023 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
3024 mcp->mb[11], mcp->mb[12]);
3025
3026 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
3027 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3028 ha->cur_fw_xcb_count = mcp->mb[3];
3029 ha->orig_fw_xcb_count = mcp->mb[6];
3030 ha->cur_fw_iocb_count = mcp->mb[7];
3031 ha->orig_fw_iocb_count = mcp->mb[10];
3032 if (ha->flags.npiv_supported)
3033 ha->max_npiv_vports = mcp->mb[11];
3034 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3035 IS_QLA28XX(ha))
3036 ha->fw_max_fcf_count = mcp->mb[12];
3037 }
3038
3039 return (rval);
3040 }
3041
3042 /*
3043 * qla2x00_get_fcal_position_map
3044 * Get FCAL (LILP) position map using mailbox command
3045 *
3046 * Input:
3047 * ha = adapter state pointer.
3048 * pos_map = buffer pointer (can be NULL).
3049 *
3050 * Returns:
3051 * qla2x00 local function return status code.
3052 *
3053 * Context:
3054 * Kernel context.
3055 */
3056 int
3057 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
3058 {
3059 int rval;
3060 mbx_cmd_t mc;
3061 mbx_cmd_t *mcp = &mc;
3062 char *pmap;
3063 dma_addr_t pmap_dma;
3064 struct qla_hw_data *ha = vha->hw;
3065
3066 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
3067 "Entered %s.\n", __func__);
3068
3069 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3070 if (pmap == NULL) {
3071 ql_log(ql_log_warn, vha, 0x1080,
3072 "Memory alloc failed.\n");
3073 return QLA_MEMORY_ALLOC_FAILED;
3074 }
3075
3076 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3077 mcp->mb[2] = MSW(pmap_dma);
3078 mcp->mb[3] = LSW(pmap_dma);
3079 mcp->mb[6] = MSW(MSD(pmap_dma));
3080 mcp->mb[7] = LSW(MSD(pmap_dma));
3081 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3082 mcp->in_mb = MBX_1|MBX_0;
3083 mcp->buf_size = FCAL_MAP_SIZE;
3084 mcp->flags = MBX_DMA_IN;
3085 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3086 rval = qla2x00_mailbox_command(vha, mcp);
3087
3088 if (rval == QLA_SUCCESS) {
3089 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3090 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3091 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3092 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3093 pmap, pmap[0] + 1);
3094
3095 if (pos_map)
3096 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3097 }
3098 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3099
3100 if (rval != QLA_SUCCESS) {
3101 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3102 } else {
3103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3104 "Done %s.\n", __func__);
3105 }
3106
3107 return rval;
3108 }
3109
3110 /*
3111 * qla2x00_get_link_status
 *	Issue the get link status mailbox command for a given loop ID.
3112 *
3113 * Input:
3114 * ha = adapter block pointer.
3115 * loop_id = device loop ID.
3116 * ret_buf = pointer to link status return buffer.
3117 *
3118 * Returns:
3119 * 0 = success.
3120 * BIT_0 = mem alloc error.
3121 * BIT_1 = mailbox error.
3122 */
3123 int
3124 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3125 struct link_statistics *stats, dma_addr_t stats_dma)
3126 {
3127 int rval;
3128 mbx_cmd_t mc;
3129 mbx_cmd_t *mcp = &mc;
3130 uint32_t *iter = (uint32_t *)stats;
3131 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3132 struct qla_hw_data *ha = vha->hw;
3133
3134 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3135 "Entered %s.\n", __func__);
3136
3137 mcp->mb[0] = MBC_GET_LINK_STATUS;
3138 mcp->mb[2] = MSW(LSD(stats_dma));
3139 mcp->mb[3] = LSW(LSD(stats_dma));
3140 mcp->mb[6] = MSW(MSD(stats_dma));
3141 mcp->mb[7] = LSW(MSD(stats_dma));
3142 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3143 mcp->in_mb = MBX_0;
3144 if (IS_FWI2_CAPABLE(ha)) {
3145 mcp->mb[1] = loop_id;
3146 mcp->mb[4] = 0;
3147 mcp->mb[10] = 0;
3148 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3149 mcp->in_mb |= MBX_1;
3150 } else if (HAS_EXTENDED_IDS(ha)) {
3151 mcp->mb[1] = loop_id;
3152 mcp->mb[10] = 0;
3153 mcp->out_mb |= MBX_10|MBX_1;
3154 } else {
3155 mcp->mb[1] = loop_id << 8;
3156 mcp->out_mb |= MBX_1;
3157 }
3158 mcp->tov = MBX_TOV_SECONDS;
3159 mcp->flags = IOCTL_CMD;
3160 rval = qla2x00_mailbox_command(vha, mcp);
3161
3162 if (rval == QLA_SUCCESS) {
3163 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3164 ql_dbg(ql_dbg_mbx, vha, 0x1085,
3165 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3166 rval = QLA_FUNCTION_FAILED;
3167 } else {
3168 /* Re-endianize - firmware data is le32. */
3169 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3170 "Done %s.\n", __func__);
3171 for ( ; dwords--; iter++)
3172 le32_to_cpus(iter);
3173 }
3174 } else {
3175 /* Failed. */
3176 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3177 }
3178
3179 return rval;
3180 }
3181
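/*
 * qla24xx_get_isp_stats
 *	Get ISP link statistics via the get link private statistics mailbox
 *	command, issued through the asynchronous mailbox path
 *	(qla24xx_send_mb_cmd). Returned data is byte-swapped in place from
 *	little-endian.
 *
 * Input:
 *	vha = adapter block pointer.
 *	stats = statistics buffer.
 *	stats_dma = DMA address of the statistics buffer.
 *	options = command options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */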
3182 int
3183 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3184 dma_addr_t stats_dma, uint16_t options)
3185 {
3186 int rval;
3187 mbx_cmd_t mc;
3188 mbx_cmd_t *mcp = &mc;
3189 uint32_t *iter = (uint32_t *)stats;
3190 ushort dwords = sizeof(*stats)/sizeof(*iter);
3191
3192 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3193 "Entered %s.\n", __func__);
3194
3195 memset(&mc, 0, sizeof(mc));
3196 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3197 mc.mb[2] = MSW(LSD(stats_dma));
3198 mc.mb[3] = LSW(LSD(stats_dma));
3199 mc.mb[6] = MSW(MSD(stats_dma));
3200 mc.mb[7] = LSW(MSD(stats_dma));
3201 mc.mb[8] = dwords;
3202 mc.mb[9] = vha->vp_idx;
3203 mc.mb[10] = options;
3204
3205 rval = qla24xx_send_mb_cmd(vha, &mc);
3206
3207 if (rval == QLA_SUCCESS) {
3208 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3209 ql_dbg(ql_dbg_mbx, vha, 0x1089,
3210 "Failed mb[0]=%x.\n", mcp->mb[0]);
3211 rval = QLA_FUNCTION_FAILED;
3212 } else {
3213 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3214 "Done %s.\n", __func__);
3215 /* Re-endianize - firmware data is le32. */
3216 for ( ; dwords--; iter++)
3217 le32_to_cpus(iter);
3218 }
3219 } else {
3220 /* Failed. */
3221 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3222 }
3223
3224 return rval;
3225 }
3226
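/*
 * qla24xx_abort_command
 *	Abort an outstanding command by handle using an Abort IOCB; the
 *	asynchronous abort path is used instead when ql2xasynctmfenable is
 *	set.
 *
 * Input:
 *	sp = SRB of the command to abort.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */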
3227 int
3228 qla24xx_abort_command(srb_t *sp)
3229 {
3230 int rval;
3231 unsigned long flags = 0;
3232
3233 struct abort_entry_24xx *abt;
3234 dma_addr_t abt_dma;
3235 uint32_t handle;
3236 fc_port_t *fcport = sp->fcport;
3237 struct scsi_qla_host *vha = fcport->vha;
3238 struct qla_hw_data *ha = vha->hw;
3239 struct req_que *req = vha->req;
3240 struct qla_qpair *qpair = sp->qpair;
3241
3242 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3243 "Entered %s.\n", __func__);
3244
3245 if (sp->qpair)
3246 req = sp->qpair->req;
3247 else
3248 return QLA_ERR_NO_QPAIR;
3249
3250 if (ql2xasynctmfenable)
3251 return qla24xx_async_abort_command(sp);
3252
3253 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3254 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3255 if (req->outstanding_cmds[handle] == sp)
3256 break;
3257 }
3258 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3259 if (handle == req->num_outstanding_cmds) {
3260 /* Command not found. */
3261 return QLA_ERR_NOT_FOUND;
3262 }
3263
3264 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3265 if (abt == NULL) {
3266 ql_log(ql_log_warn, vha, 0x108d,
3267 "Failed to allocate abort IOCB.\n");
3268 return QLA_MEMORY_ALLOC_FAILED;
3269 }
3270
3271 abt->entry_type = ABORT_IOCB_TYPE;
3272 abt->entry_count = 1;
3273 abt->handle = make_handle(req->id, abt->handle);
3274 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3275 abt->handle_to_abort = make_handle(req->id, handle);
3276 abt->port_id[0] = fcport->d_id.b.al_pa;
3277 abt->port_id[1] = fcport->d_id.b.area;
3278 abt->port_id[2] = fcport->d_id.b.domain;
3279 abt->vp_index = fcport->vha->vp_idx;
3280
3281 abt->req_que_no = cpu_to_le16(req->id);
3282 /* Need to pass original sp */
3283 qla_nvme_abort_set_option(abt, sp);
3284
3285 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3286 if (rval != QLA_SUCCESS) {
3287 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3288 "Failed to issue IOCB (%x).\n", rval);
3289 } else if (abt->entry_status != 0) {
3290 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3291 "Failed to complete IOCB -- error status (%x).\n",
3292 abt->entry_status);
3293 rval = QLA_FUNCTION_FAILED;
3294 } else if (abt->nport_handle != cpu_to_le16(0)) {
3295 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3296 "Failed to complete IOCB -- completion status (%x).\n",
3297 le16_to_cpu(abt->nport_handle));
3298 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3299 rval = QLA_FUNCTION_PARAMETER_ERROR;
3300 else
3301 rval = QLA_FUNCTION_FAILED;
3302 } else {
3303 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3304 "Done %s.\n", __func__);
3305 }
3306 if (rval == QLA_SUCCESS)
3307 qla_nvme_abort_process_comp_status(abt, sp);
3308
3309 qla_wait_nvme_release_cmd_kref(sp);
3310
3311 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3312
3313 return rval;
3314 }
3315
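/* Task management IOCB and its status (response) IOCB share one buffer. */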
3316 struct tsk_mgmt_cmd {
3317 union {
3318 struct tsk_mgmt_entry tsk;
3319 struct sts_entry_24xx sts;
3320 } p;
3321 };
3322
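/*
 * __qla24xx_issue_tmf
 *	Issue a task management IOCB (target or LUN reset) for a port and,
 *	on completion, a marker IOCB to resynchronize with the firmware.
 *
 * Input:
 *	name = "Target" or "Lun" (for logging only).
 *	type = task management flags (TCF_TARGET_RESET or TCF_LUN_RESET).
 *	fcport = port structure pointer.
 *	l = LUN number (used for LUN reset).
 *	tag = command tag.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */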
3323 static int
3324 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3325 uint64_t l, int tag)
3326 {
3327 int rval, rval2;
3328 struct tsk_mgmt_cmd *tsk;
3329 struct sts_entry_24xx *sts;
3330 dma_addr_t tsk_dma;
3331 scsi_qla_host_t *vha;
3332 struct qla_hw_data *ha;
3333 struct req_que *req;
3334 struct qla_qpair *qpair;
3335
3336 vha = fcport->vha;
3337 ha = vha->hw;
3338 req = vha->req;
3339
3340 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3341 "Entered %s.\n", __func__);
3342
3343 if (vha->vp_idx && vha->qpair) {
3344 /* NPIV port */
3345 qpair = vha->qpair;
3346 req = qpair->req;
3347 }
3348
3349 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3350 if (tsk == NULL) {
3351 ql_log(ql_log_warn, vha, 0x1093,
3352 "Failed to allocate task management IOCB.\n");
3353 return QLA_MEMORY_ALLOC_FAILED;
3354 }
3355
3356 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3357 tsk->p.tsk.entry_count = 1;
3358 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3359 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3360 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3361 tsk->p.tsk.control_flags = cpu_to_le32(type);
3362 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3363 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3364 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3365 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3366 if (type == TCF_LUN_RESET) {
3367 int_to_scsilun(l, &tsk->p.tsk.lun);
3368 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3369 sizeof(tsk->p.tsk.lun));
3370 }
3371
3372 sts = &tsk->p.sts;
3373 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3374 if (rval != QLA_SUCCESS) {
3375 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3376 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3377 } else if (sts->entry_status != 0) {
3378 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3379 "Failed to complete IOCB -- error status (%x).\n",
3380 sts->entry_status);
3381 rval = QLA_FUNCTION_FAILED;
3382 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3383 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3384 "Failed to complete IOCB -- completion status (%x).\n",
3385 le16_to_cpu(sts->comp_status));
3386 rval = QLA_FUNCTION_FAILED;
3387 } else if (le16_to_cpu(sts->scsi_status) &
3388 SS_RESPONSE_INFO_LEN_VALID) {
3389 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3390 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3391 "Ignoring inconsistent data length -- not enough "
3392 "response info (%d).\n",
3393 le32_to_cpu(sts->rsp_data_len));
3394 } else if (sts->data[3]) {
3395 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3396 "Failed to complete IOCB -- response (%x).\n",
3397 sts->data[3]);
3398 rval = QLA_FUNCTION_FAILED;
3399 }
3400 }
3401
3402 /* Issue marker IOCB. */
3403 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3404 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3405 if (rval2 != QLA_SUCCESS) {
3406 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3407 "Failed to issue marker IOCB (%x).\n", rval2);
3408 } else {
3409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3410 "Done %s.\n", __func__);
3411 }
3412
3413 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3414
3415 return rval;
3416 }
3417
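/*
 * qla24xx_abort_target
 *	Issue a target reset, either through the asynchronous TM path or a
 *	task management IOCB.
 */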
3418 int
3419 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3420 {
3421 struct qla_hw_data *ha = fcport->vha->hw;
3422
3423 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3424 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3425
3426 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3427 }
3428
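/*
 * qla24xx_lun_reset
 *	Issue a LUN reset, either through the asynchronous TM path or a
 *	task management IOCB.
 */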
3429 int
3430 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3431 {
3432 struct qla_hw_data *ha = fcport->vha->hw;
3433
3434 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3435 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3436
3437 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3438 }
3439
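/*
 * qla2x00_system_error
 *	Ask the firmware to generate a system error. Supported on ISP23xx
 *	and FWI2-capable adapters only.
 */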
3440 int
3441 qla2x00_system_error(scsi_qla_host_t *vha)
3442 {
3443 int rval;
3444 mbx_cmd_t mc;
3445 mbx_cmd_t *mcp = &mc;
3446 struct qla_hw_data *ha = vha->hw;
3447
3448 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3449 return QLA_FUNCTION_FAILED;
3450
3451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3452 "Entered %s.\n", __func__);
3453
3454 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3455 mcp->out_mb = MBX_0;
3456 mcp->in_mb = MBX_0;
3457 mcp->tov = 5;
3458 mcp->flags = 0;
3459 rval = qla2x00_mailbox_command(vha, mcp);
3460
3461 if (rval != QLA_SUCCESS) {
3462 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3463 } else {
3464 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3465 "Done %s.\n", __func__);
3466 }
3467
3468 return rval;
3469 }
3470
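/*
 * qla2x00_write_serdes_word
 *	Write one word to a SERDES register (ISP25xx/2031/27xx/28xx only;
 *	ISP2031 uses only the low byte of the data).
 */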
3471 int
3472 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3473 {
3474 int rval;
3475 mbx_cmd_t mc;
3476 mbx_cmd_t *mcp = &mc;
3477
3478 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3479 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3480 return QLA_FUNCTION_FAILED;
3481
3482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3483 "Entered %s.\n", __func__);
3484
3485 mcp->mb[0] = MBC_WRITE_SERDES;
3486 mcp->mb[1] = addr;
3487 if (IS_QLA2031(vha->hw))
3488 mcp->mb[2] = data & 0xff;
3489 else
3490 mcp->mb[2] = data;
3491
3492 mcp->mb[3] = 0;
3493 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3494 mcp->in_mb = MBX_0;
3495 mcp->tov = MBX_TOV_SECONDS;
3496 mcp->flags = 0;
3497 rval = qla2x00_mailbox_command(vha, mcp);
3498
3499 if (rval != QLA_SUCCESS) {
3500 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3501 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3502 } else {
3503 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3504 "Done %s.\n", __func__);
3505 }
3506
3507 return rval;
3508 }
3509
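/*
 * qla2x00_read_serdes_word
 *	Read one word from a SERDES register (ISP25xx/2031/27xx/28xx only;
 *	ISP2031 returns only the low byte of the data).
 */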
3510 int
3511 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3512 {
3513 int rval;
3514 mbx_cmd_t mc;
3515 mbx_cmd_t *mcp = &mc;
3516
3517 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3518 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3519 return QLA_FUNCTION_FAILED;
3520
3521 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3522 "Entered %s.\n", __func__);
3523
3524 mcp->mb[0] = MBC_READ_SERDES;
3525 mcp->mb[1] = addr;
3526 mcp->mb[3] = 0;
3527 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3528 mcp->in_mb = MBX_1|MBX_0;
3529 mcp->tov = MBX_TOV_SECONDS;
3530 mcp->flags = 0;
3531 rval = qla2x00_mailbox_command(vha, mcp);
3532
3533 if (IS_QLA2031(vha->hw))
3534 *data = mcp->mb[1] & 0xff;
3535 else
3536 *data = mcp->mb[1];
3537
3538 if (rval != QLA_SUCCESS) {
3539 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3540 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3541 } else {
3542 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3543 "Done %s.\n", __func__);
3544 }
3545
3546 return rval;
3547 }
3548
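/*
 * qla8044_write_serdes_word
 *	Write a 32-bit value to an ISP8044 Ethernet SERDES register.
 */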
3549 int
3550 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3551 {
3552 int rval;
3553 mbx_cmd_t mc;
3554 mbx_cmd_t *mcp = &mc;
3555
3556 if (!IS_QLA8044(vha->hw))
3557 return QLA_FUNCTION_FAILED;
3558
3559 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3560 "Entered %s.\n", __func__);
3561
3562 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3563 mcp->mb[1] = HCS_WRITE_SERDES;
3564 mcp->mb[3] = LSW(addr);
3565 mcp->mb[4] = MSW(addr);
3566 mcp->mb[5] = LSW(data);
3567 mcp->mb[6] = MSW(data);
3568 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3569 mcp->in_mb = MBX_0;
3570 mcp->tov = MBX_TOV_SECONDS;
3571 mcp->flags = 0;
3572 rval = qla2x00_mailbox_command(vha, mcp);
3573
3574 if (rval != QLA_SUCCESS) {
3575 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3576 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3577 } else {
3578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3579 "Done %s.\n", __func__);
3580 }
3581
3582 return rval;
3583 }
3584
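/*
 * qla8044_read_serdes_word
 *	Read a 32-bit value from an ISP8044 Ethernet SERDES register.
 */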
3585 int
3586 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3587 {
3588 int rval;
3589 mbx_cmd_t mc;
3590 mbx_cmd_t *mcp = &mc;
3591
3592 if (!IS_QLA8044(vha->hw))
3593 return QLA_FUNCTION_FAILED;
3594
3595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3596 "Entered %s.\n", __func__);
3597
3598 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3599 mcp->mb[1] = HCS_READ_SERDES;
3600 mcp->mb[3] = LSW(addr);
3601 mcp->mb[4] = MSW(addr);
3602 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3603 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3604 mcp->tov = MBX_TOV_SECONDS;
3605 mcp->flags = 0;
3606 rval = qla2x00_mailbox_command(vha, mcp);
3607
3608 *data = mcp->mb[2] << 16 | mcp->mb[1];
3609
3610 if (rval != QLA_SUCCESS) {
3611 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3612 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3613 } else {
3614 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3615 "Done %s.\n", __func__);
3616 }
3617
3618 return rval;
3619 }
3620
3621 /**
3622 * qla2x00_set_serdes_params() - Set serial link (SERDES) parameters.
3623 * @vha: HA context
3624 * @sw_em_1g: serial link options for 1 Gb operation
3625 * @sw_em_2g: serial link options for 2 Gb operation
3626 * @sw_em_4g: serial link options for 4 Gb operation
3627 *
3628 * Returns qla2x00 local function return status code.
3629 */
3630 int
3631 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3632 uint16_t sw_em_2g, uint16_t sw_em_4g)
3633 {
3634 int rval;
3635 mbx_cmd_t mc;
3636 mbx_cmd_t *mcp = &mc;
3637
3638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3639 "Entered %s.\n", __func__);
3640
3641 mcp->mb[0] = MBC_SERDES_PARAMS;
3642 mcp->mb[1] = BIT_0;
3643 mcp->mb[2] = sw_em_1g | BIT_15;
3644 mcp->mb[3] = sw_em_2g | BIT_15;
3645 mcp->mb[4] = sw_em_4g | BIT_15;
3646 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3647 mcp->in_mb = MBX_0;
3648 mcp->tov = MBX_TOV_SECONDS;
3649 mcp->flags = 0;
3650 rval = qla2x00_mailbox_command(vha, mcp);
3651
3652 if (rval != QLA_SUCCESS) {
3653 /*EMPTY*/
3654 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3655 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3656 } else {
3657 /*EMPTY*/
3658 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3659 "Done %s.\n", __func__);
3660 }
3661
3662 return rval;
3663 }
3664
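/*
 * qla2x00_stop_firmware
 *	Issue the stop firmware mailbox command (FWI2-capable adapters only).
 */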
3665 int
3666 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3667 {
3668 int rval;
3669 mbx_cmd_t mc;
3670 mbx_cmd_t *mcp = &mc;
3671
3672 if (!IS_FWI2_CAPABLE(vha->hw))
3673 return QLA_FUNCTION_FAILED;
3674
3675 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3676 "Entered %s.\n", __func__);
3677
3678 mcp->mb[0] = MBC_STOP_FIRMWARE;
3679 mcp->mb[1] = 0;
3680 mcp->out_mb = MBX_1|MBX_0;
3681 mcp->in_mb = MBX_0;
3682 mcp->tov = 5;
3683 mcp->flags = 0;
3684 rval = qla2x00_mailbox_command(vha, mcp);
3685
3686 if (rval != QLA_SUCCESS) {
3687 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3688 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3689 rval = QLA_INVALID_COMMAND;
3690 } else {
3691 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3692 "Done %s.\n", __func__);
3693 }
3694
3695 return rval;
3696 }
3697
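/*
 * qla2x00_enable_eft_trace
 *	Enable firmware EFT tracing into a caller-supplied DMA buffer via
 *	the trace control mailbox command.
 *
 * Input:
 *	vha = adapter block pointer.
 *	eft_dma = DMA address of the EFT buffer.
 *	buffers = number of trace buffers.
 */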
3698 int
3699 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3700 uint16_t buffers)
3701 {
3702 int rval;
3703 mbx_cmd_t mc;
3704 mbx_cmd_t *mcp = &mc;
3705
3706 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3707 "Entered %s.\n", __func__);
3708
3709 if (!IS_FWI2_CAPABLE(vha->hw))
3710 return QLA_FUNCTION_FAILED;
3711
3712 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3713 return QLA_FUNCTION_FAILED;
3714
3715 mcp->mb[0] = MBC_TRACE_CONTROL;
3716 mcp->mb[1] = TC_EFT_ENABLE;
3717 mcp->mb[2] = LSW(eft_dma);
3718 mcp->mb[3] = MSW(eft_dma);
3719 mcp->mb[4] = LSW(MSD(eft_dma));
3720 mcp->mb[5] = MSW(MSD(eft_dma));
3721 mcp->mb[6] = buffers;
3722 mcp->mb[7] = TC_AEN_DISABLE;
3723 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3724 mcp->in_mb = MBX_1|MBX_0;
3725 mcp->tov = MBX_TOV_SECONDS;
3726 mcp->flags = 0;
3727 rval = qla2x00_mailbox_command(vha, mcp);
3728 if (rval != QLA_SUCCESS) {
3729 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3730 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3731 rval, mcp->mb[0], mcp->mb[1]);
3732 } else {
3733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3734 "Done %s.\n", __func__);
3735 }
3736
3737 return rval;
3738 }
3739
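/*
 * qla2x00_disable_eft_trace
 *	Disable firmware EFT tracing via the trace control mailbox command.
 */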
3740 int
3741 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3742 {
3743 int rval;
3744 mbx_cmd_t mc;
3745 mbx_cmd_t *mcp = &mc;
3746
3747 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3748 "Entered %s.\n", __func__);
3749
3750 if (!IS_FWI2_CAPABLE(vha->hw))
3751 return QLA_FUNCTION_FAILED;
3752
3753 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3754 return QLA_FUNCTION_FAILED;
3755
3756 mcp->mb[0] = MBC_TRACE_CONTROL;
3757 mcp->mb[1] = TC_EFT_DISABLE;
3758 mcp->out_mb = MBX_1|MBX_0;
3759 mcp->in_mb = MBX_1|MBX_0;
3760 mcp->tov = MBX_TOV_SECONDS;
3761 mcp->flags = 0;
3762 rval = qla2x00_mailbox_command(vha, mcp);
3763 if (rval != QLA_SUCCESS) {
3764 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3765 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3766 rval, mcp->mb[0], mcp->mb[1]);
3767 } else {
3768 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3769 "Done %s.\n", __func__);
3770 }
3771
3772 return rval;
3773 }
3774
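/*
 * qla2x00_enable_fce_trace
 *	Enable firmware FCE tracing into a caller-supplied DMA buffer via
 *	the trace control mailbox command. On success, the first eight
 *	mailbox registers are copied back through mb and the buffer count
 *	through dwords.
 */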
3775 int
3776 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3777 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3778 {
3779 int rval;
3780 mbx_cmd_t mc;
3781 mbx_cmd_t *mcp = &mc;
3782
3783 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3784 "Entered %s.\n", __func__);
3785
3786 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3787 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3788 !IS_QLA28XX(vha->hw))
3789 return QLA_FUNCTION_FAILED;
3790
3791 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3792 return QLA_FUNCTION_FAILED;
3793
3794 mcp->mb[0] = MBC_TRACE_CONTROL;
3795 mcp->mb[1] = TC_FCE_ENABLE;
3796 mcp->mb[2] = LSW(fce_dma);
3797 mcp->mb[3] = MSW(fce_dma);
3798 mcp->mb[4] = LSW(MSD(fce_dma));
3799 mcp->mb[5] = MSW(MSD(fce_dma));
3800 mcp->mb[6] = buffers;
3801 mcp->mb[7] = TC_AEN_DISABLE;
3802 mcp->mb[8] = 0;
3803 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3804 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3805 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3806 MBX_1|MBX_0;
3807 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3808 mcp->tov = MBX_TOV_SECONDS;
3809 mcp->flags = 0;
3810 rval = qla2x00_mailbox_command(vha, mcp);
3811 if (rval != QLA_SUCCESS) {
3812 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3813 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3814 rval, mcp->mb[0], mcp->mb[1]);
3815 } else {
3816 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3817 "Done %s.\n", __func__);
3818
3819 if (mb)
3820 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3821 if (dwords)
3822 *dwords = buffers;
3823 }
3824
3825 return rval;
3826 }
3827
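/*
 * qla2x00_disable_fce_trace
 *	Issue MBC_TRACE_CONTROL with TC_FCE_DISABLE to stop FCE tracing.
 *	On success the firmware write and read pointers are returned
 *	through wr and rd.
 */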
3828 int
3829 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3830 {
3831 int rval;
3832 mbx_cmd_t mc;
3833 mbx_cmd_t *mcp = &mc;
3834
3835 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3836 "Entered %s.\n", __func__);
3837
3838 if (!IS_FWI2_CAPABLE(vha->hw))
3839 return QLA_FUNCTION_FAILED;
3840
3841 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3842 return QLA_FUNCTION_FAILED;
3843
3844 mcp->mb[0] = MBC_TRACE_CONTROL;
3845 mcp->mb[1] = TC_FCE_DISABLE;
3846 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3847 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3848 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3849 MBX_1|MBX_0;
3850 mcp->tov = MBX_TOV_SECONDS;
3851 mcp->flags = 0;
3852 rval = qla2x00_mailbox_command(vha, mcp);
3853 if (rval != QLA_SUCCESS) {
3854 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3855 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3856 rval, mcp->mb[0], mcp->mb[1]);
3857 } else {
3858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3859 "Done %s.\n", __func__);
3860
3861 if (wr)
3862 *wr = (uint64_t) mcp->mb[5] << 48 |
3863 (uint64_t) mcp->mb[4] << 32 |
3864 (uint64_t) mcp->mb[3] << 16 |
3865 (uint64_t) mcp->mb[2];
3866 if (rd)
3867 *rd = (uint64_t) mcp->mb[9] << 48 |
3868 (uint64_t) mcp->mb[8] << 32 |
3869 (uint64_t) mcp->mb[7] << 16 |
3870 (uint64_t) mcp->mb[6];
3871 }
3872
3873 return rval;
3874 }
3875
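/*
 * qla2x00_get_idma_speed
 *	Query the iIDMA port speed for the given loop_id via the
 *	MBC_PORT_PARAMS mailbox command. Mailbox status registers are
 *	returned through mb[] when supplied.
 */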
3876 int
3877 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3878 uint16_t *port_speed, uint16_t *mb)
3879 {
3880 int rval;
3881 mbx_cmd_t mc;
3882 mbx_cmd_t *mcp = &mc;
3883
3884 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3885 "Entered %s.\n", __func__);
3886
3887 if (!IS_IIDMA_CAPABLE(vha->hw))
3888 return QLA_FUNCTION_FAILED;
3889
3890 mcp->mb[0] = MBC_PORT_PARAMS;
3891 mcp->mb[1] = loop_id;
3892 mcp->mb[2] = mcp->mb[3] = 0;
3893 mcp->mb[9] = vha->vp_idx;
3894 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3895 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3896 mcp->tov = MBX_TOV_SECONDS;
3897 mcp->flags = 0;
3898 rval = qla2x00_mailbox_command(vha, mcp);
3899
3900 /* Return mailbox statuses. */
3901 if (mb) {
3902 mb[0] = mcp->mb[0];
3903 mb[1] = mcp->mb[1];
3904 mb[3] = mcp->mb[3];
3905 }
3906
3907 if (rval != QLA_SUCCESS) {
3908 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3909 } else {
3910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3911 "Done %s.\n", __func__);
3912 if (port_speed)
3913 *port_speed = mcp->mb[3];
3914 }
3915
3916 return rval;
3917 }
3918
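/*
 * qla2x00_set_idma_speed
 *	Set the iIDMA port speed for the given loop_id via the
 *	MBC_PORT_PARAMS mailbox command.
 */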
3919 int
3920 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3921 uint16_t port_speed, uint16_t *mb)
3922 {
3923 int rval;
3924 mbx_cmd_t mc;
3925 mbx_cmd_t *mcp = &mc;
3926
3927 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3928 "Entered %s.\n", __func__);
3929
3930 if (!IS_IIDMA_CAPABLE(vha->hw))
3931 return QLA_FUNCTION_FAILED;
3932
3933 mcp->mb[0] = MBC_PORT_PARAMS;
3934 mcp->mb[1] = loop_id;
3935 mcp->mb[2] = BIT_0;
3936 mcp->mb[3] = port_speed & 0x3F;
3937 mcp->mb[9] = vha->vp_idx;
3938 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3939 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3940 mcp->tov = MBX_TOV_SECONDS;
3941 mcp->flags = 0;
3942 rval = qla2x00_mailbox_command(vha, mcp);
3943
3944 /* Return mailbox statuses. */
3945 if (mb) {
3946 mb[0] = mcp->mb[0];
3947 mb[1] = mcp->mb[1];
3948 mb[3] = mcp->mb[3];
3949 }
3950
3951 if (rval != QLA_SUCCESS) {
3952 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3953 "Failed=%x.\n", rval);
3954 } else {
3955 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3956 "Done %s.\n", __func__);
3957 }
3958
3959 return rval;
3960 }
3961
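/*
 * qla24xx_report_id_acquisition
 *	Process a Report ID Acquisition (RIDA) entry from the firmware.
 *	Decodes the entry format (0 = loop, 1 = fabric/N2N flags,
 *	2 = N2N direct connect), updates the host port id and topology,
 *	and schedules any follow-up login/relogin work in dpc context.
 */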
3962 void
3963 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3964 struct vp_rpt_id_entry_24xx *rptid_entry)
3965 {
3966 struct qla_hw_data *ha = vha->hw;
3967 scsi_qla_host_t *vp = NULL;
3968 unsigned long flags;
3969 int found;
3970 port_id_t id;
3971 struct fc_port *fcport;
3972
3973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3974 "Entered %s.\n", __func__);
3975
3976 if (rptid_entry->entry_status != 0)
3977 return;
3978
3979 id.b.domain = rptid_entry->port_id[2];
3980 id.b.area = rptid_entry->port_id[1];
3981 id.b.al_pa = rptid_entry->port_id[0];
3982 id.b.rsvd_1 = 0;
3983 ha->flags.n2n_ae = 0;
3984
3985 if (rptid_entry->format == 0) {
3986 /* loop */
3987 ql_dbg(ql_dbg_async, vha, 0x10b7,
3988 "Format 0 : Number of VPs setup %d, number of "
3989 "VPs acquired %d.\n", rptid_entry->vp_setup,
3990 rptid_entry->vp_acquired);
3991 ql_dbg(ql_dbg_async, vha, 0x10b8,
3992 "Primary port id %02x%02x%02x.\n",
3993 rptid_entry->port_id[2], rptid_entry->port_id[1],
3994 rptid_entry->port_id[0]);
3995 ha->current_topology = ISP_CFG_NL;
3996 qlt_update_host_map(vha, id);
3997
3998 } else if (rptid_entry->format == 1) {
3999 /* fabric */
4000 ql_dbg(ql_dbg_async, vha, 0x10b9,
4001 "Format 1: VP[%d] enabled - status %d - with "
4002 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
4003 rptid_entry->vp_status,
4004 rptid_entry->port_id[2], rptid_entry->port_id[1],
4005 rptid_entry->port_id[0]);
4006 ql_dbg(ql_dbg_async, vha, 0x5075,
4007 "Format 1: Remote WWPN %8phC.\n",
4008 rptid_entry->u.f1.port_name);
4009
4010 ql_dbg(ql_dbg_async, vha, 0x5075,
4011 "Format 1: WWPN %8phC.\n",
4012 vha->port_name);
4013
4014 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
4015 case TOPO_N2N:
4016 ha->current_topology = ISP_CFG_N;
4017 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4018 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4019 fcport->scan_state = QLA_FCPORT_SCAN;
4020 fcport->n2n_flag = 0;
4021 }
4022 id.b24 = 0;
4023 if (wwn_to_u64(vha->port_name) >
4024 wwn_to_u64(rptid_entry->u.f1.port_name)) {
4025 vha->d_id.b24 = 0;
4026 vha->d_id.b.al_pa = 1;
4027 ha->flags.n2n_bigger = 1;
4028
4029 id.b.al_pa = 2;
4030 ql_dbg(ql_dbg_async, vha, 0x5075,
4031 "Format 1: assign local id %x remote id %x\n",
4032 vha->d_id.b24, id.b24);
4033 } else {
4034 ql_dbg(ql_dbg_async, vha, 0x5075,
4035 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
4036 rptid_entry->u.f1.port_name);
4037 ha->flags.n2n_bigger = 0;
4038 }
4039
4040 fcport = qla2x00_find_fcport_by_wwpn(vha,
4041 rptid_entry->u.f1.port_name, 1);
4042 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4043
4044
4045 if (fcport) {
4046 fcport->plogi_nack_done_deadline = jiffies + HZ;
4047 fcport->dm_login_expire = jiffies +
4048 QLA_N2N_WAIT_TIME * HZ;
4049 fcport->scan_state = QLA_FCPORT_FOUND;
4050 fcport->n2n_flag = 1;
4051 fcport->keep_nport_handle = 1;
4052 fcport->login_retry = vha->hw->login_retry_count;
4053 fcport->fc4_type = FS_FC4TYPE_FCP;
4054 if (vha->flags.nvme_enabled)
4055 fcport->fc4_type |= FS_FC4TYPE_NVME;
4056
4057 if (wwn_to_u64(vha->port_name) >
4058 wwn_to_u64(fcport->port_name)) {
4059 fcport->d_id = id;
4060 }
4061
4062 switch (fcport->disc_state) {
4063 case DSC_DELETED:
4064 set_bit(RELOGIN_NEEDED,
4065 &vha->dpc_flags);
4066 break;
4067 case DSC_DELETE_PEND:
4068 break;
4069 default:
4070 qlt_schedule_sess_for_deletion(fcport);
4071 break;
4072 }
4073 } else {
4074 qla24xx_post_newsess_work(vha, &id,
4075 rptid_entry->u.f1.port_name,
4076 rptid_entry->u.f1.node_name,
4077 NULL,
4078 FS_FCP_IS_N2N);
4079 }
4080
4081 /* if our portname is higher, then initiate N2N login */
4082
4083 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4084 return;
4085 case TOPO_FL:
4086 ha->current_topology = ISP_CFG_FL;
4087 break;
4088 case TOPO_F:
4089 ha->current_topology = ISP_CFG_F;
4090 break;
4091 default:
4092 break;
4093 }
4094
4095 ha->flags.gpsc_supported = 1;
4096 ha->current_topology = ISP_CFG_F;
4097 /* buffer to buffer credit flag */
4098 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4099
4100 if (rptid_entry->vp_idx == 0) {
4101 if (rptid_entry->vp_status == VP_STAT_COMPL) {
4102 /* FA-WWN is only for physical port */
4103 if (qla_ini_mode_enabled(vha) &&
4104 ha->flags.fawwpn_enabled &&
4105 (rptid_entry->u.f1.flags &
4106 BIT_6)) {
4107 memcpy(vha->port_name,
4108 rptid_entry->u.f1.port_name,
4109 WWN_SIZE);
4110 }
4111
4112 qlt_update_host_map(vha, id);
4113 }
4114
4115 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4116 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4117 } else {
4118 if (rptid_entry->vp_status != VP_STAT_COMPL &&
4119 rptid_entry->vp_status != VP_STAT_ID_CHG) {
4120 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4121 "Could not acquire ID for VP[%d].\n",
4122 rptid_entry->vp_idx);
4123 return;
4124 }
4125
4126 found = 0;
4127 spin_lock_irqsave(&ha->vport_slock, flags);
4128 list_for_each_entry(vp, &ha->vp_list, list) {
4129 if (rptid_entry->vp_idx == vp->vp_idx) {
4130 found = 1;
4131 break;
4132 }
4133 }
4134 spin_unlock_irqrestore(&ha->vport_slock, flags);
4135
4136 if (!found)
4137 return;
4138
4139 qlt_update_host_map(vp, id);
4140
4141 /*
4142 * Cannot configure here as we are still sitting on the
4143 * response queue. Handle it in dpc context.
4144 */
4145 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4146 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4147 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4148 }
4149 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4150 qla2xxx_wake_dpc(vha);
4151 } else if (rptid_entry->format == 2) {
4152 ql_dbg(ql_dbg_async, vha, 0x505f,
4153 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4154 rptid_entry->port_id[2], rptid_entry->port_id[1],
4155 rptid_entry->port_id[0]);
4156
4157 ql_dbg(ql_dbg_async, vha, 0x5075,
4158 "N2N: Remote WWPN %8phC.\n",
4159 rptid_entry->u.f2.port_name);
4160
4161 /* N2N. direct connect */
4162 ha->current_topology = ISP_CFG_N;
4163 ha->flags.rida_fmt2 = 1;
4164 vha->d_id.b.domain = rptid_entry->port_id[2];
4165 vha->d_id.b.area = rptid_entry->port_id[1];
4166 vha->d_id.b.al_pa = rptid_entry->port_id[0];
4167
4168 ha->flags.n2n_ae = 1;
4169 spin_lock_irqsave(&ha->vport_slock, flags);
4170 qlt_update_vp_map(vha, SET_AL_PA);
4171 spin_unlock_irqrestore(&ha->vport_slock, flags);
4172
4173 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4174 fcport->scan_state = QLA_FCPORT_SCAN;
4175 fcport->n2n_flag = 0;
4176 }
4177
4178 fcport = qla2x00_find_fcport_by_wwpn(vha,
4179 rptid_entry->u.f2.port_name, 1);
4180
4181 if (fcport) {
4182 fcport->login_retry = vha->hw->login_retry_count;
4183 fcport->plogi_nack_done_deadline = jiffies + HZ;
4184 fcport->scan_state = QLA_FCPORT_FOUND;
4185 fcport->keep_nport_handle = 1;
4186 fcport->n2n_flag = 1;
4187 fcport->d_id.b.domain =
4188 rptid_entry->u.f2.remote_nport_id[2];
4189 fcport->d_id.b.area =
4190 rptid_entry->u.f2.remote_nport_id[1];
4191 fcport->d_id.b.al_pa =
4192 rptid_entry->u.f2.remote_nport_id[0];
4193
4194 /*
4195 * For the case where the remote port sends PRLO, FW
4196 * sends up RIDA Format 2 as an indication of session
4197 * loss. In other words, the FW state changes from PRLI
4198 * complete back to PLOGI complete. Delete the
4199 * session and let relogin drive the reconnect.
4200 */
4201 if (atomic_read(&fcport->state) == FCS_ONLINE)
4202 qlt_schedule_sess_for_deletion(fcport);
4203 }
4204 }
4205 }
4206
4207 /*
4208 * qla24xx_modify_vp_config
4209 * Change VP configuration for vha
4210 *
4211 * Input:
4212 * vha = adapter block pointer.
4213 *
4214 * Returns:
4215 * qla2xxx local function return status code.
4216 *
4217 * Context:
4218 * Kernel context.
4219 */
4220 int
4221 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4222 {
4223 int rval;
4224 struct vp_config_entry_24xx *vpmod;
4225 dma_addr_t vpmod_dma;
4226 struct qla_hw_data *ha = vha->hw;
4227 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4228
4229 /* This can be called by the parent */
4230
4231 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4232 "Entered %s.\n", __func__);
4233
4234 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4235 if (!vpmod) {
4236 ql_log(ql_log_warn, vha, 0x10bc,
4237 "Failed to allocate modify VP IOCB.\n");
4238 return QLA_MEMORY_ALLOC_FAILED;
4239 }
4240
4241 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4242 vpmod->entry_count = 1;
4243 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4244 vpmod->vp_count = 1;
4245 vpmod->vp_index1 = vha->vp_idx;
4246 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4247
4248 qlt_modify_vp_config(vha, vpmod);
4249
4250 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4251 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4252 vpmod->entry_count = 1;
4253
4254 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4255 if (rval != QLA_SUCCESS) {
4256 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4257 "Failed to issue VP config IOCB (%x).\n", rval);
4258 } else if (vpmod->comp_status != 0) {
4259 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4260 "Failed to complete IOCB -- error status (%x).\n",
4261 vpmod->comp_status);
4262 rval = QLA_FUNCTION_FAILED;
4263 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4264 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4265 "Failed to complete IOCB -- completion status (%x).\n",
4266 le16_to_cpu(vpmod->comp_status));
4267 rval = QLA_FUNCTION_FAILED;
4268 } else {
4269 /* EMPTY */
4270 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4271 "Done %s.\n", __func__);
4272 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4273 }
4274 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4275
4276 return rval;
4277 }
4278
4279 /*
4280 * qla2x00_send_change_request
4281 * Receive or disable RSCN request from fabric controller
4282 *
4283 * Input:
4284 * ha = adapter block pointer
4285 * format = registration format:
4286 * 0 - Reserved
4287 * 1 - Fabric detected registration
4288 * 2 - N_port detected registration
4289 * 3 - Full registration
4290 * FF - clear registration
4291 * vp_idx = Virtual port index
4292 *
4293 * Returns:
4294 * qla2x00 local function return status code.
4295 *
4296 * Context:
4297 * Kernel Context
4298 */
4299
4300 int
4301 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4302 uint16_t vp_idx)
4303 {
4304 int rval;
4305 mbx_cmd_t mc;
4306 mbx_cmd_t *mcp = &mc;
4307
4308 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4309 "Entered %s.\n", __func__);
4310
4311 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4312 mcp->mb[1] = format;
4313 mcp->mb[9] = vp_idx;
4314 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4315 mcp->in_mb = MBX_0|MBX_1;
4316 mcp->tov = MBX_TOV_SECONDS;
4317 mcp->flags = 0;
4318 rval = qla2x00_mailbox_command(vha, mcp);
4319
4320 if (rval == QLA_SUCCESS) {
4321 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4322 rval = BIT_1;
4323 }
4324 } else
4325 rval = BIT_1;
4326
4327 return rval;
4328 }
4329
4330 int
4331 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4332 uint32_t size)
4333 {
4334 int rval;
4335 mbx_cmd_t mc;
4336 mbx_cmd_t *mcp = &mc;
4337
4338 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4339 "Entered %s.\n", __func__);
4340
4341 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4342 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4343 mcp->mb[8] = MSW(addr);
4344 mcp->mb[10] = 0;
4345 mcp->out_mb = MBX_10|MBX_8|MBX_0;
4346 } else {
4347 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4348 mcp->out_mb = MBX_0;
4349 }
4350 mcp->mb[1] = LSW(addr);
4351 mcp->mb[2] = MSW(req_dma);
4352 mcp->mb[3] = LSW(req_dma);
4353 mcp->mb[6] = MSW(MSD(req_dma));
4354 mcp->mb[7] = LSW(MSD(req_dma));
4355 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4356 if (IS_FWI2_CAPABLE(vha->hw)) {
4357 mcp->mb[4] = MSW(size);
4358 mcp->mb[5] = LSW(size);
4359 mcp->out_mb |= MBX_5|MBX_4;
4360 } else {
4361 mcp->mb[4] = LSW(size);
4362 mcp->out_mb |= MBX_4;
4363 }
4364
4365 mcp->in_mb = MBX_0;
4366 mcp->tov = MBX_TOV_SECONDS;
4367 mcp->flags = 0;
4368 rval = qla2x00_mailbox_command(vha, mcp);
4369
4370 if (rval != QLA_SUCCESS) {
4371 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4372 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4373 } else {
4374 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4375 "Done %s.\n", __func__);
4376 }
4377
4378 return rval;
4379 }
4380 /* 84XX Support **************************************************************/
4381
4382 struct cs84xx_mgmt_cmd {
4383 union {
4384 struct verify_chip_entry_84xx req;
4385 struct verify_chip_rsp_84xx rsp;
4386 } p;
4387 };
4388
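/*
 * qla84xx_verify_chip
 *	Issue a Verify Chip IOCB to the CS84xx to verify (and optionally
 *	update) its firmware, retrying once without the firmware update if
 *	the first attempt fails. Completion and failure codes are returned
 *	through status[].
 */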
4389 int
4390 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4391 {
4392 int rval, retry;
4393 struct cs84xx_mgmt_cmd *mn;
4394 dma_addr_t mn_dma;
4395 uint16_t options;
4396 unsigned long flags;
4397 struct qla_hw_data *ha = vha->hw;
4398
4399 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4400 "Entered %s.\n", __func__);
4401
4402 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4403 if (mn == NULL) {
4404 return QLA_MEMORY_ALLOC_FAILED;
4405 }
4406
4407 /* Force Update? */
4408 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4409 /* Diagnostic firmware? */
4410 /* options |= MENLO_DIAG_FW; */
4411 /* We update the firmware with only one data sequence. */
4412 options |= VCO_END_OF_DATA;
4413
4414 do {
4415 retry = 0;
4416 memset(mn, 0, sizeof(*mn));
4417 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4418 mn->p.req.entry_count = 1;
4419 mn->p.req.options = cpu_to_le16(options);
4420
4421 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4422 "Dump of Verify Request.\n");
4423 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4424 mn, sizeof(*mn));
4425
4426 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4427 if (rval != QLA_SUCCESS) {
4428 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4429 "Failed to issue verify IOCB (%x).\n", rval);
4430 goto verify_done;
4431 }
4432
4433 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4434 "Dump of Verify Response.\n");
4435 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4436 mn, sizeof(*mn));
4437
4438 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4439 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4440 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4441 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4442 "cs=%x fc=%x.\n", status[0], status[1]);
4443
4444 if (status[0] != CS_COMPLETE) {
4445 rval = QLA_FUNCTION_FAILED;
4446 if (!(options & VCO_DONT_UPDATE_FW)) {
4447 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4448 "Firmware update failed. Retrying "
4449 "without firmware update.\n");
4450 options |= VCO_DONT_UPDATE_FW;
4451 options &= ~VCO_FORCE_UPDATE;
4452 retry = 1;
4453 }
4454 } else {
4455 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4456 "Firmware updated to %x.\n",
4457 le32_to_cpu(mn->p.rsp.fw_ver));
4458
4459 /* NOTE: we only update OP firmware. */
4460 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4461 ha->cs84xx->op_fw_version =
4462 le32_to_cpu(mn->p.rsp.fw_ver);
4463 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4464 flags);
4465 }
4466 } while (retry);
4467
4468 verify_done:
4469 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4470
4471 if (rval != QLA_SUCCESS) {
4472 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4473 "Failed=%x.\n", rval);
4474 } else {
4475 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4476 "Done %s.\n", __func__);
4477 }
4478
4479 return rval;
4480 }
4481
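/*
 * qla25xx_init_req_que
 *	Issue MBC_INITIALIZE_MULTIQ to register an additional request
 *	queue with the firmware and reset its in/out pointers.
 */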
4482 int
4483 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4484 {
4485 int rval;
4486 unsigned long flags;
4487 mbx_cmd_t mc;
4488 mbx_cmd_t *mcp = &mc;
4489 struct qla_hw_data *ha = vha->hw;
4490
4491 if (!ha->flags.fw_started)
4492 return QLA_SUCCESS;
4493
4494 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4495 "Entered %s.\n", __func__);
4496
4497 if (IS_SHADOW_REG_CAPABLE(ha))
4498 req->options |= BIT_13;
4499
4500 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4501 mcp->mb[1] = req->options;
4502 mcp->mb[2] = MSW(LSD(req->dma));
4503 mcp->mb[3] = LSW(LSD(req->dma));
4504 mcp->mb[6] = MSW(MSD(req->dma));
4505 mcp->mb[7] = LSW(MSD(req->dma));
4506 mcp->mb[5] = req->length;
4507 if (req->rsp)
4508 mcp->mb[10] = req->rsp->id;
4509 mcp->mb[12] = req->qos;
4510 mcp->mb[11] = req->vp_idx;
4511 mcp->mb[13] = req->rid;
4512 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4513 mcp->mb[15] = 0;
4514
4515 mcp->mb[4] = req->id;
4516 /* que in ptr index */
4517 mcp->mb[8] = 0;
4518 /* que out ptr index */
4519 mcp->mb[9] = *req->out_ptr = 0;
4520 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4521 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4522 mcp->in_mb = MBX_0;
4523 mcp->flags = MBX_DMA_OUT;
4524 mcp->tov = MBX_TOV_SECONDS * 2;
4525
4526 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4527 IS_QLA28XX(ha))
4528 mcp->in_mb |= MBX_1;
4529 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4530 mcp->out_mb |= MBX_15;
4531 /* debug q create issue in SR-IOV */
4532 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4533 }
4534
4535 spin_lock_irqsave(&ha->hardware_lock, flags);
4536 if (!(req->options & BIT_0)) {
4537 wrt_reg_dword(req->req_q_in, 0);
4538 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4539 wrt_reg_dword(req->req_q_out, 0);
4540 }
4541 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4542
4543 rval = qla2x00_mailbox_command(vha, mcp);
4544 if (rval != QLA_SUCCESS) {
4545 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4546 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4547 } else {
4548 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4549 "Done %s.\n", __func__);
4550 }
4551
4552 return rval;
4553 }
4554
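/*
 * qla25xx_init_rsp_que
 *	Issue MBC_INITIALIZE_MULTIQ to register an additional response
 *	queue with the firmware and reset its in/out pointers.
 */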
4555 int
4556 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4557 {
4558 int rval;
4559 unsigned long flags;
4560 mbx_cmd_t mc;
4561 mbx_cmd_t *mcp = &mc;
4562 struct qla_hw_data *ha = vha->hw;
4563
4564 if (!ha->flags.fw_started)
4565 return QLA_SUCCESS;
4566
4567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4568 "Entered %s.\n", __func__);
4569
4570 if (IS_SHADOW_REG_CAPABLE(ha))
4571 rsp->options |= BIT_13;
4572
4573 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4574 mcp->mb[1] = rsp->options;
4575 mcp->mb[2] = MSW(LSD(rsp->dma));
4576 mcp->mb[3] = LSW(LSD(rsp->dma));
4577 mcp->mb[6] = MSW(MSD(rsp->dma));
4578 mcp->mb[7] = LSW(MSD(rsp->dma));
4579 mcp->mb[5] = rsp->length;
4580 mcp->mb[14] = rsp->msix->entry;
4581 mcp->mb[13] = rsp->rid;
4582 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4583 mcp->mb[15] = 0;
4584
4585 mcp->mb[4] = rsp->id;
4586 /* que in ptr index */
4587 mcp->mb[8] = *rsp->in_ptr = 0;
4588 /* que out ptr index */
4589 mcp->mb[9] = 0;
4590 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4591 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4592 mcp->in_mb = MBX_0;
4593 mcp->flags = MBX_DMA_OUT;
4594 mcp->tov = MBX_TOV_SECONDS * 2;
4595
4596 if (IS_QLA81XX(ha)) {
4597 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4598 mcp->in_mb |= MBX_1;
4599 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4600 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4601 mcp->in_mb |= MBX_1;
4602 /* debug q create issue in SR-IOV */
4603 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4604 }
4605
4606 spin_lock_irqsave(&ha->hardware_lock, flags);
4607 if (!(rsp->options & BIT_0)) {
4608 wrt_reg_dword(rsp->rsp_q_out, 0);
4609 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4610 wrt_reg_dword(rsp->rsp_q_in, 0);
4611 }
4612
4613 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4614
4615 rval = qla2x00_mailbox_command(vha, mcp);
4616 if (rval != QLA_SUCCESS) {
4617 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4618 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4619 } else {
4620 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4621 "Done %s.\n", __func__);
4622 }
4623
4624 return rval;
4625 }
4626
4627 int
4628 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4629 {
4630 int rval;
4631 mbx_cmd_t mc;
4632 mbx_cmd_t *mcp = &mc;
4633
4634 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4635 "Entered %s.\n", __func__);
4636
4637 mcp->mb[0] = MBC_IDC_ACK;
4638 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4639 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4640 mcp->in_mb = MBX_0;
4641 mcp->tov = MBX_TOV_SECONDS;
4642 mcp->flags = 0;
4643 rval = qla2x00_mailbox_command(vha, mcp);
4644
4645 if (rval != QLA_SUCCESS) {
4646 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4647 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4648 } else {
4649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4650 "Done %s.\n", __func__);
4651 }
4652
4653 return rval;
4654 }
4655
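/*
 * qla81xx_fac_get_sector_size
 *	Use the Flash Access Control (FAC) mailbox command to retrieve the
 *	flash sector size; the result is returned through sector_size.
 */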
4656 int
4657 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4658 {
4659 int rval;
4660 mbx_cmd_t mc;
4661 mbx_cmd_t *mcp = &mc;
4662
4663 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4664 "Entered %s.\n", __func__);
4665
4666 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4667 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4668 return QLA_FUNCTION_FAILED;
4669
4670 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4671 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4672 mcp->out_mb = MBX_1|MBX_0;
4673 mcp->in_mb = MBX_1|MBX_0;
4674 mcp->tov = MBX_TOV_SECONDS;
4675 mcp->flags = 0;
4676 rval = qla2x00_mailbox_command(vha, mcp);
4677
4678 if (rval != QLA_SUCCESS) {
4679 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4680 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4681 rval, mcp->mb[0], mcp->mb[1]);
4682 } else {
4683 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4684 "Done %s.\n", __func__);
4685 *sector_size = mcp->mb[1];
4686 }
4687
4688 return rval;
4689 }
4690
4691 int
4692 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4693 {
4694 int rval;
4695 mbx_cmd_t mc;
4696 mbx_cmd_t *mcp = &mc;
4697
4698 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4699 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4700 return QLA_FUNCTION_FAILED;
4701
4702 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4703 "Entered %s.\n", __func__);
4704
4705 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4706 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4707 FAC_OPT_CMD_WRITE_PROTECT;
4708 mcp->out_mb = MBX_1|MBX_0;
4709 mcp->in_mb = MBX_1|MBX_0;
4710 mcp->tov = MBX_TOV_SECONDS;
4711 mcp->flags = 0;
4712 rval = qla2x00_mailbox_command(vha, mcp);
4713
4714 if (rval != QLA_SUCCESS) {
4715 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4716 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4717 rval, mcp->mb[0], mcp->mb[1]);
4718 } else {
4719 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4720 "Done %s.\n", __func__);
4721 }
4722
4723 return rval;
4724 }
4725
4726 int
4727 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4728 {
4729 int rval;
4730 mbx_cmd_t mc;
4731 mbx_cmd_t *mcp = &mc;
4732
4733 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4734 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4735 return QLA_FUNCTION_FAILED;
4736
4737 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4738 "Entered %s.\n", __func__);
4739
4740 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4741 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4742 mcp->mb[2] = LSW(start);
4743 mcp->mb[3] = MSW(start);
4744 mcp->mb[4] = LSW(finish);
4745 mcp->mb[5] = MSW(finish);
4746 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4747 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4748 mcp->tov = MBX_TOV_SECONDS;
4749 mcp->flags = 0;
4750 rval = qla2x00_mailbox_command(vha, mcp);
4751
4752 if (rval != QLA_SUCCESS) {
4753 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4754 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4755 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4756 } else {
4757 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4758 "Done %s.\n", __func__);
4759 }
4760
4761 return rval;
4762 }
4763
4764 int
4765 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4766 {
4767 int rval = QLA_SUCCESS;
4768 mbx_cmd_t mc;
4769 mbx_cmd_t *mcp = &mc;
4770 struct qla_hw_data *ha = vha->hw;
4771
4772 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4773 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4774 return rval;
4775
4776 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4777 "Entered %s.\n", __func__);
4778
4779 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4780 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4781 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4782 mcp->out_mb = MBX_1|MBX_0;
4783 mcp->in_mb = MBX_1|MBX_0;
4784 mcp->tov = MBX_TOV_SECONDS;
4785 mcp->flags = 0;
4786 rval = qla2x00_mailbox_command(vha, mcp);
4787
4788 if (rval != QLA_SUCCESS) {
4789 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4790 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4791 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4792 } else {
4793 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4794 "Done %s.\n", __func__);
4795 }
4796
4797 return rval;
4798 }
4799
4800 int
4801 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4802 {
4803 int rval = 0;
4804 mbx_cmd_t mc;
4805 mbx_cmd_t *mcp = &mc;
4806
4807 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4808 "Entered %s.\n", __func__);
4809
4810 mcp->mb[0] = MBC_RESTART_MPI_FW;
4811 mcp->out_mb = MBX_0;
4812 mcp->in_mb = MBX_0|MBX_1;
4813 mcp->tov = MBX_TOV_SECONDS;
4814 mcp->flags = 0;
4815 rval = qla2x00_mailbox_command(vha, mcp);
4816
4817 if (rval != QLA_SUCCESS) {
4818 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4819 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4820 rval, mcp->mb[0], mcp->mb[1]);
4821 } else {
4822 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4823 "Done %s.\n", __func__);
4824 }
4825
4826 return rval;
4827 }
4828
4829 int
4830 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4831 {
4832 int rval;
4833 mbx_cmd_t mc;
4834 mbx_cmd_t *mcp = &mc;
4835 int i;
4836 int len;
4837 __le16 *str;
4838 struct qla_hw_data *ha = vha->hw;
4839
4840 if (!IS_P3P_TYPE(ha))
4841 return QLA_FUNCTION_FAILED;
4842
4843 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4844 "Entered %s.\n", __func__);
4845
4846 str = (__force __le16 *)version;
4847 len = strlen(version);
4848
4849 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4850 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4851 mcp->out_mb = MBX_1|MBX_0;
4852 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4853 mcp->mb[i] = le16_to_cpup(str);
4854 mcp->out_mb |= 1<<i;
4855 }
4856 for (; i < 16; i++) {
4857 mcp->mb[i] = 0;
4858 mcp->out_mb |= 1<<i;
4859 }
4860 mcp->in_mb = MBX_1|MBX_0;
4861 mcp->tov = MBX_TOV_SECONDS;
4862 mcp->flags = 0;
4863 rval = qla2x00_mailbox_command(vha, mcp);
4864
4865 if (rval != QLA_SUCCESS) {
4866 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4867 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4868 } else {
4869 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4870 "Done %s.\n", __func__);
4871 }
4872
4873 return rval;
4874 }
4875
4876 int
4877 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4878 {
4879 int rval;
4880 mbx_cmd_t mc;
4881 mbx_cmd_t *mcp = &mc;
4882 int len;
4883 uint16_t dwlen;
4884 uint8_t *str;
4885 dma_addr_t str_dma;
4886 struct qla_hw_data *ha = vha->hw;
4887
4888 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4889 IS_P3P_TYPE(ha))
4890 return QLA_FUNCTION_FAILED;
4891
4892 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4893 "Entered %s.\n", __func__);
4894
4895 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4896 if (!str) {
4897 ql_log(ql_log_warn, vha, 0x117f,
4898 "Failed to allocate driver version param.\n");
4899 return QLA_MEMORY_ALLOC_FAILED;
4900 }
4901
4902 memcpy(str, "\x7\x3\x11\x0", 4);
4903 dwlen = str[0];
4904 len = dwlen * 4 - 4;
4905 memset(str + 4, 0, len);
4906 if (len > strlen(version))
4907 len = strlen(version);
4908 memcpy(str + 4, version, len);
4909
4910 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4911 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4912 mcp->mb[2] = MSW(LSD(str_dma));
4913 mcp->mb[3] = LSW(LSD(str_dma));
4914 mcp->mb[6] = MSW(MSD(str_dma));
4915 mcp->mb[7] = LSW(MSD(str_dma));
4916 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4917 mcp->in_mb = MBX_1|MBX_0;
4918 mcp->tov = MBX_TOV_SECONDS;
4919 mcp->flags = 0;
4920 rval = qla2x00_mailbox_command(vha, mcp);
4921
4922 if (rval != QLA_SUCCESS) {
4923 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4924 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4925 } else {
4926 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4927 "Done %s.\n", __func__);
4928 }
4929
4930 dma_pool_free(ha->s_dma_pool, str, str_dma);
4931
4932 return rval;
4933 }
4934
4935 int
4936 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4937 void *buf, uint16_t bufsiz)
4938 {
4939 int rval, i;
4940 mbx_cmd_t mc;
4941 mbx_cmd_t *mcp = &mc;
4942 uint32_t *bp;
4943
4944 if (!IS_FWI2_CAPABLE(vha->hw))
4945 return QLA_FUNCTION_FAILED;
4946
4947 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4948 "Entered %s.\n", __func__);
4949
4950 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4951 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4952 mcp->mb[2] = MSW(buf_dma);
4953 mcp->mb[3] = LSW(buf_dma);
4954 mcp->mb[6] = MSW(MSD(buf_dma));
4955 mcp->mb[7] = LSW(MSD(buf_dma));
4956 mcp->mb[8] = bufsiz/4;
4957 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4958 mcp->in_mb = MBX_1|MBX_0;
4959 mcp->tov = MBX_TOV_SECONDS;
4960 mcp->flags = 0;
4961 rval = qla2x00_mailbox_command(vha, mcp);
4962
4963 if (rval != QLA_SUCCESS) {
4964 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4965 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4966 } else {
4967 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4968 "Done %s.\n", __func__);
4969 bp = (uint32_t *) buf;
4970 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4971 *bp = le32_to_cpu((__force __le32)*bp);
4972 }
4973
4974 return rval;
4975 }
4976
4977 #define PUREX_CMD_COUNT 4
4978 int
4979 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
4980 {
4981 int rval;
4982 mbx_cmd_t mc;
4983 mbx_cmd_t *mcp = &mc;
4984 uint8_t *els_cmd_map;
4985 uint8_t active_cnt = 0;
4986 dma_addr_t els_cmd_map_dma;
4987 uint8_t cmd_opcode[PUREX_CMD_COUNT];
4988 uint8_t i, index, purex_bit;
4989 struct qla_hw_data *ha = vha->hw;
4990
4991 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
4992 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4993 return QLA_SUCCESS;
4994
4995 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
4996 "Entered %s.\n", __func__);
4997
4998 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
4999 &els_cmd_map_dma, GFP_KERNEL);
5000 if (!els_cmd_map) {
5001 ql_log(ql_log_warn, vha, 0x7101,
5002 "Failed to allocate RDP els command param.\n");
5003 return QLA_MEMORY_ALLOC_FAILED;
5004 }
5005
5006 /* List of Purex ELS */
5007 if (ql2xrdpenable) {
5008 cmd_opcode[active_cnt] = ELS_RDP;
5009 active_cnt++;
5010 }
5011 if (ha->flags.scm_supported_f) {
5012 cmd_opcode[active_cnt] = ELS_FPIN;
5013 active_cnt++;
5014 }
5015 if (ha->flags.edif_enabled) {
5016 cmd_opcode[active_cnt] = ELS_AUTH_ELS;
5017 active_cnt++;
5018 }
5019
5020 for (i = 0; i < active_cnt; i++) {
5021 index = cmd_opcode[i] / 8;
5022 purex_bit = cmd_opcode[i] % 8;
5023 els_cmd_map[index] |= 1 << purex_bit;
5024 }
5025
5026 mcp->mb[0] = MBC_SET_RNID_PARAMS;
5027 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
5028 mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
5029 mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
5030 mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
5031 mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
5032 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5033 mcp->in_mb = MBX_1|MBX_0;
5034 mcp->tov = MBX_TOV_SECONDS;
5035 mcp->flags = MBX_DMA_OUT;
5036 mcp->buf_size = ELS_CMD_MAP_SIZE;
5037 rval = qla2x00_mailbox_command(vha, mcp);
5038
5039 if (rval != QLA_SUCCESS) {
5040 ql_dbg(ql_dbg_mbx, vha, 0x118d,
5041 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
5042 } else {
5043 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
5044 "Done %s.\n", __func__);
5045 }
5046
5047 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5048 els_cmd_map, els_cmd_map_dma);
5049
5050 return rval;
5051 }
5052
5053 static int
5054 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5055 {
5056 int rval;
5057 mbx_cmd_t mc;
5058 mbx_cmd_t *mcp = &mc;
5059
5060 if (!IS_FWI2_CAPABLE(vha->hw))
5061 return QLA_FUNCTION_FAILED;
5062
5063 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5064 "Entered %s.\n", __func__);
5065
5066 mcp->mb[0] = MBC_GET_RNID_PARAMS;
5067 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5068 mcp->out_mb = MBX_1|MBX_0;
5069 mcp->in_mb = MBX_1|MBX_0;
5070 mcp->tov = MBX_TOV_SECONDS;
5071 mcp->flags = 0;
5072 rval = qla2x00_mailbox_command(vha, mcp);
5073 *temp = mcp->mb[1];
5074
5075 if (rval != QLA_SUCCESS) {
5076 ql_dbg(ql_dbg_mbx, vha, 0x115a,
5077 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5078 } else {
5079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5080 "Done %s.\n", __func__);
5081 }
5082
5083 return rval;
5084 }
5085
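/*
 * qla2x00_read_sfp
 *	Read SFP transceiver data via MBC_READ_SFP into the buffer at
 *	sfp_dma. For a single-byte read (len == 1) the byte is returned
 *	directly in mailbox 1 and copied to *sfp.
 */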
5086 int
5087 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5088 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5089 {
5090 int rval;
5091 mbx_cmd_t mc;
5092 mbx_cmd_t *mcp = &mc;
5093 struct qla_hw_data *ha = vha->hw;
5094
5095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5096 "Entered %s.\n", __func__);
5097
5098 if (!IS_FWI2_CAPABLE(ha))
5099 return QLA_FUNCTION_FAILED;
5100
5101 if (len == 1)
5102 opt |= BIT_0;
5103
5104 mcp->mb[0] = MBC_READ_SFP;
5105 mcp->mb[1] = dev;
5106 mcp->mb[2] = MSW(LSD(sfp_dma));
5107 mcp->mb[3] = LSW(LSD(sfp_dma));
5108 mcp->mb[6] = MSW(MSD(sfp_dma));
5109 mcp->mb[7] = LSW(MSD(sfp_dma));
5110 mcp->mb[8] = len;
5111 mcp->mb[9] = off;
5112 mcp->mb[10] = opt;
5113 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5114 mcp->in_mb = MBX_1|MBX_0;
5115 mcp->tov = MBX_TOV_SECONDS;
5116 mcp->flags = 0;
5117 rval = qla2x00_mailbox_command(vha, mcp);
5118
5119 if (opt & BIT_0)
5120 *sfp = mcp->mb[1];
5121
5122 if (rval != QLA_SUCCESS) {
5123 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5124 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5125 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5126 /* sfp is not there */
5127 rval = QLA_INTERFACE_ERROR;
5128 }
5129 } else {
5130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5131 "Done %s.\n", __func__);
5132 }
5133
5134 return rval;
5135 }
5136
5137 int
5138 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5139 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5140 {
5141 int rval;
5142 mbx_cmd_t mc;
5143 mbx_cmd_t *mcp = &mc;
5144 struct qla_hw_data *ha = vha->hw;
5145
5146 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5147 "Entered %s.\n", __func__);
5148
5149 if (!IS_FWI2_CAPABLE(ha))
5150 return QLA_FUNCTION_FAILED;
5151
5152 if (len == 1)
5153 opt |= BIT_0;
5154
5155 if (opt & BIT_0)
5156 len = *sfp;
5157
5158 mcp->mb[0] = MBC_WRITE_SFP;
5159 mcp->mb[1] = dev;
5160 mcp->mb[2] = MSW(LSD(sfp_dma));
5161 mcp->mb[3] = LSW(LSD(sfp_dma));
5162 mcp->mb[6] = MSW(MSD(sfp_dma));
5163 mcp->mb[7] = LSW(MSD(sfp_dma));
5164 mcp->mb[8] = len;
5165 mcp->mb[9] = off;
5166 mcp->mb[10] = opt;
5167 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5168 mcp->in_mb = MBX_1|MBX_0;
5169 mcp->tov = MBX_TOV_SECONDS;
5170 mcp->flags = 0;
5171 rval = qla2x00_mailbox_command(vha, mcp);
5172
5173 if (rval != QLA_SUCCESS) {
5174 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5175 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5176 } else {
5177 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5178 "Done %s.\n", __func__);
5179 }
5180
5181 return rval;
5182 }
5183
5184 int
5185 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5186 uint16_t size_in_bytes, uint16_t *actual_size)
5187 {
5188 int rval;
5189 mbx_cmd_t mc;
5190 mbx_cmd_t *mcp = &mc;
5191
5192 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5193 "Entered %s.\n", __func__);
5194
5195 if (!IS_CNA_CAPABLE(vha->hw))
5196 return QLA_FUNCTION_FAILED;
5197
5198 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5199 mcp->mb[2] = MSW(stats_dma);
5200 mcp->mb[3] = LSW(stats_dma);
5201 mcp->mb[6] = MSW(MSD(stats_dma));
5202 mcp->mb[7] = LSW(MSD(stats_dma));
5203 mcp->mb[8] = size_in_bytes >> 2;
5204 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5205 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5206 mcp->tov = MBX_TOV_SECONDS;
5207 mcp->flags = 0;
5208 rval = qla2x00_mailbox_command(vha, mcp);
5209
5210 if (rval != QLA_SUCCESS) {
5211 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5212 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5213 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5214 } else {
5215 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5216 "Done %s.\n", __func__);
5217
5218
5219 *actual_size = mcp->mb[2] << 2;
5220 }
5221
5222 return rval;
5223 }
5224
5225 int
5226 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5227 uint16_t size)
5228 {
5229 int rval;
5230 mbx_cmd_t mc;
5231 mbx_cmd_t *mcp = &mc;
5232
5233 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5234 "Entered %s.\n", __func__);
5235
5236 if (!IS_CNA_CAPABLE(vha->hw))
5237 return QLA_FUNCTION_FAILED;
5238
5239 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5240 mcp->mb[1] = 0;
5241 mcp->mb[2] = MSW(tlv_dma);
5242 mcp->mb[3] = LSW(tlv_dma);
5243 mcp->mb[6] = MSW(MSD(tlv_dma));
5244 mcp->mb[7] = LSW(MSD(tlv_dma));
5245 mcp->mb[8] = size;
5246 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5247 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5248 mcp->tov = MBX_TOV_SECONDS;
5249 mcp->flags = 0;
5250 rval = qla2x00_mailbox_command(vha, mcp);
5251
5252 if (rval != QLA_SUCCESS) {
5253 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5254 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5255 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5256 } else {
5257 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5258 "Done %s.\n", __func__);
5259 }
5260
5261 return rval;
5262 }
5263
5264 int
5265 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5266 {
5267 int rval;
5268 mbx_cmd_t mc;
5269 mbx_cmd_t *mcp = &mc;
5270
5271 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5272 "Entered %s.\n", __func__);
5273
5274 if (!IS_FWI2_CAPABLE(vha->hw))
5275 return QLA_FUNCTION_FAILED;
5276
5277 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5278 mcp->mb[1] = LSW(risc_addr);
5279 mcp->mb[8] = MSW(risc_addr);
5280 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5281 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5282 mcp->tov = MBX_TOV_SECONDS;
5283 mcp->flags = 0;
5284 rval = qla2x00_mailbox_command(vha, mcp);
5285 if (rval != QLA_SUCCESS) {
5286 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5287 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5288 } else {
5289 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5290 "Done %s.\n", __func__);
5291 *data = mcp->mb[3] << 16 | mcp->mb[2];
5292 }
5293
5294 return rval;
5295 }
5296
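/*
 * qla2x00_loopback_test
 *	Issue the diagnostic loopback mailbox command using the send and
 *	receive DMA buffers described by mreq. The raw mailbox results are
 *	copied back to mresp for the caller.
 */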
5297 int
5298 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5299 uint16_t *mresp)
5300 {
5301 int rval;
5302 mbx_cmd_t mc;
5303 mbx_cmd_t *mcp = &mc;
5304
5305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5306 "Entered %s.\n", __func__);
5307
5308 memset(mcp->mb, 0, sizeof(mcp->mb));
5309 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5310 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
5311
5312 /* transfer count */
5313 mcp->mb[10] = LSW(mreq->transfer_size);
5314 mcp->mb[11] = MSW(mreq->transfer_size);
5315
5316 /* send data address */
5317 mcp->mb[14] = LSW(mreq->send_dma);
5318 mcp->mb[15] = MSW(mreq->send_dma);
5319 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5320 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5321
5322 /* receive data address */
5323 mcp->mb[16] = LSW(mreq->rcv_dma);
5324 mcp->mb[17] = MSW(mreq->rcv_dma);
5325 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5326 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5327
5328 /* Iteration count */
5329 mcp->mb[18] = LSW(mreq->iteration_count);
5330 mcp->mb[19] = MSW(mreq->iteration_count);
5331
5332 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5333 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5334 if (IS_CNA_CAPABLE(vha->hw))
5335 mcp->out_mb |= MBX_2;
5336 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5337
5338 mcp->buf_size = mreq->transfer_size;
5339 mcp->tov = MBX_TOV_SECONDS;
5340 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5341
5342 rval = qla2x00_mailbox_command(vha, mcp);
5343
5344 if (rval != QLA_SUCCESS) {
5345 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5346 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5347 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5348 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5349 } else {
5350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5351 "Done %s.\n", __func__);
5352 }
5353
5354 /* Copy mailbox information */
5355 memcpy(mresp, mcp->mb, 64);
5356 return rval;
5357 }
5358
5359 int
5360 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5361 uint16_t *mresp)
5362 {
5363 int rval;
5364 mbx_cmd_t mc;
5365 mbx_cmd_t *mcp = &mc;
5366 struct qla_hw_data *ha = vha->hw;
5367
5368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5369 "Entered %s.\n", __func__);
5370
5371 memset(mcp->mb, 0, sizeof(mcp->mb));
5372 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5373 /* BIT_6 specifies 64bit address */
5374 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5375 if (IS_CNA_CAPABLE(ha)) {
5376 mcp->mb[2] = vha->fcoe_fcf_idx;
5377 }
5378 mcp->mb[16] = LSW(mreq->rcv_dma);
5379 mcp->mb[17] = MSW(mreq->rcv_dma);
5380 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5381 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5382
5383 mcp->mb[10] = LSW(mreq->transfer_size);
5384
5385 mcp->mb[14] = LSW(mreq->send_dma);
5386 mcp->mb[15] = MSW(mreq->send_dma);
5387 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5388 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5389
5390 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5391 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5392 if (IS_CNA_CAPABLE(ha))
5393 mcp->out_mb |= MBX_2;
5394
5395 mcp->in_mb = MBX_0;
5396 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5397 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5398 mcp->in_mb |= MBX_1;
5399 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5400 IS_QLA28XX(ha))
5401 mcp->in_mb |= MBX_3;
5402
5403 mcp->tov = MBX_TOV_SECONDS;
5404 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5405 mcp->buf_size = mreq->transfer_size;
5406
5407 rval = qla2x00_mailbox_command(vha, mcp);
5408
5409 if (rval != QLA_SUCCESS) {
5410 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5411 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5412 rval, mcp->mb[0], mcp->mb[1]);
5413 } else {
5414 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5415 "Done %s.\n", __func__);
5416 }
5417
5418 /* Copy mailbox information */
5419 memcpy(mresp, mcp->mb, 64);
5420 return rval;
5421 }
5422
5423 int
5424 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5425 {
5426 int rval;
5427 mbx_cmd_t mc;
5428 mbx_cmd_t *mcp = &mc;
5429
5430 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5431 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5432
5433 mcp->mb[0] = MBC_ISP84XX_RESET;
5434 mcp->mb[1] = enable_diagnostic;
5435 mcp->out_mb = MBX_1|MBX_0;
5436 mcp->in_mb = MBX_1|MBX_0;
5437 mcp->tov = MBX_TOV_SECONDS;
5438 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5439 rval = qla2x00_mailbox_command(vha, mcp);
5440
5441 if (rval != QLA_SUCCESS)
5442 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5443 else
5444 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5445 "Done %s.\n", __func__);
5446
5447 return rval;
5448 }
5449
5450 int
5451 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5452 {
5453 int rval;
5454 mbx_cmd_t mc;
5455 mbx_cmd_t *mcp = &mc;
5456
5457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5458 "Entered %s.\n", __func__);
5459
5460 if (!IS_FWI2_CAPABLE(vha->hw))
5461 return QLA_FUNCTION_FAILED;
5462
5463 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5464 mcp->mb[1] = LSW(risc_addr);
5465 mcp->mb[2] = LSW(data);
5466 mcp->mb[3] = MSW(data);
5467 mcp->mb[8] = MSW(risc_addr);
5468 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5469 mcp->in_mb = MBX_1|MBX_0;
5470 mcp->tov = MBX_TOV_SECONDS;
5471 mcp->flags = 0;
5472 rval = qla2x00_mailbox_command(vha, mcp);
5473 if (rval != QLA_SUCCESS) {
5474 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5475 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5476 rval, mcp->mb[0], mcp->mb[1]);
5477 } else {
5478 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5479 "Done %s.\n", __func__);
5480 }
5481
5482 return rval;
5483 }
5484
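/*
 * qla81xx_write_mpi_register
 *	Write MPI registers by programming the mailbox registers directly
 *	and polling the host status register for the mailbox completion
 *	interrupt, bypassing the normal mailbox command path.
 */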
5485 int
5486 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5487 {
5488 int rval;
5489 uint32_t stat, timer;
5490 uint16_t mb0 = 0;
5491 struct qla_hw_data *ha = vha->hw;
5492 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5493
5494 rval = QLA_SUCCESS;
5495
5496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5497 "Entered %s.\n", __func__);
5498
5499 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5500
5501 /* Write the MBC data to the registers */
5502 wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5503 wrt_reg_word(&reg->mailbox1, mb[0]);
5504 wrt_reg_word(&reg->mailbox2, mb[1]);
5505 wrt_reg_word(&reg->mailbox3, mb[2]);
5506 wrt_reg_word(&reg->mailbox4, mb[3]);
5507
5508 wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
5509
5510 /* Poll for MBC interrupt */
5511 for (timer = 6000000; timer; timer--) {
5512 /* Check for pending interrupts. */
5513 stat = rd_reg_dword(&reg->host_status);
5514 if (stat & HSRX_RISC_INT) {
5515 stat &= 0xff;
5516
5517 if (stat == 0x1 || stat == 0x2 ||
5518 stat == 0x10 || stat == 0x11) {
5519 set_bit(MBX_INTERRUPT,
5520 &ha->mbx_cmd_flags);
5521 mb0 = rd_reg_word(&reg->mailbox0);
5522 wrt_reg_dword(&reg->hccr,
5523 HCCRX_CLR_RISC_INT);
5524 rd_reg_dword(&reg->hccr);
5525 break;
5526 }
5527 }
5528 udelay(5);
5529 }
5530
5531 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5532 rval = mb0 & MBS_MASK;
5533 else
5534 rval = QLA_FUNCTION_FAILED;
5535
5536 if (rval != QLA_SUCCESS) {
5537 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5538 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5539 } else {
5540 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5541 "Done %s.\n", __func__);
5542 }
5543
5544 return rval;
5545 }
5546
5547 /* Set the specified data rate */
5548 int
5549 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5550 {
5551 int rval;
5552 mbx_cmd_t mc;
5553 mbx_cmd_t *mcp = &mc;
5554 struct qla_hw_data *ha = vha->hw;
5555 uint16_t val;
5556
5557 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5558 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5559 mode);
5560
5561 if (!IS_FWI2_CAPABLE(ha))
5562 return QLA_FUNCTION_FAILED;
5563
5564 memset(mcp, 0, sizeof(*mcp));
5565 switch (ha->set_data_rate) {
5566 case PORT_SPEED_AUTO:
5567 case PORT_SPEED_4GB:
5568 case PORT_SPEED_8GB:
5569 case PORT_SPEED_16GB:
5570 case PORT_SPEED_32GB:
5571 val = ha->set_data_rate;
5572 break;
5573 default:
5574 ql_log(ql_log_warn, vha, 0x1199,
5575 "Unrecognized speed setting:%d. Setting Autoneg\n",
5576 ha->set_data_rate);
5577 val = ha->set_data_rate = PORT_SPEED_AUTO;
5578 break;
5579 }
5580
5581 mcp->mb[0] = MBC_DATA_RATE;
5582 mcp->mb[1] = mode;
5583 mcp->mb[2] = val;
5584
5585 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5586 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5587 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5588 mcp->in_mb |= MBX_4|MBX_3;
5589 mcp->tov = MBX_TOV_SECONDS;
5590 mcp->flags = 0;
5591 rval = qla2x00_mailbox_command(vha, mcp);
5592 if (rval != QLA_SUCCESS) {
5593 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5594 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5595 } else {
5596 if (mcp->mb[1] != 0x7)
5597 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5598 "Speed set:0x%x\n", mcp->mb[1]);
5599
5600 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5601 "Done %s.\n", __func__);
5602 }
5603
5604 return rval;
5605 }
5606
5607 int
5608 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5609 {
5610 int rval;
5611 mbx_cmd_t mc;
5612 mbx_cmd_t *mcp = &mc;
5613 struct qla_hw_data *ha = vha->hw;
5614
5615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5616 "Entered %s.\n", __func__);
5617
5618 if (!IS_FWI2_CAPABLE(ha))
5619 return QLA_FUNCTION_FAILED;
5620
5621 mcp->mb[0] = MBC_DATA_RATE;
5622 mcp->mb[1] = QLA_GET_DATA_RATE;
5623 mcp->out_mb = MBX_1|MBX_0;
5624 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5625 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5626 mcp->in_mb |= MBX_3;
5627 mcp->tov = MBX_TOV_SECONDS;
5628 mcp->flags = 0;
5629 rval = qla2x00_mailbox_command(vha, mcp);
5630 if (rval != QLA_SUCCESS) {
5631 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5632 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5633 } else {
5634 if (mcp->mb[1] != 0x7)
5635 ha->link_data_rate = mcp->mb[1];
5636
5637 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5638 if (mcp->mb[4] & BIT_0)
5639 ql_log(ql_log_info, vha, 0x11a2,
5640 "FEC=enabled (data rate).\n");
5641 }
5642
5643 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5644 "Done %s.\n", __func__);
5645 if (mcp->mb[1] != 0x7)
5646 ha->link_data_rate = mcp->mb[1];
5647 }
5648
5649 return rval;
5650 }
5651
5652 int
5653 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5654 {
5655 int rval;
5656 mbx_cmd_t mc;
5657 mbx_cmd_t *mcp = &mc;
5658 struct qla_hw_data *ha = vha->hw;
5659
5660 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5661 "Entered %s.\n", __func__);
5662
5663 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5664 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5665 return QLA_FUNCTION_FAILED;
5666 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5667 mcp->out_mb = MBX_0;
5668 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5669 mcp->tov = MBX_TOV_SECONDS;
5670 mcp->flags = 0;
5671
5672 rval = qla2x00_mailbox_command(vha, mcp);
5673
5674 if (rval != QLA_SUCCESS) {
5675 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5676 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5677 } else {
5678 /* Copy all bits to preserve original value */
5679 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5680
5681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5682 "Done %s.\n", __func__);
5683 }
5684 return rval;
5685 }
5686
5687 int
5688 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5689 {
5690 int rval;
5691 mbx_cmd_t mc;
5692 mbx_cmd_t *mcp = &mc;
5693
5694 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5695 "Entered %s.\n", __func__);
5696
5697 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5698 /* Copy all bits to preserve original setting */
5699 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5700 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5701 mcp->in_mb = MBX_0;
5702 mcp->tov = MBX_TOV_SECONDS;
5703 mcp->flags = 0;
5704 rval = qla2x00_mailbox_command(vha, mcp);
5705
5706 if (rval != QLA_SUCCESS) {
5707 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5708 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5709 } else
5710 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5711 "Done %s.\n", __func__);
5712
5713 return rval;
5714 }
5715
5716
5717 int
5718 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5719 uint16_t *mb)
5720 {
5721 int rval;
5722 mbx_cmd_t mc;
5723 mbx_cmd_t *mcp = &mc;
5724 struct qla_hw_data *ha = vha->hw;
5725
5726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5727 "Entered %s.\n", __func__);
5728
5729 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5730 return QLA_FUNCTION_FAILED;
5731
5732 mcp->mb[0] = MBC_PORT_PARAMS;
5733 mcp->mb[1] = loop_id;
5734 if (ha->flags.fcp_prio_enabled)
5735 mcp->mb[2] = BIT_1;
5736 else
5737 mcp->mb[2] = BIT_2;
5738 mcp->mb[4] = priority & 0xf;
5739 mcp->mb[9] = vha->vp_idx;
5740 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5741 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5742 mcp->tov = MBX_TOV_SECONDS;
5743 mcp->flags = 0;
5744 rval = qla2x00_mailbox_command(vha, mcp);
5745 if (mb != NULL) {
5746 mb[0] = mcp->mb[0];
5747 mb[1] = mcp->mb[1];
5748 mb[3] = mcp->mb[3];
5749 mb[4] = mcp->mb[4];
5750 }
5751
5752 if (rval != QLA_SUCCESS) {
5753 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5754 } else {
5755 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5756 "Done %s.\n", __func__);
5757 }
5758
5759 return rval;
5760 }
5761
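/*
 * qla2x00_get_thermal_temp
 *	Read the adapter temperature. Depending on the adapter type the
 *	value comes from an SFP register, the P3P temperature register,
 *	or the ASIC thermal sensor.
 */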
5762 int
5763 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5764 {
5765 int rval = QLA_FUNCTION_FAILED;
5766 struct qla_hw_data *ha = vha->hw;
5767 uint8_t byte;
5768
5769 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5770 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5771 "Thermal not supported by this card.\n");
5772 return rval;
5773 }
5774
5775 if (IS_QLA25XX(ha)) {
5776 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5777 ha->pdev->subsystem_device == 0x0175) {
5778 rval = qla2x00_read_sfp(vha, 0, &byte,
5779 0x98, 0x1, 1, BIT_13|BIT_0);
5780 *temp = byte;
5781 return rval;
5782 }
5783 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5784 ha->pdev->subsystem_device == 0x338e) {
5785 rval = qla2x00_read_sfp(vha, 0, &byte,
5786 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5787 *temp = byte;
5788 return rval;
5789 }
5790 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5791 "Thermal not supported by this card.\n");
5792 return rval;
5793 }
5794
5795 if (IS_QLA82XX(ha)) {
5796 *temp = qla82xx_read_temperature(vha);
5797 rval = QLA_SUCCESS;
5798 return rval;
5799 } else if (IS_QLA8044(ha)) {
5800 *temp = qla8044_read_temperature(vha);
5801 rval = QLA_SUCCESS;
5802 return rval;
5803 }
5804
5805 rval = qla2x00_read_asic_temperature(vha, temp);
5806 return rval;
5807 }
5808
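/*
 * qla82xx_mbx_intr_enable
 *	Ask the firmware to enable interrupt delivery via
 *	MBC_TOGGLE_INTERRUPT (mb[1] = 1). See qla82xx_mbx_intr_disable()
 *	for the corresponding disable request.
 */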
5809 int
5810 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5811 {
5812 int rval;
5813 struct qla_hw_data *ha = vha->hw;
5814 mbx_cmd_t mc;
5815 mbx_cmd_t *mcp = &mc;
5816
5817 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5818 "Entered %s.\n", __func__);
5819
5820 if (!IS_FWI2_CAPABLE(ha))
5821 return QLA_FUNCTION_FAILED;
5822
5823 memset(mcp, 0, sizeof(mbx_cmd_t));
5824 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5825 mcp->mb[1] = 1;
5826
5827 mcp->out_mb = MBX_1|MBX_0;
5828 mcp->in_mb = MBX_0;
5829 mcp->tov = MBX_TOV_SECONDS;
5830 mcp->flags = 0;
5831
5832 rval = qla2x00_mailbox_command(vha, mcp);
5833 if (rval != QLA_SUCCESS) {
5834 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5835 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5836 } else {
5837 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5838 "Done %s.\n", __func__);
5839 }
5840
5841 return rval;
5842 }
5843
5844 int
5845 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5846 {
5847 int rval;
5848 struct qla_hw_data *ha = vha->hw;
5849 mbx_cmd_t mc;
5850 mbx_cmd_t *mcp = &mc;
5851
5852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5853 "Entered %s.\n", __func__);
5854
5855 if (!IS_P3P_TYPE(ha))
5856 return QLA_FUNCTION_FAILED;
5857
5858 memset(mcp, 0, sizeof(mbx_cmd_t));
5859 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5860 mcp->mb[1] = 0;
5861
5862 mcp->out_mb = MBX_1|MBX_0;
5863 mcp->in_mb = MBX_0;
5864 mcp->tov = MBX_TOV_SECONDS;
5865 mcp->flags = 0;
5866
5867 rval = qla2x00_mailbox_command(vha, mcp);
5868 if (rval != QLA_SUCCESS) {
5869 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5870 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5871 } else {
5872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5873 "Done %s.\n", __func__);
5874 }
5875
5876 return rval;
5877 }
5878
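/*
 * qla82xx_md_get_template_size
 *	Query the firmware for the minidump template size
 *	(RQST_TMPLT_SIZE) and store it in ha->md_template_size.
 */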
5879 int
5880 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5881 {
5882 struct qla_hw_data *ha = vha->hw;
5883 mbx_cmd_t mc;
5884 mbx_cmd_t *mcp = &mc;
5885 int rval = QLA_FUNCTION_FAILED;
5886
5887 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5888 "Entered %s.\n", __func__);
5889
5890 	memset(mcp->mb, 0, sizeof(mcp->mb));
5891 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5892 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5893 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5894 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5895
5896 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5897 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5898 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5899
5900 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5901 mcp->tov = MBX_TOV_SECONDS;
5902 rval = qla2x00_mailbox_command(vha, mcp);
5903
5904 /* Always copy back return mailbox values. */
5905 if (rval != QLA_SUCCESS) {
5906 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5907 "mailbox command FAILED=0x%x, subcode=%x.\n",
5908 (mcp->mb[1] << 16) | mcp->mb[0],
5909 (mcp->mb[3] << 16) | mcp->mb[2]);
5910 } else {
5911 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5912 "Done %s.\n", __func__);
5913 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5914 if (!ha->md_template_size) {
5915 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5916 "Null template size obtained.\n");
5917 rval = QLA_FUNCTION_FAILED;
5918 }
5919 }
5920 return rval;
5921 }
5922
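/*
 * qla82xx_md_get_template
 *	Allocate a DMA buffer of ha->md_template_size bytes and fetch the
 *	minidump template from the firmware in a single request.
 */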
5923 int
5924 qla82xx_md_get_template(scsi_qla_host_t *vha)
5925 {
5926 struct qla_hw_data *ha = vha->hw;
5927 mbx_cmd_t mc;
5928 mbx_cmd_t *mcp = &mc;
5929 int rval = QLA_FUNCTION_FAILED;
5930
5931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5932 "Entered %s.\n", __func__);
5933
5934 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5935 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5936 if (!ha->md_tmplt_hdr) {
5937 ql_log(ql_log_warn, vha, 0x1124,
5938 "Unable to allocate memory for Minidump template.\n");
5939 return rval;
5940 }
5941
5942 	memset(mcp->mb, 0, sizeof(mcp->mb));
5943 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5944 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5945 mcp->mb[2] = LSW(RQST_TMPLT);
5946 mcp->mb[3] = MSW(RQST_TMPLT);
5947 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5948 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5949 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5950 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5951 mcp->mb[8] = LSW(ha->md_template_size);
5952 mcp->mb[9] = MSW(ha->md_template_size);
5953
5954 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5955 mcp->tov = MBX_TOV_SECONDS;
5956 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5957 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5958 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5959 rval = qla2x00_mailbox_command(vha, mcp);
5960
5961 if (rval != QLA_SUCCESS) {
5962 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5963 "mailbox command FAILED=0x%x, subcode=%x.\n",
5964 ((mcp->mb[1] << 16) | mcp->mb[0]),
5965 ((mcp->mb[3] << 16) | mcp->mb[2]));
5966 } else
5967 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5968 "Done %s.\n", __func__);
5969 return rval;
5970 }
5971
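/*
 * qla8044_md_get_template
 *	ISP8044 variant: the minidump template is fetched in
 *	MINIDUMP_SIZE_36K chunks until the whole template has been read.
 */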
5972 int
5973 qla8044_md_get_template(scsi_qla_host_t *vha)
5974 {
5975 struct qla_hw_data *ha = vha->hw;
5976 mbx_cmd_t mc;
5977 mbx_cmd_t *mcp = &mc;
5978 int rval = QLA_FUNCTION_FAILED;
5979 int offset = 0, size = MINIDUMP_SIZE_36K;
5980
5981 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5982 "Entered %s.\n", __func__);
5983
5984 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5985 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5986 if (!ha->md_tmplt_hdr) {
5987 ql_log(ql_log_warn, vha, 0xb11b,
5988 "Unable to allocate memory for Minidump template.\n");
5989 return rval;
5990 }
5991
5992 	memset(mcp->mb, 0, sizeof(mcp->mb));
5993 while (offset < ha->md_template_size) {
5994 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5995 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5996 mcp->mb[2] = LSW(RQST_TMPLT);
5997 mcp->mb[3] = MSW(RQST_TMPLT);
5998 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5999 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
6000 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
6001 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
6002 mcp->mb[8] = LSW(size);
6003 mcp->mb[9] = MSW(size);
6004 		mcp->mb[10] = LSW(offset);
6005 		mcp->mb[11] = MSW(offset);
6006 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
6007 mcp->tov = MBX_TOV_SECONDS;
6008 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
6009 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6010 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6011 rval = qla2x00_mailbox_command(vha, mcp);
6012
6013 if (rval != QLA_SUCCESS) {
6014 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
6015 "mailbox command FAILED=0x%x, subcode=%x.\n",
6016 ((mcp->mb[1] << 16) | mcp->mb[0]),
6017 ((mcp->mb[3] << 16) | mcp->mb[2]));
6018 return rval;
6019 } else
6020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
6021 "Done %s.\n", __func__);
6022 offset = offset + size;
6023 }
6024 return rval;
6025 }
6026
6027 int
6028 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6029 {
6030 int rval;
6031 struct qla_hw_data *ha = vha->hw;
6032 mbx_cmd_t mc;
6033 mbx_cmd_t *mcp = &mc;
6034
6035 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6036 return QLA_FUNCTION_FAILED;
6037
6038 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6039 "Entered %s.\n", __func__);
6040
6041 memset(mcp, 0, sizeof(mbx_cmd_t));
6042 mcp->mb[0] = MBC_SET_LED_CONFIG;
6043 mcp->mb[1] = led_cfg[0];
6044 mcp->mb[2] = led_cfg[1];
6045 if (IS_QLA8031(ha)) {
6046 mcp->mb[3] = led_cfg[2];
6047 mcp->mb[4] = led_cfg[3];
6048 mcp->mb[5] = led_cfg[4];
6049 mcp->mb[6] = led_cfg[5];
6050 }
6051
6052 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6053 if (IS_QLA8031(ha))
6054 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6055 mcp->in_mb = MBX_0;
6056 mcp->tov = MBX_TOV_SECONDS;
6057 mcp->flags = 0;
6058
6059 rval = qla2x00_mailbox_command(vha, mcp);
6060 if (rval != QLA_SUCCESS) {
6061 ql_dbg(ql_dbg_mbx, vha, 0x1134,
6062 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6063 } else {
6064 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6065 "Done %s.\n", __func__);
6066 }
6067
6068 return rval;
6069 }
6070
6071 int
6072 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6073 {
6074 int rval;
6075 struct qla_hw_data *ha = vha->hw;
6076 mbx_cmd_t mc;
6077 mbx_cmd_t *mcp = &mc;
6078
6079 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6080 return QLA_FUNCTION_FAILED;
6081
6082 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6083 "Entered %s.\n", __func__);
6084
6085 memset(mcp, 0, sizeof(mbx_cmd_t));
6086 mcp->mb[0] = MBC_GET_LED_CONFIG;
6087
6088 mcp->out_mb = MBX_0;
6089 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6090 if (IS_QLA8031(ha))
6091 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6092 mcp->tov = MBX_TOV_SECONDS;
6093 mcp->flags = 0;
6094
6095 rval = qla2x00_mailbox_command(vha, mcp);
6096 if (rval != QLA_SUCCESS) {
6097 ql_dbg(ql_dbg_mbx, vha, 0x1137,
6098 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6099 } else {
6100 led_cfg[0] = mcp->mb[1];
6101 led_cfg[1] = mcp->mb[2];
6102 if (IS_QLA8031(ha)) {
6103 led_cfg[2] = mcp->mb[3];
6104 led_cfg[3] = mcp->mb[4];
6105 led_cfg[4] = mcp->mb[5];
6106 led_cfg[5] = mcp->mb[6];
6107 }
6108 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6109 "Done %s.\n", __func__);
6110 }
6111
6112 return rval;
6113 }
6114
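/*
 * qla82xx_mbx_beacon_ctl
 *	Turn the beacon LED on or off on P3P adapters by issuing
 *	MBC_SET_LED_CONFIG with the firmware beacon code in mb[7]
 *	(0xE = enable, 0xD = disable).
 */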
6115 int
6116 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6117 {
6118 int rval;
6119 struct qla_hw_data *ha = vha->hw;
6120 mbx_cmd_t mc;
6121 mbx_cmd_t *mcp = &mc;
6122
6123 if (!IS_P3P_TYPE(ha))
6124 return QLA_FUNCTION_FAILED;
6125
6126 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6127 "Entered %s.\n", __func__);
6128
6129 memset(mcp, 0, sizeof(mbx_cmd_t));
6130 mcp->mb[0] = MBC_SET_LED_CONFIG;
6131 if (enable)
6132 mcp->mb[7] = 0xE;
6133 else
6134 mcp->mb[7] = 0xD;
6135
6136 mcp->out_mb = MBX_7|MBX_0;
6137 mcp->in_mb = MBX_0;
6138 mcp->tov = MBX_TOV_SECONDS;
6139 mcp->flags = 0;
6140
6141 rval = qla2x00_mailbox_command(vha, mcp);
6142 if (rval != QLA_SUCCESS) {
6143 ql_dbg(ql_dbg_mbx, vha, 0x1128,
6144 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6145 } else {
6146 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6147 "Done %s.\n", __func__);
6148 }
6149
6150 return rval;
6151 }
6152
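/*
 * qla83xx_wr_reg
 *	Write a 32-bit value to a remote register through the
 *	MBC_WRITE_REMOTE_REG mailbox command.
 */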
6153 int
6154 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6155 {
6156 int rval;
6157 struct qla_hw_data *ha = vha->hw;
6158 mbx_cmd_t mc;
6159 mbx_cmd_t *mcp = &mc;
6160
6161 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6162 return QLA_FUNCTION_FAILED;
6163
6164 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6165 "Entered %s.\n", __func__);
6166
6167 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6168 mcp->mb[1] = LSW(reg);
6169 mcp->mb[2] = MSW(reg);
6170 mcp->mb[3] = LSW(data);
6171 mcp->mb[4] = MSW(data);
6172 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6173
6174 mcp->in_mb = MBX_1|MBX_0;
6175 mcp->tov = MBX_TOV_SECONDS;
6176 mcp->flags = 0;
6177 rval = qla2x00_mailbox_command(vha, mcp);
6178
6179 if (rval != QLA_SUCCESS) {
6180 ql_dbg(ql_dbg_mbx, vha, 0x1131,
6181 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6182 } else {
6183 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6184 "Done %s.\n", __func__);
6185 }
6186
6187 return rval;
6188 }
6189
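/*
 * qla2x00_port_logout
 *	Perform an implicit LOGO of the given fcport via MBC_PORT_LOGOUT.
 *	Not supported on ISP2100/ISP2200.
 */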
6190 int
6191 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6192 {
6193 int rval;
6194 struct qla_hw_data *ha = vha->hw;
6195 mbx_cmd_t mc;
6196 mbx_cmd_t *mcp = &mc;
6197
6198 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6200 "Implicit LOGO Unsupported.\n");
6201 return QLA_FUNCTION_FAILED;
6202 }
6203
6204
6205 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6206 "Entering %s.\n", __func__);
6207
6208 /* Perform Implicit LOGO. */
6209 mcp->mb[0] = MBC_PORT_LOGOUT;
6210 mcp->mb[1] = fcport->loop_id;
6211 mcp->mb[10] = BIT_15;
6212 mcp->out_mb = MBX_10|MBX_1|MBX_0;
6213 mcp->in_mb = MBX_0;
6214 mcp->tov = MBX_TOV_SECONDS;
6215 mcp->flags = 0;
6216 rval = qla2x00_mailbox_command(vha, mcp);
6217 if (rval != QLA_SUCCESS)
6218 ql_dbg(ql_dbg_mbx, vha, 0x113d,
6219 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6220 else
6221 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6222 "Done %s.\n", __func__);
6223
6224 return rval;
6225 }
6226
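/*
 * qla83xx_rd_reg
 *	Read a 32-bit remote register via MBC_READ_REMOTE_REG. CAMRAM
 *	reads that return 0xbad0bad0 during a soft-reset are retried for
 *	up to two seconds.
 */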
6227 int
6228 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6229 {
6230 int rval;
6231 mbx_cmd_t mc;
6232 mbx_cmd_t *mcp = &mc;
6233 struct qla_hw_data *ha = vha->hw;
6234 unsigned long retry_max_time = jiffies + (2 * HZ);
6235
6236 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6237 return QLA_FUNCTION_FAILED;
6238
6239 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6240
6241 retry_rd_reg:
6242 mcp->mb[0] = MBC_READ_REMOTE_REG;
6243 mcp->mb[1] = LSW(reg);
6244 mcp->mb[2] = MSW(reg);
6245 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6246 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6247 mcp->tov = MBX_TOV_SECONDS;
6248 mcp->flags = 0;
6249 rval = qla2x00_mailbox_command(vha, mcp);
6250
6251 if (rval != QLA_SUCCESS) {
6252 ql_dbg(ql_dbg_mbx, vha, 0x114c,
6253 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6254 rval, mcp->mb[0], mcp->mb[1]);
6255 } else {
6256 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
6257 if (*data == QLA8XXX_BAD_VALUE) {
6258 /*
6259 * During soft-reset CAMRAM register reads might
6260 * return 0xbad0bad0. So retry for MAX of 2 sec
6261 * while reading camram registers.
6262 */
6263 if (time_after(jiffies, retry_max_time)) {
6264 ql_dbg(ql_dbg_mbx, vha, 0x1141,
6265 "Failure to read CAMRAM register. "
6266 "data=0x%x.\n", *data);
6267 return QLA_FUNCTION_FAILED;
6268 }
6269 msleep(100);
6270 goto retry_rd_reg;
6271 }
6272 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
6273 }
6274
6275 return rval;
6276 }
6277
6278 int
6279 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6280 {
6281 int rval;
6282 mbx_cmd_t mc;
6283 mbx_cmd_t *mcp = &mc;
6284 struct qla_hw_data *ha = vha->hw;
6285
6286 if (!IS_QLA83XX(ha))
6287 return QLA_FUNCTION_FAILED;
6288
6289 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6290
6291 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6292 mcp->out_mb = MBX_0;
6293 mcp->in_mb = MBX_1|MBX_0;
6294 mcp->tov = MBX_TOV_SECONDS;
6295 mcp->flags = 0;
6296 rval = qla2x00_mailbox_command(vha, mcp);
6297
6298 if (rval != QLA_SUCCESS) {
6299 ql_dbg(ql_dbg_mbx, vha, 0x1144,
6300 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6301 rval, mcp->mb[0], mcp->mb[1]);
6302 qla2xxx_dump_fw(vha);
6303 } else {
6304 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
6305 }
6306
6307 return rval;
6308 }
6309
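/*
 * qla83xx_access_control
 *	Issue MBC_SET_ACCESS_CONTROL. The subcode in the low byte of
 *	@options selects the operation; an optional flash address range is
 *	passed in mb[2]-mb[5] and a sector size may be returned.
 */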
6310 int
6311 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6312 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6313 {
6314 int rval;
6315 mbx_cmd_t mc;
6316 mbx_cmd_t *mcp = &mc;
6317 uint8_t subcode = (uint8_t)options;
6318 struct qla_hw_data *ha = vha->hw;
6319
6320 if (!IS_QLA8031(ha))
6321 return QLA_FUNCTION_FAILED;
6322
6323 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6324
6325 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6326 mcp->mb[1] = options;
6327 mcp->out_mb = MBX_1|MBX_0;
6328 if (subcode & BIT_2) {
6329 mcp->mb[2] = LSW(start_addr);
6330 mcp->mb[3] = MSW(start_addr);
6331 mcp->mb[4] = LSW(end_addr);
6332 mcp->mb[5] = MSW(end_addr);
6333 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6334 }
6335 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6336 if (!(subcode & (BIT_2 | BIT_5)))
6337 mcp->in_mb |= MBX_4|MBX_3;
6338 mcp->tov = MBX_TOV_SECONDS;
6339 mcp->flags = 0;
6340 rval = qla2x00_mailbox_command(vha, mcp);
6341
6342 if (rval != QLA_SUCCESS) {
6343 ql_dbg(ql_dbg_mbx, vha, 0x1147,
6344 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6345 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6346 mcp->mb[4]);
6347 qla2xxx_dump_fw(vha);
6348 } else {
6349 if (subcode & BIT_5)
6350 *sector_size = mcp->mb[1];
6351 else if (subcode & (BIT_6 | BIT_7)) {
6352 ql_dbg(ql_dbg_mbx, vha, 0x1148,
6353 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6354 } else if (subcode & (BIT_3 | BIT_4)) {
6355 ql_dbg(ql_dbg_mbx, vha, 0x1149,
6356 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6357 }
6358 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
6359 }
6360
6361 return rval;
6362 }
6363
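/*
 * qla2x00_dump_mctp_data
 *	Dump MCTP data from RISC RAM (RAM ID 0x40) into the supplied DMA
 *	buffer using MBC_DUMP_RISC_RAM_EXTENDED.
 */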
6364 int
6365 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6366 uint32_t size)
6367 {
6368 int rval;
6369 mbx_cmd_t mc;
6370 mbx_cmd_t *mcp = &mc;
6371
6372 if (!IS_MCTP_CAPABLE(vha->hw))
6373 return QLA_FUNCTION_FAILED;
6374
6375 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6376 "Entered %s.\n", __func__);
6377
6378 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6379 mcp->mb[1] = LSW(addr);
6380 mcp->mb[2] = MSW(req_dma);
6381 mcp->mb[3] = LSW(req_dma);
6382 mcp->mb[4] = MSW(size);
6383 mcp->mb[5] = LSW(size);
6384 mcp->mb[6] = MSW(MSD(req_dma));
6385 mcp->mb[7] = LSW(MSD(req_dma));
6386 mcp->mb[8] = MSW(addr);
6387 	/* BIT_7 marks the RAM ID as valid; for MCTP the RAM ID is 0x40. */
6389 mcp->mb[10] = BIT_7 | 0x40;
6390
6391 	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6392 MBX_0;
6393
6394 mcp->in_mb = MBX_0;
6395 mcp->tov = MBX_TOV_SECONDS;
6396 mcp->flags = 0;
6397 rval = qla2x00_mailbox_command(vha, mcp);
6398
6399 if (rval != QLA_SUCCESS) {
6400 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6401 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6402 } else {
6403 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6404 "Done %s.\n", __func__);
6405 }
6406
6407 return rval;
6408 }
6409
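/*
 * qla26xx_dport_diagnostics
 *	Run D-Port diagnostics and DMA the results into @dd_buf. The
 *	buffer is mapped only for the duration of the mailbox command.
 */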
6410 int
6411 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6412 void *dd_buf, uint size, uint options)
6413 {
6414 int rval;
6415 mbx_cmd_t mc;
6416 mbx_cmd_t *mcp = &mc;
6417 dma_addr_t dd_dma;
6418
6419 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6420 !IS_QLA28XX(vha->hw))
6421 return QLA_FUNCTION_FAILED;
6422
6423 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6424 "Entered %s.\n", __func__);
6425
6426 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6427 dd_buf, size, DMA_FROM_DEVICE);
6428 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6429 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6430 return QLA_MEMORY_ALLOC_FAILED;
6431 }
6432
6433 memset(dd_buf, 0, size);
6434
6435 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6436 mcp->mb[1] = options;
6437 mcp->mb[2] = MSW(LSD(dd_dma));
6438 mcp->mb[3] = LSW(LSD(dd_dma));
6439 mcp->mb[6] = MSW(MSD(dd_dma));
6440 mcp->mb[7] = LSW(MSD(dd_dma));
6441 mcp->mb[8] = size;
6442 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6443 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6444 mcp->buf_size = size;
6445 mcp->flags = MBX_DMA_IN;
6446 mcp->tov = MBX_TOV_SECONDS * 4;
6447 rval = qla2x00_mailbox_command(vha, mcp);
6448
6449 if (rval != QLA_SUCCESS) {
6450 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6451 } else {
6452 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6453 "Done %s.\n", __func__);
6454 }
6455
6456 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6457 size, DMA_FROM_DEVICE);
6458
6459 return rval;
6460 }
6461
6462 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6463 {
6464 sp->u.iocb_cmd.u.mbx.rc = res;
6465
6466 complete(&sp->u.iocb_cmd.u.mbx.comp);
6467 /* don't free sp here. Let the caller do the free */
6468 }
6469
6470 /*
6471  * This routine uses the IOCB interface to send a mailbox command.
6472  * This allows non-critical (non chip-setup) commands to go
6473  * out in parallel.
6474 */
6475 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6476 {
6477 int rval = QLA_FUNCTION_FAILED;
6478 srb_t *sp;
6479 struct srb_iocb *c;
6480
6481 if (!vha->hw->flags.fw_started)
6482 goto done;
6483
6484 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6485 if (!sp)
6486 goto done;
6487
6488 sp->type = SRB_MB_IOCB;
6489 sp->name = mb_to_str(mcp->mb[0]);
6490
6491 c = &sp->u.iocb_cmd;
6492 c->timeout = qla2x00_async_iocb_timeout;
6493 init_completion(&c->u.mbx.comp);
6494
6495 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6496
6497 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6498
6499 sp->done = qla2x00_async_mb_sp_done;
6500
6501 rval = qla2x00_start_sp(sp);
6502 if (rval != QLA_SUCCESS) {
6503 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6504 "%s: %s Failed submission. %x.\n",
6505 __func__, sp->name, rval);
6506 goto done_free_sp;
6507 }
6508
6509 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6510 sp->name, sp->handle);
6511
6512 wait_for_completion(&c->u.mbx.comp);
6513 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6514
6515 rval = c->u.mbx.rc;
6516 switch (rval) {
6517 case QLA_FUNCTION_TIMEOUT:
6518 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6519 __func__, sp->name, rval);
6520 break;
6521 case QLA_SUCCESS:
6522 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6523 __func__, sp->name);
6524 break;
6525 default:
6526 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6527 __func__, sp->name, rval);
6528 break;
6529 }
6530
6531 done_free_sp:
6532 sp->free(sp);
6533 done:
6534 return rval;
6535 }
6536
6537 /*
6538 * qla24xx_gpdb_wait
6539 * NOTE: Do not call this routine from DPC thread
6540 */
6541 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6542 {
6543 int rval = QLA_FUNCTION_FAILED;
6544 dma_addr_t pd_dma;
6545 struct port_database_24xx *pd;
6546 struct qla_hw_data *ha = vha->hw;
6547 mbx_cmd_t mc;
6548
6549 if (!vha->hw->flags.fw_started)
6550 goto done;
6551
6552 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6553 if (pd == NULL) {
6554 ql_log(ql_log_warn, vha, 0xd047,
6555 "Failed to allocate port database structure.\n");
6556 goto done_free_sp;
6557 }
6558
6559 memset(&mc, 0, sizeof(mc));
6560 mc.mb[0] = MBC_GET_PORT_DATABASE;
6561 mc.mb[1] = fcport->loop_id;
6562 mc.mb[2] = MSW(pd_dma);
6563 mc.mb[3] = LSW(pd_dma);
6564 mc.mb[6] = MSW(MSD(pd_dma));
6565 mc.mb[7] = LSW(MSD(pd_dma));
6566 mc.mb[9] = vha->vp_idx;
6567 mc.mb[10] = opt;
6568
6569 rval = qla24xx_send_mb_cmd(vha, &mc);
6570 if (rval != QLA_SUCCESS) {
6571 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6572 "%s: %8phC fail\n", __func__, fcport->port_name);
6573 goto done_free_sp;
6574 }
6575
6576 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6577
6578 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6579 __func__, fcport->port_name);
6580
6581 done_free_sp:
6582 if (pd)
6583 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6584 done:
6585 return rval;
6586 }
6587
6588 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6589 struct port_database_24xx *pd)
6590 {
6591 int rval = QLA_SUCCESS;
6592 uint64_t zero = 0;
6593 u8 current_login_state, last_login_state;
6594
6595 if (NVME_TARGET(vha->hw, fcport)) {
6596 current_login_state = pd->current_login_state >> 4;
6597 last_login_state = pd->last_login_state >> 4;
6598 } else {
6599 current_login_state = pd->current_login_state & 0xf;
6600 last_login_state = pd->last_login_state & 0xf;
6601 }
6602
6603 /* Check for logged in state. */
6604 if (current_login_state != PDS_PRLI_COMPLETE) {
6605 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6606 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6607 current_login_state, last_login_state, fcport->loop_id);
6608 rval = QLA_FUNCTION_FAILED;
6609 goto gpd_error_out;
6610 }
6611
6612 if (fcport->loop_id == FC_NO_LOOP_ID ||
6613 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6614 memcmp(fcport->port_name, pd->port_name, 8))) {
6615 /* We lost the device mid way. */
6616 rval = QLA_NOT_LOGGED_IN;
6617 goto gpd_error_out;
6618 }
6619
6620 /* Names are little-endian. */
6621 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6622 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6623
6624 /* Get port_id of device. */
6625 fcport->d_id.b.domain = pd->port_id[0];
6626 fcport->d_id.b.area = pd->port_id[1];
6627 fcport->d_id.b.al_pa = pd->port_id[2];
6628 fcport->d_id.b.rsvd_1 = 0;
6629
6630 ql_dbg(ql_dbg_disc, vha, 0x2062,
6631 "%8phC SVC Param w3 %02x%02x",
6632 fcport->port_name,
6633 pd->prli_svc_param_word_3[1],
6634 pd->prli_svc_param_word_3[0]);
6635
6636 if (NVME_TARGET(vha->hw, fcport)) {
6637 fcport->port_type = FCT_NVME;
6638 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6639 fcport->port_type |= FCT_NVME_INITIATOR;
6640 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6641 fcport->port_type |= FCT_NVME_TARGET;
6642 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6643 fcport->port_type |= FCT_NVME_DISCOVERY;
6644 } else {
6645 /* If not target must be initiator or unknown type. */
6646 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6647 fcport->port_type = FCT_INITIATOR;
6648 else
6649 fcport->port_type = FCT_TARGET;
6650 }
6651 /* Passback COS information. */
6652 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6653 FC_COS_CLASS2 : FC_COS_CLASS3;
6654
6655 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6656 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6657 fcport->conf_compl_supported = 1;
6658 }
6659
6660 gpd_error_out:
6661 return rval;
6662 }
6663
6664 /*
6665  * qla24xx_gidlist_wait
6666 * NOTE: don't call this routine from DPC thread.
6667 */
6668 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6669 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6670 {
6671 int rval = QLA_FUNCTION_FAILED;
6672 mbx_cmd_t mc;
6673
6674 if (!vha->hw->flags.fw_started)
6675 goto done;
6676
6677 memset(&mc, 0, sizeof(mc));
6678 mc.mb[0] = MBC_GET_ID_LIST;
6679 mc.mb[2] = MSW(id_list_dma);
6680 mc.mb[3] = LSW(id_list_dma);
6681 mc.mb[6] = MSW(MSD(id_list_dma));
6682 mc.mb[7] = LSW(MSD(id_list_dma));
6683 mc.mb[8] = 0;
6684 mc.mb[9] = vha->vp_idx;
6685
6686 rval = qla24xx_send_mb_cmd(vha, &mc);
6687 if (rval != QLA_SUCCESS) {
6688 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6689 "%s: fail\n", __func__);
6690 } else {
6691 *entries = mc.mb[1];
6692 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6693 "%s: done\n", __func__);
6694 }
6695 done:
6696 return rval;
6697 }
6698
6699 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6700 {
6701 int rval;
6702 mbx_cmd_t mc;
6703 mbx_cmd_t *mcp = &mc;
6704
6705 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6706 "Entered %s\n", __func__);
6707
6708 	memset(mcp->mb, 0, sizeof(mcp->mb));
6709 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6710 mcp->mb[1] = 1;
6711 mcp->mb[2] = value;
6712 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6713 mcp->in_mb = MBX_2 | MBX_0;
6714 mcp->tov = MBX_TOV_SECONDS;
6715 mcp->flags = 0;
6716
6717 rval = qla2x00_mailbox_command(vha, mcp);
6718
6719 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6720 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6721
6722 return rval;
6723 }
6724
6725 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6726 {
6727 int rval;
6728 mbx_cmd_t mc;
6729 mbx_cmd_t *mcp = &mc;
6730
6731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6732 "Entered %s\n", __func__);
6733
6734 memset(mcp->mb, 0, sizeof(mcp->mb));
6735 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6736 mcp->mb[1] = 0;
6737 mcp->out_mb = MBX_1 | MBX_0;
6738 mcp->in_mb = MBX_2 | MBX_0;
6739 mcp->tov = MBX_TOV_SECONDS;
6740 mcp->flags = 0;
6741
6742 rval = qla2x00_mailbox_command(vha, mcp);
6743 if (rval == QLA_SUCCESS)
6744 *value = mc.mb[2];
6745
6746 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6747 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6748
6749 return rval;
6750 }
6751
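/*
 * qla2x00_read_sfp_dev
 *	Read the complete SFP device data (device addresses 0xa0 and 0xa2)
 *	in SFP_BLOCK_SIZE chunks and optionally copy up to @count bytes
 *	into @buf.
 */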
6752 int
6753 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6754 {
6755 struct qla_hw_data *ha = vha->hw;
6756 uint16_t iter, addr, offset;
6757 dma_addr_t phys_addr;
6758 int rval, c;
6759 u8 *sfp_data;
6760
6761 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6762 addr = 0xa0;
6763 phys_addr = ha->sfp_data_dma;
6764 sfp_data = ha->sfp_data;
6765 offset = c = 0;
6766
6767 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6768 if (iter == 4) {
6769 /* Skip to next device address. */
6770 addr = 0xa2;
6771 offset = 0;
6772 }
6773
6774 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6775 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6776 if (rval != QLA_SUCCESS) {
6777 ql_log(ql_log_warn, vha, 0x706d,
6778 "Unable to read SFP data (%x/%x/%x).\n", rval,
6779 addr, offset);
6780
6781 return rval;
6782 }
6783
6784 if (buf && (c < count)) {
6785 u16 sz;
6786
6787 if ((count - c) >= SFP_BLOCK_SIZE)
6788 sz = SFP_BLOCK_SIZE;
6789 else
6790 sz = count - c;
6791
6792 memcpy(buf, sfp_data, sz);
6793 buf += SFP_BLOCK_SIZE;
6794 c += sz;
6795 }
6796 phys_addr += SFP_BLOCK_SIZE;
6797 sfp_data += SFP_BLOCK_SIZE;
6798 offset += SFP_BLOCK_SIZE;
6799 }
6800
6801 return rval;
6802 }
6803
6804 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6805 uint16_t *out_mb, int out_mb_sz)
6806 {
6807 int rval = QLA_FUNCTION_FAILED;
6808 mbx_cmd_t mc;
6809
6810 if (!vha->hw->flags.fw_started)
6811 goto done;
6812
6813 memset(&mc, 0, sizeof(mc));
6814 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6815
6816 rval = qla24xx_send_mb_cmd(vha, &mc);
6817 if (rval != QLA_SUCCESS) {
6818 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6819 "%s: fail\n", __func__);
6820 } else {
6821 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6822 memcpy(out_mb, mc.mb, out_mb_sz);
6823 else
6824 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6825
6826 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6827 "%s: done\n", __func__);
6828 }
6829 done:
6830 return rval;
6831 }
6832
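/*
 * qla28xx_secure_flash_update
 *	Issue MBC_SECURE_FLASH_UPDATE for the given flash region, passing
 *	the secure flash update block (SFUB) by DMA address.
 */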
6833 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6834 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6835 uint32_t sfub_len)
6836 {
6837 int rval;
6838 mbx_cmd_t mc;
6839 mbx_cmd_t *mcp = &mc;
6840
6841 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6842 mcp->mb[1] = opts;
6843 mcp->mb[2] = region;
6844 mcp->mb[3] = MSW(len);
6845 mcp->mb[4] = LSW(len);
6846 mcp->mb[5] = MSW(sfub_dma_addr);
6847 mcp->mb[6] = LSW(sfub_dma_addr);
6848 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6849 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6850 mcp->mb[9] = sfub_len;
6851 mcp->out_mb =
6852 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6853 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6854 mcp->tov = MBX_TOV_SECONDS;
6855 mcp->flags = 0;
6856 rval = qla2x00_mailbox_command(vha, mcp);
6857
6858 if (rval != QLA_SUCCESS) {
6859 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6860 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6861 mcp->mb[2]);
6862 }
6863
6864 return rval;
6865 }
6866
6867 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6868 uint32_t data)
6869 {
6870 int rval;
6871 mbx_cmd_t mc;
6872 mbx_cmd_t *mcp = &mc;
6873
6874 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6875 "Entered %s.\n", __func__);
6876
6877 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6878 mcp->mb[1] = LSW(addr);
6879 mcp->mb[2] = MSW(addr);
6880 mcp->mb[3] = LSW(data);
6881 mcp->mb[4] = MSW(data);
6882 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6883 mcp->in_mb = MBX_1|MBX_0;
6884 mcp->tov = MBX_TOV_SECONDS;
6885 mcp->flags = 0;
6886 rval = qla2x00_mailbox_command(vha, mcp);
6887
6888 if (rval != QLA_SUCCESS) {
6889 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6890 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6891 } else {
6892 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6893 "Done %s.\n", __func__);
6894 }
6895
6896 return rval;
6897 }
6898
6899 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6900 uint32_t *data)
6901 {
6902 int rval;
6903 mbx_cmd_t mc;
6904 mbx_cmd_t *mcp = &mc;
6905
6906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6907 "Entered %s.\n", __func__);
6908
6909 mcp->mb[0] = MBC_READ_REMOTE_REG;
6910 mcp->mb[1] = LSW(addr);
6911 mcp->mb[2] = MSW(addr);
6912 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6913 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6914 mcp->tov = MBX_TOV_SECONDS;
6915 mcp->flags = 0;
6916 rval = qla2x00_mailbox_command(vha, mcp);
6917
6918 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6919
6920 if (rval != QLA_SUCCESS) {
6921 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6922 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6923 } else {
6924 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6925 "Done %s.\n", __func__);
6926 }
6927
6928 return rval;
6929 }
6930
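/*
 * ql26xx_led_config
 *	Get or set the FC LED configuration. BIT_0 of @options selects a
 *	set (write led[] to the firmware) versus a get (fill led[] from
 *	the firmware); BIT_1-BIT_3 select which LED words are written.
 */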
6931 int
6932 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
6933 {
6934 struct qla_hw_data *ha = vha->hw;
6935 mbx_cmd_t mc;
6936 mbx_cmd_t *mcp = &mc;
6937 int rval;
6938
6939 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6940 return QLA_FUNCTION_FAILED;
6941
6942 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
6943 __func__, options);
6944
6945 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
6946 mcp->mb[1] = options;
6947 mcp->out_mb = MBX_1|MBX_0;
6948 mcp->in_mb = MBX_1|MBX_0;
6949 if (options & BIT_0) {
6950 if (options & BIT_1) {
6951 mcp->mb[2] = led[2];
6952 mcp->out_mb |= MBX_2;
6953 }
6954 if (options & BIT_2) {
6955 mcp->mb[3] = led[0];
6956 mcp->out_mb |= MBX_3;
6957 }
6958 if (options & BIT_3) {
6959 mcp->mb[4] = led[1];
6960 mcp->out_mb |= MBX_4;
6961 }
6962 } else {
6963 mcp->in_mb |= MBX_4|MBX_3|MBX_2;
6964 }
6965 mcp->tov = MBX_TOV_SECONDS;
6966 mcp->flags = 0;
6967 rval = qla2x00_mailbox_command(vha, mcp);
6968 if (rval) {
6969 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
6970 __func__, rval, mcp->mb[0], mcp->mb[1]);
6971 return rval;
6972 }
6973
6974 if (options & BIT_0) {
6975 ha->beacon_blink_led = 0;
6976 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
6977 } else {
6978 led[2] = mcp->mb[2];
6979 led[0] = mcp->mb[3];
6980 led[1] = mcp->mb[4];
6981 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
6982 __func__, led[0], led[1], led[2]);
6983 }
6984
6985 return rval;
6986 }
6987
6988 /**
6989 * qla_no_op_mb(): This MB is used to check if FW is still alive and
6990 * able to generate an interrupt. Otherwise, a timeout will trigger
6991 * FW dump + reset
6992 * @vha: host adapter pointer
6993 * Return: None
6994 */
6995 void qla_no_op_mb(struct scsi_qla_host *vha)
6996 {
6997 mbx_cmd_t mc;
6998 mbx_cmd_t *mcp = &mc;
6999 int rval;
7000
7001 memset(&mc, 0, sizeof(mc));
7002 	mcp->mb[0] = 0;	/* no-op command = 0 */
7003 mcp->out_mb = MBX_0;
7004 mcp->in_mb = MBX_0;
7005 mcp->tov = 5;
7006 mcp->flags = 0;
7007 rval = qla2x00_mailbox_command(vha, mcp);
7008
7009 if (rval) {
7010 ql_dbg(ql_dbg_async, vha, 0x7071,
7011 "Failed %s %x\n", __func__, rval);
7012 }
7013 }
7014