1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 
7 /*
8  * Table showing the last message id used for each logging level.
9  * Update this table when adding new log/debug messages.
10  * ----------------------------------------------------------------------
11  * |             Level            |   Last Value Used  |     Holes	|
12  * ----------------------------------------------------------------------
13  * | Module Init and Probe        |       0x0199       |                |
14  * | Mailbox commands             |       0x1206       | 0x11a5-0x11ff	|
15  * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
16  * |				  | 		       | 0x211a         |
17  * |                              |                    | 0x211c-0x2128  |
18  * |                              |                    | 0x212c-0x2134  |
19  * | Queue Command and IO tracing |       0x3074       | 0x300b         |
20  * |                              |                    | 0x3027-0x3028  |
21  * |                              |                    | 0x303d-0x3041  |
22  * |                              |                    | 0x302d,0x3033  |
23  * |                              |                    | 0x3036,0x3038  |
24  * |                              |                    | 0x303a		|
25  * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
26  * | Async Events                 |       0x509c       |                |
27  * | Timer Routines               |       0x6012       |                |
28  * | User Space Interactions      |       0x70e3       | 0x7018,0x702e  |
29  * |				  |		       | 0x7020,0x7024  |
30  * |                              |                    | 0x7039,0x7045  |
31  * |                              |                    | 0x7073-0x7075  |
32  * |                              |                    | 0x70a5-0x70a6  |
33  * |                              |                    | 0x70a8,0x70ab  |
34  * |                              |                    | 0x70ad-0x70ae  |
35  * |                              |                    | 0x70d0-0x70d6	|
36  * |                              |                    | 0x70d7-0x70db  |
37  * | Task Management              |       0x8042       | 0x8000         |
38  * |                              |                    | 0x8019         |
39  * |                              |                    | 0x8025,0x8026  |
40  * |                              |                    | 0x8031,0x8032  |
41  * |                              |                    | 0x8039,0x803c  |
42  * | AER/EEH                      |       0x9011       |		|
43  * | Virtual Port                 |       0xa007       |		|
44  * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |
45  * |                              |                    | 0xb09e,0xb0ae  |
46  * |				  |		       | 0xb0c3,0xb0c6  |
47  * |                              |                    | 0xb0e0-0xb0ef  |
48  * |                              |                    | 0xb085,0xb0dc  |
49  * |                              |                    | 0xb107,0xb108  |
50  * |                              |                    | 0xb111,0xb11e  |
51  * |                              |                    | 0xb12c,0xb12d  |
52  * |                              |                    | 0xb13a,0xb142  |
53  * |                              |                    | 0xb13c-0xb140  |
54  * |                              |                    | 0xb149		|
55  * | MultiQ                       |       0xc010       |		|
56  * | Misc                         |       0xd303       | 0xd031-0xd0ff	|
57  * |                              |                    | 0xd101-0xd1fe	|
58  * |                              |                    | 0xd214-0xd2fe	|
59  * | Target Mode		  |	  0xe081       |		|
60  * | Target Mode Management	  |	  0xf09b       | 0xf002		|
61  * |                              |                    | 0xf046-0xf049  |
62  * | Target Mode Task Management  |	  0x1000d      |		|
63  * ----------------------------------------------------------------------
64  */
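
/*
 * Example (illustrative only; 0x019a is not an id actually claimed by the
 * driver): a new init/probe-time debug message would take the next unused
 * id for its level from the table above, e.g.
 *
 *	ql_dbg(ql_dbg_init, vha, 0x019a,
 *	    "Example init-time debug message.\n");
 *
 * and the "Last Value Used" column for that level would then be updated
 * to 0x019a.
 */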
65 
66 #include "qla_def.h"
67 
68 #include <linux/delay.h>
69 #define CREATE_TRACE_POINTS
70 #include <trace/events/qla.h>
71 
72 static uint32_t ql_dbg_offset = 0x800;
73 
74 static inline void
75 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
76 {
77 	fw_dump->fw_major_version = htonl(ha->fw_major_version);
78 	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
79 	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
80 	fw_dump->fw_attributes = htonl(ha->fw_attributes);
81 
82 	fw_dump->vendor = htonl(ha->pdev->vendor);
83 	fw_dump->device = htonl(ha->pdev->device);
84 	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
85 	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
86 }
87 
88 static inline void *
89 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
90 {
91 	struct req_que *req = ha->req_q_map[0];
92 	struct rsp_que *rsp = ha->rsp_q_map[0];
93 	/* Request queue. */
94 	memcpy(ptr, req->ring, req->length *
95 	    sizeof(request_t));
96 
97 	/* Response queue. */
98 	ptr += req->length * sizeof(request_t);
99 	memcpy(ptr, rsp->ring, rsp->length  *
100 	    sizeof(response_t));
101 
102 	return ptr + (rsp->length * sizeof(response_t));
103 }
104 
105 int
106 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
107 	uint32_t ram_dwords, void **nxt)
108 {
109 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
110 	dma_addr_t dump_dma = ha->gid_list_dma;
111 	uint32_t *chunk = (uint32_t *)ha->gid_list;
112 	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
113 	uint32_t stat;
114 	ulong i, j, timer = 6000000;
115 	int rval = QLA_FUNCTION_FAILED;
116 
117 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
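	/*
	 * Dump the MPI RAM one gid_list-sized chunk at a time: each pass
	 * issues MBC_LOAD_DUMP_MPI_RAM through the mailbox registers, lets
	 * the firmware DMA the chunk into the reused gid_list buffer, and
	 * polls host_status for the mailbox completion before copying the
	 * data into 'ram'.
	 */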
118 	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
119 		if (i + dwords > ram_dwords)
120 			dwords = ram_dwords - i;
121 
122 		wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
123 		wrt_reg_word(&reg->mailbox1, LSW(addr));
124 		wrt_reg_word(&reg->mailbox8, MSW(addr));
125 
126 		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
127 		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
128 		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
129 		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
130 
131 		wrt_reg_word(&reg->mailbox4, MSW(dwords));
132 		wrt_reg_word(&reg->mailbox5, LSW(dwords));
133 
134 		wrt_reg_word(&reg->mailbox9, 0);
135 		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
136 
137 		ha->flags.mbox_int = 0;
138 		while (timer--) {
139 			udelay(5);
140 
141 			stat = rd_reg_dword(&reg->host_status);
142 			/* Check for pending interrupts. */
143 			if (!(stat & HSRX_RISC_INT))
144 				continue;
145 
146 			stat &= 0xff;
147 			if (stat != 0x1 && stat != 0x2 &&
148 			    stat != 0x10 && stat != 0x11) {
149 
150 				/* Clear this intr; it wasn't a mailbox intr */
151 				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
152 				rd_reg_dword(&reg->hccr);
153 				continue;
154 			}
155 
156 			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
157 			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
158 			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
159 			rd_reg_dword(&reg->hccr);
160 			break;
161 		}
162 		ha->flags.mbox_int = 1;
163 		*nxt = ram + i;
164 
165 		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
166 			/* No interrupt, timed out. */
167 			return rval;
168 		}
169 		if (rval) {
170 			/* error completion status */
171 			return rval;
172 		}
173 		for (j = 0; j < dwords; j++) {
174 			ram[i + j] =
175 			    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
176 			    chunk[j] : swab32(chunk[j]);
177 		}
178 	}
179 
180 	*nxt = ram + i;
181 	return QLA_SUCCESS;
182 }
183 
184 int
185 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
186 		 uint32_t ram_dwords, void **nxt)
187 {
188 	int rval = QLA_FUNCTION_FAILED;
189 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
190 	dma_addr_t dump_dma = ha->gid_list_dma;
191 	uint32_t *chunk = (uint32_t *)ha->gid_list;
192 	uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
193 	uint32_t stat;
194 	ulong i, j, timer = 6000000;
195 
196 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
197 
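	/*
	 * Same chunked scheme as qla27xx_dump_mpi_ram() above, but using
	 * MBC_DUMP_RISC_RAM_EXTENDED to pull RISC RAM into the gid_list
	 * bounce buffer one chunk per mailbox command.
	 */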
198 	for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
199 		if (i + dwords > ram_dwords)
200 			dwords = ram_dwords - i;
201 
202 		wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
203 		wrt_reg_word(&reg->mailbox1, LSW(addr));
204 		wrt_reg_word(&reg->mailbox8, MSW(addr));
205 
206 		wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
207 		wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
208 		wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
209 		wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
210 
211 		wrt_reg_word(&reg->mailbox4, MSW(dwords));
212 		wrt_reg_word(&reg->mailbox5, LSW(dwords));
213 		wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
214 
215 		ha->flags.mbox_int = 0;
216 		while (timer--) {
217 			udelay(5);
218 			stat = rd_reg_dword(&reg->host_status);
219 
220 			/* Check for pending interrupts. */
221 			if (!(stat & HSRX_RISC_INT))
222 				continue;
223 
224 			stat &= 0xff;
225 			if (stat != 0x1 && stat != 0x2 &&
226 			    stat != 0x10 && stat != 0x11) {
227 				wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
228 				rd_reg_dword(&reg->hccr);
229 				continue;
230 			}
231 
232 			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
233 			rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
234 			wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
235 			rd_reg_dword(&reg->hccr);
236 			break;
237 		}
238 		ha->flags.mbox_int = 1;
239 		*nxt = ram + i;
240 
241 		if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
242 			/* No interrupt, timed out. */
243 			return rval;
244 		}
245 		if (rval) {
246 			/* error completion status */
247 			return rval;
248 		}
249 		for (j = 0; j < dwords; j++) {
250 			ram[i + j] = (__force __be32)
251 				((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
252 				 chunk[j] : swab32(chunk[j]));
253 		}
254 	}
255 
256 	*nxt = ram + i;
257 	return QLA_SUCCESS;
258 }
259 
260 static int
261 qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
262 		    uint32_t cram_size, void **nxt)
263 {
264 	int rval;
265 
266 	/* Code RAM. */
267 	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
268 	if (rval != QLA_SUCCESS)
269 		return rval;
270 
271 	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
272 
273 	/* External Memory. */
274 	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
275 	    ha->fw_memory_size - 0x100000 + 1, nxt);
276 	if (rval == QLA_SUCCESS)
277 		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
278 
279 	return rval;
280 }
281 
282 static __be32 *
283 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
284 		    uint32_t count, __be32 *buf)
285 {
286 	__le32 __iomem *dmp_reg;
287 
288 	wrt_reg_dword(&reg->iobase_addr, iobase);
289 	dmp_reg = &reg->iobase_window;
290 	for ( ; count--; dmp_reg++)
291 		*buf++ = htonl(rd_reg_dword(dmp_reg));
292 
293 	return buf;
294 }
295 
296 void
297 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
298 {
299 	wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);
300 
301 	/* A 100 usec delay is sufficient for the hardware to pause the RISC */
302 	udelay(100);
303 	if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
304 		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
305 }
306 
307 int
308 qla24xx_soft_reset(struct qla_hw_data *ha)
309 {
310 	int rval = QLA_SUCCESS;
311 	uint32_t cnt;
312 	uint16_t wd;
313 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
314 
315 	/*
316 	 * Reset the RISC. The required delay depends on the system
317 	 * architecture; the driver can proceed with the reset sequence
318 	 * once the timeout period has elapsed.
319 	 */
320 	wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
321 	for (cnt = 0; cnt < 30000; cnt++) {
322 		if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
323 			break;
324 
325 		udelay(10);
326 	}
327 	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
328 		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
329 
330 	wrt_reg_dword(&reg->ctrl_status,
331 	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
332 	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
333 
334 	udelay(100);
335 
336 	/* Wait for soft-reset to complete. */
337 	for (cnt = 0; cnt < 30000; cnt++) {
338 		if ((rd_reg_dword(&reg->ctrl_status) &
339 		    CSRX_ISP_SOFT_RESET) == 0)
340 			break;
341 
342 		udelay(10);
343 	}
344 	if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
345 		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
346 
347 	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
348 	rd_reg_dword(&reg->hccr);             /* PCI Posting. */
349 
350 	for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
351 	    rval == QLA_SUCCESS; cnt--) {
352 		if (cnt)
353 			udelay(10);
354 		else
355 			rval = QLA_FUNCTION_TIMEOUT;
356 	}
357 	if (rval == QLA_SUCCESS)
358 		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
359 
360 	return rval;
361 }
362 
363 static int
364 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
365     uint32_t ram_words, void **nxt)
366 {
367 	int rval;
368 	uint32_t cnt, stat, timer, words, idx;
369 	uint16_t mb0;
370 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
371 	dma_addr_t dump_dma = ha->gid_list_dma;
372 	__le16 *dump = (__force __le16 *)ha->gid_list;
373 
374 	rval = QLA_SUCCESS;
375 	mb0 = 0;
376 
377 	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
378 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
379 
380 	words = qla2x00_gid_list_size(ha) / 2;
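	/*
	 * Legacy ISPs dump RISC RAM in 16-bit words: each chunk is DMAed
	 * into the gid_list buffer by MBC_DUMP_RISC_RAM_EXTENDED, the
	 * mailbox completion is polled through the ISP2300 host_status
	 * register, and the words are converted to big-endian in the dump.
	 */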
381 	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
382 	    cnt += words, addr += words) {
383 		if (cnt + words > ram_words)
384 			words = ram_words - cnt;
385 
386 		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
387 		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
388 
389 		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
390 		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
391 		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
392 		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
393 
394 		WRT_MAILBOX_REG(ha, reg, 4, words);
395 		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
396 
397 		for (timer = 6000000; timer; timer--) {
398 			/* Check for pending interrupts. */
399 			stat = rd_reg_dword(&reg->u.isp2300.host_status);
400 			if (stat & HSR_RISC_INT) {
401 				stat &= 0xff;
402 
403 				if (stat == 0x1 || stat == 0x2) {
404 					set_bit(MBX_INTERRUPT,
405 					    &ha->mbx_cmd_flags);
406 
407 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
408 
409 					/* Release mailbox registers. */
410 					wrt_reg_word(&reg->semaphore, 0);
411 					wrt_reg_word(&reg->hccr,
412 					    HCCR_CLR_RISC_INT);
413 					rd_reg_word(&reg->hccr);
414 					break;
415 				} else if (stat == 0x10 || stat == 0x11) {
416 					set_bit(MBX_INTERRUPT,
417 					    &ha->mbx_cmd_flags);
418 
419 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
420 
421 					wrt_reg_word(&reg->hccr,
422 					    HCCR_CLR_RISC_INT);
423 					rd_reg_word(&reg->hccr);
424 					break;
425 				}
426 
427 				/* clear this intr; it wasn't a mailbox intr */
428 				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
429 				rd_reg_word(&reg->hccr);
430 			}
431 			udelay(5);
432 		}
433 
434 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
435 			rval = mb0 & MBS_MASK;
436 			for (idx = 0; idx < words; idx++)
437 				ram[cnt + idx] =
438 					cpu_to_be16(le16_to_cpu(dump[idx]));
439 		} else {
440 			rval = QLA_FUNCTION_FAILED;
441 		}
442 	}
443 
444 	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
445 	return rval;
446 }
447 
448 static inline void
449 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
450 		    __be16 *buf)
451 {
452 	__le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
453 
454 	for ( ; count--; dmp_reg++)
455 		*buf++ = htons(rd_reg_word(dmp_reg));
456 }
457 
458 static inline void *
459 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
460 {
461 	if (!ha->eft)
462 		return ptr;
463 
464 	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
465 	return ptr + ntohl(ha->fw_dump->eft_size);
466 }
467 
468 static inline void *
469 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
470 {
471 	uint32_t cnt;
472 	__be32 *iter_reg;
473 	struct qla2xxx_fce_chain *fcec = ptr;
474 
475 	if (!ha->fce)
476 		return ptr;
477 
478 	*last_chain = &fcec->type;
479 	fcec->type = htonl(DUMP_CHAIN_FCE);
480 	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
481 	    fce_calc_size(ha->fce_bufs));
482 	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
483 	fcec->addr_l = htonl(LSD(ha->fce_dma));
484 	fcec->addr_h = htonl(MSD(ha->fce_dma));
485 
486 	iter_reg = fcec->eregs;
487 	for (cnt = 0; cnt < 8; cnt++)
488 		*iter_reg++ = htonl(ha->fce_mb[cnt]);
489 
490 	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
491 
492 	return (char *)iter_reg + ntohl(fcec->size);
493 }
494 
495 static inline void *
496 qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
497 {
498 	struct qla2xxx_offld_chain *c = ptr;
499 
500 	if (!ha->exlogin_buf)
501 		return ptr;
502 
503 	*last_chain = &c->type;
504 
505 	c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
506 	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
507 	    ha->exlogin_size);
508 	c->size = cpu_to_be32(ha->exlogin_size);
509 	c->addr = cpu_to_be64(ha->exlogin_buf_dma);
510 
511 	ptr += sizeof(struct qla2xxx_offld_chain);
512 	memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
513 
514 	return (char *)ptr + be32_to_cpu(c->size);
515 }
516 
517 static inline void *
518 qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
519 {
520 	struct qla2xxx_offld_chain *c = ptr;
521 
522 	if (!ha->exchoffld_buf)
523 		return ptr;
524 
525 	*last_chain = &c->type;
526 
527 	c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
528 	c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
529 	    ha->exchoffld_size);
530 	c->size = cpu_to_be32(ha->exchoffld_size);
531 	c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
532 
533 	ptr += sizeof(struct qla2xxx_offld_chain);
534 	memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
535 
536 	return (char *)ptr + be32_to_cpu(c->size);
537 }
538 
539 static inline void *
540 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
541 			__be32 **last_chain)
542 {
543 	struct qla2xxx_mqueue_chain *q;
544 	struct qla2xxx_mqueue_header *qh;
545 	uint32_t num_queues;
546 	int que;
547 	struct {
548 		int length;
549 		void *ring;
550 	} aq, *aqp;
551 
552 	if (!ha->tgt.atio_ring)
553 		return ptr;
554 
555 	num_queues = 1;
556 	aqp = &aq;
557 	aqp->length = ha->tgt.atio_q_length;
558 	aqp->ring = ha->tgt.atio_ring;
559 
560 	for (que = 0; que < num_queues; que++) {
561 		/* aqp = ha->atio_q_map[que]; */
562 		q = ptr;
563 		*last_chain = &q->type;
564 		q->type = htonl(DUMP_CHAIN_QUEUE);
565 		q->chain_size = htonl(
566 		    sizeof(struct qla2xxx_mqueue_chain) +
567 		    sizeof(struct qla2xxx_mqueue_header) +
568 		    (aqp->length * sizeof(request_t)));
569 		ptr += sizeof(struct qla2xxx_mqueue_chain);
570 
571 		/* Add header. */
572 		qh = ptr;
573 		qh->queue = htonl(TYPE_ATIO_QUEUE);
574 		qh->number = htonl(que);
575 		qh->size = htonl(aqp->length * sizeof(request_t));
576 		ptr += sizeof(struct qla2xxx_mqueue_header);
577 
578 		/* Add data. */
579 		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
580 
581 		ptr += aqp->length * sizeof(request_t);
582 	}
583 
584 	return ptr;
585 }
586 
587 static inline void *
588 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
589 {
590 	struct qla2xxx_mqueue_chain *q;
591 	struct qla2xxx_mqueue_header *qh;
592 	struct req_que *req;
593 	struct rsp_que *rsp;
594 	int que;
595 
596 	if (!ha->mqenable)
597 		return ptr;
598 
599 	/* Request queues */
600 	for (que = 1; que < ha->max_req_queues; que++) {
601 		req = ha->req_q_map[que];
602 		if (!req)
603 			break;
604 
605 		/* Add chain. */
606 		q = ptr;
607 		*last_chain = &q->type;
608 		q->type = htonl(DUMP_CHAIN_QUEUE);
609 		q->chain_size = htonl(
610 		    sizeof(struct qla2xxx_mqueue_chain) +
611 		    sizeof(struct qla2xxx_mqueue_header) +
612 		    (req->length * sizeof(request_t)));
613 		ptr += sizeof(struct qla2xxx_mqueue_chain);
614 
615 		/* Add header. */
616 		qh = ptr;
617 		qh->queue = htonl(TYPE_REQUEST_QUEUE);
618 		qh->number = htonl(que);
619 		qh->size = htonl(req->length * sizeof(request_t));
620 		ptr += sizeof(struct qla2xxx_mqueue_header);
621 
622 		/* Add data. */
623 		memcpy(ptr, req->ring, req->length * sizeof(request_t));
624 		ptr += req->length * sizeof(request_t);
625 	}
626 
627 	/* Response queues */
628 	for (que = 1; que < ha->max_rsp_queues; que++) {
629 		rsp = ha->rsp_q_map[que];
630 		if (!rsp)
631 			break;
632 
633 		/* Add chain. */
634 		q = ptr;
635 		*last_chain = &q->type;
636 		q->type = htonl(DUMP_CHAIN_QUEUE);
637 		q->chain_size = htonl(
638 		    sizeof(struct qla2xxx_mqueue_chain) +
639 		    sizeof(struct qla2xxx_mqueue_header) +
640 		    (rsp->length * sizeof(response_t)));
641 		ptr += sizeof(struct qla2xxx_mqueue_chain);
642 
643 		/* Add header. */
644 		qh = ptr;
645 		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
646 		qh->number = htonl(que);
647 		qh->size = htonl(rsp->length * sizeof(response_t));
648 		ptr += sizeof(struct qla2xxx_mqueue_header);
649 
650 		/* Add data. */
651 		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
652 		ptr += rsp->length * sizeof(response_t);
653 	}
654 
655 	return ptr;
656 }
657 
658 static inline void *
659 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
660 {
661 	uint32_t cnt, que_idx;
662 	uint8_t que_cnt;
663 	struct qla2xxx_mq_chain *mq = ptr;
664 	device_reg_t *reg;
665 
666 	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
667 	    IS_QLA28XX(ha))
668 		return ptr;
669 
670 	mq = ptr;
671 	*last_chain = &mq->type;
672 	mq->type = htonl(DUMP_CHAIN_MQ);
673 	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
674 
675 	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
676 		ha->max_req_queues : ha->max_rsp_queues;
677 	mq->count = htonl(que_cnt);
678 	for (cnt = 0; cnt < que_cnt; cnt++) {
679 		reg = ISP_QUE_REG(ha, cnt);
680 		que_idx = cnt * 4;
681 		mq->qregs[que_idx] =
682 		    htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
683 		mq->qregs[que_idx+1] =
684 		    htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
685 		mq->qregs[que_idx+2] =
686 		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
687 		mq->qregs[que_idx+3] =
688 		    htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
689 	}
690 
691 	return ptr + sizeof(struct qla2xxx_mq_chain);
692 }
693 
694 void
695 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
696 {
697 	struct qla_hw_data *ha = vha->hw;
698 
699 	if (rval != QLA_SUCCESS) {
700 		ql_log(ql_log_warn, vha, 0xd000,
701 		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
702 		    rval, ha->fw_dump_cap_flags);
703 		ha->fw_dumped = false;
704 	} else {
705 		ql_log(ql_log_info, vha, 0xd001,
706 		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
707 		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
708 		ha->fw_dumped = true;
709 		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
710 	}
711 }
712 
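/**
 * qla2xxx_dump_fw() - Invoke the ISP-specific firmware dump routine.
 * @vha: HA context
 *
 * The dump handler is called with the hardware lock held, matching the
 * lockdep_assert_held() checks in the individual dump routines below.
 */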
713 void qla2xxx_dump_fw(scsi_qla_host_t *vha)
714 {
715 	unsigned long flags;
716 
717 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
718 	vha->hw->isp_ops->fw_dump(vha);
719 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
720 }
721 
722 /**
723  * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
724  * @vha: HA context
725  */
726 void
727 qla2300_fw_dump(scsi_qla_host_t *vha)
728 {
729 	int		rval;
730 	uint32_t	cnt;
731 	struct qla_hw_data *ha = vha->hw;
732 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
733 	__le16 __iomem *dmp_reg;
734 	struct qla2300_fw_dump	*fw;
735 	void		*nxt;
736 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
737 
738 	lockdep_assert_held(&ha->hardware_lock);
739 
740 	if (!ha->fw_dump) {
741 		ql_log(ql_log_warn, vha, 0xd002,
742 		    "No buffer available for dump.\n");
743 		return;
744 	}
745 
746 	if (ha->fw_dumped) {
747 		ql_log(ql_log_warn, vha, 0xd003,
748 		    "Firmware has been previously dumped (%p) "
749 		    "-- ignoring request.\n",
750 		    ha->fw_dump);
751 		return;
752 	}
753 	fw = &ha->fw_dump->isp.isp23;
754 	qla2xxx_prep_dump(ha, ha->fw_dump);
755 
756 	rval = QLA_SUCCESS;
757 	fw->hccr = htons(rd_reg_word(&reg->hccr));
758 
759 	/* Pause RISC. */
760 	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
761 	if (IS_QLA2300(ha)) {
762 		for (cnt = 30000;
763 		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
764 			rval == QLA_SUCCESS; cnt--) {
765 			if (cnt)
766 				udelay(100);
767 			else
768 				rval = QLA_FUNCTION_TIMEOUT;
769 		}
770 	} else {
771 		rd_reg_word(&reg->hccr);		/* PCI Posting. */
772 		udelay(10);
773 	}
774 
775 	if (rval == QLA_SUCCESS) {
776 		dmp_reg = &reg->flash_address;
777 		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
778 			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
779 
780 		dmp_reg = &reg->u.isp2300.req_q_in;
781 		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
782 		    cnt++, dmp_reg++)
783 			fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));
784 
785 		dmp_reg = &reg->u.isp2300.mailbox0;
786 		for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
787 		    cnt++, dmp_reg++)
788 			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
789 
790 		wrt_reg_word(&reg->ctrl_status, 0x40);
791 		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
792 
793 		wrt_reg_word(&reg->ctrl_status, 0x50);
794 		qla2xxx_read_window(reg, 48, fw->dma_reg);
795 
796 		wrt_reg_word(&reg->ctrl_status, 0x00);
797 		dmp_reg = &reg->risc_hw;
798 		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
799 		    cnt++, dmp_reg++)
800 			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
801 
802 		wrt_reg_word(&reg->pcr, 0x2000);
803 		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
804 
805 		wrt_reg_word(&reg->pcr, 0x2200);
806 		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
807 
808 		wrt_reg_word(&reg->pcr, 0x2400);
809 		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
810 
811 		wrt_reg_word(&reg->pcr, 0x2600);
812 		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
813 
814 		wrt_reg_word(&reg->pcr, 0x2800);
815 		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
816 
817 		wrt_reg_word(&reg->pcr, 0x2A00);
818 		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
819 
820 		wrt_reg_word(&reg->pcr, 0x2C00);
821 		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
822 
823 		wrt_reg_word(&reg->pcr, 0x2E00);
824 		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
825 
826 		wrt_reg_word(&reg->ctrl_status, 0x10);
827 		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
828 
829 		wrt_reg_word(&reg->ctrl_status, 0x20);
830 		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
831 
832 		wrt_reg_word(&reg->ctrl_status, 0x30);
833 		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
834 
835 		/* Reset RISC. */
836 		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
837 		for (cnt = 0; cnt < 30000; cnt++) {
838 			if ((rd_reg_word(&reg->ctrl_status) &
839 			    CSR_ISP_SOFT_RESET) == 0)
840 				break;
841 
842 			udelay(10);
843 		}
844 	}
845 
846 	if (!IS_QLA2300(ha)) {
847 		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
848 		    rval == QLA_SUCCESS; cnt--) {
849 			if (cnt)
850 				udelay(100);
851 			else
852 				rval = QLA_FUNCTION_TIMEOUT;
853 		}
854 	}
855 
856 	/* Get RISC SRAM. */
857 	if (rval == QLA_SUCCESS)
858 		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
859 					ARRAY_SIZE(fw->risc_ram), &nxt);
860 
861 	/* Get stack SRAM. */
862 	if (rval == QLA_SUCCESS)
863 		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
864 					ARRAY_SIZE(fw->stack_ram), &nxt);
865 
866 	/* Get data SRAM. */
867 	if (rval == QLA_SUCCESS)
868 		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
869 		    ha->fw_memory_size - 0x11000 + 1, &nxt);
870 
871 	if (rval == QLA_SUCCESS)
872 		qla2xxx_copy_queues(ha, nxt);
873 
874 	qla2xxx_dump_post_process(base_vha, rval);
875 }
876 
877 /**
878  * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
879  * @vha: HA context
880  */
881 void
882 qla2100_fw_dump(scsi_qla_host_t *vha)
883 {
884 	int		rval;
885 	uint32_t	cnt, timer;
886 	uint16_t	risc_address = 0;
887 	uint16_t	mb0 = 0, mb2 = 0;
888 	struct qla_hw_data *ha = vha->hw;
889 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
890 	__le16 __iomem *dmp_reg;
891 	struct qla2100_fw_dump	*fw;
892 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
893 
894 	lockdep_assert_held(&ha->hardware_lock);
895 
896 	if (!ha->fw_dump) {
897 		ql_log(ql_log_warn, vha, 0xd004,
898 		    "No buffer available for dump.\n");
899 		return;
900 	}
901 
902 	if (ha->fw_dumped) {
903 		ql_log(ql_log_warn, vha, 0xd005,
904 		    "Firmware has been previously dumped (%p) "
905 		    "-- ignoring request.\n",
906 		    ha->fw_dump);
907 		return;
908 	}
909 	fw = &ha->fw_dump->isp.isp21;
910 	qla2xxx_prep_dump(ha, ha->fw_dump);
911 
912 	rval = QLA_SUCCESS;
913 	fw->hccr = htons(rd_reg_word(&reg->hccr));
914 
915 	/* Pause RISC. */
916 	wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
917 	for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
918 	    rval == QLA_SUCCESS; cnt--) {
919 		if (cnt)
920 			udelay(100);
921 		else
922 			rval = QLA_FUNCTION_TIMEOUT;
923 	}
924 	if (rval == QLA_SUCCESS) {
925 		dmp_reg = &reg->flash_address;
926 		for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
927 			fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
928 
929 		dmp_reg = &reg->u.isp2100.mailbox0;
930 		for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
931 			if (cnt == 8)
932 				dmp_reg = &reg->u_end.isp2200.mailbox8;
933 
934 			fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
935 		}
936 
937 		dmp_reg = &reg->u.isp2100.unused_2[0];
938 		for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
939 			fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));
940 
941 		wrt_reg_word(&reg->ctrl_status, 0x00);
942 		dmp_reg = &reg->risc_hw;
943 		for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
944 			fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
945 
946 		wrt_reg_word(&reg->pcr, 0x2000);
947 		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
948 
949 		wrt_reg_word(&reg->pcr, 0x2100);
950 		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
951 
952 		wrt_reg_word(&reg->pcr, 0x2200);
953 		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
954 
955 		wrt_reg_word(&reg->pcr, 0x2300);
956 		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
957 
958 		wrt_reg_word(&reg->pcr, 0x2400);
959 		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
960 
961 		wrt_reg_word(&reg->pcr, 0x2500);
962 		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
963 
964 		wrt_reg_word(&reg->pcr, 0x2600);
965 		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
966 
967 		wrt_reg_word(&reg->pcr, 0x2700);
968 		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
969 
970 		wrt_reg_word(&reg->ctrl_status, 0x10);
971 		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
972 
973 		wrt_reg_word(&reg->ctrl_status, 0x20);
974 		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
975 
976 		wrt_reg_word(&reg->ctrl_status, 0x30);
977 		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
978 
979 		/* Reset the ISP. */
980 		wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
981 	}
982 
983 	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
984 	    rval == QLA_SUCCESS; cnt--) {
985 		if (cnt)
986 			udelay(100);
987 		else
988 			rval = QLA_FUNCTION_TIMEOUT;
989 	}
990 
991 	/* Pause RISC. */
992 	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
993 	    (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
994 
995 		wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
996 		for (cnt = 30000;
997 		    (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
998 		    rval == QLA_SUCCESS; cnt--) {
999 			if (cnt)
1000 				udelay(100);
1001 			else
1002 				rval = QLA_FUNCTION_TIMEOUT;
1003 		}
1004 		if (rval == QLA_SUCCESS) {
1005 			/* Set memory configuration and timing. */
1006 			if (IS_QLA2100(ha))
1007 				wrt_reg_word(&reg->mctr, 0xf1);
1008 			else
1009 				wrt_reg_word(&reg->mctr, 0xf2);
1010 			rd_reg_word(&reg->mctr);	/* PCI Posting. */
1011 
1012 			/* Release RISC. */
1013 			wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
1014 		}
1015 	}
1016 
1017 	if (rval == QLA_SUCCESS) {
1018 		/* Get RISC SRAM. */
1019 		risc_address = 0x1000;
1020 		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
1021 		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1022 	}
1023 	for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
1024 	    cnt++, risc_address++) {
1025 		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1026 		wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
1027 
1028 		for (timer = 6000000; timer != 0; timer--) {
1029 			/* Check for pending interrupts. */
1030 			if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
1031 				if (rd_reg_word(&reg->semaphore) & BIT_0) {
1032 					set_bit(MBX_INTERRUPT,
1033 					    &ha->mbx_cmd_flags);
1034 
1035 					mb0 = RD_MAILBOX_REG(ha, reg, 0);
1036 					mb2 = RD_MAILBOX_REG(ha, reg, 2);
1037 
1038 					wrt_reg_word(&reg->semaphore, 0);
1039 					wrt_reg_word(&reg->hccr,
1040 					    HCCR_CLR_RISC_INT);
1041 					rd_reg_word(&reg->hccr);
1042 					break;
1043 				}
1044 				wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
1045 				rd_reg_word(&reg->hccr);
1046 			}
1047 			udelay(5);
1048 		}
1049 
1050 		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1051 			rval = mb0 & MBS_MASK;
1052 			fw->risc_ram[cnt] = htons(mb2);
1053 		} else {
1054 			rval = QLA_FUNCTION_FAILED;
1055 		}
1056 	}
1057 
1058 	if (rval == QLA_SUCCESS)
1059 		qla2xxx_copy_queues(ha, &fw->queue_dump[0]);
1060 
1061 	qla2xxx_dump_post_process(base_vha, rval);
1062 }
1063 
1064 void
1065 qla24xx_fw_dump(scsi_qla_host_t *vha)
1066 {
1067 	int		rval;
1068 	uint32_t	cnt;
1069 	struct qla_hw_data *ha = vha->hw;
1070 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1071 	__le32 __iomem *dmp_reg;
1072 	__be32		*iter_reg;
1073 	__le16 __iomem *mbx_reg;
1074 	struct qla24xx_fw_dump *fw;
1075 	void		*nxt;
1076 	void		*nxt_chain;
1077 	__be32		*last_chain = NULL;
1078 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1079 
1080 	lockdep_assert_held(&ha->hardware_lock);
1081 
1082 	if (IS_P3P_TYPE(ha))
1083 		return;
1084 
1085 	ha->fw_dump_cap_flags = 0;
1086 
1087 	if (!ha->fw_dump) {
1088 		ql_log(ql_log_warn, vha, 0xd006,
1089 		    "No buffer available for dump.\n");
1090 		return;
1091 	}
1092 
1093 	if (ha->fw_dumped) {
1094 		ql_log(ql_log_warn, vha, 0xd007,
1095 		    "Firmware has been previously dumped (%p) "
1096 		    "-- ignoring request.\n",
1097 		    ha->fw_dump);
1098 		return;
1099 	}
1100 	QLA_FW_STOPPED(ha);
1101 	fw = &ha->fw_dump->isp.isp24;
1102 	qla2xxx_prep_dump(ha, ha->fw_dump);
1103 
1104 	fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1105 
1106 	/*
1107 	 * Pause RISC. There is no need to track a timeout, since resetting
1108 	 * the chip is the right approach in case the pause times out.
1109 	 */
1110 	qla24xx_pause_risc(reg, ha);
1111 
1112 	/* Host interface registers. */
1113 	dmp_reg = &reg->flash_addr;
1114 	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
1115 		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
1116 
1117 	/* Disable interrupts. */
1118 	wrt_reg_dword(&reg->ictrl, 0);
1119 	rd_reg_dword(&reg->ictrl);
1120 
1121 	/* Shadow registers. */
1122 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1123 	rd_reg_dword(&reg->iobase_addr);
1124 	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
1125 	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
1126 
1127 	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
1128 	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
1129 
1130 	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
1131 	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
1132 
1133 	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
1134 	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
1135 
1136 	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
1137 	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
1138 
1139 	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
1140 	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
1141 
1142 	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
1143 	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
1144 
1145 	/* Mailbox registers. */
1146 	mbx_reg = &reg->mailbox0;
1147 	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1148 		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1149 
1150 	/* Transfer sequence registers. */
1151 	iter_reg = fw->xseq_gp_reg;
1152 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1153 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1154 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1155 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1156 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1157 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1158 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1159 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1160 
1161 	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1162 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1163 
1164 	/* Receive sequence registers. */
1165 	iter_reg = fw->rseq_gp_reg;
1166 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1167 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1168 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1169 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1170 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1171 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1172 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1173 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1174 
1175 	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1176 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1177 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1178 
1179 	/* Command DMA registers. */
1180 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1181 
1182 	/* Queues. */
1183 	iter_reg = fw->req0_dma_reg;
1184 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1185 	dmp_reg = &reg->iobase_q;
1186 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1187 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1188 
1189 	iter_reg = fw->resp0_dma_reg;
1190 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1191 	dmp_reg = &reg->iobase_q;
1192 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1193 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1194 
1195 	iter_reg = fw->req1_dma_reg;
1196 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1197 	dmp_reg = &reg->iobase_q;
1198 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1199 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1200 
1201 	/* Transmit DMA registers. */
1202 	iter_reg = fw->xmt0_dma_reg;
1203 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1204 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1205 
1206 	iter_reg = fw->xmt1_dma_reg;
1207 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1208 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1209 
1210 	iter_reg = fw->xmt2_dma_reg;
1211 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1212 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1213 
1214 	iter_reg = fw->xmt3_dma_reg;
1215 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1216 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1217 
1218 	iter_reg = fw->xmt4_dma_reg;
1219 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1220 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1221 
1222 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1223 
1224 	/* Receive DMA registers. */
1225 	iter_reg = fw->rcvt0_data_dma_reg;
1226 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1227 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1228 
1229 	iter_reg = fw->rcvt1_data_dma_reg;
1230 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1231 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1232 
1233 	/* RISC registers. */
1234 	iter_reg = fw->risc_gp_reg;
1235 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1236 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1237 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1238 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1239 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1240 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1241 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1242 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1243 
1244 	/* Local memory controller registers. */
1245 	iter_reg = fw->lmc_reg;
1246 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1247 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1248 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1249 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1250 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1251 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1252 	qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1253 
1254 	/* Fibre Protocol Module registers. */
1255 	iter_reg = fw->fpm_hdw_reg;
1256 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1257 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1258 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1259 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1260 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1261 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1262 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1263 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1264 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1265 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1266 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1267 	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1268 
1269 	/* Frame Buffer registers. */
1270 	iter_reg = fw->fb_hdw_reg;
1271 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1272 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1273 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1274 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1275 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1276 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1277 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1278 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1279 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1280 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1281 	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1282 
1283 	rval = qla24xx_soft_reset(ha);
1284 	if (rval != QLA_SUCCESS)
1285 		goto qla24xx_fw_dump_failed_0;
1286 
1287 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1288 	    &nxt);
1289 	if (rval != QLA_SUCCESS)
1290 		goto qla24xx_fw_dump_failed_0;
1291 
1292 	nxt = qla2xxx_copy_queues(ha, nxt);
1293 
1294 	qla24xx_copy_eft(ha, nxt);
1295 
1296 	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1297 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1298 	if (last_chain) {
1299 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1300 		*last_chain |= htonl(DUMP_CHAIN_LAST);
1301 	}
1302 
1303 	/* Adjust valid length. */
1304 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1305 
1306 qla24xx_fw_dump_failed_0:
1307 	qla2xxx_dump_post_process(base_vha, rval);
1308 }
1309 
1310 void
1311 qla25xx_fw_dump(scsi_qla_host_t *vha)
1312 {
1313 	int		rval;
1314 	uint32_t	cnt;
1315 	struct qla_hw_data *ha = vha->hw;
1316 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1317 	__le32 __iomem *dmp_reg;
1318 	__be32		*iter_reg;
1319 	__le16 __iomem *mbx_reg;
1320 	struct qla25xx_fw_dump *fw;
1321 	void		*nxt, *nxt_chain;
1322 	__be32		*last_chain = NULL;
1323 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1324 
1325 	lockdep_assert_held(&ha->hardware_lock);
1326 
1327 	ha->fw_dump_cap_flags = 0;
1328 
1329 	if (!ha->fw_dump) {
1330 		ql_log(ql_log_warn, vha, 0xd008,
1331 		    "No buffer available for dump.\n");
1332 		return;
1333 	}
1334 
1335 	if (ha->fw_dumped) {
1336 		ql_log(ql_log_warn, vha, 0xd009,
1337 		    "Firmware has been previously dumped (%p) "
1338 		    "-- ignoring request.\n",
1339 		    ha->fw_dump);
1340 		return;
1341 	}
1342 	QLA_FW_STOPPED(ha);
1343 	fw = &ha->fw_dump->isp.isp25;
1344 	qla2xxx_prep_dump(ha, ha->fw_dump);
1345 	ha->fw_dump->version = htonl(2);
1346 
1347 	fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1348 
1349 	/*
1350 	 * Pause RISC. There is no need to track a timeout, since resetting
1351 	 * the chip is the right approach in case the pause times out.
1352 	 */
1353 	qla24xx_pause_risc(reg, ha);
1354 
1355 	/* Host/Risc registers. */
1356 	iter_reg = fw->host_risc_reg;
1357 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1358 	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1359 
1360 	/* PCIe registers. */
1361 	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
1362 	rd_reg_dword(&reg->iobase_addr);
1363 	wrt_reg_dword(&reg->iobase_window, 0x01);
1364 	dmp_reg = &reg->iobase_c4;
1365 	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
1366 	dmp_reg++;
1367 	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
1368 	dmp_reg++;
1369 	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
1370 	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
1371 
1372 	wrt_reg_dword(&reg->iobase_window, 0x00);
1373 	rd_reg_dword(&reg->iobase_window);
1374 
1375 	/* Host interface registers. */
1376 	dmp_reg = &reg->flash_addr;
1377 	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
1378 		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
1379 
1380 	/* Disable interrupts. */
1381 	wrt_reg_dword(&reg->ictrl, 0);
1382 	rd_reg_dword(&reg->ictrl);
1383 
1384 	/* Shadow registers. */
1385 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1386 	rd_reg_dword(&reg->iobase_addr);
1387 	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
1388 	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
1389 
1390 	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
1391 	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
1392 
1393 	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
1394 	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
1395 
1396 	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
1397 	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
1398 
1399 	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
1400 	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
1401 
1402 	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
1403 	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
1404 
1405 	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
1406 	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
1407 
1408 	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
1409 	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
1410 
1411 	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
1412 	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
1413 
1414 	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
1415 	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
1416 
1417 	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
1418 	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
1419 
1420 	/* RISC I/O register. */
1421 	wrt_reg_dword(&reg->iobase_addr, 0x0010);
1422 	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
1423 
1424 	/* Mailbox registers. */
1425 	mbx_reg = &reg->mailbox0;
1426 	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1427 		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1428 
1429 	/* Transfer sequence registers. */
1430 	iter_reg = fw->xseq_gp_reg;
1431 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1432 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1433 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1434 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1435 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1436 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1437 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1438 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1439 
1440 	iter_reg = fw->xseq_0_reg;
1441 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1442 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1443 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1444 
1445 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1446 
1447 	/* Receive sequence registers. */
1448 	iter_reg = fw->rseq_gp_reg;
1449 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1450 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1451 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1452 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1453 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1454 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1455 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1456 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1457 
1458 	iter_reg = fw->rseq_0_reg;
1459 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1460 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1461 
1462 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1463 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1464 
1465 	/* Auxiliary sequence registers. */
1466 	iter_reg = fw->aseq_gp_reg;
1467 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1468 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1469 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1470 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1471 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1472 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1473 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1474 	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1475 
1476 	iter_reg = fw->aseq_0_reg;
1477 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1478 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1479 
1480 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1481 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1482 
1483 	/* Command DMA registers. */
1484 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1485 
1486 	/* Queues. */
1487 	iter_reg = fw->req0_dma_reg;
1488 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1489 	dmp_reg = &reg->iobase_q;
1490 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1491 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1492 
1493 	iter_reg = fw->resp0_dma_reg;
1494 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1495 	dmp_reg = &reg->iobase_q;
1496 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1497 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1498 
1499 	iter_reg = fw->req1_dma_reg;
1500 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1501 	dmp_reg = &reg->iobase_q;
1502 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1503 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1504 
1505 	/* Transmit DMA registers. */
1506 	iter_reg = fw->xmt0_dma_reg;
1507 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1508 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1509 
1510 	iter_reg = fw->xmt1_dma_reg;
1511 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1512 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1513 
1514 	iter_reg = fw->xmt2_dma_reg;
1515 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1516 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1517 
1518 	iter_reg = fw->xmt3_dma_reg;
1519 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1520 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1521 
1522 	iter_reg = fw->xmt4_dma_reg;
1523 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1524 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1525 
1526 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1527 
1528 	/* Receive DMA registers. */
1529 	iter_reg = fw->rcvt0_data_dma_reg;
1530 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1531 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1532 
1533 	iter_reg = fw->rcvt1_data_dma_reg;
1534 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1535 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1536 
1537 	/* RISC registers. */
1538 	iter_reg = fw->risc_gp_reg;
1539 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1540 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1541 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1542 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1543 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1544 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1545 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1546 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1547 
1548 	/* Local memory controller registers. */
1549 	iter_reg = fw->lmc_reg;
1550 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1551 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1552 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1553 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1554 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1555 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1556 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1557 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1558 
1559 	/* Fibre Protocol Module registers. */
1560 	iter_reg = fw->fpm_hdw_reg;
1561 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1562 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1563 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1564 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1565 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1566 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1567 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1568 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1569 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1570 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1571 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1572 	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1573 
1574 	/* Frame Buffer registers. */
1575 	iter_reg = fw->fb_hdw_reg;
1576 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1577 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1578 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1579 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1580 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1581 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1582 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1583 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1584 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1585 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1586 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1587 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1588 
1589 	/* Multi queue registers */
1590 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1591 	    &last_chain);
1592 
1593 	rval = qla24xx_soft_reset(ha);
1594 	if (rval != QLA_SUCCESS)
1595 		goto qla25xx_fw_dump_failed_0;
1596 
1597 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1598 	    &nxt);
1599 	if (rval != QLA_SUCCESS)
1600 		goto qla25xx_fw_dump_failed_0;
1601 
1602 	nxt = qla2xxx_copy_queues(ha, nxt);
1603 
1604 	qla24xx_copy_eft(ha, nxt);
1605 
1606 	/* Chain entries -- started with MQ. */
1607 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1608 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1609 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1610 	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1611 	if (last_chain) {
1612 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1613 		*last_chain |= htonl(DUMP_CHAIN_LAST);
1614 	}
1615 
1616 	/* Adjust valid length. */
1617 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1618 
1619 qla25xx_fw_dump_failed_0:
1620 	qla2xxx_dump_post_process(base_vha, rval);
1621 }
1622 
1623 void
1624 qla81xx_fw_dump(scsi_qla_host_t *vha)
1625 {
1626 	int		rval;
1627 	uint32_t	cnt;
1628 	struct qla_hw_data *ha = vha->hw;
1629 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1630 	__le32 __iomem *dmp_reg;
1631 	__be32		*iter_reg;
1632 	__le16 __iomem *mbx_reg;
1633 	struct qla81xx_fw_dump *fw;
1634 	void		*nxt, *nxt_chain;
1635 	__be32		*last_chain = NULL;
1636 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1637 
1638 	lockdep_assert_held(&ha->hardware_lock);
1639 
1640 	ha->fw_dump_cap_flags = 0;
1641 
1642 	if (!ha->fw_dump) {
1643 		ql_log(ql_log_warn, vha, 0xd00a,
1644 		    "No buffer available for dump.\n");
1645 		return;
1646 	}
1647 
1648 	if (ha->fw_dumped) {
1649 		ql_log(ql_log_warn, vha, 0xd00b,
1650 		    "Firmware has been previously dumped (%p) "
1651 		    "-- ignoring request.\n",
1652 		    ha->fw_dump);
1653 		return;
1654 	}
1655 	fw = &ha->fw_dump->isp.isp81;
1656 	qla2xxx_prep_dump(ha, ha->fw_dump);
1657 
1658 	fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1659 
1660 	/*
1661 	 * Pause RISC. No need to track timeout, as resetting the chip
1662 	 * is the right approach in case of a pause timeout
1663 	 */
1664 	qla24xx_pause_risc(reg, ha);
1665 
1666 	/* Host/Risc registers. */
1667 	iter_reg = fw->host_risc_reg;
1668 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1669 	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1670 
1671 	/* PCIe registers. */
1672 	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
1673 	rd_reg_dword(&reg->iobase_addr);
1674 	wrt_reg_dword(&reg->iobase_window, 0x01);
1675 	dmp_reg = &reg->iobase_c4;
1676 	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
1677 	dmp_reg++;
1678 	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
1679 	dmp_reg++;
1680 	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
1681 	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
1682 
1683 	wrt_reg_dword(&reg->iobase_window, 0x00);
1684 	rd_reg_dword(&reg->iobase_window);
1685 
1686 	/* Host interface registers. */
1687 	dmp_reg = &reg->flash_addr;
1688 	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
1689 		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
1690 
1691 	/* Disable interrupts. */
1692 	wrt_reg_dword(&reg->ictrl, 0);
1693 	rd_reg_dword(&reg->ictrl);
1694 
1695 	/* Shadow registers. */
1696 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1697 	rd_reg_dword(&reg->iobase_addr);
1698 	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
1699 	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
1700 
1701 	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
1702 	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
1703 
1704 	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
1705 	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
1706 
1707 	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
1708 	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
1709 
1710 	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
1711 	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
1712 
1713 	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
1714 	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
1715 
1716 	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
1717 	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
1718 
1719 	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
1720 	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
1721 
1722 	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
1723 	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
1724 
1725 	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
1726 	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
1727 
1728 	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
1729 	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
1730 
1731 	/* RISC I/O register. */
1732 	wrt_reg_dword(&reg->iobase_addr, 0x0010);
1733 	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
1734 
1735 	/* Mailbox registers. */
1736 	mbx_reg = &reg->mailbox0;
1737 	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1738 		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1739 
1740 	/* Transfer sequence registers. */
1741 	iter_reg = fw->xseq_gp_reg;
1742 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1743 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1744 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1745 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1746 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1747 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1748 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1749 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1750 
1751 	iter_reg = fw->xseq_0_reg;
1752 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1753 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1754 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1755 
1756 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1757 
1758 	/* Receive sequence registers. */
1759 	iter_reg = fw->rseq_gp_reg;
1760 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1761 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1762 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1763 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1764 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1765 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1766 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1767 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1768 
1769 	iter_reg = fw->rseq_0_reg;
1770 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1771 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1772 
1773 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1774 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1775 
1776 	/* Auxiliary sequence registers. */
1777 	iter_reg = fw->aseq_gp_reg;
1778 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1779 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1780 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1781 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1782 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1783 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1784 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1785 	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1786 
1787 	iter_reg = fw->aseq_0_reg;
1788 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1789 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1790 
1791 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1792 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1793 
1794 	/* Command DMA registers. */
1795 	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1796 
1797 	/* Queues. */
1798 	iter_reg = fw->req0_dma_reg;
1799 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1800 	dmp_reg = &reg->iobase_q;
1801 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1802 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1803 
1804 	iter_reg = fw->resp0_dma_reg;
1805 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1806 	dmp_reg = &reg->iobase_q;
1807 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1808 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1809 
1810 	iter_reg = fw->req1_dma_reg;
1811 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1812 	dmp_reg = &reg->iobase_q;
1813 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1814 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1815 
1816 	/* Transmit DMA registers. */
1817 	iter_reg = fw->xmt0_dma_reg;
1818 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1819 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1820 
1821 	iter_reg = fw->xmt1_dma_reg;
1822 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1823 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1824 
1825 	iter_reg = fw->xmt2_dma_reg;
1826 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1827 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1828 
1829 	iter_reg = fw->xmt3_dma_reg;
1830 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1831 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1832 
1833 	iter_reg = fw->xmt4_dma_reg;
1834 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1835 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1836 
1837 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1838 
1839 	/* Receive DMA registers. */
1840 	iter_reg = fw->rcvt0_data_dma_reg;
1841 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1842 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1843 
1844 	iter_reg = fw->rcvt1_data_dma_reg;
1845 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1846 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1847 
1848 	/* RISC registers. */
1849 	iter_reg = fw->risc_gp_reg;
1850 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1851 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1852 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1853 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1854 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1855 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1856 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1857 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1858 
1859 	/* Local memory controller registers. */
1860 	iter_reg = fw->lmc_reg;
1861 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1862 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1863 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1864 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1865 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1866 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1867 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1868 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1869 
1870 	/* Fibre Protocol Module registers. */
1871 	iter_reg = fw->fpm_hdw_reg;
1872 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1873 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1874 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1875 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1876 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1877 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1878 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1879 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1880 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1881 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1882 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1883 	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1884 	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1885 	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1886 
1887 	/* Frame Buffer registers. */
1888 	iter_reg = fw->fb_hdw_reg;
1889 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1890 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1891 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1892 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1893 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1894 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1895 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1896 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1897 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1898 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1899 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1900 	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1901 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1902 
1903 	/* Multi queue registers */
1904 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1905 	    &last_chain);
1906 
1907 	rval = qla24xx_soft_reset(ha);
1908 	if (rval != QLA_SUCCESS)
1909 		goto qla81xx_fw_dump_failed_0;
1910 
1911 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1912 	    &nxt);
1913 	if (rval != QLA_SUCCESS)
1914 		goto qla81xx_fw_dump_failed_0;
1915 
1916 	nxt = qla2xxx_copy_queues(ha, nxt);
1917 
1918 	qla24xx_copy_eft(ha, nxt);
1919 
1920 	/* Chain entries -- started with MQ. */
1921 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1922 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1923 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1924 	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1925 	nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
1926 	if (last_chain) {
1927 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1928 		*last_chain |= htonl(DUMP_CHAIN_LAST);
1929 	}
1930 
1931 	/* Adjust valid length. */
1932 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1933 
1934 qla81xx_fw_dump_failed_0:
1935 	qla2xxx_dump_post_process(base_vha, rval);
1936 }
1937 
1938 void
1939 qla83xx_fw_dump(scsi_qla_host_t *vha)
1940 {
1941 	int		rval;
1942 	uint32_t	cnt;
1943 	struct qla_hw_data *ha = vha->hw;
1944 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1945 	__le32 __iomem *dmp_reg;
1946 	__be32		*iter_reg;
1947 	__le16 __iomem *mbx_reg;
1948 	struct qla83xx_fw_dump *fw;
1949 	void		*nxt, *nxt_chain;
1950 	__be32		*last_chain = NULL;
1951 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1952 
1953 	lockdep_assert_held(&ha->hardware_lock);
1954 
1955 	ha->fw_dump_cap_flags = 0;
1956 
1957 	if (!ha->fw_dump) {
1958 		ql_log(ql_log_warn, vha, 0xd00c,
1959 		    "No buffer available for dump!!!\n");
1960 		return;
1961 	}
1962 
1963 	if (ha->fw_dumped) {
1964 		ql_log(ql_log_warn, vha, 0xd00d,
1965 		    "Firmware has been previously dumped (%p) -- ignoring "
1966 		    "request...\n", ha->fw_dump);
1967 		return;
1968 	}
1969 	QLA_FW_STOPPED(ha);
1970 	fw = &ha->fw_dump->isp.isp83;
1971 	qla2xxx_prep_dump(ha, ha->fw_dump);
1972 
1973 	fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1974 
1975 	/*
1976 	 * Pause RISC. No need to track timeout, as resetting the chip
1977 	 * is the right approach in case of a pause timeout
1978 	 */
1979 	qla24xx_pause_risc(reg, ha);
1980 
1981 	wrt_reg_dword(&reg->iobase_addr, 0x6000);
1982 	dmp_reg = &reg->iobase_window;
1983 	rd_reg_dword(dmp_reg);
1984 	wrt_reg_dword(dmp_reg, 0);
1985 
1986 	dmp_reg = &reg->unused_4_1[0];
1987 	rd_reg_dword(dmp_reg);
1988 	wrt_reg_dword(dmp_reg, 0);
1989 
1990 	wrt_reg_dword(&reg->iobase_addr, 0x6010);
1991 	dmp_reg = &reg->unused_4_1[2];
1992 	rd_reg_dword(dmp_reg);
1993 	wrt_reg_dword(dmp_reg, 0);
1994 
1995 	/* select PCR and disable ecc checking and correction */
1996 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1997 	rd_reg_dword(&reg->iobase_addr);
1998 	wrt_reg_dword(&reg->iobase_select, 0x60000000);	/* write to F0h = PCR */
1999 
2000 	/* Host/Risc registers. */
2001 	iter_reg = fw->host_risc_reg;
2002 	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2003 	iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2004 	qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2005 
2006 	/* PCIe registers. */
2007 	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
2008 	rd_reg_dword(&reg->iobase_addr);
2009 	wrt_reg_dword(&reg->iobase_window, 0x01);
2010 	dmp_reg = &reg->iobase_c4;
2011 	fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
2012 	dmp_reg++;
2013 	fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
2014 	dmp_reg++;
2015 	fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
2016 	fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
2017 
2018 	wrt_reg_dword(&reg->iobase_window, 0x00);
2019 	rd_reg_dword(&reg->iobase_window);
2020 
2021 	/* Host interface registers. */
2022 	dmp_reg = &reg->flash_addr;
2023 	for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
2024 		fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
2025 
2026 	/* Disable interrupts. */
2027 	wrt_reg_dword(&reg->ictrl, 0);
2028 	rd_reg_dword(&reg->ictrl);
2029 
2030 	/* Shadow registers. */
2031 	wrt_reg_dword(&reg->iobase_addr, 0x0F70);
2032 	rd_reg_dword(&reg->iobase_addr);
2033 	wrt_reg_dword(&reg->iobase_select, 0xB0000000);
2034 	fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
2035 
2036 	wrt_reg_dword(&reg->iobase_select, 0xB0100000);
2037 	fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
2038 
2039 	wrt_reg_dword(&reg->iobase_select, 0xB0200000);
2040 	fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
2041 
2042 	wrt_reg_dword(&reg->iobase_select, 0xB0300000);
2043 	fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
2044 
2045 	wrt_reg_dword(&reg->iobase_select, 0xB0400000);
2046 	fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
2047 
2048 	wrt_reg_dword(&reg->iobase_select, 0xB0500000);
2049 	fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
2050 
2051 	wrt_reg_dword(&reg->iobase_select, 0xB0600000);
2052 	fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
2053 
2054 	wrt_reg_dword(&reg->iobase_select, 0xB0700000);
2055 	fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
2056 
2057 	wrt_reg_dword(&reg->iobase_select, 0xB0800000);
2058 	fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
2059 
2060 	wrt_reg_dword(&reg->iobase_select, 0xB0900000);
2061 	fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
2062 
2063 	wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
2064 	fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
2065 
2066 	/* RISC I/O register. */
2067 	wrt_reg_dword(&reg->iobase_addr, 0x0010);
2068 	fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
2069 
2070 	/* Mailbox registers. */
2071 	mbx_reg = &reg->mailbox0;
2072 	for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
2073 		fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
2074 
2075 	/* Transfer sequence registers. */
2076 	iter_reg = fw->xseq_gp_reg;
2077 	iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2078 	iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2079 	iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2080 	iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2081 	iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2082 	iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2083 	iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2084 	iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2085 	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2086 	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2087 	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2088 	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2089 	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2090 	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2091 	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2092 	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2093 
2094 	iter_reg = fw->xseq_0_reg;
2095 	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2096 	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2097 	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2098 
2099 	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2100 
2101 	qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2102 
2103 	/* Receive sequence registers. */
2104 	iter_reg = fw->rseq_gp_reg;
2105 	iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2106 	iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2107 	iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2108 	iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2109 	iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2110 	iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2111 	iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2112 	iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2113 	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2114 	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2115 	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2116 	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2117 	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2118 	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2119 	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2120 	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2121 
2122 	iter_reg = fw->rseq_0_reg;
2123 	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2124 	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2125 
2126 	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2127 	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2128 	qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2129 
2130 	/* Auxiliary sequence registers. */
2131 	iter_reg = fw->aseq_gp_reg;
2132 	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2133 	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2134 	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2135 	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2136 	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2137 	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2138 	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2139 	iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2140 	iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2141 	iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2142 	iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2143 	iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2144 	iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2145 	iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2146 	iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2147 	qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2148 
2149 	iter_reg = fw->aseq_0_reg;
2150 	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2151 	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2152 
2153 	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2154 	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2155 	qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2156 
2157 	/* Command DMA registers. */
2158 	iter_reg = fw->cmd_dma_reg;
2159 	iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2160 	iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2161 	iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2162 	qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2163 
2164 	/* Queues. */
2165 	iter_reg = fw->req0_dma_reg;
2166 	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2167 	dmp_reg = &reg->iobase_q;
2168 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2169 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2170 
2171 	iter_reg = fw->resp0_dma_reg;
2172 	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2173 	dmp_reg = &reg->iobase_q;
2174 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2175 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2176 
2177 	iter_reg = fw->req1_dma_reg;
2178 	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2179 	dmp_reg = &reg->iobase_q;
2180 	for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2181 		*iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2182 
2183 	/* Transmit DMA registers. */
2184 	iter_reg = fw->xmt0_dma_reg;
2185 	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2186 	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2187 
2188 	iter_reg = fw->xmt1_dma_reg;
2189 	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2190 	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2191 
2192 	iter_reg = fw->xmt2_dma_reg;
2193 	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2194 	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2195 
2196 	iter_reg = fw->xmt3_dma_reg;
2197 	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2198 	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2199 
2200 	iter_reg = fw->xmt4_dma_reg;
2201 	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2202 	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2203 
2204 	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2205 
2206 	/* Receive DMA registers. */
2207 	iter_reg = fw->rcvt0_data_dma_reg;
2208 	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2209 	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2210 
2211 	iter_reg = fw->rcvt1_data_dma_reg;
2212 	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2213 	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2214 
2215 	/* RISC registers. */
2216 	iter_reg = fw->risc_gp_reg;
2217 	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2218 	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2219 	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2220 	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2221 	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2222 	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2223 	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2224 	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2225 
2226 	/* Local memory controller registers. */
2227 	iter_reg = fw->lmc_reg;
2228 	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2229 	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2230 	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2231 	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2232 	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2233 	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2234 	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2235 	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2236 
2237 	/* Fibre Protocol Module registers. */
2238 	iter_reg = fw->fpm_hdw_reg;
2239 	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2240 	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2241 	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2242 	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2243 	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2244 	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2245 	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2246 	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2247 	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2248 	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2249 	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2250 	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2251 	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2252 	iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2253 	iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2254 	qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2255 
2256 	/* RQ0 Array registers. */
2257 	iter_reg = fw->rq0_array_reg;
2258 	iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2259 	iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2260 	iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2261 	iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2262 	iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2263 	iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2264 	iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2265 	iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2266 	iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2267 	iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2268 	iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2269 	iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2270 	iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2271 	iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2272 	iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2273 	qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2274 
2275 	/* RQ1 Array registers. */
2276 	iter_reg = fw->rq1_array_reg;
2277 	iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2278 	iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2279 	iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2280 	iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2281 	iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2282 	iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2283 	iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2284 	iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2285 	iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2286 	iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2287 	iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2288 	iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2289 	iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2290 	iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2291 	iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2292 	qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2293 
2294 	/* RP0 Array registers. */
2295 	iter_reg = fw->rp0_array_reg;
2296 	iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2297 	iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2298 	iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2299 	iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2300 	iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2301 	iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2302 	iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2303 	iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2304 	iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2305 	iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2306 	iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2307 	iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2308 	iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2309 	iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2310 	iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2311 	qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2312 
2313 	/* RP1 Array registers. */
2314 	iter_reg = fw->rp1_array_reg;
2315 	iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2316 	iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2317 	iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2318 	iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2319 	iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2320 	iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2321 	iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2322 	iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2323 	iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2324 	iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2325 	iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2326 	iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2327 	iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2328 	iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2329 	iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2330 	qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2331 
2332 	iter_reg = fw->at0_array_reg;
2333 	iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2334 	iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2335 	iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2336 	iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2337 	iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2338 	iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2339 	iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2340 	qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2341 
2342 	/* I/O Queue Control registers. */
2343 	qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2344 
2345 	/* Frame Buffer registers. */
2346 	iter_reg = fw->fb_hdw_reg;
2347 	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2348 	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2349 	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2350 	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2351 	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2352 	iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2353 	iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2354 	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2355 	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2356 	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2357 	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2358 	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2359 	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2360 	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2361 	iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2362 	iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2363 	iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2364 	iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2365 	iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2366 	iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2367 	iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2368 	iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2369 	iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2370 	iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2371 	iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2372 	iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2373 	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2374 
2375 	/* Multi queue registers */
2376 	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2377 	    &last_chain);
2378 
2379 	rval = qla24xx_soft_reset(ha);
2380 	if (rval != QLA_SUCCESS) {
2381 		ql_log(ql_log_warn, vha, 0xd00e,
2382 		    "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2383 		rval = QLA_SUCCESS;
2384 
2385 		ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2386 
2387 		wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
2388 		rd_reg_dword(&reg->hccr);
2389 
2390 		wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2391 		rd_reg_dword(&reg->hccr);
2392 
2393 		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
2394 		rd_reg_dword(&reg->hccr);
2395 
2396 		for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--)
2397 			udelay(5);
2398 
2399 		if (!cnt) {
2400 			nxt = fw->code_ram;
2401 			nxt += sizeof(fw->code_ram);
2402 			nxt += (ha->fw_memory_size - 0x100000 + 1);
2403 			goto copy_queue;
2404 		} else {
2405 			set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2406 			ql_log(ql_log_warn, vha, 0xd010,
2407 			    "bigger hammer success?\n");
2408 		}
2409 	}
2410 
2411 	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2412 	    &nxt);
2413 	if (rval != QLA_SUCCESS)
2414 		goto qla83xx_fw_dump_failed_0;
2415 
2416 copy_queue:
2417 	nxt = qla2xxx_copy_queues(ha, nxt);
2418 
2419 	qla24xx_copy_eft(ha, nxt);
2420 
2421 	/* Chain entries -- started with MQ. */
2422 	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2423 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2424 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2425 	nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
2426 	nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
2427 	if (last_chain) {
2428 		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2429 		*last_chain |= htonl(DUMP_CHAIN_LAST);
2430 	}
2431 
2432 	/* Adjust valid length. */
2433 	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2434 
2435 qla83xx_fw_dump_failed_0:
2436 	qla2xxx_dump_post_process(base_vha, rval);
2437 }
2438 
2439 /****************************************************************************/
2440 /*                         Driver Debug Functions.                          */
2441 /****************************************************************************/
2442 
2443 /* Write the debug message prefix into @pbuf. */
2444 static void ql_dbg_prefix(char *pbuf, int pbuf_size,
2445 			  const scsi_qla_host_t *vha, uint msg_id)
2446 {
2447 	if (vha) {
2448 		const struct pci_dev *pdev = vha->hw->pdev;
2449 
2450 		/* <module-name> [<dev-name>]-<msg-id>:<host>: */
2451 		snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
2452 			 dev_name(&(pdev->dev)), msg_id, vha->host_no);
2453 	} else {
2454 		/* <module-name> [<dev-name>]-<msg-id>: : */
2455 		snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
2456 			 "0000:00:00.0", msg_id);
2457 	}
2458 }
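
/*
 * Illustrative output only (not part of the driver): with a vha, the prefix
 * built above takes the form "<QL_MSGHDR> [0000:03:00.0]-00fa:12: ", i.e.
 * module header, PCI device name, message id and host number; without a vha
 * the device name falls back to "0000:00:00.0" and the host field stays
 * empty. The device name, id and host number shown here are hypothetical
 * sample values.
 */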
2459 
2460 /*
2461  * This function is for formatting and logging debug information.
2462  * It is to be used when vha is available. It formats the message
2463  * and logs it to the messages file.
2464  * parameters:
2465  * level: The level of the debug messages to be printed.
2466  *        If the ql2xextended_error_logging value is set appropriately,
2467  *        this message will appear in the messages file.
2468  * vha:   Pointer to the scsi_qla_host_t.
2469  * id:    This is a unique identifier for the level. It identifies the
2470  *        part of the code from where the message originated.
2471  * msg:   The message to be displayed.
2472  */
2473 void
2474 ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2475 {
2476 	va_list va;
2477 	struct va_format vaf;
2478 	char pbuf[64];
2479 
2480 	va_start(va, fmt);
2481 
2482 	vaf.fmt = fmt;
2483 	vaf.va = &va;
2484 
2485 	ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
2486 
2487 	if (!ql_mask_match(level))
2488 		trace_ql_dbg_log(pbuf, &vaf);
2489 	else
2490 		pr_warn("%s%pV", pbuf, &vaf);
2491 
2492 	va_end(va);
2493 
2494 }
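
/*
 * Illustrative use only (nothing below is called from this file): a minimal
 * sketch of emitting a debug message through ql_dbg(). The level bit
 * (ql_dbg_init) is assumed to come from qla_dbg.h and the id 0x0042 is a
 * hypothetical placeholder.
 *
 *	ql_dbg(ql_dbg_init, vha, 0x0042, "Entering %s.\n", __func__);
 *
 * If the corresponding bit is not set in ql2xextended_error_logging, the
 * message is routed to the ql_dbg_log trace event instead of the console,
 * per the !ql_mask_match() branch above.
 */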
2495 
2496 /*
2497  * This function is for formatting and logging debug information.
2498  * It is to be used when vha is not available and pci is available,
2499  * i.e., before host allocation. It formats the message and logs it
2500  * to the messages file.
2501  * parameters:
2502  * level: The level of the debug messages to be printed.
2503  *        If the ql2xextended_error_logging value is set appropriately,
2504  *        this message will appear in the messages file.
2505  * pdev:  Pointer to the struct pci_dev.
2506  * id:    This is a unique id for the level. It identifies the part
2507  *        of the code from where the message originated.
2508  * msg:   The message to be displayed.
2509  */
2510 void
2511 ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2512 {
2513 	va_list va;
2514 	struct va_format vaf;
2515 	char pbuf[128];
2516 
2517 	if (pdev == NULL)
2518 		return;
2519 	if (!ql_mask_match(level))
2520 		return;
2521 
2522 	va_start(va, fmt);
2523 
2524 	vaf.fmt = fmt;
2525 	vaf.va = &va;
2526 
2527 	ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id + ql_dbg_offset);
2528 	pr_warn("%s%pV", pbuf, &vaf);
2529 
2530 	va_end(va);
2531 }
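
/*
 * Illustrative use only: a sketch of calling ql_dbg_pci() before host
 * allocation, when only the pci_dev is available (e.g. early probe). The
 * level bit and id are hypothetical placeholders.
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x0010,
 *	    "Probing device, irq %d.\n", pdev->irq);
 *
 * Unlike ql_dbg(), messages filtered out by ql_mask_match() are dropped
 * here rather than being traced.
 */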
2532 
2533 /*
2534  * This function is for formatting and logging log messages.
2535  * It is to be used when vha is available. It formats the message
2536  * and logs it to the messages file. All the messages will be logged
2537  * irrespective of the value of ql2xextended_error_logging.
2538  * parameters:
2539  * level: The level of the log messages to be printed in the
2540  *        messages file.
2541  * vha:   Pointer to the scsi_qla_host_t
2542  * id:    This is a unique id for the level. It identifies the
2543  *        part of the code from where the message originated.
2544  * msg:   The message to be displayed.
2545  */
2546 void
2547 ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2548 {
2549 	va_list va;
2550 	struct va_format vaf;
2551 	char pbuf[128];
2552 
2553 	if (level > ql_errlev)
2554 		return;
2555 
2556 	ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id);
2557 
2558 	va_start(va, fmt);
2559 
2560 	vaf.fmt = fmt;
2561 	vaf.va = &va;
2562 
2563 	switch (level) {
2564 	case ql_log_fatal: /* FATAL LOG */
2565 		pr_crit("%s%pV", pbuf, &vaf);
2566 		break;
2567 	case ql_log_warn:
2568 		pr_err("%s%pV", pbuf, &vaf);
2569 		break;
2570 	case ql_log_info:
2571 		pr_warn("%s%pV", pbuf, &vaf);
2572 		break;
2573 	default:
2574 		pr_info("%s%pV", pbuf, &vaf);
2575 		break;
2576 	}
2577 
2578 	va_end(va);
2579 }
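
/*
 * Illustrative use only: a sketch of a log message that is emitted
 * regardless of ql2xextended_error_logging, subject only to ql_errlev.
 * The id 0x00f0 is a hypothetical placeholder.
 *
 *	ql_log(ql_log_warn, vha, 0x00f0, "Unable to allocate memory.\n");
 *
 * Per the switch above, ql_log_fatal maps to pr_crit(), ql_log_warn to
 * pr_err(), ql_log_info to pr_warn(), and anything else to pr_info().
 */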
2580 
2581 /*
2582  * This function is for formatting and logging log messages.
2583  * It is to be used when vha is not available and pci is available,
2584  * i.e., before host allocation. It formats the message and logs
2585  * it to the messages file. All the messages are logged irrespective
2586  * of the value of ql2xextended_error_logging.
2587  * parameters:
2588  * level: The level of the log messages to be printed in the
2589  *        messages file.
2590  * pdev:  Pointer to the struct pci_dev.
2591  * id:    This is a unique id for the level. It identifies the
2592  *        part of the code from where the message originated.
2593  * msg:   The message to be displayed.
2594  */
2595 void
2596 ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2597 {
2598 	va_list va;
2599 	struct va_format vaf;
2600 	char pbuf[128];
2601 
2602 	if (pdev == NULL)
2603 		return;
2604 	if (level > ql_errlev)
2605 		return;
2606 
2607 	ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id);
2608 
2609 	va_start(va, fmt);
2610 
2611 	vaf.fmt = fmt;
2612 	vaf.va = &va;
2613 
2614 	switch (level) {
2615 	case ql_log_fatal: /* FATAL LOG */
2616 		pr_crit("%s%pV", pbuf, &vaf);
2617 		break;
2618 	case ql_log_warn:
2619 		pr_err("%s%pV", pbuf, &vaf);
2620 		break;
2621 	case ql_log_info:
2622 		pr_warn("%s%pV", pbuf, &vaf);
2623 		break;
2624 	default:
2625 		pr_info("%s%pV", pbuf, &vaf);
2626 		break;
2627 	}
2628 
2629 	va_end(va);
2630 }
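
/*
 * Illustrative use only: the pre-host-allocation counterpart of ql_log(),
 * taking a pci_dev instead of a vha. The id is a hypothetical placeholder.
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x00a0,
 *	    "Unable to map PCI register space.\n");
 */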
2631 
2632 void
2633 ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
2634 {
2635 	int i;
2636 	struct qla_hw_data *ha = vha->hw;
2637 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2638 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2639 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2640 	__le16 __iomem *mbx_reg;
2641 
2642 	if (!ql_mask_match(level))
2643 		return;
2644 
2645 	if (IS_P3P_TYPE(ha))
2646 		mbx_reg = &reg82->mailbox_in[0];
2647 	else if (IS_FWI2_CAPABLE(ha))
2648 		mbx_reg = &reg24->mailbox0;
2649 	else
2650 		mbx_reg = MAILBOX_REG(ha, reg, 0);
2651 
2652 	ql_dbg(level, vha, id, "Mailbox registers:\n");
2653 	for (i = 0; i < 6; i++, mbx_reg++)
2654 		ql_dbg(level, vha, id,
2655 		    "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg));
2656 }
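
/*
 * Illustrative use only: dumping the first six mailbox registers from an
 * error-handling path. The level bit (ql_dbg_mbx is assumed to exist in
 * qla_dbg.h) and the id are hypothetical placeholders.
 *
 *	ql_dump_regs(ql_dbg_mbx, vha, 0x1100);
 */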
2657 
2658 void
2659 ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
2660 	       uint size)
2661 {
2662 	uint cnt;
2663 
2664 	if (!ql_mask_match(level))
2665 		return;
2666 
2667 	ql_dbg(level, vha, id,
2668 	    "%-+5d  0  1  2  3  4  5  6  7  8  9  A  B  C  D  E  F\n", size);
2669 	ql_dbg(level, vha, id,
2670 	    "----- -----------------------------------------------\n");
2671 	for (cnt = 0; cnt < size; cnt += 16) {
2672 		ql_dbg(level, vha, id, "%04x: ", cnt);
2673 		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2674 			       buf + cnt, min(16U, size - cnt), false);
2675 	}
2676 }
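
/*
 * Illustrative use only: hex-dumping an arbitrary buffer at a debug level.
 * The level bit, the id and the hypothetical response packet "pkt" are
 * placeholders; any byte buffer and length may be passed.
 *
 *	ql_dump_buffer(ql_dbg_io, vha, 0x3000, pkt, sizeof(*pkt));
 *
 * Output is printed 16 bytes per line through print_hex_dump(), prefixed
 * by the offset emitted via ql_dbg() above.
 */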
2677 
2678 /*
2679  * This function is for formatting and logging log messages.
2680  * It is to be used when a qpair is available. It formats the message
2681  * and logs it to the messages file. All the messages will be logged
2682  * irrespective of the value of ql2xextended_error_logging.
2683  * parameters:
2684  * level: The level of the log messages to be printed in the
2685  *        messages file.
2686  * qpair: Pointer to the struct qla_qpair
2687  * id:    This is a unique id for the level. It identifies the
2688  *        part of the code from where the message originated.
2689  * msg:   The message to be displayed.
2690  */
2691 void
2692 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2693     const char *fmt, ...)
2694 {
2695 	va_list va;
2696 	struct va_format vaf;
2697 	char pbuf[128];
2698 
2699 	if (level > ql_errlev)
2700 		return;
2701 
2702 	ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, id);
2703 
2704 	va_start(va, fmt);
2705 
2706 	vaf.fmt = fmt;
2707 	vaf.va = &va;
2708 
2709 	switch (level) {
2710 	case ql_log_fatal: /* FATAL LOG */
2711 		pr_crit("%s%pV", pbuf, &vaf);
2712 		break;
2713 	case ql_log_warn:
2714 		pr_err("%s%pV", pbuf, &vaf);
2715 		break;
2716 	case ql_log_info:
2717 		pr_warn("%s%pV", pbuf, &vaf);
2718 		break;
2719 	default:
2720 		pr_info("%s%pV", pbuf, &vaf);
2721 		break;
2722 	}
2723 
2724 	va_end(va);
2725 }
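
/*
 * Illustrative use only: logging from a queue-pair context; when qpair is
 * NULL the prefix falls back to the no-vha form. The id and the use of the
 * qpair->id field are shown as hypothetical examples.
 *
 *	ql_log_qp(ql_log_info, qpair, 0x0100,
 *	    "Queue pair %d brought online.\n", qpair->id);
 */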
2726 
2727 /*
2728  * This function is for formatting and logging debug information.
2729  * It is to be used when a qpair is available. It formats the message
2730  * and logs it to the messages file.
2731  * parameters:
2732  * level: The level of the debug messages to be printed.
2733  *        If the ql2xextended_error_logging value is set appropriately,
2734  *        this message will appear in the messages file.
2735  * qpair: Pointer to the struct qla_qpair.
2736  * id:    This is a unique identifier for the level. It identifies the
2737  *        part of the code from where the message originated.
2738  * msg:   The message to be displayed.
2739  */
2740 void
2741 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2742     const char *fmt, ...)
2743 {
2744 	va_list va;
2745 	struct va_format vaf;
2746 	char pbuf[128];
2747 
2748 	if (!ql_mask_match(level))
2749 		return;
2750 
2751 	va_start(va, fmt);
2752 
2753 	vaf.fmt = fmt;
2754 	vaf.va = &va;
2755 
2756 	ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL,
2757 		      id + ql_dbg_offset);
2758 	pr_warn("%s%pV", pbuf, &vaf);
2759 
2760 	va_end(va);
2761 
2762 }
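
/*
 * Illustrative use only: the queue-pair variant of ql_dbg(), filtered by
 * ql_mask_match() like ql_dbg_pci(). The level bit and id are hypothetical
 * placeholders.
 *
 *	ql_dbg_qp(ql_dbg_io, qpair, 0x0101,
 *	    "Completion processed on qpair %d.\n", qpair->id);
 */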
2763