1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/slab.h>
5 
6 #include "qlge.h"
7 
8 /* Read a NIC register from the alternate function. */
ql_read_other_func_reg(struct ql_adapter * qdev,u32 reg)9 static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
10 				  u32 reg)
11 {
12 	u32 register_to_read;
13 	u32 reg_val;
14 	unsigned int status = 0;
15 
16 	register_to_read = MPI_NIC_REG_BLOCK
17 				| MPI_NIC_READ
18 				| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
19 				| reg;
20 	status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
21 	if (status != 0)
22 		return 0xffffffff;
23 
24 	return reg_val;
25 }
26 
27 /* Write a NIC register from the alternate function. */
ql_write_other_func_reg(struct ql_adapter * qdev,u32 reg,u32 reg_val)28 static int ql_write_other_func_reg(struct ql_adapter *qdev,
29 				   u32 reg, u32 reg_val)
30 {
31 	u32 register_to_read;
32 
33 	register_to_read = MPI_NIC_REG_BLOCK
34 				| MPI_NIC_READ
35 				| (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
36 				| reg;
37 
38 	return ql_write_mpi_reg(qdev, register_to_read, reg_val);
39 }
40 
/* Poll a register on the alternate function until @bit is set.
 * Returns 0 when the bit comes ready, -1 if @err_bit is seen or the
 * poll times out (10 attempts, 10 ms apart).
 */
static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
				      u32 bit, u32 err_bit)
{
	int tries = 10;

	while (tries--) {
		u32 val = ql_read_other_func_reg(qdev, reg);

		if (val & err_bit)	/* hardware flagged an error */
			return -1;
		if (val & bit)		/* ready */
			return 0;
		mdelay(10);
	}
	return -1;
}
59 
/* Read a SERDES register on the alternate function.
 * Returns 0 on success with the value stored in *data.
 */
static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
					 u32 *data)
{
	int rc;

	/* The address register must be idle before we use it. */
	rc = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
					XG_SERDES_ADDR_RDY, 0);
	if (rc)
		return rc;

	/* Post the read request. */
	ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);

	/* Wait for the read to complete. */
	rc = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
					XG_SERDES_ADDR_RDY, 0);
	if (rc)
		return rc;

	/* Fetch the result. */
	*data = ql_read_other_func_reg(qdev, XG_SERDES_DATA / 4);
	return 0;
}
85 
86 /* Read out the SERDES registers */
ql_read_serdes_reg(struct ql_adapter * qdev,u32 reg,u32 * data)87 static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
88 {
89 	int status;
90 
91 	/* wait for reg to come ready */
92 	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
93 	if (status)
94 		goto exit;
95 
96 	/* set up for reg read */
97 	ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
98 
99 	/* wait for reg to come ready */
100 	status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
101 	if (status)
102 		goto exit;
103 
104 	/* get the data */
105 	*data = ql_read32(qdev, XG_SERDES_DATA);
106 exit:
107 	return status;
108 }
109 
/* Read one SERDES address from both the local ("direct") and alternate
 * ("indirect") function.  A slot that is marked invalid, or whose read
 * fails, is dead-filled with 0xDEADBEEF instead.
 */
static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
			       u32 *direct_ptr, u32 *indirect_ptr,
			       bool direct_valid, bool indirect_valid)
{
	/* Invalid slots are never read; failed reads are overwritten. */
	if (!direct_valid || ql_read_serdes_reg(qdev, addr, direct_ptr))
		*direct_ptr = 0xDEADBEEF;

	if (!indirect_valid ||
	    ql_read_other_func_serdes_reg(qdev, addr, indirect_ptr))
		*indirect_ptr = 0xDEADBEEF;
}
131 
/* Dump every SERDES register block (XAUI and XFI) for both the local
 * ("direct") and alternate ("indirect") function into the coredump.
 * Power state is probed first so that powered-down lanes are
 * dead-filled by ql_get_both_serdes() rather than read.
 * Always returns 0.
 */
static int ql_get_serdes_regs(struct ql_adapter *qdev,
			      struct ql_mpi_coredump *mpi_coredump)
{
	int status;
	/* Per-lane validity flags; XAUI assumed up until proven down,
	 * XFI assumed down until proven up.
	 */
	bool xfi_direct_valid = false, xfi_indirect_valid = false;
	bool xaui_direct_valid = true, xaui_indirect_valid = true;
	unsigned int i;
	u32 *direct_ptr, temp;
	u32 *indirect_ptr;

	/* The XAUI needs to be read out per port */
	status = ql_read_other_func_serdes_reg(qdev,
					       XG_SERDES_XAUI_HSS_PCS_START,
					       &temp);
	if (status)
		/* If the probe read fails, treat the lane as powered down. */
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;

	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
				XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_indirect_valid = false;

	status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);

	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;

	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
				XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_direct_valid = false;

	/*
	 * XFI register is shared so only need to read one
	 * functions and then check the bits.
	 */
	status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
	if (status)
		temp = 0;

	if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
					XG_SERDES_ADDR_XFI1_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the indirect (NIC1) xfi is up. */
			xfi_indirect_valid = true;
		else
			xfi_direct_valid = true;
	}
	if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
					XG_SERDES_ADDR_XFI2_PWR_UP) {
		/* now see if i'm NIC 1 or NIC 2 */
		if (qdev->func & 1)
			/* I'm NIC 2, so the direct (NIC2) xfi is up. */
			xfi_direct_valid = true;
		else
			xfi_indirect_valid = true;
	}

	/* Get XAUI_AN register block. */
	if (qdev->func & 1) {
		/* Function 2 is direct	*/
		direct_ptr = mpi_coredump->serdes2_xaui_an;
		indirect_ptr = mpi_coredump->serdes_xaui_an;
	} else {
		/* Function 1 is direct	*/
		direct_ptr = mpi_coredump->serdes_xaui_an;
		indirect_ptr = mpi_coredump->serdes2_xaui_an;
	}

	for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xaui_direct_valid, xaui_indirect_valid);

	/* Get XAUI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
	}

	for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xaui_direct_valid, xaui_indirect_valid);

	/* Get XAUI_XFI_AN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_an;
		indirect_ptr = mpi_coredump->serdes_xfi_an;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_an;
		indirect_ptr = mpi_coredump->serdes2_xfi_an;
	}

	for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_TRAIN register block. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes_xfi_train;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_train;
	}

	for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_PCS register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
	}

	for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_TX register block.
	 * NOTE(review): the three blocks below step by 1 rather than 4,
	 * unlike the byte-offset ranges above — presumably these are
	 * register indices, not byte offsets; confirm against the
	 * SERDES address map.
	 */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_tx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
	}
	for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_RX register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_rx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
	}

	for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);

	/* Get XAUI_XFI_HSS_PLL register block. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
	}
	for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
				   xfi_direct_valid, xfi_indirect_valid);
	return 0;
}
314 
/* Read an XGMAC register on the alternate function.
 * Returns 0 on success with the value stored in *data.
 */
static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
					u32 *data)
{
	int rc;

	/* Wait for the address register to become available. */
	rc = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
					XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (rc)
		return rc;

	/* Post the read request. */
	ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);

	/* Wait for the read to complete. */
	rc = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
					XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (rc)
		return rc;

	/* Fetch the result. */
	*data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
	return 0;
}
340 
341 /* Read the 400 xgmac control/statistics registers
342  * skipping unused locations.
343  */
ql_get_xgmac_regs(struct ql_adapter * qdev,u32 * buf,unsigned int other_function)344 static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
345 			     unsigned int other_function)
346 {
347 	int status = 0;
348 	int i;
349 
350 	for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
351 		/* We're reading 400 xgmac registers, but we filter out
352 		 * several locations that are non-responsive to reads.
353 		 */
354 		if ((i == 0x00000114) ||
355 		    (i == 0x00000118) ||
356 			(i == 0x0000013c) ||
357 			(i == 0x00000140) ||
358 			(i > 0x00000150 && i < 0x000001fc) ||
359 			(i > 0x00000278 && i < 0x000002a0) ||
360 			(i > 0x000002c0 && i < 0x000002cf) ||
361 			(i > 0x000002dc && i < 0x000002f0) ||
362 			(i > 0x000003c8 && i < 0x00000400) ||
363 			(i > 0x00000400 && i < 0x00000410) ||
364 			(i > 0x00000410 && i < 0x00000420) ||
365 			(i > 0x00000420 && i < 0x00000430) ||
366 			(i > 0x00000430 && i < 0x00000440) ||
367 			(i > 0x00000440 && i < 0x00000450) ||
368 			(i > 0x00000450 && i < 0x00000500) ||
369 			(i > 0x0000054c && i < 0x00000568) ||
370 			(i > 0x000005c8 && i < 0x00000600)) {
371 			if (other_function)
372 				status =
373 				ql_read_other_func_xgmac_reg(qdev, i, buf);
374 			else
375 				status = ql_read_xgmac_reg(qdev, i, buf);
376 
377 			if (status)
378 				*buf = 0xdeadbeef;
379 			break;
380 		}
381 	}
382 	return status;
383 }
384 
/* Dump the ETS registers: select each entry with an indexed write,
 * then read its value back.  Always returns 0.
 */
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
{
	int entry;

	/* Eight NIC ETS entries. */
	for (entry = 0; entry < 8; entry++) {
		ql_write32(qdev, NIC_ETS, entry << 29 | 0x08000000);
		*buf++ = ql_read32(qdev, NIC_ETS);
	}

	/* Two CNA ETS entries. */
	for (entry = 0; entry < 2; entry++) {
		ql_write32(qdev, CNA_ETS, entry << 29 | 0x08000000);
		*buf++ = ql_read32(qdev, CNA_ETS);
	}

	return 0;
}
401 
/* Capture the interrupt enable state for every rx ring: write each
 * ring's read-mask to INTR_EN and record what reads back.
 */
static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
{
	int ring;

	for (ring = 0; ring < qdev->rx_ring_count; ring++) {
		ql_write32(qdev, INTR_EN,
			   qdev->intr_context[ring].intr_read_mask);
		*buf++ = ql_read32(qdev, INTR_EN);
	}
}
412 
/* Dump the CAM unicast entries (16 entries, 3 words each) followed by
 * the multicast entries (32 entries, 2 words each).  Holds
 * SEM_MAC_ADDR_MASK for the duration.  Returns 0 on success or the
 * first error encountered.
 */
static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
{
	u32 value[3];
	int status;
	int i;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;

	/* Unicast CAM entries: three words per entry. */
	for (i = 0; i < 16; i++) {
		status = ql_get_mac_addr_reg(qdev,
					     MAC_ADDR_TYPE_CAM_MAC, i, value);
		if (status) {
			netif_err(qdev, drv, qdev->ndev,
				  "Failed read of mac index register\n");
			goto err;
		}
		buf[0] = value[0];	/* lower MAC address */
		buf[1] = value[1];	/* upper MAC address */
		buf[2] = value[2];	/* output */
		buf += 3;
	}

	/* Multicast entries: two words per entry. */
	for (i = 0; i < 32; i++) {
		status = ql_get_mac_addr_reg(qdev,
					     MAC_ADDR_TYPE_MULTI_MAC, i, value);
		if (status) {
			netif_err(qdev, drv, qdev->ndev,
				  "Failed read of mac index register\n");
			goto err;
		}
		buf[0] = value[0];	/* lower Mcast address */
		buf[1] = value[1];	/* upper Mcast address */
		buf += 2;
	}
err:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
449 
/* Dump the 16 routing registers.  Holds SEM_RT_IDX_MASK while
 * reading.  Returns 0 on success or the first read error.
 */
static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
{
	u32 value, i;
	int status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	for (i = 0; i < 16; i++) {
		status = ql_get_routing_reg(qdev, i, &value);
		if (status) {
			netif_err(qdev, drv, qdev->ndev,
				  "Failed read of routing index register\n");
			goto err;
		}
		buf[i] = value;
	}
err:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
473 
474 /* Read the MPI Processor shadow registers */
ql_get_mpi_shadow_regs(struct ql_adapter * qdev,u32 * buf)475 static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
476 {
477 	u32 i;
478 	int status;
479 
480 	for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
481 		status = ql_write_mpi_reg(qdev,
482 					  RISC_124,
483 				(SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
484 		if (status)
485 			goto end;
486 		status = ql_read_mpi_reg(qdev, RISC_127, buf);
487 		if (status)
488 			goto end;
489 	}
490 end:
491 	return status;
492 }
493 
494 /* Read the MPI Processor core registers */
ql_get_mpi_regs(struct ql_adapter * qdev,u32 * buf,u32 offset,u32 count)495 static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
496 			   u32 offset, u32 count)
497 {
498 	int i, status = 0;
499 
500 	for (i = 0; i < count; i++, buf++) {
501 		status = ql_read_mpi_reg(qdev, offset + i, buf);
502 		if (status)
503 			return status;
504 	}
505 	return status;
506 }
507 
508 /* Read the ASIC probe dump */
ql_get_probe(struct ql_adapter * qdev,u32 clock,u32 valid,u32 * buf)509 static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
510 				  u32 valid, u32 *buf)
511 {
512 	u32 module, mux_sel, probe, lo_val, hi_val;
513 
514 	for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
515 		if (!((valid >> module) & 1))
516 			continue;
517 		for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
518 			probe = clock
519 				| PRB_MX_ADDR_ARE
520 				| mux_sel
521 				| (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
522 			ql_write32(qdev, PRB_MX_ADDR, probe);
523 			lo_val = ql_read32(qdev, PRB_MX_DATA);
524 			if (mux_sel == 0) {
525 				*buf = probe;
526 				buf++;
527 			}
528 			probe |= PRB_MX_ADDR_UP;
529 			ql_write32(qdev, PRB_MX_ADDR, probe);
530 			hi_val = ql_read32(qdev, PRB_MX_DATA);
531 			*buf = lo_val;
532 			buf++;
533 			*buf = hi_val;
534 			buf++;
535 		}
536 	}
537 	return buf;
538 }
539 
ql_get_probe_dump(struct ql_adapter * qdev,unsigned int * buf)540 static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
541 {
542 	/* First we have to enable the probe mux */
543 	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
544 	buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
545 			   PRB_MX_ADDR_VALID_SYS_MOD, buf);
546 	buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
547 			   PRB_MX_ADDR_VALID_PCI_MOD, buf);
548 	buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
549 			   PRB_MX_ADDR_VALID_XGM_MOD, buf);
550 	buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
551 			   PRB_MX_ADDR_VALID_FC_MOD, buf);
552 	return 0;
553 }
554 
/* Read out the routing index registers.
 *
 * For each of the four routing table types, every index is selected
 * with a read-select write to RT_IDX, the completion bit is polled,
 * and four words are emitted per entry: type, index, the final RT_IDX
 * value, and RT_DATA.  Holds SEM_RT_IDX_MASK throughout.
 */
static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
{
	int status;
	u32 type, index, index_max;
	u32 result_index;
	u32 result_data;
	u32 val;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	for (type = 0; type < 4; type++) {
		/* Types 0 and 1 have 8 indices; types 2 and 3 have 16. */
		if (type < 2)
			index_max = 8;
		else
			index_max = 16;
		for (index = 0; index < index_max; index++) {
			val = RT_IDX_RS
				| (type << RT_IDX_TYPE_SHIFT)
				| (index << RT_IDX_IDX_SHIFT);
			ql_write32(qdev, RT_IDX, val);
			result_index = 0;
			/* NOTE(review): unbounded poll — spins forever if
			 * RT_IDX_MR never asserts; confirm the hardware
			 * always completes the read-select.
			 */
			while ((result_index & RT_IDX_MR) == 0)
				result_index = ql_read32(qdev, RT_IDX);
			result_data = ql_read32(qdev, RT_DATA);
			*buf = type;	/* routing table type */
			buf++;
			*buf = index;	/* index within the type */
			buf++;
			*buf = result_index;
			buf++;
			*buf = result_data;
			buf++;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
595 
/* Read out the MAC protocol registers.
 *
 * Walks every MAC address register type and, within each type, every
 * (index, offset) pair.  Each read emits two words into @buf: the
 * final MAC_ADDR_IDX value and the MAC_ADDR_DATA value.  The caller
 * must size @buf for the complete walk.
 */
static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
{
	u32 result_index, result_data;
	u32 type;
	u32 index;
	u32 offset;
	u32 val;
	/* Read-select base; MAC_ADDR_ADR is OR'd in for type 0 below and
	 * (intentionally or not) remains set for all later types.
	 */
	u32 initial_val = MAC_ADDR_RS;
	u32 max_index;
	u32 max_offset;

	for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
		/* Each type has its own entry count and words per entry. */
		switch (type) {
		case 0: /* CAM */
			initial_val |= MAC_ADDR_ADR;
			max_index = MAC_ADDR_MAX_CAM_ENTRIES;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 1: /* Multicast MAC Address */
			/* NOTE(review): max_index reuses the CAM word count
			 * rather than a multicast entry count — confirm
			 * this matches the hardware table size.
			 */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 2: /* VLAN filter mask */
		case 3: /* MC filter mask */
			max_index = MAC_ADDR_MAX_CAM_WCOUNT;
			max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
			break;
		case 4: /* FC MAC addresses */
			max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
			break;
		case 5: /* Mgmt MAC addresses */
			max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
			break;
		case 6: /* Mgmt VLAN addresses */
			max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
			break;
		case 7: /* Mgmt IPv4 address */
			max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
			break;
		case 8: /* Mgmt IPv6 address */
			max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
			break;
		case 9: /* Mgmt TCP/UDP Dest port */
			max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
			max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
			break;
		default:
			/* Unknown type: zero counts so the loops below skip it. */
			netdev_err(qdev->ndev, "Bad type!!! 0x%08x\n", type);
			max_index = 0;
			max_offset = 0;
			break;
		}
		for (index = 0; index < max_index; index++) {
			for (offset = 0; offset < max_offset; offset++) {
				val = initial_val
					| (type << MAC_ADDR_TYPE_SHIFT)
					| (index << MAC_ADDR_IDX_SHIFT)
					| (offset);
				ql_write32(qdev, MAC_ADDR_IDX, val);
				result_index = 0;
				/* NOTE(review): unbounded poll — spins
				 * forever if MAC_ADDR_MR never asserts.
				 */
				while ((result_index & MAC_ADDR_MR) == 0) {
					result_index = ql_read32(qdev,
								 MAC_ADDR_IDX);
				}
				result_data = ql_read32(qdev, MAC_ADDR_DATA);
				*buf = result_index;
				buf++;
				*buf = result_data;
				buf++;
			}
		}
	}
}
675 
/* Dump the SEM register of every function via the MPI indirect
 * interface.  Entries whose read fails are dead-filled with
 * 0xdeadbeef.
 */
static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
{
	u32 func_num, reg, reg_val;
	int status;

	for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
		reg = MPI_NIC_REG_BLOCK
			| (func_num << MPI_NIC_FUNCTION_SHIFT)
			| (SEM / 4);
		status = ql_read_mpi_reg(qdev, reg, &reg_val);
		*buf = reg_val;
		/* if the read failed then dead fill the element.
		 * (Fix: ql_read_mpi_reg returns non-zero on failure —
		 * the old "if (!status)" dead-filled on *success*.)
		 */
		if (status)
			*buf = 0xdeadbeef;
		buf++;
	}
}
693 
694 /* Create a coredump segment header */
ql_build_coredump_seg_header(struct mpi_coredump_segment_header * seg_hdr,u32 seg_number,u32 seg_size,u8 * desc)695 static void ql_build_coredump_seg_header(
696 		struct mpi_coredump_segment_header *seg_hdr,
697 		u32 seg_number, u32 seg_size, u8 *desc)
698 {
699 	memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
700 	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
701 	seg_hdr->seg_num = seg_number;
702 	seg_hdr->seg_size = seg_size;
703 	strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
704 }
705 
706 /*
707  * This function should be called when a coredump / probedump
708  * is to be extracted from the HBA. It is assumed there is a
709  * qdev structure that contains the base address of the register
710  * space for this function as well as a coredump structure that
711  * will contain the dump.
712  */
ql_core_dump(struct ql_adapter * qdev,struct ql_mpi_coredump * mpi_coredump)713 int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
714 {
715 	int status;
716 	int i;
717 
718 	if (!mpi_coredump) {
719 		netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
720 		return -EINVAL;
721 	}
722 
723 	/* Try to get the spinlock, but dont worry if
724 	 * it isn't available.  If the firmware died it
725 	 * might be holding the sem.
726 	 */
727 	ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
728 
729 	status = ql_pause_mpi_risc(qdev);
730 	if (status) {
731 		netif_err(qdev, drv, qdev->ndev,
732 			  "Failed RISC pause. Status = 0x%.08x\n", status);
733 		goto err;
734 	}
735 
736 	/* Insert the global header */
737 	memset(&(mpi_coredump->mpi_global_header), 0,
738 	       sizeof(struct mpi_coredump_global_header));
739 	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
740 	mpi_coredump->mpi_global_header.header_size =
741 		sizeof(struct mpi_coredump_global_header);
742 	mpi_coredump->mpi_global_header.image_size =
743 		sizeof(struct ql_mpi_coredump);
744 	strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
745 		sizeof(mpi_coredump->mpi_global_header.id_string));
746 
747 	/* Get generic NIC reg dump */
748 	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
749 				     NIC1_CONTROL_SEG_NUM,
750 			sizeof(struct mpi_coredump_segment_header) +
751 			sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
752 
753 	ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
754 				     NIC2_CONTROL_SEG_NUM,
755 			sizeof(struct mpi_coredump_segment_header) +
756 			sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
757 
758 	/* Get XGMac registers. (Segment 18, Rev C. step 21) */
759 	ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
760 				     NIC1_XGMAC_SEG_NUM,
761 			sizeof(struct mpi_coredump_segment_header) +
762 			sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
763 
764 	ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
765 				     NIC2_XGMAC_SEG_NUM,
766 			sizeof(struct mpi_coredump_segment_header) +
767 			sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
768 
769 	if (qdev->func & 1) {
770 		/* Odd means our function is NIC 2 */
771 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
772 			mpi_coredump->nic2_regs[i] =
773 					 ql_read32(qdev, i * sizeof(u32));
774 
775 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
776 			mpi_coredump->nic_regs[i] =
777 			ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
778 
779 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
780 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
781 	} else {
782 		/* Even means our function is NIC 1 */
783 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
784 			mpi_coredump->nic_regs[i] =
785 					ql_read32(qdev, i * sizeof(u32));
786 		for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
787 			mpi_coredump->nic2_regs[i] =
788 			ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
789 
790 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
791 		ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
792 	}
793 
794 	/* Rev C. Step 20a */
795 	ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
796 				     XAUI_AN_SEG_NUM,
797 			sizeof(struct mpi_coredump_segment_header) +
798 			sizeof(mpi_coredump->serdes_xaui_an),
799 			"XAUI AN Registers");
800 
801 	/* Rev C. Step 20b */
802 	ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
803 				     XAUI_HSS_PCS_SEG_NUM,
804 			sizeof(struct mpi_coredump_segment_header) +
805 			sizeof(mpi_coredump->serdes_xaui_hss_pcs),
806 			"XAUI HSS PCS Registers");
807 
808 	ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
809 				     sizeof(struct mpi_coredump_segment_header) +
810 			sizeof(mpi_coredump->serdes_xfi_an),
811 			"XFI AN Registers");
812 
813 	ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
814 				     XFI_TRAIN_SEG_NUM,
815 			sizeof(struct mpi_coredump_segment_header) +
816 			sizeof(mpi_coredump->serdes_xfi_train),
817 			"XFI TRAIN Registers");
818 
819 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
820 				     XFI_HSS_PCS_SEG_NUM,
821 			sizeof(struct mpi_coredump_segment_header) +
822 			sizeof(mpi_coredump->serdes_xfi_hss_pcs),
823 			"XFI HSS PCS Registers");
824 
825 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
826 				     XFI_HSS_TX_SEG_NUM,
827 			sizeof(struct mpi_coredump_segment_header) +
828 			sizeof(mpi_coredump->serdes_xfi_hss_tx),
829 			"XFI HSS TX Registers");
830 
831 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
832 				     XFI_HSS_RX_SEG_NUM,
833 			sizeof(struct mpi_coredump_segment_header) +
834 			sizeof(mpi_coredump->serdes_xfi_hss_rx),
835 			"XFI HSS RX Registers");
836 
837 	ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
838 				     XFI_HSS_PLL_SEG_NUM,
839 			sizeof(struct mpi_coredump_segment_header) +
840 			sizeof(mpi_coredump->serdes_xfi_hss_pll),
841 			"XFI HSS PLL Registers");
842 
843 	ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
844 				     XAUI2_AN_SEG_NUM,
845 			sizeof(struct mpi_coredump_segment_header) +
846 			sizeof(mpi_coredump->serdes2_xaui_an),
847 			"XAUI2 AN Registers");
848 
849 	ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
850 				     XAUI2_HSS_PCS_SEG_NUM,
851 			sizeof(struct mpi_coredump_segment_header) +
852 			sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
853 			"XAUI2 HSS PCS Registers");
854 
855 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
856 				     XFI2_AN_SEG_NUM,
857 			sizeof(struct mpi_coredump_segment_header) +
858 			sizeof(mpi_coredump->serdes2_xfi_an),
859 			"XFI2 AN Registers");
860 
861 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
862 				     XFI2_TRAIN_SEG_NUM,
863 			sizeof(struct mpi_coredump_segment_header) +
864 			sizeof(mpi_coredump->serdes2_xfi_train),
865 			"XFI2 TRAIN Registers");
866 
867 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
868 				     XFI2_HSS_PCS_SEG_NUM,
869 			sizeof(struct mpi_coredump_segment_header) +
870 			sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
871 			"XFI2 HSS PCS Registers");
872 
873 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
874 				     XFI2_HSS_TX_SEG_NUM,
875 			sizeof(struct mpi_coredump_segment_header) +
876 			sizeof(mpi_coredump->serdes2_xfi_hss_tx),
877 			"XFI2 HSS TX Registers");
878 
879 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
880 				     XFI2_HSS_RX_SEG_NUM,
881 			sizeof(struct mpi_coredump_segment_header) +
882 			sizeof(mpi_coredump->serdes2_xfi_hss_rx),
883 			"XFI2 HSS RX Registers");
884 
885 	ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
886 				     XFI2_HSS_PLL_SEG_NUM,
887 			sizeof(struct mpi_coredump_segment_header) +
888 			sizeof(mpi_coredump->serdes2_xfi_hss_pll),
889 			"XFI2 HSS PLL Registers");
890 
891 	status = ql_get_serdes_regs(qdev, mpi_coredump);
892 	if (status) {
893 		netif_err(qdev, drv, qdev->ndev,
894 			  "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
895 			  status);
896 		goto err;
897 	}
898 
899 	ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
900 				     CORE_SEG_NUM,
901 				sizeof(mpi_coredump->core_regs_seg_hdr) +
902 				sizeof(mpi_coredump->mpi_core_regs) +
903 				sizeof(mpi_coredump->mpi_core_sh_regs),
904 				"Core Registers");
905 
906 	/* Get the MPI Core Registers */
907 	status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
908 				 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
909 	if (status)
910 		goto err;
911 	/* Get the 16 MPI shadow registers */
912 	status = ql_get_mpi_shadow_regs(qdev,
913 					&mpi_coredump->mpi_core_sh_regs[0]);
914 	if (status)
915 		goto err;
916 
917 	/* Get the Test Logic Registers */
918 	ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
919 				     TEST_LOGIC_SEG_NUM,
920 				sizeof(struct mpi_coredump_segment_header)
921 				+ sizeof(mpi_coredump->test_logic_regs),
922 				"Test Logic Regs");
923 	status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
924 				 TEST_REGS_ADDR, TEST_REGS_CNT);
925 	if (status)
926 		goto err;
927 
928 	/* Get the RMII Registers */
929 	ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
930 				     RMII_SEG_NUM,
931 				sizeof(struct mpi_coredump_segment_header)
932 				+ sizeof(mpi_coredump->rmii_regs),
933 				"RMII Registers");
934 	status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
935 				 RMII_REGS_ADDR, RMII_REGS_CNT);
936 	if (status)
937 		goto err;
938 
939 	/* Get the FCMAC1 Registers */
940 	ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
941 				     FCMAC1_SEG_NUM,
942 				sizeof(struct mpi_coredump_segment_header)
943 				+ sizeof(mpi_coredump->fcmac1_regs),
944 				"FCMAC1 Registers");
945 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
946 				 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
947 	if (status)
948 		goto err;
949 
950 	/* Get the FCMAC2 Registers */
951 
952 	ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
953 				     FCMAC2_SEG_NUM,
954 				sizeof(struct mpi_coredump_segment_header)
955 				+ sizeof(mpi_coredump->fcmac2_regs),
956 				"FCMAC2 Registers");
957 
958 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
959 				 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
960 	if (status)
961 		goto err;
962 
963 	/* Get the FC1 MBX Registers */
964 	ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
965 				     FC1_MBOX_SEG_NUM,
966 				sizeof(struct mpi_coredump_segment_header)
967 				+ sizeof(mpi_coredump->fc1_mbx_regs),
968 				"FC1 MBox Regs");
969 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
970 				 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
971 	if (status)
972 		goto err;
973 
974 	/* Get the IDE Registers */
975 	ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
976 				     IDE_SEG_NUM,
977 				sizeof(struct mpi_coredump_segment_header)
978 				+ sizeof(mpi_coredump->ide_regs),
979 				"IDE Registers");
980 	status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
981 				 IDE_REGS_ADDR, IDE_REGS_CNT);
982 	if (status)
983 		goto err;
984 
985 	/* Get the NIC1 MBX Registers */
986 	ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
987 				     NIC1_MBOX_SEG_NUM,
988 				sizeof(struct mpi_coredump_segment_header)
989 				+ sizeof(mpi_coredump->nic1_mbx_regs),
990 				"NIC1 MBox Regs");
991 	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
992 				 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
993 	if (status)
994 		goto err;
995 
996 	/* Get the SMBus Registers */
997 	ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
998 				     SMBUS_SEG_NUM,
999 				sizeof(struct mpi_coredump_segment_header)
1000 				+ sizeof(mpi_coredump->smbus_regs),
1001 				"SMBus Registers");
1002 	status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
1003 				 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
1004 	if (status)
1005 		goto err;
1006 
1007 	/* Get the FC2 MBX Registers */
1008 	ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
1009 				     FC2_MBOX_SEG_NUM,
1010 				sizeof(struct mpi_coredump_segment_header)
1011 				+ sizeof(mpi_coredump->fc2_mbx_regs),
1012 				"FC2 MBox Regs");
1013 	status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
1014 				 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
1015 	if (status)
1016 		goto err;
1017 
1018 	/* Get the NIC2 MBX Registers */
1019 	ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
1020 				     NIC2_MBOX_SEG_NUM,
1021 				sizeof(struct mpi_coredump_segment_header)
1022 				+ sizeof(mpi_coredump->nic2_mbx_regs),
1023 				"NIC2 MBox Regs");
1024 	status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
1025 				 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1026 	if (status)
1027 		goto err;
1028 
1029 	/* Get the I2C Registers */
1030 	ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
1031 				     I2C_SEG_NUM,
1032 				sizeof(struct mpi_coredump_segment_header)
1033 				+ sizeof(mpi_coredump->i2c_regs),
1034 				"I2C Registers");
1035 	status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
1036 				 I2C_REGS_ADDR, I2C_REGS_CNT);
1037 	if (status)
1038 		goto err;
1039 
1040 	/* Get the MEMC Registers */
1041 	ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
1042 				     MEMC_SEG_NUM,
1043 				sizeof(struct mpi_coredump_segment_header)
1044 				+ sizeof(mpi_coredump->memc_regs),
1045 				"MEMC Registers");
1046 	status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
1047 				 MEMC_REGS_ADDR, MEMC_REGS_CNT);
1048 	if (status)
1049 		goto err;
1050 
1051 	/* Get the PBus Registers */
1052 	ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
1053 				     PBUS_SEG_NUM,
1054 				sizeof(struct mpi_coredump_segment_header)
1055 				+ sizeof(mpi_coredump->pbus_regs),
1056 				"PBUS Registers");
1057 	status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
1058 				 PBUS_REGS_ADDR, PBUS_REGS_CNT);
1059 	if (status)
1060 		goto err;
1061 
1062 	/* Get the MDE Registers */
1063 	ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
1064 				     MDE_SEG_NUM,
1065 				sizeof(struct mpi_coredump_segment_header)
1066 				+ sizeof(mpi_coredump->mde_regs),
1067 				"MDE Registers");
1068 	status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
1069 				 MDE_REGS_ADDR, MDE_REGS_CNT);
1070 	if (status)
1071 		goto err;
1072 
1073 	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1074 				     MISC_NIC_INFO_SEG_NUM,
1075 				sizeof(struct mpi_coredump_segment_header)
1076 				+ sizeof(mpi_coredump->misc_nic_info),
1077 				"MISC NIC INFO");
1078 	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1079 	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1080 	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1081 	mpi_coredump->misc_nic_info.function = qdev->func;
1082 
1083 	/* Segment 31 */
1084 	/* Get indexed register values. */
1085 	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1086 				     INTR_STATES_SEG_NUM,
1087 				sizeof(struct mpi_coredump_segment_header)
1088 				+ sizeof(mpi_coredump->intr_states),
1089 				"INTR States");
1090 	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1091 
1092 	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1093 				     CAM_ENTRIES_SEG_NUM,
1094 				sizeof(struct mpi_coredump_segment_header)
1095 				+ sizeof(mpi_coredump->cam_entries),
1096 				"CAM Entries");
1097 	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1098 	if (status)
1099 		goto err;
1100 
1101 	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1102 				     ROUTING_WORDS_SEG_NUM,
1103 				sizeof(struct mpi_coredump_segment_header)
1104 				+ sizeof(mpi_coredump->nic_routing_words),
1105 				"Routing Words");
1106 	status = ql_get_routing_entries(qdev,
1107 					&mpi_coredump->nic_routing_words[0]);
1108 	if (status)
1109 		goto err;
1110 
1111 	/* Segment 34 (Rev C. step 23) */
1112 	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1113 				     ETS_SEG_NUM,
1114 				sizeof(struct mpi_coredump_segment_header)
1115 				+ sizeof(mpi_coredump->ets),
1116 				"ETS Registers");
1117 	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1118 	if (status)
1119 		goto err;
1120 
1121 	ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
1122 				     PROBE_DUMP_SEG_NUM,
1123 				sizeof(struct mpi_coredump_segment_header)
1124 				+ sizeof(mpi_coredump->probe_dump),
1125 				"Probe Dump");
1126 	ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
1127 
1128 	ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
1129 				     ROUTING_INDEX_SEG_NUM,
1130 				sizeof(struct mpi_coredump_segment_header)
1131 				+ sizeof(mpi_coredump->routing_regs),
1132 				"Routing Regs");
1133 	status = ql_get_routing_index_registers(qdev,
1134 						&mpi_coredump->routing_regs[0]);
1135 	if (status)
1136 		goto err;
1137 
1138 	ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
1139 				     MAC_PROTOCOL_SEG_NUM,
1140 				sizeof(struct mpi_coredump_segment_header)
1141 				+ sizeof(mpi_coredump->mac_prot_regs),
1142 				"MAC Prot Regs");
1143 	ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
1144 
1145 	/* Get the semaphore registers for all 5 functions */
1146 	ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
1147 				     SEM_REGS_SEG_NUM,
1148 			sizeof(struct mpi_coredump_segment_header) +
1149 			sizeof(mpi_coredump->sem_regs),	"Sem Registers");
1150 
1151 	ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1152 
1153 	/* Prevent the mpi restarting while we dump the memory.*/
1154 	ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
1155 
1156 	/* clear the pause */
1157 	status = ql_unpause_mpi_risc(qdev);
1158 	if (status) {
1159 		netif_err(qdev, drv, qdev->ndev,
1160 			  "Failed RISC unpause. Status = 0x%.08x\n", status);
1161 		goto err;
1162 	}
1163 
1164 	/* Reset the RISC so we can dump RAM */
1165 	status = ql_hard_reset_mpi_risc(qdev);
1166 	if (status) {
1167 		netif_err(qdev, drv, qdev->ndev,
1168 			  "Failed RISC reset. Status = 0x%.08x\n", status);
1169 		goto err;
1170 	}
1171 
1172 	ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
1173 				     WCS_RAM_SEG_NUM,
1174 				sizeof(struct mpi_coredump_segment_header)
1175 				+ sizeof(mpi_coredump->code_ram),
1176 				"WCS RAM");
1177 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
1178 				       CODE_RAM_ADDR, CODE_RAM_CNT);
1179 	if (status) {
1180 		netif_err(qdev, drv, qdev->ndev,
1181 			  "Failed Dump of CODE RAM. Status = 0x%.08x\n",
1182 			  status);
1183 		goto err;
1184 	}
1185 
1186 	/* Insert the segment header */
1187 	ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
1188 				     MEMC_RAM_SEG_NUM,
1189 				sizeof(struct mpi_coredump_segment_header)
1190 				+ sizeof(mpi_coredump->memc_ram),
1191 				"MEMC RAM");
1192 	status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
1193 				       MEMC_RAM_ADDR, MEMC_RAM_CNT);
1194 	if (status) {
1195 		netif_err(qdev, drv, qdev->ndev,
1196 			  "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
1197 			  status);
1198 		goto err;
1199 	}
1200 err:
1201 	ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
1202 	return status;
1203 }
1204 
ql_get_core_dump(struct ql_adapter * qdev)1205 static void ql_get_core_dump(struct ql_adapter *qdev)
1206 {
1207 	if (!ql_own_firmware(qdev)) {
1208 		netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
1209 		return;
1210 	}
1211 
1212 	if (!netif_running(qdev->ndev)) {
1213 		netif_err(qdev, ifup, qdev->ndev,
1214 			  "Force Coredump can only be done from interface that is up\n");
1215 		return;
1216 	}
1217 	ql_queue_fw_error(qdev);
1218 }
1219 
ql_gen_reg_dump(struct ql_adapter * qdev,struct ql_reg_dump * mpi_coredump)1220 static void ql_gen_reg_dump(struct ql_adapter *qdev,
1221 			    struct ql_reg_dump *mpi_coredump)
1222 {
1223 	int i, status;
1224 
1225 	memset(&(mpi_coredump->mpi_global_header), 0,
1226 	       sizeof(struct mpi_coredump_global_header));
1227 	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
1228 	mpi_coredump->mpi_global_header.header_size =
1229 		sizeof(struct mpi_coredump_global_header);
1230 	mpi_coredump->mpi_global_header.image_size =
1231 		sizeof(struct ql_reg_dump);
1232 	strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
1233 		sizeof(mpi_coredump->mpi_global_header.id_string));
1234 
1235 	/* segment 16 */
1236 	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1237 				     MISC_NIC_INFO_SEG_NUM,
1238 				sizeof(struct mpi_coredump_segment_header)
1239 				+ sizeof(mpi_coredump->misc_nic_info),
1240 				"MISC NIC INFO");
1241 	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1242 	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1243 	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1244 	mpi_coredump->misc_nic_info.function = qdev->func;
1245 
1246 	/* Segment 16, Rev C. Step 18 */
1247 	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
1248 				     NIC1_CONTROL_SEG_NUM,
1249 				sizeof(struct mpi_coredump_segment_header)
1250 				+ sizeof(mpi_coredump->nic_regs),
1251 				"NIC Registers");
1252 	/* Get generic reg dump */
1253 	for (i = 0; i < 64; i++)
1254 		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
1255 
1256 	/* Segment 31 */
1257 	/* Get indexed register values. */
1258 	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1259 				     INTR_STATES_SEG_NUM,
1260 				sizeof(struct mpi_coredump_segment_header)
1261 				+ sizeof(mpi_coredump->intr_states),
1262 				"INTR States");
1263 	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1264 
1265 	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1266 				     CAM_ENTRIES_SEG_NUM,
1267 				sizeof(struct mpi_coredump_segment_header)
1268 				+ sizeof(mpi_coredump->cam_entries),
1269 				"CAM Entries");
1270 	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1271 	if (status)
1272 		return;
1273 
1274 	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1275 				     ROUTING_WORDS_SEG_NUM,
1276 				sizeof(struct mpi_coredump_segment_header)
1277 				+ sizeof(mpi_coredump->nic_routing_words),
1278 				"Routing Words");
1279 	status = ql_get_routing_entries(qdev,
1280 					&mpi_coredump->nic_routing_words[0]);
1281 	if (status)
1282 		return;
1283 
1284 	/* Segment 34 (Rev C. step 23) */
1285 	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1286 				     ETS_SEG_NUM,
1287 				sizeof(struct mpi_coredump_segment_header)
1288 				+ sizeof(mpi_coredump->ets),
1289 				"ETS Registers");
1290 	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1291 	if (status)
1292 		return;
1293 }
1294 
ql_get_dump(struct ql_adapter * qdev,void * buff)1295 void ql_get_dump(struct ql_adapter *qdev, void *buff)
1296 {
1297 	/*
1298 	 * If the dump has already been taken and is stored
1299 	 * in our internal buffer and if force dump is set then
1300 	 * just start the spool to dump it to the log file
1301 	 * and also, take a snapshot of the general regs
1302 	 * to the user's buffer or else take complete dump
1303 	 * to the user's buffer if force is not set.
1304 	 */
1305 
1306 	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1307 		if (!ql_core_dump(qdev, buff))
1308 			ql_soft_reset_mpi_risc(qdev);
1309 		else
1310 			netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1311 	} else {
1312 		ql_gen_reg_dump(qdev, buff);
1313 		ql_get_core_dump(qdev);
1314 	}
1315 }
1316 
1317 /* Coredump to messages log file using separate worker thread */
/* Worker callback: hex-dump the previously captured coredump buffer
 * to the kernel log, 32 bytes per line in 4-byte groups.
 */
void ql_mpi_core_to_log(struct work_struct *work)
{
	struct ql_adapter *qdev =
		container_of(work, struct ql_adapter, mpi_core_to_log.work);

	/* NOTE(review): the prefix string (with its trailing newline) is
	 * prepended to every dumped line — presumably intentional banner
	 * behavior; confirm the log output is as desired.
	 */
	print_hex_dump(KERN_DEBUG, "Core is dumping to log file!\n",
		       DUMP_PREFIX_OFFSET, 32, 4, qdev->mpi_coredump,
		       sizeof(*qdev->mpi_coredump), false);
}
1327 
1328 #ifdef QL_REG_DUMP
ql_dump_intr_states(struct ql_adapter * qdev)1329 static void ql_dump_intr_states(struct ql_adapter *qdev)
1330 {
1331 	int i;
1332 	u32 value;
1333 
1334 	for (i = 0; i < qdev->intr_count; i++) {
1335 		ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
1336 		value = ql_read32(qdev, INTR_EN);
1337 		netdev_err(qdev->ndev, "Interrupt %d is %s\n", i,
1338 			   (value & INTR_EN_EN ? "enabled" : "disabled"));
1339 	}
1340 }
1341 
/* Read XGMAC register @reg and log its name and value.  Multi-statement
 * macro wrapped in do/while (0) so it behaves as a single statement.
 */
#define DUMP_XGMAC(qdev, reg)					\
do {								\
	u32 data;						\
	ql_read_xgmac_reg(qdev, reg, &data);			\
	netdev_err(qdev->ndev, "%s = 0x%.08x\n", #reg, data); \
} while (0)
1348 
/* Dump the XGMAC control registers to the log.  Acquires the XGMAC
 * hardware semaphore for the duration; bails out with an error
 * message if the semaphore cannot be taken.
 */
void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
		netdev_err(qdev->ndev, "%s: Couldn't get xgmac sem\n",
			   __func__);
		return;
	}
	DUMP_XGMAC(qdev, PAUSE_SRC_LO);
	DUMP_XGMAC(qdev, PAUSE_SRC_HI);
	DUMP_XGMAC(qdev, GLOBAL_CFG);
	DUMP_XGMAC(qdev, TX_CFG);
	DUMP_XGMAC(qdev, RX_CFG);
	DUMP_XGMAC(qdev, FLOW_CTL);
	DUMP_XGMAC(qdev, PAUSE_OPCODE);
	DUMP_XGMAC(qdev, PAUSE_TIMER);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
	DUMP_XGMAC(qdev, MAC_TX_PARAMS);
	DUMP_XGMAC(qdev, MAC_RX_PARAMS);
	DUMP_XGMAC(qdev, MAC_SYS_INT);
	DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
	DUMP_XGMAC(qdev, MAC_MGMT_INT);
	DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
	DUMP_XGMAC(qdev, EXT_ARB_MODE);
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
}
1375 
/* Placeholder: ETS register dumping is not implemented. */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
1379 
ql_dump_cam_entries(struct ql_adapter * qdev)1380 static void ql_dump_cam_entries(struct ql_adapter *qdev)
1381 {
1382 	int i;
1383 	u32 value[3];
1384 
1385 	i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1386 	if (i)
1387 		return;
1388 	for (i = 0; i < 4; i++) {
1389 		if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
1390 			netdev_err(qdev->ndev,
1391 				   "%s: Failed read of mac index register\n",
1392 				   __func__);
1393 			break;
1394 		}
1395 		if (value[0])
1396 			netdev_err(qdev->ndev,
1397 				   "CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
1398 				   i, value[1], value[0], value[2]);
1399 	}
1400 	for (i = 0; i < 32; i++) {
1401 		if (ql_get_mac_addr_reg
1402 		    (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
1403 			netdev_err(qdev->ndev,
1404 				   "%s: Failed read of mac index register\n",
1405 				   __func__);
1406 			break;
1407 		}
1408 		if (value[0])
1409 			netdev_err(qdev->ndev,
1410 				   "MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
1411 				   i, value[1], value[0]);
1412 	}
1413 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1414 }
1415 
ql_dump_routing_entries(struct ql_adapter * qdev)1416 void ql_dump_routing_entries(struct ql_adapter *qdev)
1417 {
1418 	int i;
1419 	u32 value;
1420 
1421 	i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
1422 	if (i)
1423 		return;
1424 	for (i = 0; i < 16; i++) {
1425 		value = 0;
1426 		if (ql_get_routing_reg(qdev, i, &value)) {
1427 			netdev_err(qdev->ndev,
1428 				   "%s: Failed read of routing index register\n",
1429 				   __func__);
1430 			break;
1431 		}
1432 		if (value)
1433 			netdev_err(qdev->ndev,
1434 				   "Routing Mask %d = 0x%.08x\n",
1435 				   i, value);
1436 	}
1437 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
1438 }
1439 
/* Log the name and current value of NIC register @reg. */
#define DUMP_REG(qdev, reg)			\
	netdev_err(qdev->ndev, "%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
1442 
/* Dump the full set of NIC control/status registers for this PCI
 * function, then chain into the interrupt-state, XGMAC, ETS, CAM
 * and routing-entry dumps.
 */
void ql_dump_regs(struct ql_adapter *qdev)
{
	netdev_err(qdev->ndev, "reg dump for function #%d\n", qdev->func);
	DUMP_REG(qdev, SYS);
	DUMP_REG(qdev, RST_FO);
	DUMP_REG(qdev, FSC);
	DUMP_REG(qdev, CSR);
	DUMP_REG(qdev, ICB_RID);
	DUMP_REG(qdev, ICB_L);
	DUMP_REG(qdev, ICB_H);
	DUMP_REG(qdev, CFG);
	DUMP_REG(qdev, BIOS_ADDR);
	DUMP_REG(qdev, STS);
	DUMP_REG(qdev, INTR_EN);
	DUMP_REG(qdev, INTR_MASK);
	DUMP_REG(qdev, ISR1);
	DUMP_REG(qdev, ISR2);
	DUMP_REG(qdev, ISR3);
	DUMP_REG(qdev, ISR4);
	DUMP_REG(qdev, REV_ID);
	DUMP_REG(qdev, FRC_ECC_ERR);
	DUMP_REG(qdev, ERR_STS);
	DUMP_REG(qdev, RAM_DBG_ADDR);
	DUMP_REG(qdev, RAM_DBG_DATA);
	DUMP_REG(qdev, ECC_ERR_CNT);
	DUMP_REG(qdev, SEM);
	DUMP_REG(qdev, GPIO_1);
	DUMP_REG(qdev, GPIO_2);
	DUMP_REG(qdev, GPIO_3);
	DUMP_REG(qdev, XGMAC_ADDR);
	DUMP_REG(qdev, XGMAC_DATA);
	DUMP_REG(qdev, NIC_ETS);
	DUMP_REG(qdev, CNA_ETS);
	DUMP_REG(qdev, FLASH_ADDR);
	DUMP_REG(qdev, FLASH_DATA);
	DUMP_REG(qdev, CQ_STOP);
	DUMP_REG(qdev, PAGE_TBL_RID);
	DUMP_REG(qdev, WQ_PAGE_TBL_LO);
	DUMP_REG(qdev, WQ_PAGE_TBL_HI);
	DUMP_REG(qdev, CQ_PAGE_TBL_LO);
	DUMP_REG(qdev, CQ_PAGE_TBL_HI);
	DUMP_REG(qdev, COS_DFLT_CQ1);
	DUMP_REG(qdev, COS_DFLT_CQ2);
	DUMP_REG(qdev, SPLT_HDR);
	DUMP_REG(qdev, FC_PAUSE_THRES);
	DUMP_REG(qdev, NIC_PAUSE_THRES);
	DUMP_REG(qdev, FC_ETHERTYPE);
	DUMP_REG(qdev, FC_RCV_CFG);
	DUMP_REG(qdev, NIC_RCV_CFG);
	DUMP_REG(qdev, FC_COS_TAGS);
	DUMP_REG(qdev, NIC_COS_TAGS);
	DUMP_REG(qdev, MGMT_RCV_CFG);
	DUMP_REG(qdev, XG_SERDES_ADDR);
	DUMP_REG(qdev, XG_SERDES_DATA);
	DUMP_REG(qdev, PRB_MX_ADDR);
	DUMP_REG(qdev, PRB_MX_DATA);
	/* Follow with the more structured sub-dumps. */
	ql_dump_intr_states(qdev);
	ql_dump_xgmac_control_regs(qdev);
	ql_dump_ets_regs(qdev);
	ql_dump_cam_entries(qdev);
	ql_dump_routing_entries(qdev);
}
1505 #endif
1506 
1507 #ifdef QL_STAT_DUMP
1508 
/* Log one member of qdev->nic_stats by name. */
#define DUMP_STAT(qdev, stat)	\
	netdev_err(qdev->ndev, "%s = %ld\n", #stat,  \
		   (unsigned long)(qdev)->nic_stats.stat)
1512 
ql_dump_stat(struct ql_adapter * qdev)1513 void ql_dump_stat(struct ql_adapter *qdev)
1514 {
1515 	netdev_err(qdev->ndev, "%s: Enter\n", __func__);
1516 	DUMP_STAT(qdev, tx_pkts);
1517 	DUMP_STAT(qdev, tx_bytes);
1518 	DUMP_STAT(qdev, tx_mcast_pkts);
1519 	DUMP_STAT(qdev, tx_bcast_pkts);
1520 	DUMP_STAT(qdev, tx_ucast_pkts);
1521 	DUMP_STAT(qdev, tx_ctl_pkts);
1522 	DUMP_STAT(qdev, tx_pause_pkts);
1523 	DUMP_STAT(qdev, tx_64_pkt);
1524 	DUMP_STAT(qdev, tx_65_to_127_pkt);
1525 	DUMP_STAT(qdev, tx_128_to_255_pkt);
1526 	DUMP_STAT(qdev, tx_256_511_pkt);
1527 	DUMP_STAT(qdev, tx_512_to_1023_pkt);
1528 	DUMP_STAT(qdev, tx_1024_to_1518_pkt);
1529 	DUMP_STAT(qdev, tx_1519_to_max_pkt);
1530 	DUMP_STAT(qdev, tx_undersize_pkt);
1531 	DUMP_STAT(qdev, tx_oversize_pkt);
1532 	DUMP_STAT(qdev, rx_bytes);
1533 	DUMP_STAT(qdev, rx_bytes_ok);
1534 	DUMP_STAT(qdev, rx_pkts);
1535 	DUMP_STAT(qdev, rx_pkts_ok);
1536 	DUMP_STAT(qdev, rx_bcast_pkts);
1537 	DUMP_STAT(qdev, rx_mcast_pkts);
1538 	DUMP_STAT(qdev, rx_ucast_pkts);
1539 	DUMP_STAT(qdev, rx_undersize_pkts);
1540 	DUMP_STAT(qdev, rx_oversize_pkts);
1541 	DUMP_STAT(qdev, rx_jabber_pkts);
1542 	DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
1543 	DUMP_STAT(qdev, rx_drop_events);
1544 	DUMP_STAT(qdev, rx_fcerr_pkts);
1545 	DUMP_STAT(qdev, rx_align_err);
1546 	DUMP_STAT(qdev, rx_symbol_err);
1547 	DUMP_STAT(qdev, rx_mac_err);
1548 	DUMP_STAT(qdev, rx_ctl_pkts);
1549 	DUMP_STAT(qdev, rx_pause_pkts);
1550 	DUMP_STAT(qdev, rx_64_pkts);
1551 	DUMP_STAT(qdev, rx_65_to_127_pkts);
1552 	DUMP_STAT(qdev, rx_128_255_pkts);
1553 	DUMP_STAT(qdev, rx_256_511_pkts);
1554 	DUMP_STAT(qdev, rx_512_to_1023_pkts);
1555 	DUMP_STAT(qdev, rx_1024_to_1518_pkts);
1556 	DUMP_STAT(qdev, rx_1519_to_max_pkts);
1557 	DUMP_STAT(qdev, rx_len_err_pkts);
1558 };
1559 #endif
1560 
1561 #ifdef QL_DEV_DUMP
1562 
/* Log a scalar qdev field using printf format @type. */
#define DUMP_QDEV_FIELD(qdev, type, field)		\
	netdev_err(qdev->ndev, "qdev->%-24s = " type "\n", #field, (qdev)->field)
/* Log a DMA-address field, widened to 64 bits for a portable %llx. */
#define DUMP_QDEV_DMA_FIELD(qdev, field)		\
	netdev_err(qdev->ndev, "qdev->%-24s = %llx\n", #field, \
		   (unsigned long long)qdev->field)
/* Log one field of element @index of an array hanging off qdev. */
#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
	netdev_err(qdev->ndev, "%s[%d].%s = " type "\n",		 \
	       #array, index, #field, (qdev)->array[index].field)
/* Dump the interesting fields of the adapter structure itself,
 * including the MSI-X vector table (if allocated) and the per-vector
 * interrupt contexts.
 */
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;

	DUMP_QDEV_FIELD(qdev, "%lx", flags);
	DUMP_QDEV_FIELD(qdev, "%p", pdev);
	DUMP_QDEV_FIELD(qdev, "%p", ndev);
	DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
	DUMP_QDEV_FIELD(qdev, "%p", reg_base);
	DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
	DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
	DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
	DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	/* MSI-X vector table, only present when MSI-X is in use. */
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
			DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
		}
	/* Per-vector interrupt context state. */
	for (i = 0; i < qdev->intr_count; i++) {
		DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
		DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
		DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
	}
	DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
	DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
	DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
	/* NOTE(review): intr_count was already dumped above — this second
	 * print looks redundant; confirm before removing.
	 */
	DUMP_QDEV_FIELD(qdev, "%d", intr_count);
	DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
	DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
	DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
	DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
	DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
	DUMP_QDEV_FIELD(qdev, "%u", lbq_buf_size);
}
1615 #endif
1616 
1617 #ifdef QL_CB_DUMP
/* Dump a work-queue initialization control block (TX ring ICB).
 * The wqicb is embedded in its tx_ring, which is how we recover the
 * adapter pointer for logging.
 */
void ql_dump_wqicb(struct wqicb *wqicb)
{
	struct tx_ring *tx_ring = container_of(wqicb, struct tx_ring, wqicb);
	struct ql_adapter *qdev = tx_ring->qdev;

	netdev_err(qdev->ndev, "Dumping wqicb stuff...\n");
	netdev_err(qdev->ndev, "wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
	netdev_err(qdev->ndev, "wqicb->flags = %x\n",
		   le16_to_cpu(wqicb->flags));
	netdev_err(qdev->ndev, "wqicb->cq_id_rss = %d\n",
		   le16_to_cpu(wqicb->cq_id_rss));
	netdev_err(qdev->ndev, "wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
	netdev_err(qdev->ndev, "wqicb->wq_addr = 0x%llx\n",
		   (unsigned long long)le64_to_cpu(wqicb->addr));
	netdev_err(qdev->ndev, "wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
		   (unsigned long long)le64_to_cpu(wqicb->cnsmr_idx_addr));
}
1635 
/* Dump the software state of one TX ring. */
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	struct ql_adapter *qdev = tx_ring->qdev;

	netdev_err(qdev->ndev, "===================== Dumping tx_ring %d ===============\n",
		   tx_ring->wq_id);
	netdev_err(qdev->ndev, "tx_ring->base = %p\n", tx_ring->wq_base);
	netdev_err(qdev->ndev, "tx_ring->base_dma = 0x%llx\n",
		   (unsigned long long)tx_ring->wq_base_dma);
	/* Guard the shadow-register read: the pointer may not be set up yet. */
	netdev_err(qdev->ndev, "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
		   tx_ring->cnsmr_idx_sh_reg,
		   tx_ring->cnsmr_idx_sh_reg
			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
	netdev_err(qdev->ndev, "tx_ring->size = %d\n", tx_ring->wq_size);
	netdev_err(qdev->ndev, "tx_ring->len = %d\n", tx_ring->wq_len);
	netdev_err(qdev->ndev, "tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
	netdev_err(qdev->ndev, "tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
	netdev_err(qdev->ndev, "tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
	netdev_err(qdev->ndev, "tx_ring->cq_id = %d\n", tx_ring->cq_id);
	netdev_err(qdev->ndev, "tx_ring->wq_id = %d\n", tx_ring->wq_id);
	netdev_err(qdev->ndev, "tx_ring->q = %p\n", tx_ring->q);
	netdev_err(qdev->ndev, "tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
}
1659 
/* Dump the RSS initialization control block: base CQ, RSS flags,
 * hash CQ-id map and the IPv6/IPv4 hash keys.
 */
void ql_dump_ricb(struct ricb *ricb)
{
	int i;
	struct ql_adapter *qdev =
		container_of(ricb, struct ql_adapter, ricb);

	netdev_err(qdev->ndev, "===================== Dumping ricb ===============\n");
	netdev_err(qdev->ndev, "Dumping ricb stuff...\n");

	netdev_err(qdev->ndev, "ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
	/* NOTE(review): RSS_L4K is tested against ->base_cq while the
	 * remaining flags come from ->flags — presumably RSS_L4K lives in
	 * the base_cq byte; verify against the ricb layout in qlge.h.
	 */
	netdev_err(qdev->ndev, "ricb->flags = %s%s%s%s%s%s%s%s%s\n",
		   ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
		   ricb->flags & RSS_L6K ? "RSS_L6K " : "",
		   ricb->flags & RSS_LI ? "RSS_LI " : "",
		   ricb->flags & RSS_LB ? "RSS_LB " : "",
		   ricb->flags & RSS_LM ? "RSS_LM " : "",
		   ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
		   ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
		   ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
		   ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	netdev_err(qdev->ndev, "ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		netdev_err(qdev->ndev, "ricb->hash_cq_id[%d] = 0x%.08x\n", i,
			   le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		netdev_err(qdev->ndev, "ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
			   le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		netdev_err(qdev->ndev, "ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
			   le32_to_cpu(ricb->ipv4_hash_key[i]));
}
1691 
/* Dump a completion-queue initialization control block (RX ring ICB).
 * The cqicb is embedded in its rx_ring, which is how we recover the
 * adapter pointer for logging.
 */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	struct rx_ring *rx_ring = container_of(cqicb, struct rx_ring, cqicb);
	struct ql_adapter *qdev = rx_ring->qdev;

	netdev_err(qdev->ndev, "Dumping cqicb stuff...\n");

	netdev_err(qdev->ndev, "cqicb->msix_vect = %d\n", cqicb->msix_vect);
	netdev_err(qdev->ndev, "cqicb->flags = %x\n", cqicb->flags);
	netdev_err(qdev->ndev, "cqicb->len = %d\n", le16_to_cpu(cqicb->len));
	netdev_err(qdev->ndev, "cqicb->addr = 0x%llx\n",
		   (unsigned long long)le64_to_cpu(cqicb->addr));
	netdev_err(qdev->ndev, "cqicb->prod_idx_addr = 0x%llx\n",
		   (unsigned long long)le64_to_cpu(cqicb->prod_idx_addr));
	netdev_err(qdev->ndev, "cqicb->pkt_delay = 0x%.04x\n",
		   le16_to_cpu(cqicb->pkt_delay));
	netdev_err(qdev->ndev, "cqicb->irq_delay = 0x%.04x\n",
		   le16_to_cpu(cqicb->irq_delay));
	netdev_err(qdev->ndev, "cqicb->lbq_addr = 0x%llx\n",
		   (unsigned long long)le64_to_cpu(cqicb->lbq_addr));
	netdev_err(qdev->ndev, "cqicb->lbq_buf_size = 0x%.04x\n",
		   le16_to_cpu(cqicb->lbq_buf_size));
	netdev_err(qdev->ndev, "cqicb->lbq_len = 0x%.04x\n",
		   le16_to_cpu(cqicb->lbq_len));
	netdev_err(qdev->ndev, "cqicb->sbq_addr = 0x%llx\n",
		   (unsigned long long)le64_to_cpu(cqicb->sbq_addr));
	netdev_err(qdev->ndev, "cqicb->sbq_buf_size = 0x%.04x\n",
		   le16_to_cpu(cqicb->sbq_buf_size));
	netdev_err(qdev->ndev, "cqicb->sbq_len = 0x%.04x\n",
		   le16_to_cpu(cqicb->sbq_len));
}
1723 
qlge_rx_ring_type_name(struct rx_ring * rx_ring)1724 static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
1725 {
1726 	struct ql_adapter *qdev = rx_ring->qdev;
1727 
1728 	if (rx_ring->cq_id < qdev->rss_ring_count)
1729 		return "RX COMPLETION";
1730 	else
1731 		return "TX COMPLETION";
1732 };
1733 
/* Dump the software state of one RX (completion) ring, including its
 * large- and small-buffer queues.
 */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;

	netdev_err(qdev->ndev,
		   "===================== Dumping rx_ring %d ===============\n",
		   rx_ring->cq_id);
	netdev_err(qdev->ndev,
		   "Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
		   qlge_rx_ring_type_name(rx_ring));
	netdev_err(qdev->ndev, "rx_ring->cqicb = %p\n", &rx_ring->cqicb);
	netdev_err(qdev->ndev, "rx_ring->cq_base = %p\n", rx_ring->cq_base);
	netdev_err(qdev->ndev, "rx_ring->cq_base_dma = %llx\n",
		   (unsigned long long)rx_ring->cq_base_dma);
	netdev_err(qdev->ndev, "rx_ring->cq_size = %d\n", rx_ring->cq_size);
	netdev_err(qdev->ndev, "rx_ring->cq_len = %d\n", rx_ring->cq_len);
	/* Guard the shadow-register read: the pointer may not be set up yet. */
	netdev_err(qdev->ndev,
		   "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
		   rx_ring->prod_idx_sh_reg,
		   rx_ring->prod_idx_sh_reg ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	netdev_err(qdev->ndev, "rx_ring->prod_idx_sh_reg_dma = %llx\n",
		   (unsigned long long)rx_ring->prod_idx_sh_reg_dma);
	netdev_err(qdev->ndev, "rx_ring->cnsmr_idx_db_reg = %p\n",
		   rx_ring->cnsmr_idx_db_reg);
	netdev_err(qdev->ndev, "rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
	netdev_err(qdev->ndev, "rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
	netdev_err(qdev->ndev, "rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);

	/* Large buffer queue state. */
	netdev_err(qdev->ndev, "rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
	netdev_err(qdev->ndev, "rx_ring->lbq.base_dma = %llx\n",
		   (unsigned long long)rx_ring->lbq.base_dma);
	netdev_err(qdev->ndev, "rx_ring->lbq.base_indirect = %p\n",
		   rx_ring->lbq.base_indirect);
	netdev_err(qdev->ndev, "rx_ring->lbq.base_indirect_dma = %llx\n",
		   (unsigned long long)rx_ring->lbq.base_indirect_dma);
	netdev_err(qdev->ndev, "rx_ring->lbq = %p\n", rx_ring->lbq.queue);
	netdev_err(qdev->ndev, "rx_ring->lbq.prod_idx_db_reg = %p\n",
		   rx_ring->lbq.prod_idx_db_reg);
	netdev_err(qdev->ndev, "rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
	netdev_err(qdev->ndev, "rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);

	/* Small buffer queue state. */
	netdev_err(qdev->ndev, "rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
	netdev_err(qdev->ndev, "rx_ring->sbq.base_dma = %llx\n",
		   (unsigned long long)rx_ring->sbq.base_dma);
	netdev_err(qdev->ndev, "rx_ring->sbq.base_indirect = %p\n",
		   rx_ring->sbq.base_indirect);
	netdev_err(qdev->ndev, "rx_ring->sbq.base_indirect_dma = %llx\n",
		   (unsigned long long)rx_ring->sbq.base_indirect_dma);
	netdev_err(qdev->ndev, "rx_ring->sbq = %p\n", rx_ring->sbq.queue);
	netdev_err(qdev->ndev, "rx_ring->sbq.prod_idx_db_reg addr = %p\n",
		   rx_ring->sbq.prod_idx_db_reg);
	netdev_err(qdev->ndev, "rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
	netdev_err(qdev->ndev, "rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
	netdev_err(qdev->ndev, "rx_ring->cq_id = %d\n", rx_ring->cq_id);
	netdev_err(qdev->ndev, "rx_ring->irq = %d\n", rx_ring->irq);
	netdev_err(qdev->ndev, "rx_ring->cpu = %d\n", rx_ring->cpu);
	netdev_err(qdev->ndev, "rx_ring->qdev = %p\n", rx_ring->qdev);
}
1792 
/*
 * Read a control block of @size bytes back from the chip (selected by
 * @bit / @q_id) into a scratch buffer and dump it in decoded form.
 * Allocation is GFP_ATOMIC since dumps may run in atomic context.
 */
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
{
	void *buf;

	netdev_err(qdev->ndev, "%s: Enter\n", __func__);

	buf = kmalloc(size, GFP_ATOMIC);
	if (!buf)
		return;

	if (ql_write_cfg(qdev, buf, size, bit, q_id)) {
		netdev_err(qdev->ndev, "%s: Failed to upload control block!\n", __func__);
		goto out_free;
	}

	/* Decode according to which control-block type was requested. */
	switch (bit) {
	case CFG_DRQ:
		ql_dump_wqicb(buf);
		break;
	case CFG_DCQ:
		ql_dump_cqicb(buf);
		break;
	case CFG_DR:
		ql_dump_ricb(buf);
		break;
	default:
		netdev_err(qdev->ndev, "%s: Invalid bit value = %x\n", __func__, bit);
		break;
	}
out_free:
	kfree(buf);
}
1824 #endif
1825 
1826 #ifdef QL_OB_DUMP
ql_dump_tx_desc(struct ql_adapter * qdev,struct tx_buf_desc * tbd)1827 void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd)
1828 {
1829 	netdev_err(qdev->ndev, "tbd->addr  = 0x%llx\n",
1830 		   le64_to_cpu((u64)tbd->addr));
1831 	netdev_err(qdev->ndev, "tbd->len   = %d\n",
1832 		   le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1833 	netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
1834 		   tbd->len & TX_DESC_C ? "C" : ".",
1835 		   tbd->len & TX_DESC_E ? "E" : ".");
1836 	tbd++;
1837 	netdev_err(qdev->ndev, "tbd->addr  = 0x%llx\n",
1838 		   le64_to_cpu((u64)tbd->addr));
1839 	netdev_err(qdev->ndev, "tbd->len   = %d\n",
1840 		   le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1841 	netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
1842 		   tbd->len & TX_DESC_C ? "C" : ".",
1843 		   tbd->len & TX_DESC_E ? "E" : ".");
1844 	tbd++;
1845 	netdev_err(qdev->ndev, "tbd->addr  = 0x%llx\n",
1846 		   le64_to_cpu((u64)tbd->addr));
1847 	netdev_err(qdev->ndev, "tbd->len   = %d\n",
1848 		   le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1849 	netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
1850 		   tbd->len & TX_DESC_C ? "C" : ".",
1851 		   tbd->len & TX_DESC_E ? "E" : ".");
1852 }
1853 
ql_dump_ob_mac_iocb(struct ql_adapter * qdev,struct ob_mac_iocb_req * ob_mac_iocb)1854 void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb)
1855 {
1856 	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
1857 	    (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
1858 	struct tx_buf_desc *tbd;
1859 	u16 frame_len;
1860 
1861 	netdev_err(qdev->ndev, "%s\n", __func__);
1862 	netdev_err(qdev->ndev, "opcode         = %s\n",
1863 		   (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
1864 	netdev_err(qdev->ndev, "flags1          = %s %s %s %s %s\n",
1865 		   ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
1866 		   ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
1867 		   ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
1868 		   ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
1869 		   ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
1870 	netdev_err(qdev->ndev, "flags2          = %s %s %s\n",
1871 		   ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
1872 		   ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
1873 		   ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
1874 	netdev_err(qdev->ndev, "flags3          = %s %s %s\n",
1875 		   ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
1876 		   ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
1877 		   ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
1878 	netdev_err(qdev->ndev, "tid = %x\n", ob_mac_iocb->tid);
1879 	netdev_err(qdev->ndev, "txq_idx = %d\n", ob_mac_iocb->txq_idx);
1880 	netdev_err(qdev->ndev, "vlan_tci      = %x\n", ob_mac_tso_iocb->vlan_tci);
1881 	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
1882 		netdev_err(qdev->ndev, "frame_len      = %d\n",
1883 			   le32_to_cpu(ob_mac_tso_iocb->frame_len));
1884 		netdev_err(qdev->ndev, "mss      = %d\n",
1885 			   le16_to_cpu(ob_mac_tso_iocb->mss));
1886 		netdev_err(qdev->ndev, "prot_hdr_len   = %d\n",
1887 			   le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
1888 		netdev_err(qdev->ndev, "hdr_offset     = 0x%.04x\n",
1889 			   le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
1890 		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
1891 	} else {
1892 		netdev_err(qdev->ndev, "frame_len      = %d\n",
1893 			   le16_to_cpu(ob_mac_iocb->frame_len));
1894 		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
1895 	}
1896 	tbd = &ob_mac_iocb->tbd[0];
1897 	ql_dump_tx_desc(qdev, tbd);
1898 }
1899 
ql_dump_ob_mac_rsp(struct ql_adapter * qdev,struct ob_mac_iocb_rsp * ob_mac_rsp)1900 void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp)
1901 {
1902 	netdev_err(qdev->ndev, "%s\n", __func__);
1903 	netdev_err(qdev->ndev, "opcode         = %d\n", ob_mac_rsp->opcode);
1904 	netdev_err(qdev->ndev, "flags          = %s %s %s %s %s %s %s\n",
1905 		   ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ?
1906 			"OI" : ".", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
1907 		   ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
1908 		   ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
1909 		   ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
1910 		   ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
1911 		   ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
1912 	netdev_err(qdev->ndev, "tid = %x\n", ob_mac_rsp->tid);
1913 }
1914 #endif
1915 
1916 #ifdef QL_IB_DUMP
/*
 * Dump an inbound MAC IOCB response: opcode, the four flags bytes
 * (decoded bit by bit), payload/header buffer addresses and lengths,
 * and the RSS hash / VLAN tag when the corresponding flag says they
 * are valid.
 */
void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	netdev_err(qdev->ndev, "%s\n", __func__);
	netdev_err(qdev->ndev, "opcode         = 0x%x\n", ib_mac_rsp->opcode);
	netdev_err(qdev->ndev, "flags1 = %s%s%s%s%s%s\n",
		   ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
		   ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
		   ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
		   ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
		   ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
		   ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");

	/* Multicast-match type is a multi-bit field in flags1; decode which
	 * value matched (only one of the three can compare equal).
	 */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		netdev_err(qdev->ndev, "%s%s%s Multicast\n",
			   (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			   IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			   (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			   IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			   (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			   IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");

	netdev_err(qdev->ndev, "flags2 = %s%s%s%s%s\n",
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");

	/* Error type is a multi-bit field in flags2; decode which error. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		netdev_err(qdev->ndev, "%s%s%s%s%s error\n",
			   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
			   IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
			   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
			   IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
			   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
			   IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
			   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
			   IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
			   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
			   IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");

	netdev_err(qdev->ndev, "flags3 = %s%s\n",
		   ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
		   ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");

	/* RSS hash type is a multi-bit field in flags3; decode which one. */
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		netdev_err(qdev->ndev, "RSS flags = %s%s%s%s\n",
			   ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			    IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
			   ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			    IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
			   ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			    IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
			   ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			    IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");

	netdev_err(qdev->ndev, "data_len	= %d\n",
		   le32_to_cpu(ib_mac_rsp->data_len));
	netdev_err(qdev->ndev, "data_addr    = 0x%llx\n",
		   (unsigned long long)le64_to_cpu(ib_mac_rsp->data_addr));
	/* The RSS hash value is only meaningful when an RSS type is set. */
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		netdev_err(qdev->ndev, "rss    = %x\n",
			   le32_to_cpu(ib_mac_rsp->rss));
	/* VLAN tag is only valid when the V bit is set in flags2. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		netdev_err(qdev->ndev, "vlan_id    = %x\n",
			   le16_to_cpu(ib_mac_rsp->vlan_id));

	netdev_err(qdev->ndev, "flags4 = %s%s%s\n",
		   ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
		   ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
		   ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");

	/* Split-header fields are only valid when the HV bit is set. */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		netdev_err(qdev->ndev, "hdr length	= %d\n",
			   le32_to_cpu(ib_mac_rsp->hdr_len));
		netdev_err(qdev->ndev, "hdr addr    = 0x%llx\n",
			   (unsigned long long)le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
1996 #endif
1997 
1998 #ifdef QL_ALL_DUMP
ql_dump_all(struct ql_adapter * qdev)1999 void ql_dump_all(struct ql_adapter *qdev)
2000 {
2001 	int i;
2002 
2003 	QL_DUMP_REGS(qdev);
2004 	QL_DUMP_QDEV(qdev);
2005 	for (i = 0; i < qdev->tx_ring_count; i++) {
2006 		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
2007 		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
2008 	}
2009 	for (i = 0; i < qdev->rx_ring_count; i++) {
2010 		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
2011 		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
2012 	}
2013 }
2014 #endif
2015