1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
12 #include "qed.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17
18 /* Memory groups enum */
19 enum mem_groups {
20 MEM_GROUP_PXP_MEM,
21 MEM_GROUP_DMAE_MEM,
22 MEM_GROUP_CM_MEM,
23 MEM_GROUP_QM_MEM,
24 MEM_GROUP_DORQ_MEM,
25 MEM_GROUP_BRB_RAM,
26 MEM_GROUP_BRB_MEM,
27 MEM_GROUP_PRS_MEM,
28 MEM_GROUP_IOR,
29 MEM_GROUP_BTB_RAM,
30 MEM_GROUP_CONN_CFC_MEM,
31 MEM_GROUP_TASK_CFC_MEM,
32 MEM_GROUP_CAU_PI,
33 MEM_GROUP_CAU_MEM,
34 MEM_GROUP_PXP_ILT,
35 MEM_GROUP_TM_MEM,
36 MEM_GROUP_SDM_MEM,
37 MEM_GROUP_PBUF,
38 MEM_GROUP_RAM,
39 MEM_GROUP_MULD_MEM,
40 MEM_GROUP_BTB_MEM,
41 MEM_GROUP_RDIF_CTX,
42 MEM_GROUP_TDIF_CTX,
43 MEM_GROUP_CFC_MEM,
44 MEM_GROUP_IGU_MEM,
45 MEM_GROUP_IGU_MSIX,
46 MEM_GROUP_CAU_SB,
47 MEM_GROUP_BMB_RAM,
48 MEM_GROUP_BMB_MEM,
49 MEM_GROUPS_NUM
50 };
51
52 /* Memory groups names */
53 static const char * const s_mem_group_names[] = {
54 "PXP_MEM",
55 "DMAE_MEM",
56 "CM_MEM",
57 "QM_MEM",
58 "DORQ_MEM",
59 "BRB_RAM",
60 "BRB_MEM",
61 "PRS_MEM",
62 "IOR",
63 "BTB_RAM",
64 "CONN_CFC_MEM",
65 "TASK_CFC_MEM",
66 "CAU_PI",
67 "CAU_MEM",
68 "PXP_ILT",
69 "TM_MEM",
70 "SDM_MEM",
71 "PBUF",
72 "RAM",
73 "MULD_MEM",
74 "BTB_MEM",
75 "RDIF_CTX",
76 "TDIF_CTX",
77 "CFC_MEM",
78 "IGU_MEM",
79 "IGU_MSIX",
80 "CAU_SB",
81 "BMB_RAM",
82 "BMB_MEM",
83 };
84
85 /* Idle check conditions */
86
87 static u32 cond5(const u32 *r, const u32 *imm)
88 {
89 return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
90 }
91
92 static u32 cond7(const u32 *r, const u32 *imm)
93 {
94 return ((r[0] >> imm[0]) & imm[1]) != imm[2];
95 }
96
97 static u32 cond6(const u32 *r, const u32 *imm)
98 {
99 return (r[0] & imm[0]) != imm[1];
100 }
101
102 static u32 cond9(const u32 *r, const u32 *imm)
103 {
104 return ((r[0] & imm[0]) >> imm[1]) !=
105 (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
106 }
107
108 static u32 cond10(const u32 *r, const u32 *imm)
109 {
110 return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
111 }
112
113 static u32 cond4(const u32 *r, const u32 *imm)
114 {
115 return (r[0] & ~imm[0]) != imm[1];
116 }
117
118 static u32 cond0(const u32 *r, const u32 *imm)
119 {
120 return (r[0] & ~r[1]) != imm[0];
121 }
122
123 static u32 cond1(const u32 *r, const u32 *imm)
124 {
125 return r[0] != imm[0];
126 }
127
128 static u32 cond11(const u32 *r, const u32 *imm)
129 {
130 return r[0] != r[1] && r[2] == imm[0];
131 }
132
133 static u32 cond12(const u32 *r, const u32 *imm)
134 {
135 return r[0] != r[1] && r[2] > imm[0];
136 }
137
138 static u32 cond3(const u32 *r, const u32 *imm)
139 {
140 return r[0] != r[1];
141 }
142
143 static u32 cond13(const u32 *r, const u32 *imm)
144 {
145 return r[0] & imm[0];
146 }
147
148 static u32 cond8(const u32 *r, const u32 *imm)
149 {
150 return r[0] < (r[1] - imm[0]);
151 }
152
153 static u32 cond2(const u32 *r, const u32 *imm)
154 {
155 return r[0] > imm[0];
156 }
157
158 /* Array of Idle Check conditions */
159 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
160 cond0,
161 cond1,
162 cond2,
163 cond3,
164 cond4,
165 cond5,
166 cond6,
167 cond7,
168 cond8,
169 cond9,
170 cond10,
171 cond11,
172 cond12,
173 cond13,
174 };
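
/* Usage sketch (illustrative only, not part of the driver): an idle-check
 * rule carries a condition index, and evaluating the rule amounts to indexing
 * cond_arr with the register values read for the rule and the rule's
 * immediates. The values below are hypothetical.
 */
static inline bool qed_example_idle_chk_cond(void)
{
	const u32 regs[2] = { 0x12345678, 0 };	/* hypothetical register reads */
	const u32 imms[2] = { 0xff, 0x78 };	/* hypothetical immediates */

	/* cond6: (r[0] & imm[0]) != imm[1] -> 0 here, i.e. the rule passes */
	return cond_arr[6](regs, imms) != 0;
}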
175
176 /******************************* Data Types **********************************/
177
178 enum platform_ids {
179 PLATFORM_ASIC,
180 PLATFORM_RESERVED,
181 PLATFORM_RESERVED2,
182 PLATFORM_RESERVED3,
183 MAX_PLATFORM_IDS
184 };
185
186 /* Chip constant definitions */
187 struct chip_defs {
188 const char *name;
189 };
190
191 /* Platform constant definitions */
192 struct platform_defs {
193 const char *name;
194 u32 delay_factor;
195 u32 dmae_thresh;
196 u32 log_thresh;
197 };
198
199 /* Storm constant definitions.
200 * Addresses are in bytes, sizes are in quad-regs.
201 */
202 struct storm_defs {
203 char letter;
204 enum block_id block_id;
205 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
206 bool has_vfc;
207 u32 sem_fast_mem_addr;
208 u32 sem_frame_mode_addr;
209 u32 sem_slow_enable_addr;
210 u32 sem_slow_mode_addr;
211 u32 sem_slow_mode1_conf_addr;
212 u32 sem_sync_dbg_empty_addr;
213 u32 sem_slow_dbg_empty_addr;
214 u32 cm_ctx_wr_addr;
215 u32 cm_conn_ag_ctx_lid_size;
216 u32 cm_conn_ag_ctx_rd_addr;
217 u32 cm_conn_st_ctx_lid_size;
218 u32 cm_conn_st_ctx_rd_addr;
219 u32 cm_task_ag_ctx_lid_size;
220 u32 cm_task_ag_ctx_rd_addr;
221 u32 cm_task_st_ctx_lid_size;
222 u32 cm_task_st_ctx_rd_addr;
223 };
224
225 /* Block constant definitions */
226 struct block_defs {
227 const char *name;
228 bool exists[MAX_CHIP_IDS];
229 bool associated_to_storm;
230
231 /* Valid only if associated_to_storm is true */
232 u32 storm_id;
233 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
234 u32 dbg_select_addr;
235 u32 dbg_enable_addr;
236 u32 dbg_shift_addr;
237 u32 dbg_force_valid_addr;
238 u32 dbg_force_frame_addr;
239 bool has_reset_bit;
240
241 /* If true, block is taken out of reset before dump */
242 bool unreset;
243 enum dbg_reset_regs reset_reg;
244
245 /* Bit offset in reset register */
246 u8 reset_bit_offset;
247 };
248
249 /* Reset register definitions */
250 struct reset_reg_defs {
251 u32 addr;
252 bool exists[MAX_CHIP_IDS];
253 u32 unreset_val[MAX_CHIP_IDS];
254 };
255
256 struct grc_param_defs {
257 u32 default_val[MAX_CHIP_IDS];
258 u32 min;
259 u32 max;
260 bool is_preset;
261 bool is_persistent;
262 u32 exclude_all_preset_val;
263 u32 crash_preset_val;
264 };
265
266 /* Address is in 128b units. Width is in bits. */
267 struct rss_mem_defs {
268 const char *mem_name;
269 const char *type_name;
270 u32 addr;
271 u32 entry_width;
272 u32 num_entries[MAX_CHIP_IDS];
273 };
274
275 struct vfc_ram_defs {
276 const char *mem_name;
277 const char *type_name;
278 u32 base_row;
279 u32 num_rows;
280 };
281
282 struct big_ram_defs {
283 const char *instance_name;
284 enum mem_groups mem_group_id;
285 enum mem_groups ram_mem_group_id;
286 enum dbg_grc_params grc_param;
287 u32 addr_reg_addr;
288 u32 data_reg_addr;
289 u32 is_256b_reg_addr;
290 u32 is_256b_bit_offset[MAX_CHIP_IDS];
291 u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
292 };
293
294 struct phy_defs {
295 const char *phy_name;
296
297 /* PHY base GRC address */
298 u32 base_addr;
299
300 /* Relative address of indirect TBUS address register (bits 0..7) */
301 u32 tbus_addr_lo_addr;
302
303 /* Relative address of indirect TBUS address register (bits 8..10) */
304 u32 tbus_addr_hi_addr;
305
306 /* Relative address of indirect TBUS data register (bits 0..7) */
307 u32 tbus_data_lo_addr;
308
309 /* Relative address of indirect TBUS data register (bits 8..11) */
310 u32 tbus_data_hi_addr;
311 };
312
313 /* Split type definitions */
314 struct split_type_defs {
315 const char *name;
316 };
317
318 /******************************** Constants **********************************/
319
320 #define MAX_LCIDS 320
321 #define MAX_LTIDS 320
322
323 #define NUM_IOR_SETS 2
324 #define IORS_PER_SET 176
325 #define IOR_SET_OFFSET(set_id) ((set_id) * 256)
326
327 #define BYTES_IN_DWORD sizeof(u32)
328
329 /* In the macros below, size and offset are specified in bits */
330 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
331 #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
332 #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
333 #define FIELD_DWORD_OFFSET(type, field) \
334 (int)(FIELD_BIT_OFFSET(type, field) / 32)
335 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
336 #define FIELD_BIT_MASK(type, field) \
337 (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
338 FIELD_DWORD_SHIFT(type, field))
339
340 #define SET_VAR_FIELD(var, type, field, val) \
341 do { \
342 var[FIELD_DWORD_OFFSET(type, field)] &= \
343 (~FIELD_BIT_MASK(type, field)); \
344 var[FIELD_DWORD_OFFSET(type, field)] |= \
345 (val) << FIELD_DWORD_SHIFT(type, field); \
346 } while (0)
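
/* Usage sketch: the field macros expect <type>_<field>_OFFSET and
 * <type>_<field>_SIZE defines, both in bits. For a hypothetical 9-bit field
 * starting at bit 48 of a 64-bit command, FIELD_DWORD_OFFSET is 1,
 * FIELD_DWORD_SHIFT is 16 and FIELD_BIT_MASK is 0x1ff << 16, so SET_VAR_FIELD
 * updates bits 16..24 of the second dword. The EXAMPLE_* names below are
 * illustrative only.
 */
#define EXAMPLE_CMD_ROW_OFFSET	48
#define EXAMPLE_CMD_ROW_SIZE	9

static inline void qed_example_set_cmd_row(u32 *cmd, u32 row)
{
	SET_VAR_FIELD(cmd, EXAMPLE_CMD, ROW, row);
}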
347
348 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
349 do { \
350 for (i = 0; i < (arr_size); i++) \
351 qed_wr(dev, ptt, addr, (arr)[i]); \
352 } while (0)
353
354 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
355 do { \
356 for (i = 0; i < (arr_size); i++) \
357 (arr)[i] = qed_rd(dev, ptt, addr); \
358 } while (0)
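
/* Note that ARR_REG_WR/ARR_REG_RD expand to loops over a counter named 'i',
 * which the caller is expected to declare. A minimal usage sketch
 * (hypothetical helper):
 */
static inline void qed_example_wr_array(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 addr, const u32 *buf, u32 len)
{
	u32 i;

	ARR_REG_WR(p_hwfn, p_ptt, addr, buf, len);
}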
359
360 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
361 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
362
363 /* Extra lines include a signature line + optional latency events line */
364 #define NUM_EXTRA_DBG_LINES(block_desc) \
365 (1 + ((block_desc)->has_latency_events ? 1 : 0))
366 #define NUM_DBG_LINES(block_desc) \
367 ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
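
/* Example: a block descriptor with num_of_lines == 24 and latency events
 * enabled yields NUM_EXTRA_DBG_LINES == 2 and NUM_DBG_LINES == 26 (24 debug
 * lines plus the signature and latency-events lines).
 */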
368
369 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
370 #define RAM_LINES_TO_BYTES(lines) \
371 DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
372
373 #define REG_DUMP_LEN_SHIFT 24
374 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
375 BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
376
377 #define IDLE_CHK_RULE_SIZE_DWORDS \
378 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
379
380 #define IDLE_CHK_RESULT_HDR_DWORDS \
381 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
382
383 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
384 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
385
386 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
387
388 /* The sizes and offsets below are specified in bits */
389 #define VFC_CAM_CMD_STRUCT_SIZE 64
390 #define VFC_CAM_CMD_ROW_OFFSET 48
391 #define VFC_CAM_CMD_ROW_SIZE 9
392 #define VFC_CAM_ADDR_STRUCT_SIZE 16
393 #define VFC_CAM_ADDR_OP_OFFSET 0
394 #define VFC_CAM_ADDR_OP_SIZE 4
395 #define VFC_CAM_RESP_STRUCT_SIZE 256
396 #define VFC_RAM_ADDR_STRUCT_SIZE 16
397 #define VFC_RAM_ADDR_OP_OFFSET 0
398 #define VFC_RAM_ADDR_OP_SIZE 2
399 #define VFC_RAM_ADDR_ROW_OFFSET 2
400 #define VFC_RAM_ADDR_ROW_SIZE 10
401 #define VFC_RAM_RESP_STRUCT_SIZE 256
402
403 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
404 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
405 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
406 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
407 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
408 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
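
/* Resulting sizes: VFC_CAM_CMD_DWORDS = CEIL_DWORDS(64) = 2,
 * VFC_CAM_ADDR_DWORDS = VFC_RAM_ADDR_DWORDS = CEIL_DWORDS(16) = 1, and
 * VFC_CAM_RESP_DWORDS = VFC_RAM_RESP_DWORDS = CEIL_DWORDS(256) = 8.
 */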
409
410 #define NUM_VFC_RAM_TYPES 4
411
412 #define VFC_CAM_NUM_ROWS 512
413
414 #define VFC_OPCODE_CAM_RD 14
415 #define VFC_OPCODE_RAM_RD 0
416
417 #define NUM_RSS_MEM_TYPES 5
418
419 #define NUM_BIG_RAM_TYPES 3
420 #define BIG_RAM_NAME_LEN 3
421
422 #define NUM_PHY_TBUS_ADDRESSES 2048
423 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
424
425 #define RESET_REG_UNRESET_OFFSET 4
426
427 #define STALL_DELAY_MS 500
428
429 #define STATIC_DEBUG_LINE_DWORDS 9
430
431 #define NUM_COMMON_GLOBAL_PARAMS 8
432
433 #define FW_IMG_MAIN 1
434
435 #define REG_FIFO_ELEMENT_DWORDS 2
436 #define REG_FIFO_DEPTH_ELEMENTS 32
437 #define REG_FIFO_DEPTH_DWORDS \
438 (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
439
440 #define IGU_FIFO_ELEMENT_DWORDS 4
441 #define IGU_FIFO_DEPTH_ELEMENTS 64
442 #define IGU_FIFO_DEPTH_DWORDS \
443 (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
444
445 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
446 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
447 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
448 (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
449 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
450
451 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
452 (MCP_REG_SCRATCH + \
453 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
454
455 #define EMPTY_FW_VERSION_STR "???_???_???_???"
456 #define EMPTY_FW_IMAGE_STR "???????????????"
457
458 /***************************** Constant Arrays *******************************/
459
460 struct dbg_array {
461 const u32 *ptr;
462 u32 size_in_dwords;
463 };
464
465 /* Debug arrays */
466 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
467
468 /* Chip constant definitions array */
469 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
470 {"bb"},
471 {"ah"},
472 {"reserved"},
473 };
474
475 /* Storm constant definitions array */
476 static struct storm_defs s_storm_defs[] = {
477 /* Tstorm */
478 {'T', BLOCK_TSEM,
479 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
480 DBG_BUS_CLIENT_RBCT}, true,
481 TSEM_REG_FAST_MEMORY,
482 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
483 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
484 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
485 TCM_REG_CTX_RBC_ACCS,
486 4, TCM_REG_AGG_CON_CTX,
487 16, TCM_REG_SM_CON_CTX,
488 2, TCM_REG_AGG_TASK_CTX,
489 4, TCM_REG_SM_TASK_CTX},
490
491 /* Mstorm */
492 {'M', BLOCK_MSEM,
493 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
494 DBG_BUS_CLIENT_RBCM}, false,
495 MSEM_REG_FAST_MEMORY,
496 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
497 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
498 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
499 MCM_REG_CTX_RBC_ACCS,
500 1, MCM_REG_AGG_CON_CTX,
501 10, MCM_REG_SM_CON_CTX,
502 2, MCM_REG_AGG_TASK_CTX,
503 7, MCM_REG_SM_TASK_CTX},
504
505 /* Ustorm */
506 {'U', BLOCK_USEM,
507 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
508 DBG_BUS_CLIENT_RBCU}, false,
509 USEM_REG_FAST_MEMORY,
510 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
511 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
512 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
513 UCM_REG_CTX_RBC_ACCS,
514 2, UCM_REG_AGG_CON_CTX,
515 13, UCM_REG_SM_CON_CTX,
516 3, UCM_REG_AGG_TASK_CTX,
517 3, UCM_REG_SM_TASK_CTX},
518
519 /* Xstorm */
520 {'X', BLOCK_XSEM,
521 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
522 DBG_BUS_CLIENT_RBCX}, false,
523 XSEM_REG_FAST_MEMORY,
524 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
525 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
526 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
527 XCM_REG_CTX_RBC_ACCS,
528 9, XCM_REG_AGG_CON_CTX,
529 15, XCM_REG_SM_CON_CTX,
530 0, 0,
531 0, 0},
532
533 /* Ystorm */
534 {'Y', BLOCK_YSEM,
535 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
536 DBG_BUS_CLIENT_RBCY}, false,
537 YSEM_REG_FAST_MEMORY,
538 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
539 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
540 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
541 YCM_REG_CTX_RBC_ACCS,
542 2, YCM_REG_AGG_CON_CTX,
543 3, YCM_REG_SM_CON_CTX,
544 2, YCM_REG_AGG_TASK_CTX,
545 12, YCM_REG_SM_TASK_CTX},
546
547 /* Pstorm */
548 {'P', BLOCK_PSEM,
549 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
550 DBG_BUS_CLIENT_RBCS}, true,
551 PSEM_REG_FAST_MEMORY,
552 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
553 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
554 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
555 PCM_REG_CTX_RBC_ACCS,
556 0, 0,
557 10, PCM_REG_SM_CON_CTX,
558 0, 0,
559 0, 0}
560 };
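
/* Example of the quad-reg sizes above: the Tstorm entry lists a
 * cm_conn_st_ctx_lid_size of 16 quad-regs, which (at 4 dwords per quad-reg)
 * presumably corresponds to 64 dwords (256 bytes) of storm connection context
 * per lid, read through TCM_REG_SM_CON_CTX.
 */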
561
562 /* Block definitions array */
563
564 static struct block_defs block_grc_defs = {
565 "grc",
566 {true, true, true}, false, 0,
567 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
568 GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
569 GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
570 GRC_REG_DBG_FORCE_FRAME,
571 true, false, DBG_RESET_REG_MISC_PL_UA, 1
572 };
573
574 static struct block_defs block_miscs_defs = {
575 "miscs", {true, true, true}, false, 0,
576 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
577 0, 0, 0, 0, 0,
578 false, false, MAX_DBG_RESET_REGS, 0
579 };
580
581 static struct block_defs block_misc_defs = {
582 "misc", {true, true, true}, false, 0,
583 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
584 0, 0, 0, 0, 0,
585 false, false, MAX_DBG_RESET_REGS, 0
586 };
587
588 static struct block_defs block_dbu_defs = {
589 "dbu", {true, true, true}, false, 0,
590 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
591 0, 0, 0, 0, 0,
592 false, false, MAX_DBG_RESET_REGS, 0
593 };
594
595 static struct block_defs block_pglue_b_defs = {
596 "pglue_b",
597 {true, true, true}, false, 0,
598 {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
599 PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
600 PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
601 PGLUE_B_REG_DBG_FORCE_FRAME,
602 true, false, DBG_RESET_REG_MISCS_PL_HV, 1
603 };
604
605 static struct block_defs block_cnig_defs = {
606 "cnig",
607 {true, true, true}, false, 0,
608 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
609 DBG_BUS_CLIENT_RBCW},
610 CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
611 CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
612 CNIG_REG_DBG_FORCE_FRAME_K2_E5,
613 true, false, DBG_RESET_REG_MISCS_PL_HV, 0
614 };
615
616 static struct block_defs block_cpmu_defs = {
617 "cpmu", {true, true, true}, false, 0,
618 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
619 0, 0, 0, 0, 0,
620 true, false, DBG_RESET_REG_MISCS_PL_HV, 8
621 };
622
623 static struct block_defs block_ncsi_defs = {
624 "ncsi",
625 {true, true, true}, false, 0,
626 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
627 NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
628 NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
629 NCSI_REG_DBG_FORCE_FRAME,
630 true, false, DBG_RESET_REG_MISCS_PL_HV, 5
631 };
632
633 static struct block_defs block_opte_defs = {
634 "opte", {true, true, false}, false, 0,
635 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
636 0, 0, 0, 0, 0,
637 true, false, DBG_RESET_REG_MISCS_PL_HV, 4
638 };
639
640 static struct block_defs block_bmb_defs = {
641 "bmb",
642 {true, true, true}, false, 0,
643 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
644 BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
645 BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
646 BMB_REG_DBG_FORCE_FRAME,
647 true, false, DBG_RESET_REG_MISCS_PL_UA, 7
648 };
649
650 static struct block_defs block_pcie_defs = {
651 "pcie",
652 {true, true, true}, false, 0,
653 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
654 DBG_BUS_CLIENT_RBCH},
655 PCIE_REG_DBG_COMMON_SELECT_K2_E5,
656 PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
657 PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
658 PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
659 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
660 false, false, MAX_DBG_RESET_REGS, 0
661 };
662
663 static struct block_defs block_mcp_defs = {
664 "mcp", {true, true, true}, false, 0,
665 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
666 0, 0, 0, 0, 0,
667 false, false, MAX_DBG_RESET_REGS, 0
668 };
669
670 static struct block_defs block_mcp2_defs = {
671 "mcp2",
672 {true, true, true}, false, 0,
673 {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
674 MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
675 MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
676 MCP2_REG_DBG_FORCE_FRAME,
677 false, false, MAX_DBG_RESET_REGS, 0
678 };
679
680 static struct block_defs block_pswhst_defs = {
681 "pswhst",
682 {true, true, true}, false, 0,
683 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
684 PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
685 PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
686 PSWHST_REG_DBG_FORCE_FRAME,
687 true, false, DBG_RESET_REG_MISC_PL_HV, 0
688 };
689
690 static struct block_defs block_pswhst2_defs = {
691 "pswhst2",
692 {true, true, true}, false, 0,
693 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
694 PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
695 PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
696 PSWHST2_REG_DBG_FORCE_FRAME,
697 true, false, DBG_RESET_REG_MISC_PL_HV, 0
698 };
699
700 static struct block_defs block_pswrd_defs = {
701 "pswrd",
702 {true, true, true}, false, 0,
703 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
704 PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
705 PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
706 PSWRD_REG_DBG_FORCE_FRAME,
707 true, false, DBG_RESET_REG_MISC_PL_HV, 2
708 };
709
710 static struct block_defs block_pswrd2_defs = {
711 "pswrd2",
712 {true, true, true}, false, 0,
713 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
714 PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
715 PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
716 PSWRD2_REG_DBG_FORCE_FRAME,
717 true, false, DBG_RESET_REG_MISC_PL_HV, 2
718 };
719
720 static struct block_defs block_pswwr_defs = {
721 "pswwr",
722 {true, true, true}, false, 0,
723 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
724 PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
725 PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
726 PSWWR_REG_DBG_FORCE_FRAME,
727 true, false, DBG_RESET_REG_MISC_PL_HV, 3
728 };
729
730 static struct block_defs block_pswwr2_defs = {
731 "pswwr2", {true, true, true}, false, 0,
732 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
733 0, 0, 0, 0, 0,
734 true, false, DBG_RESET_REG_MISC_PL_HV, 3
735 };
736
737 static struct block_defs block_pswrq_defs = {
738 "pswrq",
739 {true, true, true}, false, 0,
740 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
741 PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
742 PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
743 PSWRQ_REG_DBG_FORCE_FRAME,
744 true, false, DBG_RESET_REG_MISC_PL_HV, 1
745 };
746
747 static struct block_defs block_pswrq2_defs = {
748 "pswrq2",
749 {true, true, true}, false, 0,
750 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
751 PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
752 PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
753 PSWRQ2_REG_DBG_FORCE_FRAME,
754 true, false, DBG_RESET_REG_MISC_PL_HV, 1
755 };
756
757 static struct block_defs block_pglcs_defs = {
758 "pglcs",
759 {true, true, true}, false, 0,
760 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
761 DBG_BUS_CLIENT_RBCH},
762 PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
763 PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
764 PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
765 true, false, DBG_RESET_REG_MISCS_PL_HV, 2
766 };
767
768 static struct block_defs block_ptu_defs = {
769 "ptu",
770 {true, true, true}, false, 0,
771 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
772 PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
773 PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
774 PTU_REG_DBG_FORCE_FRAME,
775 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
776 };
777
778 static struct block_defs block_dmae_defs = {
779 "dmae",
780 {true, true, true}, false, 0,
781 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
782 DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
783 DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
784 DMAE_REG_DBG_FORCE_FRAME,
785 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
786 };
787
788 static struct block_defs block_tcm_defs = {
789 "tcm",
790 {true, true, true}, true, DBG_TSTORM_ID,
791 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
792 TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
793 TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
794 TCM_REG_DBG_FORCE_FRAME,
795 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
796 };
797
798 static struct block_defs block_mcm_defs = {
799 "mcm",
800 {true, true, true}, true, DBG_MSTORM_ID,
801 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
802 MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
803 MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
804 MCM_REG_DBG_FORCE_FRAME,
805 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
806 };
807
808 static struct block_defs block_ucm_defs = {
809 "ucm",
810 {true, true, true}, true, DBG_USTORM_ID,
811 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
812 UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
813 UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
814 UCM_REG_DBG_FORCE_FRAME,
815 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
816 };
817
818 static struct block_defs block_xcm_defs = {
819 "xcm",
820 {true, true, true}, true, DBG_XSTORM_ID,
821 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
822 XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
823 XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
824 XCM_REG_DBG_FORCE_FRAME,
825 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
826 };
827
828 static struct block_defs block_ycm_defs = {
829 "ycm",
830 {true, true, true}, true, DBG_YSTORM_ID,
831 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
832 YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
833 YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
834 YCM_REG_DBG_FORCE_FRAME,
835 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
836 };
837
838 static struct block_defs block_pcm_defs = {
839 "pcm",
840 {true, true, true}, true, DBG_PSTORM_ID,
841 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
842 PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
843 PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
844 PCM_REG_DBG_FORCE_FRAME,
845 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
846 };
847
848 static struct block_defs block_qm_defs = {
849 "qm",
850 {true, true, true}, false, 0,
851 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
852 QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
853 QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
854 QM_REG_DBG_FORCE_FRAME,
855 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
856 };
857
858 static struct block_defs block_tm_defs = {
859 "tm",
860 {true, true, true}, false, 0,
861 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
862 TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
863 TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
864 TM_REG_DBG_FORCE_FRAME,
865 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
866 };
867
868 static struct block_defs block_dorq_defs = {
869 "dorq",
870 {true, true, true}, false, 0,
871 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
872 DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
873 DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
874 DORQ_REG_DBG_FORCE_FRAME,
875 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
876 };
877
878 static struct block_defs block_brb_defs = {
879 "brb",
880 {true, true, true}, false, 0,
881 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
882 BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
883 BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
884 BRB_REG_DBG_FORCE_FRAME,
885 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
886 };
887
888 static struct block_defs block_src_defs = {
889 "src",
890 {true, true, true}, false, 0,
891 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
892 SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
893 SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
894 SRC_REG_DBG_FORCE_FRAME,
895 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
896 };
897
898 static struct block_defs block_prs_defs = {
899 "prs",
900 {true, true, true}, false, 0,
901 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
902 PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
903 PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
904 PRS_REG_DBG_FORCE_FRAME,
905 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
906 };
907
908 static struct block_defs block_tsdm_defs = {
909 "tsdm",
910 {true, true, true}, true, DBG_TSTORM_ID,
911 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
912 TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
913 TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
914 TSDM_REG_DBG_FORCE_FRAME,
915 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
916 };
917
918 static struct block_defs block_msdm_defs = {
919 "msdm",
920 {true, true, true}, true, DBG_MSTORM_ID,
921 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
922 MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
923 MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
924 MSDM_REG_DBG_FORCE_FRAME,
925 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
926 };
927
928 static struct block_defs block_usdm_defs = {
929 "usdm",
930 {true, true, true}, true, DBG_USTORM_ID,
931 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
932 USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
933 USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
934 USDM_REG_DBG_FORCE_FRAME,
935 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
936 };
937
938 static struct block_defs block_xsdm_defs = {
939 "xsdm",
940 {true, true, true}, true, DBG_XSTORM_ID,
941 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
942 XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
943 XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
944 XSDM_REG_DBG_FORCE_FRAME,
945 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
946 };
947
948 static struct block_defs block_ysdm_defs = {
949 "ysdm",
950 {true, true, true}, true, DBG_YSTORM_ID,
951 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
952 YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
953 YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
954 YSDM_REG_DBG_FORCE_FRAME,
955 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
956 };
957
958 static struct block_defs block_psdm_defs = {
959 "psdm",
960 {true, true, true}, true, DBG_PSTORM_ID,
961 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
962 PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
963 PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
964 PSDM_REG_DBG_FORCE_FRAME,
965 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
966 };
967
968 static struct block_defs block_tsem_defs = {
969 "tsem",
970 {true, true, true}, true, DBG_TSTORM_ID,
971 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
972 TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
973 TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
974 TSEM_REG_DBG_FORCE_FRAME,
975 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
976 };
977
978 static struct block_defs block_msem_defs = {
979 "msem",
980 {true, true, true}, true, DBG_MSTORM_ID,
981 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
982 MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
983 MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
984 MSEM_REG_DBG_FORCE_FRAME,
985 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
986 };
987
988 static struct block_defs block_usem_defs = {
989 "usem",
990 {true, true, true}, true, DBG_USTORM_ID,
991 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
992 USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
993 USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
994 USEM_REG_DBG_FORCE_FRAME,
995 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
996 };
997
998 static struct block_defs block_xsem_defs = {
999 "xsem",
1000 {true, true, true}, true, DBG_XSTORM_ID,
1001 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1002 XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1003 XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1004 XSEM_REG_DBG_FORCE_FRAME,
1005 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1006 };
1007
1008 static struct block_defs block_ysem_defs = {
1009 "ysem",
1010 {true, true, true}, true, DBG_YSTORM_ID,
1011 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1012 YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1013 YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1014 YSEM_REG_DBG_FORCE_FRAME,
1015 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1016 };
1017
1018 static struct block_defs block_psem_defs = {
1019 "psem",
1020 {true, true, true}, true, DBG_PSTORM_ID,
1021 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1022 PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1023 PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1024 PSEM_REG_DBG_FORCE_FRAME,
1025 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1026 };
1027
1028 static struct block_defs block_rss_defs = {
1029 "rss",
1030 {true, true, true}, false, 0,
1031 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1032 RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1033 RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1034 RSS_REG_DBG_FORCE_FRAME,
1035 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1036 };
1037
1038 static struct block_defs block_tmld_defs = {
1039 "tmld",
1040 {true, true, true}, false, 0,
1041 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1042 TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1043 TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1044 TMLD_REG_DBG_FORCE_FRAME,
1045 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1046 };
1047
1048 static struct block_defs block_muld_defs = {
1049 "muld",
1050 {true, true, true}, false, 0,
1051 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1052 MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1053 MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1054 MULD_REG_DBG_FORCE_FRAME,
1055 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1056 };
1057
1058 static struct block_defs block_yuld_defs = {
1059 "yuld",
1060 {true, true, false}, false, 0,
1061 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1062 MAX_DBG_BUS_CLIENTS},
1063 YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1064 YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1065 YULD_REG_DBG_FORCE_FRAME_BB_K2,
1066 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1067 15
1068 };
1069
1070 static struct block_defs block_xyld_defs = {
1071 "xyld",
1072 {true, true, true}, false, 0,
1073 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1074 XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1075 XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1076 XYLD_REG_DBG_FORCE_FRAME,
1077 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1078 };
1079
1080 static struct block_defs block_ptld_defs = {
1081 "ptld",
1082 {false, false, true}, false, 0,
1083 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1084 PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1085 PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1086 PTLD_REG_DBG_FORCE_FRAME_E5,
1087 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1088 28
1089 };
1090
1091 static struct block_defs block_ypld_defs = {
1092 "ypld",
1093 {false, false, true}, false, 0,
1094 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1095 YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1096 YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1097 YPLD_REG_DBG_FORCE_FRAME_E5,
1098 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1099 27
1100 };
1101
1102 static struct block_defs block_prm_defs = {
1103 "prm",
1104 {true, true, true}, false, 0,
1105 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1106 PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1107 PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1108 PRM_REG_DBG_FORCE_FRAME,
1109 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1110 };
1111
1112 static struct block_defs block_pbf_pb1_defs = {
1113 "pbf_pb1",
1114 {true, true, true}, false, 0,
1115 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1116 PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1117 PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1118 PBF_PB1_REG_DBG_FORCE_FRAME,
1119 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1120 11
1121 };
1122
1123 static struct block_defs block_pbf_pb2_defs = {
1124 "pbf_pb2",
1125 {true, true, true}, false, 0,
1126 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1127 PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1128 PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1129 PBF_PB2_REG_DBG_FORCE_FRAME,
1130 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1131 12
1132 };
1133
1134 static struct block_defs block_rpb_defs = {
1135 "rpb",
1136 {true, true, true}, false, 0,
1137 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1138 RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1139 RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1140 RPB_REG_DBG_FORCE_FRAME,
1141 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1142 };
1143
1144 static struct block_defs block_btb_defs = {
1145 "btb",
1146 {true, true, true}, false, 0,
1147 {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1148 BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1149 BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1150 BTB_REG_DBG_FORCE_FRAME,
1151 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1152 };
1153
1154 static struct block_defs block_pbf_defs = {
1155 "pbf",
1156 {true, true, true}, false, 0,
1157 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1158 PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1159 PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1160 PBF_REG_DBG_FORCE_FRAME,
1161 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1162 };
1163
1164 static struct block_defs block_rdif_defs = {
1165 "rdif",
1166 {true, true, true}, false, 0,
1167 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1168 RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1169 RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1170 RDIF_REG_DBG_FORCE_FRAME,
1171 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1172 };
1173
1174 static struct block_defs block_tdif_defs = {
1175 "tdif",
1176 {true, true, true}, false, 0,
1177 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1178 TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1179 TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1180 TDIF_REG_DBG_FORCE_FRAME,
1181 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1182 };
1183
1184 static struct block_defs block_cdu_defs = {
1185 "cdu",
1186 {true, true, true}, false, 0,
1187 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1188 CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1189 CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1190 CDU_REG_DBG_FORCE_FRAME,
1191 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1192 };
1193
1194 static struct block_defs block_ccfc_defs = {
1195 "ccfc",
1196 {true, true, true}, false, 0,
1197 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1198 CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1199 CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1200 CCFC_REG_DBG_FORCE_FRAME,
1201 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1202 };
1203
1204 static struct block_defs block_tcfc_defs = {
1205 "tcfc",
1206 {true, true, true}, false, 0,
1207 {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1208 TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1209 TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1210 TCFC_REG_DBG_FORCE_FRAME,
1211 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1212 };
1213
1214 static struct block_defs block_igu_defs = {
1215 "igu",
1216 {true, true, true}, false, 0,
1217 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1218 IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1219 IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1220 IGU_REG_DBG_FORCE_FRAME,
1221 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1222 };
1223
1224 static struct block_defs block_cau_defs = {
1225 "cau",
1226 {true, true, true}, false, 0,
1227 {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1228 CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1229 CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1230 CAU_REG_DBG_FORCE_FRAME,
1231 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1232 };
1233
1234 static struct block_defs block_rgfs_defs = {
1235 "rgfs", {false, false, true}, false, 0,
1236 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1237 0, 0, 0, 0, 0,
1238 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1239 };
1240
1241 static struct block_defs block_rgsrc_defs = {
1242 "rgsrc",
1243 {false, false, true}, false, 0,
1244 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1245 RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1246 RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1247 RGSRC_REG_DBG_FORCE_FRAME_E5,
1248 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1249 30
1250 };
1251
1252 static struct block_defs block_tgfs_defs = {
1253 "tgfs", {false, false, true}, false, 0,
1254 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1255 0, 0, 0, 0, 0,
1256 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1257 };
1258
1259 static struct block_defs block_tgsrc_defs = {
1260 "tgsrc",
1261 {false, false, true}, false, 0,
1262 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1263 TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1264 TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1265 TGSRC_REG_DBG_FORCE_FRAME_E5,
1266 true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1267 31
1268 };
1269
1270 static struct block_defs block_umac_defs = {
1271 "umac",
1272 {true, true, true}, false, 0,
1273 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1274 DBG_BUS_CLIENT_RBCZ},
1275 UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1276 UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1277 UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1278 true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1279 };
1280
1281 static struct block_defs block_xmac_defs = {
1282 "xmac", {true, false, false}, false, 0,
1283 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1284 0, 0, 0, 0, 0,
1285 false, false, MAX_DBG_RESET_REGS, 0
1286 };
1287
1288 static struct block_defs block_dbg_defs = {
1289 "dbg", {true, true, true}, false, 0,
1290 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1291 0, 0, 0, 0, 0,
1292 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1293 };
1294
1295 static struct block_defs block_nig_defs = {
1296 "nig",
1297 {true, true, true}, false, 0,
1298 {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1299 NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1300 NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1301 NIG_REG_DBG_FORCE_FRAME,
1302 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1303 };
1304
1305 static struct block_defs block_wol_defs = {
1306 "wol",
1307 {false, true, true}, false, 0,
1308 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1309 WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1310 WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1311 WOL_REG_DBG_FORCE_FRAME_K2_E5,
1312 true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1313 };
1314
1315 static struct block_defs block_bmbn_defs = {
1316 "bmbn",
1317 {false, true, true}, false, 0,
1318 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1319 DBG_BUS_CLIENT_RBCB},
1320 BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1321 BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1322 BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1323 false, false, MAX_DBG_RESET_REGS, 0
1324 };
1325
1326 static struct block_defs block_ipc_defs = {
1327 "ipc", {true, true, true}, false, 0,
1328 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1329 0, 0, 0, 0, 0,
1330 true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1331 };
1332
1333 static struct block_defs block_nwm_defs = {
1334 "nwm",
1335 {false, true, true}, false, 0,
1336 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1337 NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1338 NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1339 NWM_REG_DBG_FORCE_FRAME_K2_E5,
1340 true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1341 };
1342
1343 static struct block_defs block_nws_defs = {
1344 "nws",
1345 {false, true, true}, false, 0,
1346 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1347 NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1348 NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1349 NWS_REG_DBG_FORCE_FRAME_K2_E5,
1350 true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1351 };
1352
1353 static struct block_defs block_ms_defs = {
1354 "ms",
1355 {false, true, true}, false, 0,
1356 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1357 MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1358 MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1359 MS_REG_DBG_FORCE_FRAME_K2_E5,
1360 true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1361 };
1362
1363 static struct block_defs block_phy_pcie_defs = {
1364 "phy_pcie",
1365 {false, true, true}, false, 0,
1366 {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1367 DBG_BUS_CLIENT_RBCH},
1368 PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1369 PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1370 PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1371 PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1372 PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1373 false, false, MAX_DBG_RESET_REGS, 0
1374 };
1375
1376 static struct block_defs block_led_defs = {
1377 "led", {false, true, true}, false, 0,
1378 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1379 0, 0, 0, 0, 0,
1380 true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1381 };
1382
1383 static struct block_defs block_avs_wrap_defs = {
1384 "avs_wrap", {false, true, false}, false, 0,
1385 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1386 0, 0, 0, 0, 0,
1387 true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1388 };
1389
1390 static struct block_defs block_pxpreqbus_defs = {
1391 "pxpreqbus", {false, false, false}, false, 0,
1392 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1393 0, 0, 0, 0, 0,
1394 false, false, MAX_DBG_RESET_REGS, 0
1395 };
1396
1397 static struct block_defs block_misc_aeu_defs = {
1398 "misc_aeu", {true, true, true}, false, 0,
1399 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1400 0, 0, 0, 0, 0,
1401 false, false, MAX_DBG_RESET_REGS, 0
1402 };
1403
1404 static struct block_defs block_bar0_map_defs = {
1405 "bar0_map", {true, true, true}, false, 0,
1406 {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1407 0, 0, 0, 0, 0,
1408 false, false, MAX_DBG_RESET_REGS, 0
1409 };
1410
1411 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1412 &block_grc_defs,
1413 &block_miscs_defs,
1414 &block_misc_defs,
1415 &block_dbu_defs,
1416 &block_pglue_b_defs,
1417 &block_cnig_defs,
1418 &block_cpmu_defs,
1419 &block_ncsi_defs,
1420 &block_opte_defs,
1421 &block_bmb_defs,
1422 &block_pcie_defs,
1423 &block_mcp_defs,
1424 &block_mcp2_defs,
1425 &block_pswhst_defs,
1426 &block_pswhst2_defs,
1427 &block_pswrd_defs,
1428 &block_pswrd2_defs,
1429 &block_pswwr_defs,
1430 &block_pswwr2_defs,
1431 &block_pswrq_defs,
1432 &block_pswrq2_defs,
1433 &block_pglcs_defs,
1434 &block_dmae_defs,
1435 &block_ptu_defs,
1436 &block_tcm_defs,
1437 &block_mcm_defs,
1438 &block_ucm_defs,
1439 &block_xcm_defs,
1440 &block_ycm_defs,
1441 &block_pcm_defs,
1442 &block_qm_defs,
1443 &block_tm_defs,
1444 &block_dorq_defs,
1445 &block_brb_defs,
1446 &block_src_defs,
1447 &block_prs_defs,
1448 &block_tsdm_defs,
1449 &block_msdm_defs,
1450 &block_usdm_defs,
1451 &block_xsdm_defs,
1452 &block_ysdm_defs,
1453 &block_psdm_defs,
1454 &block_tsem_defs,
1455 &block_msem_defs,
1456 &block_usem_defs,
1457 &block_xsem_defs,
1458 &block_ysem_defs,
1459 &block_psem_defs,
1460 &block_rss_defs,
1461 &block_tmld_defs,
1462 &block_muld_defs,
1463 &block_yuld_defs,
1464 &block_xyld_defs,
1465 &block_ptld_defs,
1466 &block_ypld_defs,
1467 &block_prm_defs,
1468 &block_pbf_pb1_defs,
1469 &block_pbf_pb2_defs,
1470 &block_rpb_defs,
1471 &block_btb_defs,
1472 &block_pbf_defs,
1473 &block_rdif_defs,
1474 &block_tdif_defs,
1475 &block_cdu_defs,
1476 &block_ccfc_defs,
1477 &block_tcfc_defs,
1478 &block_igu_defs,
1479 &block_cau_defs,
1480 &block_rgfs_defs,
1481 &block_rgsrc_defs,
1482 &block_tgfs_defs,
1483 &block_tgsrc_defs,
1484 &block_umac_defs,
1485 &block_xmac_defs,
1486 &block_dbg_defs,
1487 &block_nig_defs,
1488 &block_wol_defs,
1489 &block_bmbn_defs,
1490 &block_ipc_defs,
1491 &block_nwm_defs,
1492 &block_nws_defs,
1493 &block_ms_defs,
1494 &block_phy_pcie_defs,
1495 &block_led_defs,
1496 &block_avs_wrap_defs,
1497 &block_pxpreqbus_defs,
1498 &block_misc_aeu_defs,
1499 &block_bar0_map_defs,
1500 };
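
/* Lookup sketch (hypothetical helper): block definitions are indexed by
 * block_id, e.g. to check whether a block exists on the current chip and has
 * a debug bus client there.
 */
static inline bool qed_example_block_has_dbg_bus(struct qed_hwfn *p_hwfn,
						 enum block_id block_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct block_defs *block = s_block_defs[block_id];

	return block->exists[dev_data->chip_id] &&
	       block->dbg_client_id[dev_data->chip_id] != MAX_DBG_BUS_CLIENTS;
}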
1501
1502 static struct platform_defs s_platform_defs[] = {
1503 {"asic", 1, 256, 32768},
1504 {"reserved", 0, 0, 0},
1505 {"reserved2", 0, 0, 0},
1506 {"reserved3", 0, 0, 0}
1507 };
1508
1509 static struct grc_param_defs s_grc_param_defs[] = {
1510 /* DBG_GRC_PARAM_DUMP_TSTORM */
1511 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1512
1513 /* DBG_GRC_PARAM_DUMP_MSTORM */
1514 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1515
1516 /* DBG_GRC_PARAM_DUMP_USTORM */
1517 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1518
1519 /* DBG_GRC_PARAM_DUMP_XSTORM */
1520 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1521
1522 /* DBG_GRC_PARAM_DUMP_YSTORM */
1523 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1524
1525 /* DBG_GRC_PARAM_DUMP_PSTORM */
1526 {{1, 1, 1}, 0, 1, false, false, 1, 1},
1527
1528 /* DBG_GRC_PARAM_DUMP_REGS */
1529 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1530
1531 /* DBG_GRC_PARAM_DUMP_RAM */
1532 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1533
1534 /* DBG_GRC_PARAM_DUMP_PBUF */
1535 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1536
1537 /* DBG_GRC_PARAM_DUMP_IOR */
1538 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1539
1540 /* DBG_GRC_PARAM_DUMP_VFC */
1541 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1542
1543 /* DBG_GRC_PARAM_DUMP_CM_CTX */
1544 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1545
1546 /* DBG_GRC_PARAM_DUMP_ILT */
1547 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1548
1549 /* DBG_GRC_PARAM_DUMP_RSS */
1550 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1551
1552 /* DBG_GRC_PARAM_DUMP_CAU */
1553 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1554
1555 /* DBG_GRC_PARAM_DUMP_QM */
1556 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1557
1558 /* DBG_GRC_PARAM_DUMP_MCP */
1559 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1560
1561 /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
1562 {{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
1563
1564 /* DBG_GRC_PARAM_DUMP_CFC */
1565 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1566
1567 /* DBG_GRC_PARAM_DUMP_IGU */
1568 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1569
1570 /* DBG_GRC_PARAM_DUMP_BRB */
1571 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1572
1573 /* DBG_GRC_PARAM_DUMP_BTB */
1574 {{0, 0, 0}, 0, 1, false, false, 0, 1},
1575
1576 /* DBG_GRC_PARAM_DUMP_BMB */
1577 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1578
1579 /* DBG_GRC_PARAM_DUMP_NIG */
1580 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1581
1582 /* DBG_GRC_PARAM_DUMP_MULD */
1583 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1584
1585 /* DBG_GRC_PARAM_DUMP_PRS */
1586 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1587
1588 /* DBG_GRC_PARAM_DUMP_DMAE */
1589 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1590
1591 /* DBG_GRC_PARAM_DUMP_TM */
1592 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1593
1594 /* DBG_GRC_PARAM_DUMP_SDM */
1595 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1596
1597 /* DBG_GRC_PARAM_DUMP_DIF */
1598 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1599
1600 /* DBG_GRC_PARAM_DUMP_STATIC */
1601 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1602
1603 /* DBG_GRC_PARAM_UNSTALL */
1604 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1605
1606 /* DBG_GRC_PARAM_NUM_LCIDS */
1607 {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
1608 MAX_LCIDS, MAX_LCIDS},
1609
1610 /* DBG_GRC_PARAM_NUM_LTIDS */
1611 {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
1612 MAX_LTIDS, MAX_LTIDS},
1613
1614 /* DBG_GRC_PARAM_EXCLUDE_ALL */
1615 {{0, 0, 0}, 0, 1, true, false, 0, 0},
1616
1617 /* DBG_GRC_PARAM_CRASH */
1618 {{0, 0, 0}, 0, 1, true, false, 0, 0},
1619
1620 /* DBG_GRC_PARAM_PARITY_SAFE */
1621 {{0, 0, 0}, 0, 1, false, false, 1, 0},
1622
1623 /* DBG_GRC_PARAM_DUMP_CM */
1624 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1625
1626 /* DBG_GRC_PARAM_DUMP_PHY */
1627 {{1, 1, 1}, 0, 1, false, false, 0, 1},
1628
1629 /* DBG_GRC_PARAM_NO_MCP */
1630 {{0, 0, 0}, 0, 1, false, false, 0, 0},
1631
1632 /* DBG_GRC_PARAM_NO_FW_VER */
1633 {{0, 0, 0}, 0, 1, false, false, 0, 0}
1634 };
1635
1636 static struct rss_mem_defs s_rss_mem_defs[] = {
1637 { "rss_mem_cid", "rss_cid", 0, 32,
1638 {256, 320, 512} },
1639
1640 { "rss_mem_key_msb", "rss_key", 1024, 256,
1641 {128, 208, 257} },
1642
1643 { "rss_mem_key_lsb", "rss_key", 2048, 64,
1644 {128, 208, 257} },
1645
1646 { "rss_mem_info", "rss_info", 3072, 16,
1647 {128, 208, 256} },
1648
1649 { "rss_mem_ind", "rss_ind", 4096, 16,
1650 {16384, 26624, 32768} }
1651 };
1652
1653 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1654 {"vfc_ram_tt1", "vfc_ram", 0, 512},
1655 {"vfc_ram_mtt2", "vfc_ram", 512, 128},
1656 {"vfc_ram_stt2", "vfc_ram", 640, 32},
1657 {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1658 };
1659
1660 static struct big_ram_defs s_big_ram_defs[] = {
1661 { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1662 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1663 MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1664 {153600, 180224, 282624} },
1665
1666 { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1667 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1668 MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1669 {92160, 117760, 168960} },
1670
1671 { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1672 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1673 MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1674 {36864, 36864, 36864} }
1675 };
1676
1677 static struct reset_reg_defs s_reset_regs_defs[] = {
1678 /* DBG_RESET_REG_MISCS_PL_UA */
1679 { MISCS_REG_RESET_PL_UA,
1680 {true, true, true}, {0x0, 0x0, 0x0} },
1681
1682 /* DBG_RESET_REG_MISCS_PL_HV */
1683 { MISCS_REG_RESET_PL_HV,
1684 {true, true, true}, {0x0, 0x400, 0x600} },
1685
1686 /* DBG_RESET_REG_MISCS_PL_HV_2 */
1687 { MISCS_REG_RESET_PL_HV_2_K2_E5,
1688 {false, true, true}, {0x0, 0x0, 0x0} },
1689
1690 /* DBG_RESET_REG_MISC_PL_UA */
1691 { MISC_REG_RESET_PL_UA,
1692 {true, true, true}, {0x0, 0x0, 0x0} },
1693
1694 /* DBG_RESET_REG_MISC_PL_HV */
1695 { MISC_REG_RESET_PL_HV,
1696 {true, true, true}, {0x0, 0x0, 0x0} },
1697
1698 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1699 { MISC_REG_RESET_PL_PDA_VMAIN_1,
1700 {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1701
1702 /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1703 { MISC_REG_RESET_PL_PDA_VMAIN_2,
1704 {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1705
1706 /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1707 { MISC_REG_RESET_PL_PDA_VAUX,
1708 {true, true, true}, {0x2, 0x2, 0x2} },
1709 };
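
/* Sketch of how these tables are presumably combined when taking blocks out
 * of reset before a GRC dump (hypothetical helper; the real flow also ORs in
 * the per-block bits collected from s_block_defs):
 */
static inline void qed_example_unreset_reg(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   enum dbg_reset_regs reset_reg,
					   u32 block_bits)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct reset_reg_defs *r = &s_reset_regs_defs[reset_reg];

	if (!r->exists[dev_data->chip_id])
		return;

	/* The "unreset" companion register is assumed to sit
	 * RESET_REG_UNRESET_OFFSET bytes above the reset register.
	 */
	qed_wr(p_hwfn, p_ptt, r->addr + RESET_REG_UNRESET_OFFSET,
	       block_bits | r->unreset_val[dev_data->chip_id]);
}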
1710
1711 static struct phy_defs s_phy_defs[] = {
1712 {"nw_phy", NWS_REG_NWS_CMU_K2,
1713 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1714 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1715 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1716 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1717 {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1718 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1719 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1720 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1721 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1722 {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1723 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1724 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1725 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1726 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1727 {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1728 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1729 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1730 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1731 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1732 };
1733
1734 static struct split_type_defs s_split_type_defs[] = {
1735 /* SPLIT_TYPE_NONE */
1736 {"eng"},
1737
1738 /* SPLIT_TYPE_PORT */
1739 {"port"},
1740
1741 /* SPLIT_TYPE_PF */
1742 {"pf"},
1743
1744 /* SPLIT_TYPE_PORT_PF */
1745 {"port"},
1746
1747 /* SPLIT_TYPE_VF */
1748 {"vf"}
1749 };
1750
1751 /**************************** Private Functions ******************************/
1752
1753 /* Reads and returns a single dword from the specified unaligned buffer */
1754 static u32 qed_read_unaligned_dword(u8 *buf)
1755 {
1756 u32 dword;
1757
1758 memcpy((u8 *)&dword, buf, sizeof(dword));
1759 return dword;
1760 }
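
/* Usage sketch: callers pull dwords out of byte-oriented buffers (e.g. MCP
 * trace data) without relying on 32-bit alignment:
 *
 *	u32 val = qed_read_unaligned_dword(&byte_buf[byte_offset]);
 *
 * (byte_buf and byte_offset are hypothetical names.)
 */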
1761
1762 /* Returns the value of the specified GRC param */
1763 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1764 enum dbg_grc_params grc_param)
1765 {
1766 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1767
1768 return dev_data->grc.param_val[grc_param];
1769 }
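
/* Usage sketch: dump code gates each section on its GRC parameter, e.g.
 *
 *	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
 *		...dump the register sections...
 */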
1770
1771 /* Initializes the GRC parameters */
1772 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1773 {
1774 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1775
1776 if (!dev_data->grc.params_initialized) {
1777 qed_dbg_grc_set_params_default(p_hwfn);
1778 dev_data->grc.params_initialized = 1;
1779 }
1780 }
1781
1782 /* Initializes debug data for the specified device */
1783 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1784 struct qed_ptt *p_ptt)
1785 {
1786 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1787 u8 num_pfs = 0, max_pfs_per_port = 0;
1788
1789 if (dev_data->initialized)
1790 return DBG_STATUS_OK;
1791
1792 /* Set chip */
1793 if (QED_IS_K2(p_hwfn->cdev)) {
1794 dev_data->chip_id = CHIP_K2;
1795 dev_data->mode_enable[MODE_K2] = 1;
1796 dev_data->num_vfs = MAX_NUM_VFS_K2;
1797 num_pfs = MAX_NUM_PFS_K2;
1798 max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
1799 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1800 dev_data->chip_id = CHIP_BB;
1801 dev_data->mode_enable[MODE_BB] = 1;
1802 dev_data->num_vfs = MAX_NUM_VFS_BB;
1803 num_pfs = MAX_NUM_PFS_BB;
1804 max_pfs_per_port = MAX_NUM_PFS_BB;
1805 } else {
1806 return DBG_STATUS_UNKNOWN_CHIP;
1807 }
1808
1809 /* Set platform */
1810 dev_data->platform_id = PLATFORM_ASIC;
1811 dev_data->mode_enable[MODE_ASIC] = 1;
1812
1813 /* Set port mode */
1814 switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
1815 case 0:
1816 dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
1817 break;
1818 case 1:
1819 dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
1820 break;
1821 case 2:
1822 dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
1823 break;
1824 }
1825
1826 /* Set 100G mode */
1827 if (dev_data->chip_id == CHIP_BB &&
1828 qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
1829 dev_data->mode_enable[MODE_100G] = 1;
1830
1831 /* Set number of ports */
1832 if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
1833 dev_data->mode_enable[MODE_100G])
1834 dev_data->num_ports = 1;
1835 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
1836 dev_data->num_ports = 2;
1837 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
1838 dev_data->num_ports = 4;
1839
1840 /* Set number of PFs per port */
1841 dev_data->num_pfs_per_port = min_t(u32,
1842 num_pfs / dev_data->num_ports,
1843 max_pfs_per_port);
1844
1845 /* Initializes the GRC parameters */
1846 qed_dbg_grc_init_params(p_hwfn);
1847
1848 dev_data->use_dmae = true;
1849 dev_data->initialized = 1;
1850
1851 return DBG_STATUS_OK;
1852 }
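/* Illustration of the PF math above: on K2 in 2-ports-per-engine mode,
 * num_pfs_per_port ends up as min(MAX_NUM_PFS_K2 / 2, MAX_NUM_PFS_K2 / 2),
 * i.e. half of the PFs per port; on BB in single-port mode it is simply
 * MAX_NUM_PFS_BB. The value is later used to expand PORT_PF split dumps.
 */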
1853
1854 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1855 enum block_id block_id)
1856 {
1857 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1858
1859 return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1860 MAX_CHIP_IDS +
1861 dev_data->chip_id];
1862 }
1863
1864 /* Reads the FW info structure for the specified Storm from the chip,
1865 * and writes it to the specified fw_info pointer.
1866 */
1867 static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
1868 struct qed_ptt *p_ptt,
1869 u8 storm_id, struct fw_info *fw_info)
1870 {
1871 struct storm_defs *storm = &s_storm_defs[storm_id];
1872 struct fw_info_location fw_info_location;
1873 u32 addr, i, *dest;
1874
1875 memset(&fw_info_location, 0, sizeof(fw_info_location));
1876 memset(fw_info, 0, sizeof(*fw_info));
1877
1878 /* Read first the address that points to fw_info location.
1879 * The address is located in the last line of the Storm RAM.
1880 */
1881 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1882 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1883 sizeof(fw_info_location);
1884 dest = (u32 *)&fw_info_location;
1885
1886 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1887 i++, addr += BYTES_IN_DWORD)
1888 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1889
1890 /* Read FW version info from Storm RAM */
1891 if (fw_info_location.size > 0 && fw_info_location.size <=
1892 sizeof(*fw_info)) {
1893 addr = fw_info_location.grc_addr;
1894 dest = (u32 *)fw_info;
1895 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1896 i++, addr += BYTES_IN_DWORD)
1897 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1898 }
1899 }
1900
1901 /* Dumps the specified string to the specified buffer.
1902 * Returns the dumped size in bytes.
1903 */
1904 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1905 {
1906 if (dump)
1907 strcpy(dump_buf, str);
1908
1909 return (u32)strlen(str) + 1;
1910 }
1911
1912 /* Dumps zeros to align the specified buffer to dwords.
1913 * Returns the dumped size in bytes.
1914 */
1915 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1916 {
1917 u8 offset_in_dword, align_size;
1918
1919 offset_in_dword = (u8)(byte_offset & 0x3);
1920 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1921
1922 if (dump && align_size)
1923 memset(dump_buf, 0, align_size);
1924
1925 return align_size;
1926 }
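/* Worked example: for byte_offset values 4, 5, 6 and 7 the padding written is
 * 0, 3, 2 and 1 bytes respectively, so whatever follows always starts on a
 * dword boundary.
 */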
1927
1928 /* Writes the specified string param to the specified buffer.
1929 * Returns the dumped size in dwords.
1930 */
1931 static u32 qed_dump_str_param(u32 *dump_buf,
1932 bool dump,
1933 const char *param_name, const char *param_val)
1934 {
1935 char *char_buf = (char *)dump_buf;
1936 u32 offset = 0;
1937
1938 /* Dump param name */
1939 offset += qed_dump_str(char_buf + offset, dump, param_name);
1940
1941 /* Indicate a string param value */
1942 if (dump)
1943 *(char_buf + offset) = 1;
1944 offset++;
1945
1946 /* Dump param value */
1947 offset += qed_dump_str(char_buf + offset, dump, param_val);
1948
1949 /* Align buffer to next dword */
1950 offset += qed_dump_align(char_buf + offset, dump, offset);
1951
1952 return BYTES_TO_DWORDS(offset);
1953 }
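/* Resulting layout, for illustration: a hypothetical param_name "eco" with
 * param_val "x" is dumped as the bytes "eco\0" 0x01 "x\0" plus one padding
 * byte, i.e. 2 dwords. The 0x01 marker tells a dump parser that the value
 * which follows is a string rather than a number.
 */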
1954
1955 /* Writes the specified numeric param to the specified buffer.
1956 * Returns the dumped size in dwords.
1957 */
1958 static u32 qed_dump_num_param(u32 *dump_buf,
1959 bool dump, const char *param_name, u32 param_val)
1960 {
1961 char *char_buf = (char *)dump_buf;
1962 u32 offset = 0;
1963
1964 /* Dump param name */
1965 offset += qed_dump_str(char_buf + offset, dump, param_name);
1966
1967 /* Indicate a numeric param value */
1968 if (dump)
1969 *(char_buf + offset) = 0;
1970 offset++;
1971
1972 /* Align buffer to next dword */
1973 offset += qed_dump_align(char_buf + offset, dump, offset);
1974
1975 /* Dump param value (and change offset from bytes to dwords) */
1976 offset = BYTES_TO_DWORDS(offset);
1977 if (dump)
1978 *(dump_buf + offset) = param_val;
1979 offset++;
1980
1981 return offset;
1982 }
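/* Resulting layout, for illustration: a hypothetical param_name "count" with
 * param_val 7 is dumped as "count\0" 0x00 plus one padding byte (2 dwords),
 * followed by the 32-bit value 7 in its own dword, for 3 dwords in total.
 */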
1983
1984 /* Reads the FW version and writes it as a param to the specified buffer.
1985 * Returns the dumped size in dwords.
1986 */
1987 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1988 struct qed_ptt *p_ptt,
1989 u32 *dump_buf, bool dump)
1990 {
1991 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1992 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1993 struct fw_info fw_info = { {0}, {0} };
1994 u32 offset = 0;
1995
1996 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1997 /* Read FW info from chip */
1998 qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1999
2000 /* Create FW version/image strings */
2001 if (snprintf(fw_ver_str, sizeof(fw_ver_str),
2002 "%d_%d_%d_%d", fw_info.ver.num.major,
2003 fw_info.ver.num.minor, fw_info.ver.num.rev,
2004 fw_info.ver.num.eng) < 0)
2005 DP_NOTICE(p_hwfn,
2006 "Unexpected debug error: invalid FW version string\n");
2007 switch (fw_info.ver.image_id) {
2008 case FW_IMG_MAIN:
2009 strcpy(fw_img_str, "main");
2010 break;
2011 default:
2012 strcpy(fw_img_str, "unknown");
2013 break;
2014 }
2015 }
2016
2017 /* Dump FW version, image and timestamp */
2018 offset += qed_dump_str_param(dump_buf + offset,
2019 dump, "fw-version", fw_ver_str);
2020 offset += qed_dump_str_param(dump_buf + offset,
2021 dump, "fw-image", fw_img_str);
2022 offset += qed_dump_num_param(dump_buf + offset,
2023 dump,
2024 "fw-timestamp", fw_info.ver.timestamp);
2025
2026 return offset;
2027 }
2028
2029 /* Reads the MFW version and writes it as a param to the specified buffer.
2030 * Returns the dumped size in dwords.
2031 */
2032 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2033 struct qed_ptt *p_ptt,
2034 u32 *dump_buf, bool dump)
2035 {
2036 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2037
2038 if (dump &&
2039 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2040 u32 global_section_offsize, global_section_addr, mfw_ver;
2041 u32 public_data_addr, global_section_offsize_addr;
2042
2043 /* Find MCP public data GRC address. Needs to be ORed with
2044 * MCP_REG_SCRATCH due to a HW bug.
2045 */
2046 public_data_addr = qed_rd(p_hwfn,
2047 p_ptt,
2048 MISC_REG_SHARED_MEM_ADDR) |
2049 MCP_REG_SCRATCH;
2050
2051 /* Find MCP public global section offset */
2052 global_section_offsize_addr = public_data_addr +
2053 offsetof(struct mcp_public_data,
2054 sections) +
2055 sizeof(offsize_t) * PUBLIC_GLOBAL;
2056 global_section_offsize = qed_rd(p_hwfn, p_ptt,
2057 global_section_offsize_addr);
2058 global_section_addr =
2059 MCP_REG_SCRATCH +
2060 (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2061
2062 /* Read MFW version from MCP public global section */
2063 mfw_ver = qed_rd(p_hwfn, p_ptt,
2064 global_section_addr +
2065 offsetof(struct public_global, mfw_ver));
2066
2067 /* Dump MFW version param */
2068 if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2069 (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2070 (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2071 DP_NOTICE(p_hwfn,
2072 "Unexpected debug error: invalid MFW version string\n");
2073 }
2074
2075 return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2076 }
2077
2078 /* Writes a section header to the specified buffer.
2079 * Returns the dumped size in dwords.
2080 */
2081 static u32 qed_dump_section_hdr(u32 *dump_buf,
2082 bool dump, const char *name, u32 num_params)
2083 {
2084 return qed_dump_num_param(dump_buf, dump, name, num_params);
2085 }
2086
2087 /* Writes the common global params to the specified buffer.
2088 * Returns the dumped size in dwords.
2089 */
2090 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2091 struct qed_ptt *p_ptt,
2092 u32 *dump_buf,
2093 bool dump,
2094 u8 num_specific_global_params)
2095 {
2096 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2097 u32 offset = 0;
2098 u8 num_params;
2099
2100 /* Dump global params section header */
2101 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2102 offset += qed_dump_section_hdr(dump_buf + offset,
2103 dump, "global_params", num_params);
2104
2105 /* Store params */
2106 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2107 offset += qed_dump_mfw_ver_param(p_hwfn,
2108 p_ptt, dump_buf + offset, dump);
2109 offset += qed_dump_num_param(dump_buf + offset,
2110 dump, "tools-version", TOOLS_VERSION);
2111 offset += qed_dump_str_param(dump_buf + offset,
2112 dump,
2113 "chip",
2114 s_chip_defs[dev_data->chip_id].name);
2115 offset += qed_dump_str_param(dump_buf + offset,
2116 dump,
2117 "platform",
2118 s_platform_defs[dev_data->platform_id].
2119 name);
2120 offset +=
2121 qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2122 p_hwfn->abs_pf_id);
2123
2124 return offset;
2125 }
2126
2127 /* Writes the "last" section (including CRC) to the specified buffer at the
2128 * given offset. Returns the dumped size in dwords.
2129 */
2130 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2131 {
2132 u32 start_offset = offset;
2133
2134 /* Dump CRC section header */
2135 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2136
2137 /* Calculate CRC32 and add it to the dword after the "last" section */
2138 if (dump)
2139 *(dump_buf + offset) = ~crc32(0xffffffff,
2140 (u8 *)dump_buf,
2141 DWORDS_TO_BYTES(offset));
2142
2143 offset++;
2144
2145 return offset - start_offset;
2146 }
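/* Note: the stored value is the bit-inverse of crc32(0xffffffff, ...) taken
 * over every byte dumped so far, including the "last" section header itself.
 * A parser can validate a dump by recomputing the CRC over all dwords that
 * precede the final one and comparing it against the inverted stored value.
 */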
2147
2148 /* Update blocks reset state */
2149 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2150 struct qed_ptt *p_ptt)
2151 {
2152 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2153 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2154 u32 i;
2155
2156 /* Read reset registers */
2157 for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2158 if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2159 reg_val[i] = qed_rd(p_hwfn,
2160 p_ptt, s_reset_regs_defs[i].addr);
2161
2162 /* Check if blocks are in reset */
2163 for (i = 0; i < MAX_BLOCK_ID; i++) {
2164 struct block_defs *block = s_block_defs[i];
2165
2166 dev_data->block_in_reset[i] = block->has_reset_bit &&
2167 !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2168 }
2169 }
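/* Note: the reset bits act as "out of reset" flags here: a block is recorded
 * as being in reset when it has a reset bit and that bit currently reads 0.
 */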
2170
2171 /* Enable / disable the Debug block */
2172 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2173 struct qed_ptt *p_ptt, bool enable)
2174 {
2175 qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2176 }
2177
2178 /* Resets the Debug block */
2179 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2180 struct qed_ptt *p_ptt)
2181 {
2182 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2183 struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2184
2185 dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2186 old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2187 new_reset_reg_val =
2188 old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2189
2190 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2191 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2192 }
2193
2194 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2195 struct qed_ptt *p_ptt,
2196 enum dbg_bus_frame_modes mode)
2197 {
2198 qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2199 }
2200
2201 /* Enable / disable Debug Bus clients according to the specified mask
2202 * (1 = enable, 0 = disable).
2203 */
2204 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2205 struct qed_ptt *p_ptt, u32 client_mask)
2206 {
2207 qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2208 }
2209
2210 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2211 {
2212 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2213 bool arg1, arg2;
2214 const u32 *ptr;
2215 u8 tree_val;
2216
2217 /* Get next element from modes tree buffer */
2218 ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2219 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2220
2221 switch (tree_val) {
2222 case INIT_MODE_OP_NOT:
2223 return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2224 case INIT_MODE_OP_OR:
2225 case INIT_MODE_OP_AND:
2226 arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2227 arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2228 return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2229 arg2) : (arg1 && arg2);
2230 default:
2231 return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2232 }
2233 }
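/* Illustration: the modes tree is a prefix-notation expression stored as
 * bytes. A hypothetical sequence {INIT_MODE_OP_AND, <mode A>, <mode B>}
 * evaluates to true only when both modes are set in dev_data->mode_enable[],
 * while {INIT_MODE_OP_NOT, <mode A>} inverts a single mode. Plain mode values
 * are stored offset by MAX_INIT_MODE_OPS so they cannot collide with the
 * operator codes.
 */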
2234
2235 /* Returns true if the specified entity (indicated by GRC param) should be
2236 * included in the dump, false otherwise.
2237 */
2238 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2239 enum dbg_grc_params grc_param)
2240 {
2241 return qed_grc_get_param(p_hwfn, grc_param) > 0;
2242 }
2243
2244 /* Returns true if the specified Storm should be included in the dump, false
2245 * otherwise.
2246 */
2247 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2248 enum dbg_storms storm)
2249 {
2250 return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2251 }
2252
2253 /* Returns true if the specified memory should be included in the dump, false
2254 * otherwise.
2255 */
2256 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2257 enum block_id block_id, u8 mem_group_id)
2258 {
2259 struct block_defs *block = s_block_defs[block_id];
2260 u8 i;
2261
2262 /* Check Storm match */
2263 if (block->associated_to_storm &&
2264 !qed_grc_is_storm_included(p_hwfn,
2265 (enum dbg_storms)block->storm_id))
2266 return false;
2267
2268 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2269 struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2270
2271 if (mem_group_id == big_ram->mem_group_id ||
2272 mem_group_id == big_ram->ram_mem_group_id)
2273 return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2274 }
2275
2276 switch (mem_group_id) {
2277 case MEM_GROUP_PXP_ILT:
2278 case MEM_GROUP_PXP_MEM:
2279 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2280 case MEM_GROUP_RAM:
2281 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2282 case MEM_GROUP_PBUF:
2283 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2284 case MEM_GROUP_CAU_MEM:
2285 case MEM_GROUP_CAU_SB:
2286 case MEM_GROUP_CAU_PI:
2287 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2288 case MEM_GROUP_QM_MEM:
2289 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2290 case MEM_GROUP_CFC_MEM:
2291 case MEM_GROUP_CONN_CFC_MEM:
2292 case MEM_GROUP_TASK_CFC_MEM:
2293 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2294 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2295 case MEM_GROUP_IGU_MEM:
2296 case MEM_GROUP_IGU_MSIX:
2297 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2298 case MEM_GROUP_MULD_MEM:
2299 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2300 case MEM_GROUP_PRS_MEM:
2301 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2302 case MEM_GROUP_DMAE_MEM:
2303 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2304 case MEM_GROUP_TM_MEM:
2305 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2306 case MEM_GROUP_SDM_MEM:
2307 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2308 case MEM_GROUP_TDIF_CTX:
2309 case MEM_GROUP_RDIF_CTX:
2310 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2311 case MEM_GROUP_CM_MEM:
2312 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2313 case MEM_GROUP_IOR:
2314 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2315 default:
2316 return true;
2317 }
2318 }
2319
2320 /* Stalls or un-stalls all Storms, according to the stall argument */
2321 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2322 struct qed_ptt *p_ptt, bool stall)
2323 {
2324 u32 reg_addr;
2325 u8 storm_id;
2326
2327 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2328 if (!qed_grc_is_storm_included(p_hwfn,
2329 (enum dbg_storms)storm_id))
2330 continue;
2331
2332 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2333 SEM_FAST_REG_STALL_0_BB_K2;
2334 qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2335 }
2336
2337 msleep(STALL_DELAY_MS);
2338 }
2339
2340 /* Takes all blocks out of reset */
2341 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2342 struct qed_ptt *p_ptt)
2343 {
2344 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2345 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2346 u32 block_id, i;
2347
2348 /* Fill reset regs values */
2349 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2350 struct block_defs *block = s_block_defs[block_id];
2351
2352 if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2353 block->unreset)
2354 reg_val[block->reset_reg] |=
2355 BIT(block->reset_bit_offset);
2356 }
2357
2358 /* Write reset registers */
2359 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2360 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2361 continue;
2362
2363 reg_val[i] |=
2364 s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2365
2366 if (reg_val[i])
2367 qed_wr(p_hwfn,
2368 p_ptt,
2369 s_reset_regs_defs[i].addr +
2370 RESET_REG_UNRESET_OFFSET, reg_val[i]);
2371 }
2372 }
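/* Note: the reset register itself is not read-modify-written; the accumulated
 * bit mask (together with the per-chip unreset_val) is written to the
 * companion register at offset RESET_REG_UNRESET_OFFSET from it, which takes
 * the corresponding blocks out of reset.
 */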
2373
2374 /* Returns the attention block data of the specified block */
2375 static const struct dbg_attn_block_type_data *
2376 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2377 {
2378 const struct dbg_attn_block *base_attn_block_arr =
2379 (const struct dbg_attn_block *)
2380 s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2381
2382 return &base_attn_block_arr[block_id].per_type_data[attn_type];
2383 }
2384
2385 /* Returns the attention registers of the specified block */
2386 static const struct dbg_attn_reg *
2387 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2388 u8 *num_attn_regs)
2389 {
2390 const struct dbg_attn_block_type_data *block_type_data =
2391 qed_get_block_attn_data(block_id, attn_type);
2392
2393 *num_attn_regs = block_type_data->num_regs;
2394
2395 return &((const struct dbg_attn_reg *)
2396 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2397 regs_offset];
2398 }
2399
2400 /* For each block, clear the status of all parities */
2401 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2402 struct qed_ptt *p_ptt)
2403 {
2404 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2405 const struct dbg_attn_reg *attn_reg_arr;
2406 u8 reg_idx, num_attn_regs;
2407 u32 block_id;
2408
2409 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2410 if (dev_data->block_in_reset[block_id])
2411 continue;
2412
2413 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2414 ATTN_TYPE_PARITY,
2415 &num_attn_regs);
2416
2417 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2418 const struct dbg_attn_reg *reg_data =
2419 &attn_reg_arr[reg_idx];
2420 u16 modes_buf_offset;
2421 bool eval_mode;
2422
2423 /* Check mode */
2424 eval_mode = GET_FIELD(reg_data->mode.data,
2425 DBG_MODE_HDR_EVAL_MODE) > 0;
2426 modes_buf_offset =
2427 GET_FIELD(reg_data->mode.data,
2428 DBG_MODE_HDR_MODES_BUF_OFFSET);
2429
2430 /* If Mode match: clear parity status */
2431 if (!eval_mode ||
2432 qed_is_mode_match(p_hwfn, &modes_buf_offset))
2433 qed_rd(p_hwfn, p_ptt,
2434 DWORDS_TO_BYTES(reg_data->
2435 sts_clr_address));
2436 }
2437 }
2438 }
2439
2440 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2441 * The following parameters are dumped:
2442 * - count: no. of dumped entries
2443 * - split_type: split type
2444 * - split_id: split ID (dumped only if split_type != SPLIT_TYPE_NONE)
2445 * - param_name/param_val: user parameter name and value (dumped only if
2446 * both are not NULL).
2447 */
2448 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2449 bool dump,
2450 u32 num_reg_entries,
2451 enum init_split_types split_type,
2452 u8 split_id,
2453 const char *param_name, const char *param_val)
2454 {
2455 u8 num_params = 2 +
2456 (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
2457 u32 offset = 0;
2458
2459 offset += qed_dump_section_hdr(dump_buf + offset,
2460 dump, "grc_regs", num_params);
2461 offset += qed_dump_num_param(dump_buf + offset,
2462 dump, "count", num_reg_entries);
2463 offset += qed_dump_str_param(dump_buf + offset,
2464 dump, "split",
2465 s_split_type_defs[split_type].name);
2466 if (split_type != SPLIT_TYPE_NONE)
2467 offset += qed_dump_num_param(dump_buf + offset,
2468 dump, "id", split_id);
2469 if (param_name && param_val)
2470 offset += qed_dump_str_param(dump_buf + offset,
2471 dump, param_name, param_val);
2472
2473 return offset;
2474 }
2475
2476 /* Reads the specified registers into the specified buffer.
2477 * The addr and len arguments are specified in dwords.
2478 */
2479 void qed_read_regs(struct qed_hwfn *p_hwfn,
2480 struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2481 {
2482 u32 i;
2483
2484 for (i = 0; i < len; i++)
2485 buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2486 }
2487
2488 /* Dumps the GRC registers in the specified address range.
2489 * Returns the dumped size in dwords.
2490 * The addr and len arguments are specified in dwords.
2491 */
2492 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2493 struct qed_ptt *p_ptt,
2494 u32 *dump_buf,
2495 bool dump, u32 addr, u32 len, bool wide_bus,
2496 enum init_split_types split_type,
2497 u8 split_id)
2498 {
2499 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2500 u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
2501
2502 if (!dump)
2503 return len;
2504
2505 /* Print log if needed */
2506 dev_data->num_regs_read += len;
2507 if (dev_data->num_regs_read >=
2508 s_platform_defs[dev_data->platform_id].log_thresh) {
2509 DP_VERBOSE(p_hwfn,
2510 QED_MSG_DEBUG,
2511 "Dumping %d registers...\n",
2512 dev_data->num_regs_read);
2513 dev_data->num_regs_read = 0;
2514 }
2515
2516 switch (split_type) {
2517 case SPLIT_TYPE_PORT:
2518 port_id = split_id;
2519 break;
2520 case SPLIT_TYPE_PF:
2521 pf_id = split_id;
2522 break;
2523 case SPLIT_TYPE_PORT_PF:
2524 port_id = split_id / dev_data->num_pfs_per_port;
2525 pf_id = port_id + dev_data->num_ports *
2526 (split_id % dev_data->num_pfs_per_port);
2527 break;
2528 case SPLIT_TYPE_VF:
2529 vf_id = split_id;
2530 break;
2531 default:
2532 break;
2533 }
2534
2535 /* Try reading using DMAE */
2536 if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
2537 (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2538 wide_bus)) {
2539 if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2540 (u64)(uintptr_t)(dump_buf), len, 0))
2541 return len;
2542 dev_data->use_dmae = 0;
2543 DP_VERBOSE(p_hwfn,
2544 QED_MSG_DEBUG,
2545 "Failed reading from chip using DMAE, using GRC instead\n");
2546 }
2547
2548 /* If not read using DMAE, read using GRC */
2549
2550 /* Set pretend */
2551 if (split_type != dev_data->pretend.split_type || split_id !=
2552 dev_data->pretend.split_id) {
2553 switch (split_type) {
2554 case SPLIT_TYPE_PORT:
2555 qed_port_pretend(p_hwfn, p_ptt, port_id);
2556 break;
2557 case SPLIT_TYPE_PF:
2558 fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2559 qed_fid_pretend(p_hwfn, p_ptt, fid);
2560 break;
2561 case SPLIT_TYPE_PORT_PF:
2562 fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2563 qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
2564 break;
2565 case SPLIT_TYPE_VF:
2566 fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
2567 (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
2568 qed_fid_pretend(p_hwfn, p_ptt, fid);
2569 break;
2570 default:
2571 break;
2572 }
2573
2574 dev_data->pretend.split_type = (u8)split_type;
2575 dev_data->pretend.split_id = split_id;
2576 }
2577
2578 /* Read registers using GRC */
2579 qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2580
2581 return len;
2582 }
2583
2584 /* Dumps a GRC register sequence header. Returns the dumped size in dwords.
2585 * The addr and len arguments are specified in dwords.
2586 */
2587 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2588 bool dump, u32 addr, u32 len)
2589 {
2590 if (dump)
2591 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2592
2593 return 1;
2594 }
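/* Illustration: the sequence header packs the starting GRC address (in
 * dwords) into the low bits and the length (in dwords) into the bits starting
 * at REG_DUMP_LEN_SHIFT. Assuming, for example, a shift of 24, a 4-dword
 * sequence at dword address 0x1234 would be encoded as 0x04001234.
 */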
2595
2596 /* Dumps a GRC register sequence. Returns the dumped size in dwords.
2597 * The addr and len arguments are specified in dwords.
2598 */
2599 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2600 struct qed_ptt *p_ptt,
2601 u32 *dump_buf,
2602 bool dump, u32 addr, u32 len, bool wide_bus,
2603 enum init_split_types split_type, u8 split_id)
2604 {
2605 u32 offset = 0;
2606
2607 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2608 offset += qed_grc_dump_addr_range(p_hwfn,
2609 p_ptt,
2610 dump_buf + offset,
2611 dump, addr, len, wide_bus,
2612 split_type, split_id);
2613
2614 return offset;
2615 }
2616
2617 /* Dumps a GRC register sequence with a skip cycle.
2618 * Returns the dumped size in dwords.
2619 * - addr: start GRC address in dwords
2620 * - total_len: total no. of dwords to dump
2621 * - read_len: no. of consecutive dwords to read
2622 * - skip_len: no. of dwords to skip (and fill with zeros)
2623 */
2624 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2625 struct qed_ptt *p_ptt,
2626 u32 *dump_buf,
2627 bool dump,
2628 u32 addr,
2629 u32 total_len,
2630 u32 read_len, u32 skip_len)
2631 {
2632 u32 offset = 0, reg_offset = 0;
2633
2634 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2635
2636 if (!dump)
2637 return offset + total_len;
2638
2639 while (reg_offset < total_len) {
2640 u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2641
2642 offset += qed_grc_dump_addr_range(p_hwfn,
2643 p_ptt,
2644 dump_buf + offset,
2645 dump, addr, curr_len, false,
2646 SPLIT_TYPE_NONE, 0);
2647 reg_offset += curr_len;
2648 addr += curr_len;
2649
2650 if (reg_offset < total_len) {
2651 curr_len = min_t(u32, skip_len, total_len - reg_offset);
2652 memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2653 offset += curr_len;
2654 reg_offset += curr_len;
2655 addr += curr_len;
2656 }
2657 }
2658
2659 return offset;
2660 }
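/* Worked example: with total_len = 16, read_len = 7 and skip_len = 1 (the
 * same read/skip pattern used for the R/TDIF debug-error-info dumps below),
 * dwords 0-6 are read, dword 7 is zero-filled, dwords 8-14 are read and
 * dword 15 is zero-filled, i.e. every 8th dword is skipped.
 */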
2661
2662 /* Dumps GRC register entries. Returns the dumped size in dwords. */
2663 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2664 struct qed_ptt *p_ptt,
2665 struct dbg_array input_regs_arr,
2666 u32 *dump_buf,
2667 bool dump,
2668 enum init_split_types split_type,
2669 u8 split_id,
2670 bool block_enable[MAX_BLOCK_ID],
2671 u32 *num_dumped_reg_entries)
2672 {
2673 u32 i, offset = 0, input_offset = 0;
2674 bool mode_match = true;
2675
2676 *num_dumped_reg_entries = 0;
2677
2678 while (input_offset < input_regs_arr.size_in_dwords) {
2679 const struct dbg_dump_cond_hdr *cond_hdr =
2680 (const struct dbg_dump_cond_hdr *)
2681 &input_regs_arr.ptr[input_offset++];
2682 u16 modes_buf_offset;
2683 bool eval_mode;
2684
2685 /* Check mode/block */
2686 eval_mode = GET_FIELD(cond_hdr->mode.data,
2687 DBG_MODE_HDR_EVAL_MODE) > 0;
2688 if (eval_mode) {
2689 modes_buf_offset =
2690 GET_FIELD(cond_hdr->mode.data,
2691 DBG_MODE_HDR_MODES_BUF_OFFSET);
2692 mode_match = qed_is_mode_match(p_hwfn,
2693 &modes_buf_offset);
2694 }
2695
2696 if (!mode_match || !block_enable[cond_hdr->block_id]) {
2697 input_offset += cond_hdr->data_size;
2698 continue;
2699 }
2700
2701 for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2702 const struct dbg_dump_reg *reg =
2703 (const struct dbg_dump_reg *)
2704 &input_regs_arr.ptr[input_offset];
2705 u32 addr, len;
2706 bool wide_bus;
2707
2708 addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2709 len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2710 wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2711 offset += qed_grc_dump_reg_entry(p_hwfn,
2712 p_ptt,
2713 dump_buf + offset,
2714 dump,
2715 addr,
2716 len,
2717 wide_bus,
2718 split_type, split_id);
2719 (*num_dumped_reg_entries)++;
2720 }
2721 }
2722
2723 return offset;
2724 }
2725
2726 /* Dumps GRC register entries for a single split. Returns the dumped size in dwords. */
2727 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2728 struct qed_ptt *p_ptt,
2729 struct dbg_array input_regs_arr,
2730 u32 *dump_buf,
2731 bool dump,
2732 bool block_enable[MAX_BLOCK_ID],
2733 enum init_split_types split_type,
2734 u8 split_id,
2735 const char *param_name,
2736 const char *param_val)
2737 {
2738 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2739 enum init_split_types hdr_split_type = split_type;
2740 u32 num_dumped_reg_entries, offset;
2741 u8 hdr_split_id = split_id;
2742
2743 /* In PORT_PF split type, print a port split header */
2744 if (split_type == SPLIT_TYPE_PORT_PF) {
2745 hdr_split_type = SPLIT_TYPE_PORT;
2746 hdr_split_id = split_id / dev_data->num_pfs_per_port;
2747 }
2748
2749 /* Calculate register dump header size (and skip it for now) */
2750 offset = qed_grc_dump_regs_hdr(dump_buf,
2751 false,
2752 0,
2753 hdr_split_type,
2754 hdr_split_id, param_name, param_val);
2755
2756 /* Dump registers */
2757 offset += qed_grc_dump_regs_entries(p_hwfn,
2758 p_ptt,
2759 input_regs_arr,
2760 dump_buf + offset,
2761 dump,
2762 split_type,
2763 split_id,
2764 block_enable,
2765 &num_dumped_reg_entries);
2766
2767 /* Write register dump header */
2768 if (dump && num_dumped_reg_entries > 0)
2769 qed_grc_dump_regs_hdr(dump_buf,
2770 dump,
2771 num_dumped_reg_entries,
2772 hdr_split_type,
2773 hdr_split_id, param_name, param_val);
2774
2775 return num_dumped_reg_entries > 0 ? offset : 0;
2776 }
2777
2778 /* Dumps registers according to the input registers array. Returns the dumped
2779 * size in dwords.
2780 */
2781 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2782 struct qed_ptt *p_ptt,
2783 u32 *dump_buf,
2784 bool dump,
2785 bool block_enable[MAX_BLOCK_ID],
2786 const char *param_name, const char *param_val)
2787 {
2788 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2789 u32 offset = 0, input_offset = 0;
2790 u16 fid;
2791 while (input_offset <
2792 s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2793 const struct dbg_dump_split_hdr *split_hdr;
2794 struct dbg_array curr_input_regs_arr;
2795 enum init_split_types split_type;
2796 u16 split_count = 0;
2797 u32 split_data_size;
2798 u8 split_id;
2799
2800 split_hdr =
2801 (const struct dbg_dump_split_hdr *)
2802 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2803 split_type =
2804 GET_FIELD(split_hdr->hdr,
2805 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2806 split_data_size =
2807 GET_FIELD(split_hdr->hdr,
2808 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2809 curr_input_regs_arr.ptr =
2810 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2811 curr_input_regs_arr.size_in_dwords = split_data_size;
2812
2813 switch (split_type) {
2814 case SPLIT_TYPE_NONE:
2815 split_count = 1;
2816 break;
2817 case SPLIT_TYPE_PORT:
2818 split_count = dev_data->num_ports;
2819 break;
2820 case SPLIT_TYPE_PF:
2821 case SPLIT_TYPE_PORT_PF:
2822 split_count = dev_data->num_ports *
2823 dev_data->num_pfs_per_port;
2824 break;
2825 case SPLIT_TYPE_VF:
2826 split_count = dev_data->num_vfs;
2827 break;
2828 default:
2829 return 0;
2830 }
2831
2832 for (split_id = 0; split_id < split_count; split_id++)
2833 offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2834 curr_input_regs_arr,
2835 dump_buf + offset,
2836 dump, block_enable,
2837 split_type,
2838 split_id,
2839 param_name,
2840 param_val);
2841
2842 input_offset += split_data_size;
2843 }
2844
2845 /* Cancel pretends (pretend to original PF) */
2846 if (dump) {
2847 fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2848 qed_fid_pretend(p_hwfn, p_ptt, fid);
2849 dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2850 dev_data->pretend.split_id = 0;
2851 }
2852
2853 return offset;
2854 }
2855
2856 /* Dump reset registers. Returns the dumped size in dwords. */
2857 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2858 struct qed_ptt *p_ptt,
2859 u32 *dump_buf, bool dump)
2860 {
2861 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2862 u32 i, offset = 0, num_regs = 0;
2863
2864 /* Calculate header size */
2865 offset += qed_grc_dump_regs_hdr(dump_buf,
2866 false, 0,
2867 SPLIT_TYPE_NONE, 0, NULL, NULL);
2868
2869 /* Write reset registers */
2870 for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2871 if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2872 continue;
2873
2874 offset += qed_grc_dump_reg_entry(p_hwfn,
2875 p_ptt,
2876 dump_buf + offset,
2877 dump,
2878 BYTES_TO_DWORDS
2879 (s_reset_regs_defs[i].addr), 1,
2880 false, SPLIT_TYPE_NONE, 0);
2881 num_regs++;
2882 }
2883
2884 /* Write header */
2885 if (dump)
2886 qed_grc_dump_regs_hdr(dump_buf,
2887 true, num_regs, SPLIT_TYPE_NONE,
2888 0, NULL, NULL);
2889
2890 return offset;
2891 }
2892
2893 /* Dump registers that are modified during GRC Dump and therefore must be
2894 * dumped first. Returns the dumped size in dwords.
2895 */
2896 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2897 struct qed_ptt *p_ptt,
2898 u32 *dump_buf, bool dump)
2899 {
2900 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2901 u32 block_id, offset = 0, num_reg_entries = 0;
2902 const struct dbg_attn_reg *attn_reg_arr;
2903 u8 storm_id, reg_idx, num_attn_regs;
2904
2905 /* Calculate header size */
2906 offset += qed_grc_dump_regs_hdr(dump_buf,
2907 false, 0, SPLIT_TYPE_NONE,
2908 0, NULL, NULL);
2909
2910 /* Write parity registers */
2911 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2912 if (dev_data->block_in_reset[block_id] && dump)
2913 continue;
2914
2915 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2916 ATTN_TYPE_PARITY,
2917 &num_attn_regs);
2918
2919 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2920 const struct dbg_attn_reg *reg_data =
2921 &attn_reg_arr[reg_idx];
2922 u16 modes_buf_offset;
2923 bool eval_mode;
2924 u32 addr;
2925
2926 /* Check mode */
2927 eval_mode = GET_FIELD(reg_data->mode.data,
2928 DBG_MODE_HDR_EVAL_MODE) > 0;
2929 modes_buf_offset =
2930 GET_FIELD(reg_data->mode.data,
2931 DBG_MODE_HDR_MODES_BUF_OFFSET);
2932 if (eval_mode &&
2933 !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2934 continue;
2935
2936 /* Mode match: read & dump registers */
2937 addr = reg_data->mask_address;
2938 offset += qed_grc_dump_reg_entry(p_hwfn,
2939 p_ptt,
2940 dump_buf + offset,
2941 dump,
2942 addr,
2943 1, false,
2944 SPLIT_TYPE_NONE, 0);
2945 addr = GET_FIELD(reg_data->data,
2946 DBG_ATTN_REG_STS_ADDRESS);
2947 offset += qed_grc_dump_reg_entry(p_hwfn,
2948 p_ptt,
2949 dump_buf + offset,
2950 dump,
2951 addr,
2952 1, false,
2953 SPLIT_TYPE_NONE, 0);
2954 num_reg_entries += 2;
2955 }
2956 }
2957
2958 /* Write Storm stall status registers */
2959 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2960 struct storm_defs *storm = &s_storm_defs[storm_id];
2961 u32 addr;
2962
2963 if (dev_data->block_in_reset[storm->block_id] && dump)
2964 continue;
2965
2966 addr =
2967 BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2968 SEM_FAST_REG_STALLED);
2969 offset += qed_grc_dump_reg_entry(p_hwfn,
2970 p_ptt,
2971 dump_buf + offset,
2972 dump,
2973 addr,
2974 1,
2975 false, SPLIT_TYPE_NONE, 0);
2976 num_reg_entries++;
2977 }
2978
2979 /* Write header */
2980 if (dump)
2981 qed_grc_dump_regs_hdr(dump_buf,
2982 true,
2983 num_reg_entries, SPLIT_TYPE_NONE,
2984 0, NULL, NULL);
2985
2986 return offset;
2987 }
2988
2989 /* Dumps registers that can't be represented in the debug arrays */
2990 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2991 struct qed_ptt *p_ptt,
2992 u32 *dump_buf, bool dump)
2993 {
2994 u32 offset = 0, addr;
2995
2996 offset += qed_grc_dump_regs_hdr(dump_buf,
2997 dump, 2, SPLIT_TYPE_NONE, 0,
2998 NULL, NULL);
2999
3000 /* Dump RDIF/TDIF_REG_DEBUG_ERROR_INFO (RDIF/TDIF_REG_DEBUG_ERROR_INFO_SIZE
3001 * dwords each; every 8th register is skipped).
3002 */
3003 addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
3004 offset += qed_grc_dump_reg_entry_skip(p_hwfn,
3005 p_ptt,
3006 dump_buf + offset,
3007 dump,
3008 addr,
3009 RDIF_REG_DEBUG_ERROR_INFO_SIZE,
3010 7,
3011 1);
3012 addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
3013 offset +=
3014 qed_grc_dump_reg_entry_skip(p_hwfn,
3015 p_ptt,
3016 dump_buf + offset,
3017 dump,
3018 addr,
3019 TDIF_REG_DEBUG_ERROR_INFO_SIZE,
3020 7,
3021 1);
3022
3023 return offset;
3024 }
3025
3026 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3027 * dwords. The following parameters are dumped:
3028 * - name: dumped only if it's not NULL.
3029 * - addr: in dwords, dumped only if name is NULL.
3030 * - len: in dwords, always dumped.
3031 * - width: dumped if it's not zero.
3032 * - packed: dumped only if it's not false.
3033 * - mem_group: always dumped.
3034 * - is_storm: true only if the memory is related to a Storm.
3035 * - storm_letter: valid only if is_storm is true.
3036 *
3037 */
3038 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
3039 u32 *dump_buf,
3040 bool dump,
3041 const char *name,
3042 u32 addr,
3043 u32 len,
3044 u32 bit_width,
3045 bool packed,
3046 const char *mem_group,
3047 bool is_storm, char storm_letter)
3048 {
3049 u8 num_params = 3;
3050 u32 offset = 0;
3051 char buf[64];
3052
3053 if (!len)
3054 DP_NOTICE(p_hwfn,
3055 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3056
3057 if (bit_width)
3058 num_params++;
3059 if (packed)
3060 num_params++;
3061
3062 /* Dump section header */
3063 offset += qed_dump_section_hdr(dump_buf + offset,
3064 dump, "grc_mem", num_params);
3065
3066 if (name) {
3067 /* Dump name */
3068 if (is_storm) {
3069 strcpy(buf, "?STORM_");
3070 buf[0] = storm_letter;
3071 strcpy(buf + strlen(buf), name);
3072 } else {
3073 strcpy(buf, name);
3074 }
3075
3076 offset += qed_dump_str_param(dump_buf + offset,
3077 dump, "name", buf);
3078 } else {
3079 /* Dump address */
3080 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3081
3082 offset += qed_dump_num_param(dump_buf + offset,
3083 dump, "addr", addr_in_bytes);
3084 }
3085
3086 /* Dump len */
3087 offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3088
3089 /* Dump bit width */
3090 if (bit_width)
3091 offset += qed_dump_num_param(dump_buf + offset,
3092 dump, "width", bit_width);
3093
3094 /* Dump packed */
3095 if (packed)
3096 offset += qed_dump_num_param(dump_buf + offset,
3097 dump, "packed", 1);
3098
3099 /* Dump reg type */
3100 if (is_storm) {
3101 strcpy(buf, "?STORM_");
3102 buf[0] = storm_letter;
3103 strcpy(buf + strlen(buf), mem_group);
3104 } else {
3105 strcpy(buf, mem_group);
3106 }
3107
3108 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3109
3110 return offset;
3111 }
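/* Note on the naming above: Storm-related names are built by overwriting the
 * '?' placeholder of "?STORM_" with the Storm letter, so e.g. storm_letter
 * 'T' and mem_group "RAM" yield the type string "TSTORM_RAM".
 */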
3112
3113 /* Dumps a single GRC memory. If name is NULL, the memory is dumped by address.
3114 * Returns the dumped size in dwords.
3115 * The addr and len arguments are specified in dwords.
3116 */
3117 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3118 struct qed_ptt *p_ptt,
3119 u32 *dump_buf,
3120 bool dump,
3121 const char *name,
3122 u32 addr,
3123 u32 len,
3124 bool wide_bus,
3125 u32 bit_width,
3126 bool packed,
3127 const char *mem_group,
3128 bool is_storm, char storm_letter)
3129 {
3130 u32 offset = 0;
3131
3132 offset += qed_grc_dump_mem_hdr(p_hwfn,
3133 dump_buf + offset,
3134 dump,
3135 name,
3136 addr,
3137 len,
3138 bit_width,
3139 packed,
3140 mem_group, is_storm, storm_letter);
3141 offset += qed_grc_dump_addr_range(p_hwfn,
3142 p_ptt,
3143 dump_buf + offset,
3144 dump, addr, len, wide_bus,
3145 SPLIT_TYPE_NONE, 0);
3146
3147 return offset;
3148 }
3149
3150 /* Dumps GRC memory entries. Returns the dumped size in dwords. */
3151 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3152 struct qed_ptt *p_ptt,
3153 struct dbg_array input_mems_arr,
3154 u32 *dump_buf, bool dump)
3155 {
3156 u32 i, offset = 0, input_offset = 0;
3157 bool mode_match = true;
3158
3159 while (input_offset < input_mems_arr.size_in_dwords) {
3160 const struct dbg_dump_cond_hdr *cond_hdr;
3161 u16 modes_buf_offset;
3162 u32 num_entries;
3163 bool eval_mode;
3164
3165 cond_hdr = (const struct dbg_dump_cond_hdr *)
3166 &input_mems_arr.ptr[input_offset++];
3167 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3168
3169 /* Check required mode */
3170 eval_mode = GET_FIELD(cond_hdr->mode.data,
3171 DBG_MODE_HDR_EVAL_MODE) > 0;
3172 if (eval_mode) {
3173 modes_buf_offset =
3174 GET_FIELD(cond_hdr->mode.data,
3175 DBG_MODE_HDR_MODES_BUF_OFFSET);
3176 mode_match = qed_is_mode_match(p_hwfn,
3177 &modes_buf_offset);
3178 }
3179
3180 if (!mode_match) {
3181 input_offset += cond_hdr->data_size;
3182 continue;
3183 }
3184
3185 for (i = 0; i < num_entries;
3186 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3187 const struct dbg_dump_mem *mem =
3188 (const struct dbg_dump_mem *)
3189 &input_mems_arr.ptr[input_offset];
3190 u8 mem_group_id = GET_FIELD(mem->dword0,
3191 DBG_DUMP_MEM_MEM_GROUP_ID);
3192 bool is_storm = false, mem_wide_bus;
3193 enum dbg_grc_params grc_param;
3194 char storm_letter = 'a';
3195 enum block_id block_id;
3196 u32 mem_addr, mem_len;
3197
3198 if (mem_group_id >= MEM_GROUPS_NUM) {
3199 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3200 return 0;
3201 }
3202
3203 block_id = (enum block_id)cond_hdr->block_id;
3204 if (!qed_grc_is_mem_included(p_hwfn,
3205 block_id,
3206 mem_group_id))
3207 continue;
3208
3209 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3210 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3211 mem_wide_bus = GET_FIELD(mem->dword1,
3212 DBG_DUMP_MEM_WIDE_BUS);
3213
3214 /* Update memory length for CCFC/TCFC memories
3215 * according to number of LCIDs/LTIDs.
3216 */
3217 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3218 if (mem_len % MAX_LCIDS) {
3219 DP_NOTICE(p_hwfn,
3220 "Invalid CCFC connection memory size\n");
3221 return 0;
3222 }
3223
3224 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3225 mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3226 (mem_len / MAX_LCIDS);
3227 } else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3228 if (mem_len % MAX_LTIDS) {
3229 DP_NOTICE(p_hwfn,
3230 "Invalid TCFC task memory size\n");
3231 return 0;
3232 }
3233
3234 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3235 mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3236 (mem_len / MAX_LTIDS);
3237 }
3238
3239 /* If memory is associated with Storm, update Storm
3240 * details.
3241 */
3242 if (s_block_defs
3243 [cond_hdr->block_id]->associated_to_storm) {
3244 is_storm = true;
3245 storm_letter =
3246 s_storm_defs[s_block_defs
3247 [cond_hdr->block_id]->
3248 storm_id].letter;
3249 }
3250
3251 /* Dump memory */
3252 offset += qed_grc_dump_mem(p_hwfn,
3253 p_ptt,
3254 dump_buf + offset,
3255 dump,
3256 NULL,
3257 mem_addr,
3258 mem_len,
3259 mem_wide_bus,
3260 0,
3261 false,
3262 s_mem_group_names[mem_group_id],
3263 is_storm,
3264 storm_letter);
3265 }
3266 }
3267
3268 return offset;
3269 }
3270
3271 /* Dumps GRC memories according to the input array dump_mem.
3272 * Returns the dumped size in dwords.
3273 */
3274 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3275 struct qed_ptt *p_ptt,
3276 u32 *dump_buf, bool dump)
3277 {
3278 u32 offset = 0, input_offset = 0;
3279
3280 while (input_offset <
3281 s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3282 const struct dbg_dump_split_hdr *split_hdr;
3283 struct dbg_array curr_input_mems_arr;
3284 enum init_split_types split_type;
3285 u32 split_data_size;
3286
3287 split_hdr = (const struct dbg_dump_split_hdr *)
3288 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3289 split_type =
3290 GET_FIELD(split_hdr->hdr,
3291 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3292 split_data_size =
3293 GET_FIELD(split_hdr->hdr,
3294 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3295 curr_input_mems_arr.ptr =
3296 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3297 curr_input_mems_arr.size_in_dwords = split_data_size;
3298
3299 if (split_type == SPLIT_TYPE_NONE)
3300 offset += qed_grc_dump_mem_entries(p_hwfn,
3301 p_ptt,
3302 curr_input_mems_arr,
3303 dump_buf + offset,
3304 dump);
3305 else
3306 DP_NOTICE(p_hwfn,
3307 "Dumping split memories is currently not supported\n");
3308
3309 input_offset += split_data_size;
3310 }
3311
3312 return offset;
3313 }
3314
3315 /* Dumps GRC context data for the specified Storm.
3316 * Returns the dumped size in dwords.
3317 * The lid_size argument is specified in quad-regs.
3318 */
3319 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3320 struct qed_ptt *p_ptt,
3321 u32 *dump_buf,
3322 bool dump,
3323 const char *name,
3324 u32 num_lids,
3325 u32 lid_size,
3326 u32 rd_reg_addr,
3327 u8 storm_id)
3328 {
3329 struct storm_defs *storm = &s_storm_defs[storm_id];
3330 u32 i, lid, total_size, offset = 0;
3331
3332 if (!lid_size)
3333 return 0;
3334
3335 lid_size *= BYTES_IN_DWORD;
3336 total_size = num_lids * lid_size;
3337
3338 offset += qed_grc_dump_mem_hdr(p_hwfn,
3339 dump_buf + offset,
3340 dump,
3341 name,
3342 0,
3343 total_size,
3344 lid_size * 32,
3345 false, name, true, storm->letter);
3346
3347 if (!dump)
3348 return offset + total_size;
3349
3350 /* Dump context data */
3351 for (lid = 0; lid < num_lids; lid++) {
3352 for (i = 0; i < lid_size; i++, offset++) {
3353 qed_wr(p_hwfn,
3354 p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3355 *(dump_buf + offset) = qed_rd(p_hwfn,
3356 p_ptt, rd_reg_addr);
3357 }
3358 }
3359
3360 return offset;
3361 }
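/* Illustration of the read loop above: for each LID, every context dword is
 * fetched by writing (i << 9) | lid to the Storm's cm_ctx_wr_addr (which
 * appears to select the dword index and LID) and then reading one dword back
 * from rd_reg_addr, i.e. one GRC write plus one GRC read per context dword
 * (four dwords per quad-reg).
 */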
3362
3363 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3364 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3365 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3366 {
3367 enum dbg_grc_params grc_param;
3368 u32 offset = 0;
3369 u8 storm_id;
3370
3371 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3372 struct storm_defs *storm = &s_storm_defs[storm_id];
3373
3374 if (!qed_grc_is_storm_included(p_hwfn,
3375 (enum dbg_storms)storm_id))
3376 continue;
3377
3378 /* Dump Conn AG context */
3379 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3380 offset +=
3381 qed_grc_dump_ctx_data(p_hwfn,
3382 p_ptt,
3383 dump_buf + offset,
3384 dump,
3385 "CONN_AG_CTX",
3386 qed_grc_get_param(p_hwfn,
3387 grc_param),
3388 storm->cm_conn_ag_ctx_lid_size,
3389 storm->cm_conn_ag_ctx_rd_addr,
3390 storm_id);
3391
3392 /* Dump Conn ST context */
3393 grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3394 offset +=
3395 qed_grc_dump_ctx_data(p_hwfn,
3396 p_ptt,
3397 dump_buf + offset,
3398 dump,
3399 "CONN_ST_CTX",
3400 qed_grc_get_param(p_hwfn,
3401 grc_param),
3402 storm->cm_conn_st_ctx_lid_size,
3403 storm->cm_conn_st_ctx_rd_addr,
3404 storm_id);
3405
3406 /* Dump Task AG context */
3407 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3408 offset +=
3409 qed_grc_dump_ctx_data(p_hwfn,
3410 p_ptt,
3411 dump_buf + offset,
3412 dump,
3413 "TASK_AG_CTX",
3414 qed_grc_get_param(p_hwfn,
3415 grc_param),
3416 storm->cm_task_ag_ctx_lid_size,
3417 storm->cm_task_ag_ctx_rd_addr,
3418 storm_id);
3419
3420 /* Dump Task ST context */
3421 grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3422 offset +=
3423 qed_grc_dump_ctx_data(p_hwfn,
3424 p_ptt,
3425 dump_buf + offset,
3426 dump,
3427 "TASK_ST_CTX",
3428 qed_grc_get_param(p_hwfn,
3429 grc_param),
3430 storm->cm_task_st_ctx_lid_size,
3431 storm->cm_task_st_ctx_rd_addr,
3432 storm_id);
3433 }
3434
3435 return offset;
3436 }
3437
3438 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3439 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3440 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3441 {
3442 char buf[10] = "IOR_SET_?";
3443 u32 addr, offset = 0;
3444 u8 storm_id, set_id;
3445
3446 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3447 struct storm_defs *storm = &s_storm_defs[storm_id];
3448
3449 if (!qed_grc_is_storm_included(p_hwfn,
3450 (enum dbg_storms)storm_id))
3451 continue;
3452
3453 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3454 addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3455 SEM_FAST_REG_STORM_REG_FILE) +
3456 IOR_SET_OFFSET(set_id);
3457 buf[strlen(buf) - 1] = '0' + set_id;
3458 offset += qed_grc_dump_mem(p_hwfn,
3459 p_ptt,
3460 dump_buf + offset,
3461 dump,
3462 buf,
3463 addr,
3464 IORS_PER_SET,
3465 false,
3466 32,
3467 false,
3468 "ior",
3469 true,
3470 storm->letter);
3471 }
3472 }
3473
3474 return offset;
3475 }
3476
3477 /* Dump VFC CAM. Returns the dumped size in dwords. */
3478 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3479 struct qed_ptt *p_ptt,
3480 u32 *dump_buf, bool dump, u8 storm_id)
3481 {
3482 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3483 struct storm_defs *storm = &s_storm_defs[storm_id];
3484 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3485 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3486 u32 row, i, offset = 0;
3487
3488 offset += qed_grc_dump_mem_hdr(p_hwfn,
3489 dump_buf + offset,
3490 dump,
3491 "vfc_cam",
3492 0,
3493 total_size,
3494 256,
3495 false, "vfc_cam", true, storm->letter);
3496
3497 if (!dump)
3498 return offset + total_size;
3499
3500 /* Prepare CAM address */
3501 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3502
3503 for (row = 0; row < VFC_CAM_NUM_ROWS;
3504 row++, offset += VFC_CAM_RESP_DWORDS) {
3505 /* Write VFC CAM command */
3506 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3507 ARR_REG_WR(p_hwfn,
3508 p_ptt,
3509 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3510 cam_cmd, VFC_CAM_CMD_DWORDS);
3511
3512 /* Write VFC CAM address */
3513 ARR_REG_WR(p_hwfn,
3514 p_ptt,
3515 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3516 cam_addr, VFC_CAM_ADDR_DWORDS);
3517
3518 /* Read VFC CAM read response */
3519 ARR_REG_RD(p_hwfn,
3520 p_ptt,
3521 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3522 dump_buf + offset, VFC_CAM_RESP_DWORDS);
3523 }
3524
3525 return offset;
3526 }
3527
3528 /* Dump VFC RAM. Returns the dumped size in dwords. */
3529 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3530 struct qed_ptt *p_ptt,
3531 u32 *dump_buf,
3532 bool dump,
3533 u8 storm_id, struct vfc_ram_defs *ram_defs)
3534 {
3535 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3536 struct storm_defs *storm = &s_storm_defs[storm_id];
3537 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3538 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3539 u32 row, i, offset = 0;
3540
3541 offset += qed_grc_dump_mem_hdr(p_hwfn,
3542 dump_buf + offset,
3543 dump,
3544 ram_defs->mem_name,
3545 0,
3546 total_size,
3547 256,
3548 false,
3549 ram_defs->type_name,
3550 true, storm->letter);
3551
3552 /* Prepare RAM address */
3553 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3554
3555 if (!dump)
3556 return offset + total_size;
3557
3558 for (row = ram_defs->base_row;
3559 row < ram_defs->base_row + ram_defs->num_rows;
3560 row++, offset += VFC_RAM_RESP_DWORDS) {
3561 /* Write VFC RAM command */
3562 ARR_REG_WR(p_hwfn,
3563 p_ptt,
3564 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3565 ram_cmd, VFC_RAM_CMD_DWORDS);
3566
3567 /* Write VFC RAM address */
3568 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3569 ARR_REG_WR(p_hwfn,
3570 p_ptt,
3571 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3572 ram_addr, VFC_RAM_ADDR_DWORDS);
3573
3574 /* Read VFC RAM read response */
3575 ARR_REG_RD(p_hwfn,
3576 p_ptt,
3577 storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3578 dump_buf + offset, VFC_RAM_RESP_DWORDS);
3579 }
3580
3581 return offset;
3582 }
3583
3584 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3585 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3586 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3587 {
3588 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3589 u8 storm_id, i;
3590 u32 offset = 0;
3591
3592 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3593 if (!qed_grc_is_storm_included(p_hwfn,
3594 (enum dbg_storms)storm_id) ||
3595 !s_storm_defs[storm_id].has_vfc ||
3596 (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3597 PLATFORM_ASIC))
3598 continue;
3599
3600 /* Read CAM */
3601 offset += qed_grc_dump_vfc_cam(p_hwfn,
3602 p_ptt,
3603 dump_buf + offset,
3604 dump, storm_id);
3605
3606 /* Read RAM */
3607 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3608 offset += qed_grc_dump_vfc_ram(p_hwfn,
3609 p_ptt,
3610 dump_buf + offset,
3611 dump,
3612 storm_id,
3613 &s_vfc_ram_defs[i]);
3614 }
3615
3616 return offset;
3617 }
3618
3619 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3620 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3621 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3622 {
3623 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3624 u32 offset = 0;
3625 u8 rss_mem_id;
3626
3627 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3628 u32 rss_addr, num_entries, total_dwords;
3629 struct rss_mem_defs *rss_defs;
3630 u32 addr, num_dwords_to_read;
3631 bool packed;
3632
3633 rss_defs = &s_rss_mem_defs[rss_mem_id];
3634 rss_addr = rss_defs->addr;
3635 num_entries = rss_defs->num_entries[dev_data->chip_id];
3636 total_dwords = (num_entries * rss_defs->entry_width) / 32;
3637 packed = (rss_defs->entry_width == 16);
3638
3639 offset += qed_grc_dump_mem_hdr(p_hwfn,
3640 dump_buf + offset,
3641 dump,
3642 rss_defs->mem_name,
3643 0,
3644 total_dwords,
3645 rss_defs->entry_width,
3646 packed,
3647 rss_defs->type_name, false, 0);
3648
3649 /* Dump RSS data */
3650 if (!dump) {
3651 offset += total_dwords;
3652 continue;
3653 }
3654
3655 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3656 while (total_dwords) {
3657 num_dwords_to_read = min_t(u32,
3658 RSS_REG_RSS_RAM_DATA_SIZE,
3659 total_dwords);
3660 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3661 offset += qed_grc_dump_addr_range(p_hwfn,
3662 p_ptt,
3663 dump_buf + offset,
3664 dump,
3665 addr,
3666 num_dwords_to_read,
3667 false,
3668 SPLIT_TYPE_NONE, 0);
3669 total_dwords -= num_dwords_to_read;
3670 rss_addr++;
3671 }
3672 }
3673
3674 return offset;
3675 }
3676
3677 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3678 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3679 struct qed_ptt *p_ptt,
3680 u32 *dump_buf, bool dump, u8 big_ram_id)
3681 {
3682 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3683 u32 block_size, ram_size, offset = 0, reg_val, i;
3684 char mem_name[12] = "???_BIG_RAM";
3685 char type_name[8] = "???_RAM";
3686 struct big_ram_defs *big_ram;
3687
3688 big_ram = &s_big_ram_defs[big_ram_id];
3689 ram_size = big_ram->ram_size[dev_data->chip_id];
3690
3691 reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3692 block_size = reg_val &
3693 BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3694 : 128;
3695
3696 strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3697 strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3698
3699 /* Dump memory header */
3700 offset += qed_grc_dump_mem_hdr(p_hwfn,
3701 dump_buf + offset,
3702 dump,
3703 mem_name,
3704 0,
3705 ram_size,
3706 block_size * 8,
3707 false, type_name, false, 0);
3708
3709 /* Read and dump Big RAM data */
3710 if (!dump)
3711 return offset + ram_size;
3712
3713 /* Dump Big RAM */
3714 for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3715 i++) {
3716 u32 addr, len;
3717
3718 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3719 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3720 len = BRB_REG_BIG_RAM_DATA_SIZE;
3721 offset += qed_grc_dump_addr_range(p_hwfn,
3722 p_ptt,
3723 dump_buf + offset,
3724 dump,
3725 addr,
3726 len,
3727 false, SPLIT_TYPE_NONE, 0);
3728 }
3729
3730 return offset;
3731 }
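/* Note: Big RAM rows are read indirectly as well - the row index is written
 * to the block's address register and BRB_REG_BIG_RAM_DATA_SIZE dwords are
 * then read back from its data register. The is_256b register only selects
 * whether a row is reported as 128 or 256 bytes wide in the dumped memory
 * header; the read loop itself is unaffected.
 */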
3732
3733 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3734 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3735 {
3736 bool block_enable[MAX_BLOCK_ID] = { 0 };
3737 u32 offset = 0, addr;
3738 bool halted = false;
3739
3740 /* Halt MCP */
3741 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3742 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3743 if (!halted)
3744 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3745 }
3746
3747 /* Dump MCP scratchpad */
3748 offset += qed_grc_dump_mem(p_hwfn,
3749 p_ptt,
3750 dump_buf + offset,
3751 dump,
3752 NULL,
3753 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3754 MCP_REG_SCRATCH_SIZE_BB_K2,
3755 false, 0, false, "MCP", false, 0);
3756
3757 /* Dump MCP cpu_reg_file */
3758 offset += qed_grc_dump_mem(p_hwfn,
3759 p_ptt,
3760 dump_buf + offset,
3761 dump,
3762 NULL,
3763 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3764 MCP_REG_CPU_REG_FILE_SIZE,
3765 false, 0, false, "MCP", false, 0);
3766
3767 /* Dump MCP registers */
3768 block_enable[BLOCK_MCP] = true;
3769 offset += qed_grc_dump_registers(p_hwfn,
3770 p_ptt,
3771 dump_buf + offset,
3772 dump, block_enable, "block", "MCP");
3773
3774 /* Dump required non-MCP registers */
3775 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3776 dump, 1, SPLIT_TYPE_NONE, 0,
3777 "block", "MCP");
3778 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3779 offset += qed_grc_dump_reg_entry(p_hwfn,
3780 p_ptt,
3781 dump_buf + offset,
3782 dump,
3783 addr,
3784 1,
3785 false, SPLIT_TYPE_NONE, 0);
3786
3787 /* Release MCP */
3788 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3789 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3790
3791 return offset;
3792 }
3793
3794 /* Dumps the tbus indirect memory for all PHYs. */
3795 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3796 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3797 {
3798 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3799 char mem_name[32];
3800 u8 phy_id;
3801
3802 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3803 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3804 struct phy_defs *phy_defs;
3805 u8 *bytes_buf;
3806
3807 phy_defs = &s_phy_defs[phy_id];
3808 addr_lo_addr = phy_defs->base_addr +
3809 phy_defs->tbus_addr_lo_addr;
3810 addr_hi_addr = phy_defs->base_addr +
3811 phy_defs->tbus_addr_hi_addr;
3812 data_lo_addr = phy_defs->base_addr +
3813 phy_defs->tbus_data_lo_addr;
3814 data_hi_addr = phy_defs->base_addr +
3815 phy_defs->tbus_data_hi_addr;
3816
3817 if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3818 phy_defs->phy_name) < 0)
3819 DP_NOTICE(p_hwfn,
3820 "Unexpected debug error: invalid PHY memory name\n");
3821
3822 offset += qed_grc_dump_mem_hdr(p_hwfn,
3823 dump_buf + offset,
3824 dump,
3825 mem_name,
3826 0,
3827 PHY_DUMP_SIZE_DWORDS,
3828 16, true, mem_name, false, 0);
3829
3830 if (!dump) {
3831 offset += PHY_DUMP_SIZE_DWORDS;
3832 continue;
3833 }
3834
3835 bytes_buf = (u8 *)(dump_buf + offset);
3836 for (tbus_hi_offset = 0;
3837 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3838 tbus_hi_offset++) {
3839 qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3840 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3841 tbus_lo_offset++) {
3842 qed_wr(p_hwfn,
3843 p_ptt, addr_lo_addr, tbus_lo_offset);
3844 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3845 p_ptt,
3846 data_lo_addr);
3847 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3848 p_ptt,
3849 data_hi_addr);
3850 }
3851 }
3852
3853 offset += PHY_DUMP_SIZE_DWORDS;
3854 }
3855
3856 return offset;
3857 }
3858
3859 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3860 struct qed_ptt *p_ptt,
3861 enum block_id block_id,
3862 u8 line_id,
3863 u8 enable_mask,
3864 u8 right_shift,
3865 u8 force_valid_mask, u8 force_frame_mask)
3866 {
3867 struct block_defs *block = s_block_defs[block_id];
3868
3869 qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3870 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3871 qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3872 qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3873 qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3874 }
3875
3876 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3877 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3878 struct qed_ptt *p_ptt,
3879 u32 *dump_buf, bool dump)
3880 {
3881 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3882 u32 block_id, line_id, offset = 0;
3883
3884 /* Don't dump static debug if a debug bus recording is in progress */
3885 if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3886 return 0;
3887
3888 if (dump) {
3889 /* Disable all blocks debug output */
3890 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3891 struct block_defs *block = s_block_defs[block_id];
3892
3893 if (block->dbg_client_id[dev_data->chip_id] !=
3894 MAX_DBG_BUS_CLIENTS)
3895 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3896 0);
3897 }
3898
3899 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3900 qed_bus_set_framing_mode(p_hwfn,
3901 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3902 qed_wr(p_hwfn,
3903 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3904 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3905 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3906 }
3907
3908 /* Dump all static debug lines for each relevant block */
3909 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3910 struct block_defs *block = s_block_defs[block_id];
3911 struct dbg_bus_block *block_desc;
3912 u32 block_dwords, addr, len;
3913 u8 dbg_client_id;
3914
3915 if (block->dbg_client_id[dev_data->chip_id] ==
3916 MAX_DBG_BUS_CLIENTS)
3917 continue;
3918
3919 block_desc = get_dbg_bus_block_desc(p_hwfn,
3920 (enum block_id)block_id);
3921 block_dwords = NUM_DBG_LINES(block_desc) *
3922 STATIC_DEBUG_LINE_DWORDS;
3923
3924 /* Dump static section params */
3925 offset += qed_grc_dump_mem_hdr(p_hwfn,
3926 dump_buf + offset,
3927 dump,
3928 block->name,
3929 0,
3930 block_dwords,
3931 32, false, "STATIC", false, 0);
3932
3933 if (!dump) {
3934 offset += block_dwords;
3935 continue;
3936 }
3937
3938 /* If all lines are invalid - dump zeros */
3939 if (dev_data->block_in_reset[block_id]) {
3940 memset(dump_buf + offset, 0,
3941 DWORDS_TO_BYTES(block_dwords));
3942 offset += block_dwords;
3943 continue;
3944 }
3945
3946 /* Enable block's client */
3947 dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3948 qed_bus_enable_clients(p_hwfn,
3949 p_ptt,
3950 BIT(dbg_client_id));
3951
3952 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3953 len = STATIC_DEBUG_LINE_DWORDS;
3954 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3955 line_id++) {
3956 /* Configure debug line ID */
3957 qed_config_dbg_line(p_hwfn,
3958 p_ptt,
3959 (enum block_id)block_id,
3960 (u8)line_id, 0xf, 0, 0, 0);
3961
3962 /* Read debug line info */
3963 offset += qed_grc_dump_addr_range(p_hwfn,
3964 p_ptt,
3965 dump_buf + offset,
3966 dump,
3967 addr,
3968 len,
3969 true, SPLIT_TYPE_NONE,
3970 0);
3971 }
3972
3973 /* Disable block's client and debug output */
3974 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3975 qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3976 }
3977
3978 if (dump) {
3979 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3980 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3981 }
3982
3983 return offset;
3984 }
3985
3986 /* Performs GRC Dump to the specified buffer.
3987 * Returns the dumped size in dwords.
3988 */
3989 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3990 struct qed_ptt *p_ptt,
3991 u32 *dump_buf,
3992 bool dump, u32 *num_dumped_dwords)
3993 {
3994 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3995 bool parities_masked = false;
3996 u32 offset = 0;
3997 u8 i;
3998
3999 *num_dumped_dwords = 0;
4000 dev_data->num_regs_read = 0;
4001
4002 /* Update reset state */
4003 if (dump)
4004 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4005
4006 /* Dump global params */
4007 offset += qed_dump_common_global_params(p_hwfn,
4008 p_ptt,
4009 dump_buf + offset, dump, 4);
4010 offset += qed_dump_str_param(dump_buf + offset,
4011 dump, "dump-type", "grc-dump");
4012 offset += qed_dump_num_param(dump_buf + offset,
4013 dump,
4014 "num-lcids",
4015 qed_grc_get_param(p_hwfn,
4016 DBG_GRC_PARAM_NUM_LCIDS));
4017 offset += qed_dump_num_param(dump_buf + offset,
4018 dump,
4019 "num-ltids",
4020 qed_grc_get_param(p_hwfn,
4021 DBG_GRC_PARAM_NUM_LTIDS));
4022 offset += qed_dump_num_param(dump_buf + offset,
4023 dump, "num-ports", dev_data->num_ports);
4024
4025 /* Dump reset registers (dumped before taking blocks out of reset) */
4026 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4027 offset += qed_grc_dump_reset_regs(p_hwfn,
4028 p_ptt,
4029 dump_buf + offset, dump);
4030
4031 /* Take all blocks out of reset (using reset registers) */
4032 if (dump) {
4033 qed_grc_unreset_blocks(p_hwfn, p_ptt);
4034 qed_update_blocks_reset_state(p_hwfn, p_ptt);
4035 }
4036
4037 /* Disable all parities using MFW command */
4038 if (dump &&
4039 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4040 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4041 if (!parities_masked) {
4042 DP_NOTICE(p_hwfn,
4043 "Failed to mask parities using MFW\n");
4044 if (qed_grc_get_param
4045 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4046 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4047 }
4048 }
4049
4050 /* Dump modified registers (dumped before modifying them) */
4051 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4052 offset += qed_grc_dump_modified_regs(p_hwfn,
4053 p_ptt,
4054 dump_buf + offset, dump);
4055
4056 /* Stall storms */
4057 if (dump &&
4058 (qed_grc_is_included(p_hwfn,
4059 DBG_GRC_PARAM_DUMP_IOR) ||
4060 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4061 qed_grc_stall_storms(p_hwfn, p_ptt, true);
4062
4063 /* Dump all regs */
4064 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4065 bool block_enable[MAX_BLOCK_ID];
4066
4067 /* Dump all blocks except MCP */
4068 for (i = 0; i < MAX_BLOCK_ID; i++)
4069 block_enable[i] = true;
4070 block_enable[BLOCK_MCP] = false;
4071 offset += qed_grc_dump_registers(p_hwfn,
4072 p_ptt,
4073 dump_buf +
4074 offset,
4075 dump,
4076 block_enable, NULL, NULL);
4077
4078 /* Dump special registers */
4079 offset += qed_grc_dump_special_regs(p_hwfn,
4080 p_ptt,
4081 dump_buf + offset, dump);
4082 }
4083
4084 /* Dump memories */
4085 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4086
4087 /* Dump MCP */
4088 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4089 offset += qed_grc_dump_mcp(p_hwfn,
4090 p_ptt, dump_buf + offset, dump);
4091
4092 /* Dump context */
4093 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4094 offset += qed_grc_dump_ctx(p_hwfn,
4095 p_ptt, dump_buf + offset, dump);
4096
4097 /* Dump RSS memories */
4098 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4099 offset += qed_grc_dump_rss(p_hwfn,
4100 p_ptt, dump_buf + offset, dump);
4101
4102 /* Dump Big RAM */
4103 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4104 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4105 offset += qed_grc_dump_big_ram(p_hwfn,
4106 p_ptt,
4107 dump_buf + offset,
4108 dump, i);
4109
4110 /* Dump IORs */
4111 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4112 offset += qed_grc_dump_iors(p_hwfn,
4113 p_ptt, dump_buf + offset, dump);
4114
4115 /* Dump VFC */
4116 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4117 offset += qed_grc_dump_vfc(p_hwfn,
4118 p_ptt, dump_buf + offset, dump);
4119
4120 /* Dump PHY tbus */
4121 if (qed_grc_is_included(p_hwfn,
4122 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4123 CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4124 offset += qed_grc_dump_phy(p_hwfn,
4125 p_ptt, dump_buf + offset, dump);
4126
4127 /* Dump static debug data (only if not during debug bus recording) */
4128 if (qed_grc_is_included(p_hwfn,
4129 DBG_GRC_PARAM_DUMP_STATIC) &&
4130 (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
4131 offset += qed_grc_dump_static_debug(p_hwfn,
4132 p_ptt,
4133 dump_buf + offset, dump);
4134
4135 /* Dump last section */
4136 offset += qed_dump_last_section(dump_buf, offset, dump);
4137
4138 if (dump) {
4139 /* Unstall storms */
4140 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4141 qed_grc_stall_storms(p_hwfn, p_ptt, false);
4142
4143 /* Clear parity status */
4144 qed_grc_clear_all_prty(p_hwfn, p_ptt);
4145
4146 /* Enable all parities using MFW command */
4147 if (parities_masked)
4148 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4149 }
4150
4151 *num_dumped_dwords = offset;
4152
4153 return DBG_STATUS_OK;
4154 }
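/* Note: like the other dump routines in this file, qed_grc_dump() is driven
 * twice by the public API - once with dump == false, where nothing is
 * written and only the required buffer size (in dwords) is accumulated in
 * the returned offset, and once with dump == true to fill the caller's
 * buffer. Hardware is only touched (blocks taken out of reset, parities
 * masked, storms stalled) on the dump == true pass.
 */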
4155
4156 /* Writes the specified failing Idle Check rule to the specified buffer.
4157 * Returns the dumped size in dwords.
4158 */
4159 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4160 struct qed_ptt *p_ptt,
4161 u32 *
4162 dump_buf,
4163 bool dump,
4164 u16 rule_id,
4165 const struct dbg_idle_chk_rule *rule,
4166 u16 fail_entry_id, u32 *cond_reg_values)
4167 {
4168 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4169 const struct dbg_idle_chk_cond_reg *cond_regs;
4170 const struct dbg_idle_chk_info_reg *info_regs;
4171 u32 i, next_reg_offset = 0, offset = 0;
4172 struct dbg_idle_chk_result_hdr *hdr;
4173 const union dbg_idle_chk_reg *regs;
4174 u8 reg_id;
4175
4176 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4177 regs = &((const union dbg_idle_chk_reg *)
4178 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4179 cond_regs = &regs[0].cond_reg;
4180 info_regs = &regs[rule->num_cond_regs].info_reg;
4181
4182 /* Dump rule data */
4183 if (dump) {
4184 memset(hdr, 0, sizeof(*hdr));
4185 hdr->rule_id = rule_id;
4186 hdr->mem_entry_id = fail_entry_id;
4187 hdr->severity = rule->severity;
4188 hdr->num_dumped_cond_regs = rule->num_cond_regs;
4189 }
4190
4191 offset += IDLE_CHK_RESULT_HDR_DWORDS;
4192
4193 /* Dump condition register values */
4194 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4195 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4196 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4197
4198 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4199 (dump_buf + offset);
4200
4201 /* Write register header */
4202 if (!dump) {
4203 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4204 reg->entry_size;
4205 continue;
4206 }
4207
4208 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4209 memset(reg_hdr, 0, sizeof(*reg_hdr));
4210 reg_hdr->start_entry = reg->start_entry;
4211 reg_hdr->size = reg->entry_size;
4212 SET_FIELD(reg_hdr->data,
4213 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4214 reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4215 SET_FIELD(reg_hdr->data,
4216 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4217
4218 /* Write register values */
4219 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4220 dump_buf[offset] = cond_reg_values[next_reg_offset];
4221 }
4222
4223 /* Dump info register values */
4224 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4225 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4226 u32 block_id;
4227
4228 /* Check if register's block is in reset */
4229 if (!dump) {
4230 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4231 continue;
4232 }
4233
4234 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4235 if (block_id >= MAX_BLOCK_ID) {
4236 DP_NOTICE(p_hwfn, "Invalid block_id\n");
4237 return 0;
4238 }
4239
4240 if (!dev_data->block_in_reset[block_id]) {
4241 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4242 bool wide_bus, eval_mode, mode_match = true;
4243 u16 modes_buf_offset;
4244 u32 addr;
4245
4246 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4247 (dump_buf + offset);
4248
4249 /* Check mode */
4250 eval_mode = GET_FIELD(reg->mode.data,
4251 DBG_MODE_HDR_EVAL_MODE) > 0;
4252 if (eval_mode) {
4253 modes_buf_offset =
4254 GET_FIELD(reg->mode.data,
4255 DBG_MODE_HDR_MODES_BUF_OFFSET);
4256 mode_match =
4257 qed_is_mode_match(p_hwfn,
4258 &modes_buf_offset);
4259 }
4260
4261 if (!mode_match)
4262 continue;
4263
4264 addr = GET_FIELD(reg->data,
4265 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4266 wide_bus = GET_FIELD(reg->data,
4267 DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4268
4269 /* Write register header */
4270 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4271 hdr->num_dumped_info_regs++;
4272 memset(reg_hdr, 0, sizeof(*reg_hdr));
4273 reg_hdr->size = reg->size;
4274 SET_FIELD(reg_hdr->data,
4275 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4276 rule->num_cond_regs + reg_id);
4277
4278 /* Write register values */
4279 offset += qed_grc_dump_addr_range(p_hwfn,
4280 p_ptt,
4281 dump_buf + offset,
4282 dump,
4283 addr,
4284 reg->size, wide_bus,
4285 SPLIT_TYPE_NONE, 0);
4286 }
4287 }
4288
4289 return offset;
4290 }
4291
4292 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4293 static u32
4294 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4295 u32 *dump_buf, bool dump,
4296 const struct dbg_idle_chk_rule *input_rules,
4297 u32 num_input_rules, u32 *num_failing_rules)
4298 {
4299 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4300 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4301 u32 i, offset = 0;
4302 u16 entry_id;
4303 u8 reg_id;
4304
4305 *num_failing_rules = 0;
4306
4307 for (i = 0; i < num_input_rules; i++) {
4308 const struct dbg_idle_chk_cond_reg *cond_regs;
4309 const struct dbg_idle_chk_rule *rule;
4310 const union dbg_idle_chk_reg *regs;
4311 u16 num_reg_entries = 1;
4312 bool check_rule = true;
4313 const u32 *imm_values;
4314
4315 rule = &input_rules[i];
4316 regs = &((const union dbg_idle_chk_reg *)
4317 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4318 [rule->reg_offset];
4319 cond_regs = &regs[0].cond_reg;
4320 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4321 [rule->imm_offset];
4322
4323 /* Check if all condition register blocks are out of reset, and
4324 * find maximal number of entries (all condition registers that
4325 * are memories must have the same size, which is > 1).
4326 */
4327 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4328 reg_id++) {
4329 u32 block_id =
4330 GET_FIELD(cond_regs[reg_id].data,
4331 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4332
4333 if (block_id >= MAX_BLOCK_ID) {
4334 DP_NOTICE(p_hwfn, "Invalid block_id\n");
4335 return 0;
4336 }
4337
4338 check_rule = !dev_data->block_in_reset[block_id];
4339 if (cond_regs[reg_id].num_entries > num_reg_entries)
4340 num_reg_entries = cond_regs[reg_id].num_entries;
4341 }
4342
4343 if (!check_rule && dump)
4344 continue;
4345
4346 if (!dump) {
4347 u32 entry_dump_size =
4348 qed_idle_chk_dump_failure(p_hwfn,
4349 p_ptt,
4350 dump_buf + offset,
4351 false,
4352 rule->rule_id,
4353 rule,
4354 0,
4355 NULL);
4356
4357 offset += num_reg_entries * entry_dump_size;
4358 (*num_failing_rules) += num_reg_entries;
4359 continue;
4360 }
4361
4362 /* Go over all register entries (number of entries is the same
4363 * for all condition registers).
4364 */
4365 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4366 u32 next_reg_offset = 0;
4367
4368 /* Read current entry of all condition registers */
4369 for (reg_id = 0; reg_id < rule->num_cond_regs;
4370 reg_id++) {
4371 const struct dbg_idle_chk_cond_reg *reg =
4372 &cond_regs[reg_id];
4373 u32 padded_entry_size, addr;
4374 bool wide_bus;
4375
4376 /* Find GRC address (if it's a memory, the
4377 * address of the specific entry is calculated).
4378 */
4379 addr = GET_FIELD(reg->data,
4380 DBG_IDLE_CHK_COND_REG_ADDRESS);
4381 wide_bus =
4382 GET_FIELD(reg->data,
4383 DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4384 if (reg->num_entries > 1 ||
4385 reg->start_entry > 0) {
4386 padded_entry_size =
4387 reg->entry_size > 1 ?
4388 roundup_pow_of_two(reg->entry_size) :
4389 1;
4390 addr += (reg->start_entry + entry_id) *
4391 padded_entry_size;
4392 }
4393
4394 /* Read registers */
4395 if (next_reg_offset + reg->entry_size >=
4396 IDLE_CHK_MAX_ENTRIES_SIZE) {
4397 DP_NOTICE(p_hwfn,
4398 "idle check registers entry is too large\n");
4399 return 0;
4400 }
4401
4402 next_reg_offset +=
4403 qed_grc_dump_addr_range(p_hwfn, p_ptt,
4404 cond_reg_values +
4405 next_reg_offset,
4406 dump, addr,
4407 reg->entry_size,
4408 wide_bus,
4409 SPLIT_TYPE_NONE, 0);
4410 }
4411
4412 /* Call rule condition function.
4413 * If returns true, it's a failure.
4414 */
4415 if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4416 imm_values)) {
4417 offset += qed_idle_chk_dump_failure(p_hwfn,
4418 p_ptt,
4419 dump_buf + offset,
4420 dump,
4421 rule->rule_id,
4422 rule,
4423 entry_id,
4424 cond_reg_values);
4425 (*num_failing_rules)++;
4426 }
4427 }
4428 }
4429
4430 return offset;
4431 }
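/* Note: in the size-only pass (dump == false) the loop above cannot know
 * which rules will actually fail, so it reserves worst-case space - every
 * rule is assumed to fail for each of its register entries. The real number
 * of failing rules is written back into the "num_rules" parameter by the
 * caller after the actual dump has been performed.
 */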
4432
4433 /* Performs Idle Check Dump to the specified buffer.
4434 * Returns the dumped size in dwords.
4435 */
4436 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4437 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4438 {
4439 u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4440 u32 num_failing_rules = 0;
4441
4442 /* Dump global params */
4443 offset += qed_dump_common_global_params(p_hwfn,
4444 p_ptt,
4445 dump_buf + offset, dump, 1);
4446 offset += qed_dump_str_param(dump_buf + offset,
4447 dump, "dump-type", "idle-chk");
4448
4449 /* Dump idle check section header with a single parameter */
4450 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4451 num_failing_rules_offset = offset;
4452 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4453
4454 while (input_offset <
4455 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4456 const struct dbg_idle_chk_cond_hdr *cond_hdr =
4457 (const struct dbg_idle_chk_cond_hdr *)
4458 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4459 [input_offset++];
4460 bool eval_mode, mode_match = true;
4461 u32 curr_failing_rules;
4462 u16 modes_buf_offset;
4463
4464 /* Check mode */
4465 eval_mode = GET_FIELD(cond_hdr->mode.data,
4466 DBG_MODE_HDR_EVAL_MODE) > 0;
4467 if (eval_mode) {
4468 modes_buf_offset =
4469 GET_FIELD(cond_hdr->mode.data,
4470 DBG_MODE_HDR_MODES_BUF_OFFSET);
4471 mode_match = qed_is_mode_match(p_hwfn,
4472 &modes_buf_offset);
4473 }
4474
4475 if (mode_match) {
4476 offset +=
4477 qed_idle_chk_dump_rule_entries(p_hwfn,
4478 p_ptt,
4479 dump_buf + offset,
4480 dump,
4481 (const struct dbg_idle_chk_rule *)
4482 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4483 ptr[input_offset],
4484 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4485 &curr_failing_rules);
4486 num_failing_rules += curr_failing_rules;
4487 }
4488
4489 input_offset += cond_hdr->data_size;
4490 }
4491
4492 /* Overwrite num_rules parameter */
4493 if (dump)
4494 qed_dump_num_param(dump_buf + num_failing_rules_offset,
4495 dump, "num_rules", num_failing_rules);
4496
4497 /* Dump last section */
4498 offset += qed_dump_last_section(dump_buf, offset, dump);
4499
4500 return offset;
4501 }
4502
4503 /* Finds the meta data image in NVRAM */
4504 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4505 struct qed_ptt *p_ptt,
4506 u32 image_type,
4507 u32 *nvram_offset_bytes,
4508 u32 *nvram_size_bytes)
4509 {
4510 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4511 struct mcp_file_att file_att;
4512 int nvm_result;
4513
4514 /* Call NVRAM get file command */
4515 nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4516 p_ptt,
4517 DRV_MSG_CODE_NVM_GET_FILE_ATT,
4518 image_type,
4519 &ret_mcp_resp,
4520 &ret_mcp_param,
4521 &ret_txn_size, (u32 *)&file_att);
4522
4523 /* Check response */
4524 if (nvm_result ||
4525 (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4526 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4527
4528 /* Update return values */
4529 *nvram_offset_bytes = file_att.nvm_start_addr;
4530 *nvram_size_bytes = file_att.len;
4531
4532 DP_VERBOSE(p_hwfn,
4533 QED_MSG_DEBUG,
4534 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4535 image_type, *nvram_offset_bytes, *nvram_size_bytes);
4536
4537 /* Check alignment */
4538 if (*nvram_size_bytes & 0x3)
4539 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4540
4541 return DBG_STATUS_OK;
4542 }
4543
4544 /* Reads data from NVRAM */
4545 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4546 struct qed_ptt *p_ptt,
4547 u32 nvram_offset_bytes,
4548 u32 nvram_size_bytes, u32 *ret_buf)
4549 {
4550 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4551 s32 bytes_left = nvram_size_bytes;
4552 u32 read_offset = 0;
4553
4554 DP_VERBOSE(p_hwfn,
4555 QED_MSG_DEBUG,
4556 "nvram_read: reading image of size %d bytes from NVRAM\n",
4557 nvram_size_bytes);
4558
4559 do {
4560 bytes_to_copy =
4561 (bytes_left >
4562 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4563
4564 /* Call NVRAM read command */
4565 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4566 DRV_MSG_CODE_NVM_READ_NVRAM,
4567 (nvram_offset_bytes +
4568 read_offset) |
4569 (bytes_to_copy <<
4570 DRV_MB_PARAM_NVM_LEN_OFFSET),
4571 &ret_mcp_resp, &ret_mcp_param,
4572 &ret_read_size,
4573 (u32 *)((u8 *)ret_buf + read_offset)))
4574 return DBG_STATUS_NVRAM_READ_FAILED;
4575
4576 /* Check response */
4577 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4578 return DBG_STATUS_NVRAM_READ_FAILED;
4579
4580 /* Update read offset */
4581 read_offset += ret_read_size;
4582 bytes_left -= ret_read_size;
4583 } while (bytes_left > 0);
4584
4585 return DBG_STATUS_OK;
4586 }
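/* Note: NVRAM is read in chunks of at most MCP_DRV_NVM_BUF_LEN bytes per
 * mailbox command; the byte offset and the chunk length are packed into a
 * single mailbox parameter (length shifted by DRV_MB_PARAM_NVM_LEN_OFFSET),
 * and the loop advances by however many bytes the MFW actually returned.
 */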
4587
4588 /* Get info on the MCP Trace data in the scratchpad:
4589 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4590 * - trace_data_size (OUT): trace data size in bytes (without the header)
4591 */
4592 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4593 struct qed_ptt *p_ptt,
4594 u32 *trace_data_grc_addr,
4595 u32 *trace_data_size)
4596 {
4597 u32 spad_trace_offsize, signature;
4598
4599 /* Read trace section offsize structure from MCP scratchpad */
4600 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4601
4602 /* Extract trace section address from offsize (in scratchpad) */
4603 *trace_data_grc_addr =
4604 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4605
4606 /* Read signature from MCP trace section */
4607 signature = qed_rd(p_hwfn, p_ptt,
4608 *trace_data_grc_addr +
4609 offsetof(struct mcp_trace, signature));
4610
4611 if (signature != MFW_TRACE_SIGNATURE)
4612 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4613
4614 /* Read trace size from MCP trace section */
4615 *trace_data_size = qed_rd(p_hwfn,
4616 p_ptt,
4617 *trace_data_grc_addr +
4618 offsetof(struct mcp_trace, size));
4619
4620 return DBG_STATUS_OK;
4621 }
4622
4623 /* Reads MCP trace meta data image from NVRAM
4624 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4625 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4626 * loaded from file).
4627 * - trace_meta_size (OUT): size in bytes of the trace meta data.
4628 */
4629 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4630 struct qed_ptt *p_ptt,
4631 u32 trace_data_size_bytes,
4632 u32 *running_bundle_id,
4633 u32 *trace_meta_offset,
4634 u32 *trace_meta_size)
4635 {
4636 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4637
4638 /* Read MCP trace section offsize structure from MCP scratchpad */
4639 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4640
4641 /* Find running bundle ID */
4642 running_mfw_addr =
4643 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4644 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4645 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4646 if (*running_bundle_id > 1)
4647 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4648
4649 /* Find image in NVRAM */
4650 nvram_image_type =
4651 (*running_bundle_id ==
4652 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4653 return qed_find_nvram_image(p_hwfn,
4654 p_ptt,
4655 nvram_image_type,
4656 trace_meta_offset, trace_meta_size);
4657 }
4658
4659 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4660 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4661 struct qed_ptt *p_ptt,
4662 u32 nvram_offset_in_bytes,
4663 u32 size_in_bytes, u32 *buf)
4664 {
4665 u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4666 enum dbg_status status;
4667 u32 signature;
4668
4669 /* Read meta data from NVRAM */
4670 status = qed_nvram_read(p_hwfn,
4671 p_ptt,
4672 nvram_offset_in_bytes, size_in_bytes, buf);
4673 if (status != DBG_STATUS_OK)
4674 return status;
4675
4676 /* Extract and check first signature */
4677 signature = qed_read_unaligned_dword(byte_buf);
4678 byte_buf += sizeof(signature);
4679 if (signature != NVM_MAGIC_VALUE)
4680 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4681
4682 /* Extract number of modules */
4683 modules_num = *(byte_buf++);
4684
4685 /* Skip all modules */
4686 for (i = 0; i < modules_num; i++) {
4687 module_len = *(byte_buf++);
4688 byte_buf += module_len;
4689 }
4690
4691 /* Extract and check second signature */
4692 signature = qed_read_unaligned_dword(byte_buf);
4693 byte_buf += sizeof(signature);
4694 if (signature != NVM_MAGIC_VALUE)
4695 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4696
4697 return DBG_STATUS_OK;
4698 }
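/* Note: the meta image read above is only validated, not parsed - the
 * expected layout is a signature dword, a module count byte, the
 * length-prefixed module records, and a second signature dword. Both
 * signatures must match NVM_MAGIC_VALUE for the image to be considered
 * valid.
 */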
4699
4700 /* Dump MCP Trace */
4701 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4702 struct qed_ptt *p_ptt,
4703 u32 *dump_buf,
4704 bool dump, u32 *num_dumped_dwords)
4705 {
4706 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4707 u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4708 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4709 enum dbg_status status;
4710 bool mcp_access;
4711 int halted = 0;
4712
4713 *num_dumped_dwords = 0;
4714
4715 mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4716
4717 /* Get trace data info */
4718 status = qed_mcp_trace_get_data_info(p_hwfn,
4719 p_ptt,
4720 &trace_data_grc_addr,
4721 &trace_data_size_bytes);
4722 if (status != DBG_STATUS_OK)
4723 return status;
4724
4725 /* Dump global params */
4726 offset += qed_dump_common_global_params(p_hwfn,
4727 p_ptt,
4728 dump_buf + offset, dump, 1);
4729 offset += qed_dump_str_param(dump_buf + offset,
4730 dump, "dump-type", "mcp-trace");
4731
4732 /* Halt MCP while reading from scratchpad so the read data will be
4733 * consistent. If the halt fails, the MCP trace is taken anyway, with a
4734 * small risk that it may be corrupt.
4735 */
4736 if (dump && mcp_access) {
4737 halted = !qed_mcp_halt(p_hwfn, p_ptt);
4738 if (!halted)
4739 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4740 }
4741
4742 /* Find trace data size */
4743 trace_data_size_dwords =
4744 DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4745 BYTES_IN_DWORD);
4746
4747 /* Dump trace data section header and param */
4748 offset += qed_dump_section_hdr(dump_buf + offset,
4749 dump, "mcp_trace_data", 1);
4750 offset += qed_dump_num_param(dump_buf + offset,
4751 dump, "size", trace_data_size_dwords);
4752
4753 /* Read trace data from scratchpad into dump buffer */
4754 offset += qed_grc_dump_addr_range(p_hwfn,
4755 p_ptt,
4756 dump_buf + offset,
4757 dump,
4758 BYTES_TO_DWORDS(trace_data_grc_addr),
4759 trace_data_size_dwords, false,
4760 SPLIT_TYPE_NONE, 0);
4761
4762 /* Resume MCP (only if halt succeeded) */
4763 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4764 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4765
4766 /* Dump trace meta section header */
4767 offset += qed_dump_section_hdr(dump_buf + offset,
4768 dump, "mcp_trace_meta", 1);
4769
4770 /* If MCP Trace meta size parameter was set, use it.
4771 * Otherwise, read trace meta.
4772 * trace_meta_size_bytes is dword-aligned.
4773 */
4774 trace_meta_size_bytes =
4775 qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4776 if ((!trace_meta_size_bytes || dump) && mcp_access) {
4777 status = qed_mcp_trace_get_meta_info(p_hwfn,
4778 p_ptt,
4779 trace_data_size_bytes,
4780 &running_bundle_id,
4781 &trace_meta_offset_bytes,
4782 &trace_meta_size_bytes);
4783 if (status == DBG_STATUS_OK)
4784 trace_meta_size_dwords =
4785 BYTES_TO_DWORDS(trace_meta_size_bytes);
4786 }
4787
4788 /* Dump trace meta size param */
4789 offset += qed_dump_num_param(dump_buf + offset,
4790 dump, "size", trace_meta_size_dwords);
4791
4792 /* Read trace meta image into dump buffer */
4793 if (dump && trace_meta_size_dwords)
4794 status = qed_mcp_trace_read_meta(p_hwfn,
4795 p_ptt,
4796 trace_meta_offset_bytes,
4797 trace_meta_size_bytes,
4798 dump_buf + offset);
4799 if (status == DBG_STATUS_OK)
4800 offset += trace_meta_size_dwords;
4801
4802 /* Dump last section */
4803 offset += qed_dump_last_section(dump_buf, offset, dump);
4804
4805 *num_dumped_dwords = offset;
4806
4807 /* If no mcp access, indicate that the dump doesn't contain the meta
4808 * data from NVRAM.
4809 */
4810 return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4811 }
4812
4813 /* Dump GRC FIFO */
4814 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4815 struct qed_ptt *p_ptt,
4816 u32 *dump_buf,
4817 bool dump, u32 *num_dumped_dwords)
4818 {
4819 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4820 bool fifo_has_data;
4821
4822 *num_dumped_dwords = 0;
4823
4824 /* Dump global params */
4825 offset += qed_dump_common_global_params(p_hwfn,
4826 p_ptt,
4827 dump_buf + offset, dump, 1);
4828 offset += qed_dump_str_param(dump_buf + offset,
4829 dump, "dump-type", "reg-fifo");
4830
4831 /* Dump fifo data section header and param. The size param is 0 for
4832 * now, and is overwritten after reading the FIFO.
4833 */
4834 offset += qed_dump_section_hdr(dump_buf + offset,
4835 dump, "reg_fifo_data", 1);
4836 size_param_offset = offset;
4837 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4838
4839 if (!dump) {
4840 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4841 * test how much data is available, except for reading it.
4842 */
4843 offset += REG_FIFO_DEPTH_DWORDS;
4844 goto out;
4845 }
4846
4847 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4848 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4849
4850 /* Pull available data from fifo. Use DMAE since this is widebus memory
4851 * and must be accessed atomically. Test for dwords_read not passing
4852 * buffer size since more entries could be added to the buffer as we are
4853 * emptying it.
4854 */
4855 addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4856 len = REG_FIFO_ELEMENT_DWORDS;
4857 for (dwords_read = 0;
4858 fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4859 dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4860 offset += qed_grc_dump_addr_range(p_hwfn,
4861 p_ptt,
4862 dump_buf + offset,
4863 true,
4864 addr,
4865 len,
4866 true, SPLIT_TYPE_NONE,
4867 0);
4868 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4869 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4870 }
4871
4872 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4873 dwords_read);
4874 out:
4875 /* Dump last section */
4876 offset += qed_dump_last_section(dump_buf, offset, dump);
4877
4878 *num_dumped_dwords = offset;
4879
4880 return DBG_STATUS_OK;
4881 }
4882
4883 /* Dump IGU FIFO */
4884 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4885 struct qed_ptt *p_ptt,
4886 u32 *dump_buf,
4887 bool dump, u32 *num_dumped_dwords)
4888 {
4889 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4890 bool fifo_has_data;
4891
4892 *num_dumped_dwords = 0;
4893
4894 /* Dump global params */
4895 offset += qed_dump_common_global_params(p_hwfn,
4896 p_ptt,
4897 dump_buf + offset, dump, 1);
4898 offset += qed_dump_str_param(dump_buf + offset,
4899 dump, "dump-type", "igu-fifo");
4900
4901 /* Dump fifo data section header and param. The size param is 0 for
4902 * now, and is overwritten after reading the FIFO.
4903 */
4904 offset += qed_dump_section_hdr(dump_buf + offset,
4905 dump, "igu_fifo_data", 1);
4906 size_param_offset = offset;
4907 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4908
4909 if (!dump) {
4910 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4911 * test how much data is available, except for reading it.
4912 */
4913 offset += IGU_FIFO_DEPTH_DWORDS;
4914 goto out;
4915 }
4916
4917 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4918 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4919
4920 /* Pull available data from fifo. Use DMAE since this is widebus memory
4921 * and must be accessed atomically. Test for dwords_read not passing
4922 * buffer size since more entries could be added to the buffer as we are
4923 * emptying it.
4924 */
4925 addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4926 len = IGU_FIFO_ELEMENT_DWORDS;
4927 for (dwords_read = 0;
4928 fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4929 dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4930 offset += qed_grc_dump_addr_range(p_hwfn,
4931 p_ptt,
4932 dump_buf + offset,
4933 true,
4934 addr,
4935 len,
4936 true, SPLIT_TYPE_NONE,
4937 0);
4938 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4939 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4940 }
4941
4942 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4943 dwords_read);
4944 out:
4945 /* Dump last section */
4946 offset += qed_dump_last_section(dump_buf, offset, dump);
4947
4948 *num_dumped_dwords = offset;
4949
4950 return DBG_STATUS_OK;
4951 }
4952
4953 /* Protection Override dump */
4954 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4955 struct qed_ptt *p_ptt,
4956 u32 *dump_buf,
4957 bool dump,
4958 u32 *num_dumped_dwords)
4959 {
4960 u32 size_param_offset, override_window_dwords, offset = 0, addr;
4961
4962 *num_dumped_dwords = 0;
4963
4964 /* Dump global params */
4965 offset += qed_dump_common_global_params(p_hwfn,
4966 p_ptt,
4967 dump_buf + offset, dump, 1);
4968 offset += qed_dump_str_param(dump_buf + offset,
4969 dump, "dump-type", "protection-override");
4970
4971 /* Dump data section header and param. The size param is 0 for now,
4972 * and is overwritten after reading the data.
4973 */
4974 offset += qed_dump_section_hdr(dump_buf + offset,
4975 dump, "protection_override_data", 1);
4976 size_param_offset = offset;
4977 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4978
4979 if (!dump) {
4980 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4981 goto out;
4982 }
4983
4984 /* Add override window info to buffer */
4985 override_window_dwords =
4986 qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4987 PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4988 addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4989 offset += qed_grc_dump_addr_range(p_hwfn,
4990 p_ptt,
4991 dump_buf + offset,
4992 true,
4993 addr,
4994 override_window_dwords,
4995 true, SPLIT_TYPE_NONE, 0);
4996 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4997 override_window_dwords);
4998 out:
4999 /* Dump last section */
5000 offset += qed_dump_last_section(dump_buf, offset, dump);
5001
5002 *num_dumped_dwords = offset;
5003
5004 return DBG_STATUS_OK;
5005 }
5006
5007 /* Performs FW Asserts Dump to the specified buffer.
5008 * Returns the dumped size in dwords.
5009 */
5010 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5011 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
5012 {
5013 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5014 struct fw_asserts_ram_section *asserts;
5015 char storm_letter_str[2] = "?";
5016 struct fw_info fw_info;
5017 u32 offset = 0;
5018 u8 storm_id;
5019
5020 /* Dump global params */
5021 offset += qed_dump_common_global_params(p_hwfn,
5022 p_ptt,
5023 dump_buf + offset, dump, 1);
5024 offset += qed_dump_str_param(dump_buf + offset,
5025 dump, "dump-type", "fw-asserts");
5026
5027 /* Find Storm dump size */
5028 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5029 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
5030 struct storm_defs *storm = &s_storm_defs[storm_id];
5031 u32 last_list_idx, addr;
5032
5033 if (dev_data->block_in_reset[storm->block_id])
5034 continue;
5035
5036 /* Read FW info for the current Storm */
5037 qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
5038
5039 asserts = &fw_info.fw_asserts_section;
5040
5041 /* Dump FW Asserts section header and params */
5042 storm_letter_str[0] = storm->letter;
5043 offset += qed_dump_section_hdr(dump_buf + offset,
5044 dump, "fw_asserts", 2);
5045 offset += qed_dump_str_param(dump_buf + offset,
5046 dump, "storm", storm_letter_str);
5047 offset += qed_dump_num_param(dump_buf + offset,
5048 dump,
5049 "size",
5050 asserts->list_element_dword_size);
5051
5052 /* Read and dump FW Asserts data */
5053 if (!dump) {
5054 offset += asserts->list_element_dword_size;
5055 continue;
5056 }
5057
5058 fw_asserts_section_addr = storm->sem_fast_mem_addr +
5059 SEM_FAST_REG_INT_RAM +
5060 RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5061 next_list_idx_addr = fw_asserts_section_addr +
5062 DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5063 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
5064 last_list_idx = (next_list_idx > 0 ?
5065 next_list_idx :
5066 asserts->list_num_elements) - 1;
5067 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5068 asserts->list_dword_offset +
5069 last_list_idx * asserts->list_element_dword_size;
5070 offset +=
5071 qed_grc_dump_addr_range(p_hwfn, p_ptt,
5072 dump_buf + offset,
5073 dump, addr,
5074 asserts->list_element_dword_size,
5075 false, SPLIT_TYPE_NONE, 0);
5076 }
5077
5078 /* Dump last section */
5079 offset += qed_dump_last_section(dump_buf, offset, dump);
5080
5081 return offset;
5082 }
5083
5084 /***************************** Public Functions *******************************/
5085
5086 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5087 {
5088 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5089 u8 buf_id;
5090
5091 /* convert binary data to debug arrays */
5092 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5093 s_dbg_arrays[buf_id].ptr =
5094 (u32 *)(bin_ptr + buf_array[buf_id].offset);
5095 s_dbg_arrays[buf_id].size_in_dwords =
5096 BYTES_TO_DWORDS(buf_array[buf_id].length);
5097 }
5098
5099 return DBG_STATUS_OK;
5100 }
5101
5102 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5103 struct qed_ptt *p_ptt, struct fw_info *fw_info)
5104 {
5105 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5106 u8 storm_id;
5107
5108 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5109 struct storm_defs *storm = &s_storm_defs[storm_id];
5110
5111 /* Skip Storm if it's in reset */
5112 if (dev_data->block_in_reset[storm->block_id])
5113 continue;
5114
5115 /* Read FW info for the current Storm */
5116 qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
5117
5118 return true;
5119 }
5120
5121 return false;
5122 }
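/* Note: the FW info is taken from the first Storm whose block is not in
 * reset; if every Storm is in reset, no FW info is available and false is
 * returned.
 */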
5123
5124 /* Assign default GRC param values */
5125 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5126 {
5127 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5128 u32 i;
5129
5130 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5131 if (!s_grc_param_defs[i].is_persistent)
5132 dev_data->grc.param_val[i] =
5133 s_grc_param_defs[i].default_val[dev_data->chip_id];
5134 }
5135
5136 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5137 struct qed_ptt *p_ptt,
5138 u32 *buf_size)
5139 {
5140 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5141
5142 *buf_size = 0;
5143
5144 if (status != DBG_STATUS_OK)
5145 return status;
5146
5147 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5148 !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5149 !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5150 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5151 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5152 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5153
5154 return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5155 }
5156
5157 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5158 struct qed_ptt *p_ptt,
5159 u32 *dump_buf,
5160 u32 buf_size_in_dwords,
5161 u32 *num_dumped_dwords)
5162 {
5163 u32 needed_buf_size_in_dwords;
5164 enum dbg_status status;
5165
5166 *num_dumped_dwords = 0;
5167
5168 status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5169 p_ptt,
5170 &needed_buf_size_in_dwords);
5171 if (status != DBG_STATUS_OK)
5172 return status;
5173
5174 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5175 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5176
5177 /* GRC Dump */
5178 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5179
5180 /* Revert GRC params to their default */
5181 qed_dbg_grc_set_params_default(p_hwfn);
5182
5183 return status;
5184 }
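/* Illustrative usage sketch (not part of the driver): a caller is expected
 * to query the required size first, allocate a dword buffer, and then
 * perform the dump. Error handling is abbreviated, and p_ptt is assumed to
 * have been acquired beforehand (typically via qed_ptt_acquire()).
 *
 *	u32 size_dwords, dumped_dwords;
 *	u32 *buf;
 *
 *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
 *					  &size_dwords) != DBG_STATUS_OK)
 *		return;
 *
 *	buf = vzalloc(size_dwords * sizeof(u32));
 *	if (!buf)
 *		return;
 *
 *	if (qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords,
 *			     &dumped_dwords) == DBG_STATUS_OK)
 *		save_dump(buf, dumped_dwords);	(hypothetical consumer)
 *
 *	vfree(buf);
 *
 * The same size-then-dump pattern applies to the idle-check, MCP trace,
 * FIFO and protection-override entry points below.
 */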
5185
5186 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5187 struct qed_ptt *p_ptt,
5188 u32 *buf_size)
5189 {
5190 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5191 struct idle_chk_data *idle_chk;
5192 enum dbg_status status;
5193
5194 idle_chk = &dev_data->idle_chk;
5195 *buf_size = 0;
5196
5197 status = qed_dbg_dev_init(p_hwfn, p_ptt);
5198 if (status != DBG_STATUS_OK)
5199 return status;
5200
5201 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5202 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5203 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5204 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5205 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5206
5207 if (!idle_chk->buf_size_set) {
5208 idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5209 p_ptt, NULL, false);
5210 idle_chk->buf_size_set = true;
5211 }
5212
5213 *buf_size = idle_chk->buf_size;
5214
5215 return DBG_STATUS_OK;
5216 }
5217
5218 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5219 struct qed_ptt *p_ptt,
5220 u32 *dump_buf,
5221 u32 buf_size_in_dwords,
5222 u32 *num_dumped_dwords)
5223 {
5224 u32 needed_buf_size_in_dwords;
5225 enum dbg_status status;
5226
5227 *num_dumped_dwords = 0;
5228
5229 status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5230 p_ptt,
5231 &needed_buf_size_in_dwords);
5232 if (status != DBG_STATUS_OK)
5233 return status;
5234
5235 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5236 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5237
5238 /* Update reset state */
5239 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5240
5241 /* Idle Check Dump */
5242 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5243
5244 /* Revert GRC params to their default */
5245 qed_dbg_grc_set_params_default(p_hwfn);
5246
5247 return DBG_STATUS_OK;
5248 }
5249
5250 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5251 struct qed_ptt *p_ptt,
5252 u32 *buf_size)
5253 {
5254 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5255
5256 *buf_size = 0;
5257
5258 if (status != DBG_STATUS_OK)
5259 return status;
5260
5261 return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5262 }
5263
5264 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5265 struct qed_ptt *p_ptt,
5266 u32 *dump_buf,
5267 u32 buf_size_in_dwords,
5268 u32 *num_dumped_dwords)
5269 {
5270 u32 needed_buf_size_in_dwords;
5271 enum dbg_status status;
5272
5273 status =
5274 qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5275 p_ptt,
5276 &needed_buf_size_in_dwords);
5277 if (status != DBG_STATUS_OK && status !=
5278 DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5279 return status;
5280
5281 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5282 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5283
5284 /* Update reset state */
5285 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5286
5287 /* Perform dump */
5288 status = qed_mcp_trace_dump(p_hwfn,
5289 p_ptt, dump_buf, true, num_dumped_dwords);
5290
5291 /* Revert GRC params to their default */
5292 qed_dbg_grc_set_params_default(p_hwfn);
5293
5294 return status;
5295 }
5296
5297 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5298 struct qed_ptt *p_ptt,
5299 u32 *buf_size)
5300 {
5301 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5302
5303 *buf_size = 0;
5304
5305 if (status != DBG_STATUS_OK)
5306 return status;
5307
5308 return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5309 }
5310
5311 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5312 struct qed_ptt *p_ptt,
5313 u32 *dump_buf,
5314 u32 buf_size_in_dwords,
5315 u32 *num_dumped_dwords)
5316 {
5317 u32 needed_buf_size_in_dwords;
5318 enum dbg_status status;
5319
5320 *num_dumped_dwords = 0;
5321
5322 status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5323 p_ptt,
5324 &needed_buf_size_in_dwords);
5325 if (status != DBG_STATUS_OK)
5326 return status;
5327
5328 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5329 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5330
5331 /* Update reset state */
5332 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5333
5334 status = qed_reg_fifo_dump(p_hwfn,
5335 p_ptt, dump_buf, true, num_dumped_dwords);
5336
5337 /* Revert GRC params to their default */
5338 qed_dbg_grc_set_params_default(p_hwfn);
5339
5340 return status;
5341 }
5342
5343 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5344 struct qed_ptt *p_ptt,
5345 u32 *buf_size)
5346 {
5347 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5348
5349 *buf_size = 0;
5350
5351 if (status != DBG_STATUS_OK)
5352 return status;
5353
5354 return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5355 }
5356
5357 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5358 struct qed_ptt *p_ptt,
5359 u32 *dump_buf,
5360 u32 buf_size_in_dwords,
5361 u32 *num_dumped_dwords)
5362 {
5363 u32 needed_buf_size_in_dwords;
5364 enum dbg_status status;
5365
5366 *num_dumped_dwords = 0;
5367
5368 status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5369 p_ptt,
5370 &needed_buf_size_in_dwords);
5371 if (status != DBG_STATUS_OK)
5372 return status;
5373
5374 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5375 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5376
5377 /* Update reset state */
5378 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5379
5380 status = qed_igu_fifo_dump(p_hwfn,
5381 p_ptt, dump_buf, true, num_dumped_dwords);
5382 /* Revert GRC params to their default */
5383 qed_dbg_grc_set_params_default(p_hwfn);
5384
5385 return status;
5386 }
5387
5388 enum dbg_status
5389 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5390 struct qed_ptt *p_ptt,
5391 u32 *buf_size)
5392 {
5393 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5394
5395 *buf_size = 0;
5396
5397 if (status != DBG_STATUS_OK)
5398 return status;
5399
5400 return qed_protection_override_dump(p_hwfn,
5401 p_ptt, NULL, false, buf_size);
5402 }
5403
5404 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5405 struct qed_ptt *p_ptt,
5406 u32 *dump_buf,
5407 u32 buf_size_in_dwords,
5408 u32 *num_dumped_dwords)
5409 {
5410 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5411 enum dbg_status status;
5412
5413 *num_dumped_dwords = 0;
5414
5415 status =
5416 qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5417 p_ptt,
5418 p_size);
5419 if (status != DBG_STATUS_OK)
5420 return status;
5421
5422 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5423 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5424
5425 /* Update reset state */
5426 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5427
5428 status = qed_protection_override_dump(p_hwfn,
5429 p_ptt,
5430 dump_buf,
5431 true, num_dumped_dwords);
5432
5433 /* Revert GRC params to their default */
5434 qed_dbg_grc_set_params_default(p_hwfn);
5435
5436 return status;
5437 }
5438
5439 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5440 struct qed_ptt *p_ptt,
5441 u32 *buf_size)
5442 {
5443 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5444
5445 *buf_size = 0;
5446
5447 if (status != DBG_STATUS_OK)
5448 return status;
5449
5450 /* Update reset state */
5451 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5452
5453 *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5454
5455 return DBG_STATUS_OK;
5456 }
5457
5458 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5459 struct qed_ptt *p_ptt,
5460 u32 *dump_buf,
5461 u32 buf_size_in_dwords,
5462 u32 *num_dumped_dwords)
5463 {
5464 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5465 enum dbg_status status;
5466
5467 *num_dumped_dwords = 0;
5468
5469 status =
5470 qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5471 p_ptt,
5472 p_size);
5473 if (status != DBG_STATUS_OK)
5474 return status;
5475
5476 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5477 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5478
5479 *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5480
5481 /* Revert GRC params to their default */
5482 qed_dbg_grc_set_params_default(p_hwfn);
5483
5484 return DBG_STATUS_OK;
5485 }
5486
5487 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5488 struct qed_ptt *p_ptt,
5489 enum block_id block_id,
5490 enum dbg_attn_type attn_type,
5491 bool clear_status,
5492 struct dbg_attn_block_result *results)
5493 {
5494 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5495 u8 reg_idx, num_attn_regs, num_result_regs = 0;
5496 const struct dbg_attn_reg *attn_reg_arr;
5497
5498 if (status != DBG_STATUS_OK)
5499 return status;
5500
5501 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5502 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5503 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5504 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5505
5506 attn_reg_arr = qed_get_block_attn_regs(block_id,
5507 attn_type, &num_attn_regs);
5508
5509 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5510 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5511 struct dbg_attn_reg_result *reg_result;
5512 u32 sts_addr, sts_val;
5513 u16 modes_buf_offset;
5514 bool eval_mode;
5515
5516 /* Check mode */
5517 eval_mode = GET_FIELD(reg_data->mode.data,
5518 DBG_MODE_HDR_EVAL_MODE) > 0;
5519 modes_buf_offset = GET_FIELD(reg_data->mode.data,
5520 DBG_MODE_HDR_MODES_BUF_OFFSET);
5521 if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5522 continue;
5523
5524 /* Mode match - read attention status register */
5525 sts_addr = DWORDS_TO_BYTES(clear_status ?
5526 reg_data->sts_clr_address :
5527 GET_FIELD(reg_data->data,
5528 DBG_ATTN_REG_STS_ADDRESS));
5529 sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5530 if (!sts_val)
5531 continue;
5532
5533 /* Non-zero attention status - add to results */
5534 reg_result = &results->reg_results[num_result_regs];
5535 SET_FIELD(reg_result->data,
5536 DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5537 SET_FIELD(reg_result->data,
5538 DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5539 GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5540 reg_result->block_attn_offset = reg_data->block_attn_offset;
5541 reg_result->sts_val = sts_val;
5542 reg_result->mask_val = qed_rd(p_hwfn,
5543 p_ptt,
5544 DWORDS_TO_BYTES
5545 (reg_data->mask_address));
5546 num_result_regs++;
5547 }
5548
5549 results->block_id = (u8)block_id;
5550 results->names_offset =
5551 qed_get_block_attn_data(block_id, attn_type)->names_offset;
5552 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5553 SET_FIELD(results->data,
5554 DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5555
5556 return DBG_STATUS_OK;
5557 }
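
/* Usage sketch (illustrative only; ATTN_TYPE_INTERRUPT is assumed to be the
 * attention-type enumerator from qed_hsi.h, and the caller is assumed to hold
 * an acquired PTT). Collects the non-zero interrupt status registers of the
 * GRC block without clearing them:
 *
 *	struct dbg_attn_block_result res;
 *
 *	if (qed_dbg_read_attn(p_hwfn, p_ptt, BLOCK_GRC, ATTN_TYPE_INTERRUPT,
 *			      false, &res) == DBG_STATUS_OK) {
 *		u8 num_regs = GET_FIELD(res.data,
 *					DBG_ATTN_BLOCK_RESULT_NUM_REGS);
 *		... iterate over res.reg_results[0 .. num_regs - 1] ...
 *	}
 */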
5558
5559 /******************************* Data Types **********************************/
5560
5561 struct block_info {
5562 const char *name;
5563 enum block_id id;
5564 };
5565
5566 struct mcp_trace_format {
5567 u32 data;
5568 #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
5569 #define MCP_TRACE_FORMAT_MODULE_SHIFT 0
5570 #define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
5571 #define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
5572 #define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
5573 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
5574 #define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
5575 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
5576 #define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
5577 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
5578 #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
5579 #define MCP_TRACE_FORMAT_LEN_SHIFT 24
5580
5581 char *format_str;
5582 };
5583
5584 /* Meta data structure, generated by a Perl script during the MFW build.
5585 * Therefore, the structs mcp_trace_meta and mcp_trace_format are duplicated
5586 * in that Perl script.
5587 */
5588 struct mcp_trace_meta {
5589 u32 modules_num;
5590 char **modules;
5591 u32 formats_num;
5592 struct mcp_trace_format *formats;
5593 };
5594
5595 /* REG fifo element */
5596 struct reg_fifo_element {
5597 u64 data;
5598 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
5599 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
5600 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
5601 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
5602 #define REG_FIFO_ELEMENT_PF_SHIFT 24
5603 #define REG_FIFO_ELEMENT_PF_MASK 0xf
5604 #define REG_FIFO_ELEMENT_VF_SHIFT 28
5605 #define REG_FIFO_ELEMENT_VF_MASK 0xff
5606 #define REG_FIFO_ELEMENT_PORT_SHIFT 36
5607 #define REG_FIFO_ELEMENT_PORT_MASK 0x3
5608 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
5609 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
5610 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
5611 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
5612 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43
5613 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf
5614 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47
5615 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
5616 };
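
/* Decoding sketch (illustrative only): fields are extracted from the 64-bit
 * element with GET_FIELD, and the 23-bit ADDRESS field is a dword address, so
 * it is multiplied by REG_FIFO_ELEMENT_ADDR_FACTOR (4, defined further below)
 * to obtain the GRC byte address; a VF field equal to
 * REG_FIFO_ELEMENT_IS_PF_VF_VAL means the access came from a PF:
 *
 *	u32 grc_byte_addr = GET_FIELD(elem.data, REG_FIFO_ELEMENT_ADDRESS) *
 *			    REG_FIFO_ELEMENT_ADDR_FACTOR;
 *	bool is_pf = (GET_FIELD(elem.data, REG_FIFO_ELEMENT_VF) ==
 *		      REG_FIFO_ELEMENT_IS_PF_VF_VAL);
 */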
5617
5618 /* IGU fifo element */
5619 struct igu_fifo_element {
5620 u32 dword0;
5621 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
5622 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
5623 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
5624 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
5625 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
5626 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
5627 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
5628 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
5629 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
5630 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
5631 u32 dword1;
5632 u32 dword2;
5633 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
5634 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
5635 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
5636 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
5637 u32 reserved;
5638 };
5639
5640 struct igu_fifo_wr_data {
5641 u32 data;
5642 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
5643 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
5644 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
5645 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
5646 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
5647 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
5648 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
5649 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
5650 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
5651 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
5652 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
5653 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
5654 };
5655
5656 struct igu_fifo_cleanup_wr_data {
5657 u32 data;
5658 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
5659 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
5660 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
5661 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
5662 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
5663 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
5664 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
5665 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
5666 };
5667
5668 /* Protection override element */
5669 struct protection_override_element {
5670 u64 data;
5671 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
5672 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
5673 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
5674 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
5675 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
5676 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
5677 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
5678 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
5679 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
5680 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
5681 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
5682 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
5683 };
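
/* Decoding sketch (illustrative only): as with the reg FIFO element above, the
 * ADDRESS field is presumably a dword address and is scaled by
 * PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR (defined further below) when
 * translated into a GRC byte address:
 *
 *	u32 win_byte_addr =
 *		GET_FIELD(elem.data, PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
 *		PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
 */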
5684
5685 enum igu_fifo_sources {
5686 IGU_SRC_PXP0,
5687 IGU_SRC_PXP1,
5688 IGU_SRC_PXP2,
5689 IGU_SRC_PXP3,
5690 IGU_SRC_PXP4,
5691 IGU_SRC_PXP5,
5692 IGU_SRC_PXP6,
5693 IGU_SRC_PXP7,
5694 IGU_SRC_CAU,
5695 IGU_SRC_ATTN,
5696 IGU_SRC_GRC
5697 };
5698
5699 enum igu_fifo_addr_types {
5700 IGU_ADDR_TYPE_MSIX_MEM,
5701 IGU_ADDR_TYPE_WRITE_PBA,
5702 IGU_ADDR_TYPE_WRITE_INT_ACK,
5703 IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5704 IGU_ADDR_TYPE_READ_INT,
5705 IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5706 IGU_ADDR_TYPE_RESERVED
5707 };
5708
5709 struct igu_fifo_addr_data {
5710 u16 start_addr;
5711 u16 end_addr;
5712 char *desc;
5713 char *vf_desc;
5714 enum igu_fifo_addr_types type;
5715 };
5716
5717 /******************************** Constants **********************************/
5718
5719 #define MAX_MSG_LEN 1024
5720
5721 #define MCP_TRACE_MAX_MODULE_LEN 8
5722 #define MCP_TRACE_FORMAT_MAX_PARAMS 3
5723 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5724 (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5725
5726 #define REG_FIFO_ELEMENT_ADDR_FACTOR 4
5727 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
5728
5729 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
5730
5731 /***************************** Constant Arrays *******************************/
5732
5733 struct user_dbg_array {
5734 const u32 *ptr;
5735 u32 size_in_dwords;
5736 };
5737
5738 /* Debug arrays */
5739 static struct user_dbg_array
5740 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5741
5742 /* Block names array */
5743 static struct block_info s_block_info_arr[] = {
5744 {"grc", BLOCK_GRC},
5745 {"miscs", BLOCK_MISCS},
5746 {"misc", BLOCK_MISC},
5747 {"dbu", BLOCK_DBU},
5748 {"pglue_b", BLOCK_PGLUE_B},
5749 {"cnig", BLOCK_CNIG},
5750 {"cpmu", BLOCK_CPMU},
5751 {"ncsi", BLOCK_NCSI},
5752 {"opte", BLOCK_OPTE},
5753 {"bmb", BLOCK_BMB},
5754 {"pcie", BLOCK_PCIE},
5755 {"mcp", BLOCK_MCP},
5756 {"mcp2", BLOCK_MCP2},
5757 {"pswhst", BLOCK_PSWHST},
5758 {"pswhst2", BLOCK_PSWHST2},
5759 {"pswrd", BLOCK_PSWRD},
5760 {"pswrd2", BLOCK_PSWRD2},
5761 {"pswwr", BLOCK_PSWWR},
5762 {"pswwr2", BLOCK_PSWWR2},
5763 {"pswrq", BLOCK_PSWRQ},
5764 {"pswrq2", BLOCK_PSWRQ2},
5765 {"pglcs", BLOCK_PGLCS},
5766 {"ptu", BLOCK_PTU},
5767 {"dmae", BLOCK_DMAE},
5768 {"tcm", BLOCK_TCM},
5769 {"mcm", BLOCK_MCM},
5770 {"ucm", BLOCK_UCM},
5771 {"xcm", BLOCK_XCM},
5772 {"ycm", BLOCK_YCM},
5773 {"pcm", BLOCK_PCM},
5774 {"qm", BLOCK_QM},
5775 {"tm", BLOCK_TM},
5776 {"dorq", BLOCK_DORQ},
5777 {"brb", BLOCK_BRB},
5778 {"src", BLOCK_SRC},
5779 {"prs", BLOCK_PRS},
5780 {"tsdm", BLOCK_TSDM},
5781 {"msdm", BLOCK_MSDM},
5782 {"usdm", BLOCK_USDM},
5783 {"xsdm", BLOCK_XSDM},
5784 {"ysdm", BLOCK_YSDM},
5785 {"psdm", BLOCK_PSDM},
5786 {"tsem", BLOCK_TSEM},
5787 {"msem", BLOCK_MSEM},
5788 {"usem", BLOCK_USEM},
5789 {"xsem", BLOCK_XSEM},
5790 {"ysem", BLOCK_YSEM},
5791 {"psem", BLOCK_PSEM},
5792 {"rss", BLOCK_RSS},
5793 {"tmld", BLOCK_TMLD},
5794 {"muld", BLOCK_MULD},
5795 {"yuld", BLOCK_YULD},
5796 {"xyld", BLOCK_XYLD},
5797 {"ptld", BLOCK_PTLD},
5798 {"ypld", BLOCK_YPLD},
5799 {"prm", BLOCK_PRM},
5800 {"pbf_pb1", BLOCK_PBF_PB1},
5801 {"pbf_pb2", BLOCK_PBF_PB2},
5802 {"rpb", BLOCK_RPB},
5803 {"btb", BLOCK_BTB},
5804 {"pbf", BLOCK_PBF},
5805 {"rdif", BLOCK_RDIF},
5806 {"tdif", BLOCK_TDIF},
5807 {"cdu", BLOCK_CDU},
5808 {"ccfc", BLOCK_CCFC},
5809 {"tcfc", BLOCK_TCFC},
5810 {"igu", BLOCK_IGU},
5811 {"cau", BLOCK_CAU},
5812 {"rgfs", BLOCK_RGFS},
5813 {"rgsrc", BLOCK_RGSRC},
5814 {"tgfs", BLOCK_TGFS},
5815 {"tgsrc", BLOCK_TGSRC},
5816 {"umac", BLOCK_UMAC},
5817 {"xmac", BLOCK_XMAC},
5818 {"dbg", BLOCK_DBG},
5819 {"nig", BLOCK_NIG},
5820 {"wol", BLOCK_WOL},
5821 {"bmbn", BLOCK_BMBN},
5822 {"ipc", BLOCK_IPC},
5823 {"nwm", BLOCK_NWM},
5824 {"nws", BLOCK_NWS},
5825 {"ms", BLOCK_MS},
5826 {"phy_pcie", BLOCK_PHY_PCIE},
5827 {"led", BLOCK_LED},
5828 {"avs_wrap", BLOCK_AVS_WRAP},
5829 {"pxpreqbus", BLOCK_PXPREQBUS},
5830 {"misc_aeu", BLOCK_MISC_AEU},
5831 {"bar0_map", BLOCK_BAR0_MAP}
5832 };
5833
5834 /* Status string array */
5835 static const char * const s_status_str[] = {
5836 /* DBG_STATUS_OK */
5837 "Operation completed successfully",
5838
5839 /* DBG_STATUS_APP_VERSION_NOT_SET */
5840 "Debug application version wasn't set",
5841
5842 /* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5843 "Unsupported debug application version",
5844
5845 /* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5846 "The debug block wasn't reset since the last recording",
5847
5848 /* DBG_STATUS_INVALID_ARGS */
5849 "Invalid arguments",
5850
5851 /* DBG_STATUS_OUTPUT_ALREADY_SET */
5852 "The debug output was already set",
5853
5854 /* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5855 "Invalid PCI buffer size",
5856
5857 /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5858 "PCI buffer allocation failed",
5859
5860 /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5861 "A PCI buffer wasn't allocated",
5862
5863 /* DBG_STATUS_TOO_MANY_INPUTS */
5864 "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
5865
5866 /* DBG_STATUS_INPUT_OVERLAP */
5867 "Overlapping debug bus inputs",
5868
5869 /* DBG_STATUS_HW_ONLY_RECORDING */
5870 "Cannot record Storm data since the entire recording cycle is used by HW",
5871
5872 /* DBG_STATUS_STORM_ALREADY_ENABLED */
5873 "The Storm was already enabled",
5874
5875 /* DBG_STATUS_STORM_NOT_ENABLED */
5876 "The specified Storm wasn't enabled",
5877
5878 /* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5879 "The block was already enabled",
5880
5881 /* DBG_STATUS_BLOCK_NOT_ENABLED */
5882 "The specified block wasn't enabled",
5883
5884 /* DBG_STATUS_NO_INPUT_ENABLED */
5885 "No input was enabled for recording",
5886
5887 /* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5888 "Filters and triggers are not allowed when recording in 64b units",
5889
5890 /* DBG_STATUS_FILTER_ALREADY_ENABLED */
5891 "The filter was already enabled",
5892
5893 /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5894 "The trigger was already enabled",
5895
5896 /* DBG_STATUS_TRIGGER_NOT_ENABLED */
5897 "The trigger wasn't enabled",
5898
5899 /* DBG_STATUS_CANT_ADD_CONSTRAINT */
5900 "A constraint can be added only after a filter was enabled or a trigger state was added",
5901
5902 /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5903 "Cannot add more than 3 trigger states",
5904
5905 /* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5906 "Cannot add more than 4 constraints per filter or trigger state",
5907
5908 /* DBG_STATUS_RECORDING_NOT_STARTED */
5909 "The recording wasn't started",
5910
5911 /* DBG_STATUS_DATA_DIDNT_TRIGGER */
5912 "A trigger was configured, but it didn't trigger",
5913
5914 /* DBG_STATUS_NO_DATA_RECORDED */
5915 "No data was recorded",
5916
5917 /* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5918 "Dump buffer is too small",
5919
5920 /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5921 "Dumped data is not aligned to chunks",
5922
5923 /* DBG_STATUS_UNKNOWN_CHIP */
5924 "Unknown chip",
5925
5926 /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5927 "Failed allocating virtual memory",
5928
5929 /* DBG_STATUS_BLOCK_IN_RESET */
5930 "The input block is in reset",
5931
5932 /* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5933 "Invalid MCP trace signature found in NVRAM",
5934
5935 /* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5936 "Invalid bundle ID found in NVRAM",
5937
5938 /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5939 "Failed getting NVRAM image",
5940
5941 /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5942 "NVRAM image is not dword-aligned",
5943
5944 /* DBG_STATUS_NVRAM_READ_FAILED */
5945 "Failed reading from NVRAM",
5946
5947 /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5948 "Idle check parsing failed",
5949
5950 /* DBG_STATUS_MCP_TRACE_BAD_DATA */
5951 "MCP Trace data is corrupt",
5952
5953 /* DBG_STATUS_MCP_TRACE_NO_META */
5954 "Dump doesn't contain meta data - it must be provided in image file",
5955
5956 /* DBG_STATUS_MCP_COULD_NOT_HALT */
5957 "Failed to halt MCP",
5958
5959 /* DBG_STATUS_MCP_COULD_NOT_RESUME */
5960 "Failed to resume MCP after halt",
5961
5962 /* DBG_STATUS_RESERVED2 */
5963 "Reserved debug status - shouldn't be returned",
5964
5965 /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5966 "Failed to empty SEMI sync FIFO",
5967
5968 /* DBG_STATUS_IGU_FIFO_BAD_DATA */
5969 "IGU FIFO data is corrupt",
5970
5971 /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5972 "MCP failed to mask parities",
5973
5974 /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5975 "FW Asserts parsing failed",
5976
5977 /* DBG_STATUS_REG_FIFO_BAD_DATA */
5978 "GRC FIFO data is corrupt",
5979
5980 /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5981 "Protection Override data is corrupt",
5982
5983 /* DBG_STATUS_DBG_ARRAY_NOT_SET */
5984 "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5985
5986 /* DBG_STATUS_FILTER_BUG */
5987 "Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5988
5989 /* DBG_STATUS_NON_MATCHING_LINES */
5990 "Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5991
5992 /* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5993 "The selected trigger dword offset wasn't enabled in the recorded HW block",
5994
5995 /* DBG_STATUS_DBG_BUS_IN_USE */
5996 "The debug bus is in use"
5997 };
5998
5999 /* Idle check severity names array */
6000 static const char * const s_idle_chk_severity_str[] = {
6001 "Error",
6002 "Error if no traffic",
6003 "Warning"
6004 };
6005
6006 /* MCP Trace level names array */
6007 static const char * const s_mcp_trace_level_str[] = {
6008 "ERROR",
6009 "TRACE",
6010 "DEBUG"
6011 };
6012
6013 /* Access type names array */
6014 static const char * const s_access_strs[] = {
6015 "read",
6016 "write"
6017 };
6018
6019 /* Privilege type names array */
6020 static const char * const s_privilege_strs[] = {
6021 "VF",
6022 "PDA",
6023 "HV",
6024 "UA"
6025 };
6026
6027 /* Protection type names array */
6028 static const char * const s_protection_strs[] = {
6029 "(default)",
6030 "(default)",
6031 "(default)",
6032 "(default)",
6033 "override VF",
6034 "override PDA",
6035 "override HV",
6036 "override UA"
6037 };
6038
6039 /* Master type names array */
6040 static const char * const s_master_strs[] = {
6041 "???",
6042 "pxp",
6043 "mcp",
6044 "msdm",
6045 "psdm",
6046 "ysdm",
6047 "usdm",
6048 "tsdm",
6049 "xsdm",
6050 "dbu",
6051 "dmae",
6052 "???",
6053 "???",
6054 "???",
6055 "???",
6056 "???"
6057 };
6058
6059 /* REG FIFO error messages array */
6060 static const char * const s_reg_fifo_error_strs[] = {
6061 "grc timeout",
6062 "address doesn't belong to any block",
6063 "reserved address in block or write to read-only address",
6064 "privilege/protection mismatch",
6065 "path isolation error"
6066 };
6067
6068 /* IGU FIFO sources array */
6069 static const char * const s_igu_fifo_source_strs[] = {
6070 "TSTORM",
6071 "MSTORM",
6072 "USTORM",
6073 "XSTORM",
6074 "YSTORM",
6075 "PSTORM",
6076 "PCIE",
6077 "NIG_QM_PBF",
6078 "CAU",
6079 "ATTN",
6080 "GRC",
6081 };
6082
6083 /* IGU FIFO error messages */
6084 static const char * const s_igu_fifo_error_strs[] = {
6085 "no error",
6086 "length error",
6087 "function disabled",
6088 "VF sent command to attnetion address",
6089 "host sent prod update command",
6090 "read of during interrupt register while in MIMD mode",
6091 "access to PXP BAR reserved address",
6092 "producer update command to attention index",
6093 "unknown error",
6094 "SB index not valid",
6095 "SB relative index and FID not found",
6096 "FID not match",
6097 "command with error flag asserted (PCI error or CAU discard)",
6098 "VF sent cleanup and RF cleanup is disabled",
6099 "cleanup command on type bigger than 4"
6100 };
6101
6102 /* IGU FIFO address data */
6103 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6104 {0x0, 0x101, "MSI-X Memory", NULL,
6105 IGU_ADDR_TYPE_MSIX_MEM},
6106 {0x102, 0x1ff, "reserved", NULL,
6107 IGU_ADDR_TYPE_RESERVED},
6108 {0x200, 0x200, "Write PBA[0:63]", NULL,
6109 IGU_ADDR_TYPE_WRITE_PBA},
6110 {0x201, 0x201, "Write PBA[64:127]", "reserved",
6111 IGU_ADDR_TYPE_WRITE_PBA},
6112 {0x202, 0x202, "Write PBA[128]", "reserved",
6113 IGU_ADDR_TYPE_WRITE_PBA},
6114 {0x203, 0x3ff, "reserved", NULL,
6115 IGU_ADDR_TYPE_RESERVED},
6116 {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6117 IGU_ADDR_TYPE_WRITE_INT_ACK},
6118 {0x5f0, 0x5f0, "Attention bits update", NULL,
6119 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6120 {0x5f1, 0x5f1, "Attention bits set", NULL,
6121 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6122 {0x5f2, 0x5f2, "Attention bits clear", NULL,
6123 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6124 {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6125 IGU_ADDR_TYPE_READ_INT},
6126 {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6127 IGU_ADDR_TYPE_READ_INT},
6128 {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6129 IGU_ADDR_TYPE_READ_INT},
6130 {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6131 IGU_ADDR_TYPE_READ_INT},
6132 {0x5f7, 0x5ff, "reserved", NULL,
6133 IGU_ADDR_TYPE_RESERVED},
6134 {0x600, 0x7ff, "Producer update", NULL,
6135 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6136 };
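
/* Lookup sketch (illustrative only): qed_parse_igu_fifo_element() below scans
 * this table for the range containing an element's cmd_addr. For example,
 * cmd_addr 0x42 falls in the MSI-X memory range and is reported as
 * vector_num = 0x42 / 2 = 0x21, while cmd_addr 0x204 falls in the reserved
 * 0x203-0x3ff range.
 */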
6137
6138 /******************************** Variables **********************************/
6139
6140 /* MCP Trace meta data array - used in case the dump doesn't contain the
6141 * meta data (e.g. due to no NVRAM access).
6142 */
6143 static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 };
6144
6145 /* Parsed MCP Trace meta data info, based on MCP trace meta array */
6146 static struct mcp_trace_meta s_mcp_trace_meta;
6147 static bool s_mcp_trace_meta_valid;
6148
6149 /* Temporary buffer, used for print size calculations */
6150 static char s_temp_buf[MAX_MSG_LEN];
6151
6152 /**************************** Private Functions ******************************/
6153
6154 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6155 {
6156 return (a + b) % size;
6157 }
6158
6159 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6160 {
6161 return (size + a - b) % size;
6162 }
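
/* Worked example (illustrative only): with a 16-byte cyclic buffer,
 * qed_cyclic_add(14, 4, 16) == 2 (the offset wraps past the end), and
 * qed_cyclic_sub(2, 14, 16) == 4 (the distance from offset 14 up to offset 2).
 */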
6163
6164 /* Reads the specified number of bytes (up to 4) from the specified cyclic
6165 * buffer and returns them as a dword value. The specified buffer offset is
6166 * updated.
6167 */
6168 static u32 qed_read_from_cyclic_buf(void *buf,
6169 u32 *offset,
6170 u32 buf_size, u8 num_bytes_to_read)
6171 {
6172 u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6173 u32 val = 0;
6174
6175 val_ptr = (u8 *)&val;
6176
6177 /* Assume a little-endian CPU and a network-order (big-endian) buffer,
6178 * i.e. high-order bytes are placed at lower memory addresses.
6179 */
6180 for (i = 0; i < num_bytes_to_read; i++) {
6181 val_ptr[i] = bytes_buf[*offset];
6182 *offset = qed_cyclic_add(*offset, 1, buf_size);
6183 }
6184
6185 return val;
6186 }
6187
6188 /* Reads and returns the next byte from the specified buffer.
6189 * The specified buffer offset is updated.
6190 */
6191 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6192 {
6193 return ((u8 *)buf)[(*offset)++];
6194 }
6195
6196 /* Reads and returns the next dword from the specified buffer.
6197 * The specified buffer offset is updated.
6198 */
6199 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6200 {
6201 u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6202
6203 *offset += 4;
6204
6205 return dword_val;
6206 }
6207
6208 /* Reads the next string from the specified buffer, and copies it to the
6209 * specified pointer. The specified buffer offset is updated.
6210 */
6211 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6212 {
6213 const char *source_str = &((const char *)buf)[*offset];
6214
6215 strncpy(dest, source_str, size);
6216 dest[size - 1] = '\0';
6217 *offset += size;
6218 }
6219
6220 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6221 * If the specified buffer is NULL, a temporary buffer pointer is returned.
6222 */
6223 static char *qed_get_buf_ptr(void *buf, u32 offset)
6224 {
6225 return buf ? (char *)buf + offset : s_temp_buf;
6226 }
6227
6228 /* Reads a param from the specified buffer. Returns the number of dwords read.
6229 * If the returned param_str_val is NULL, the param is numeric and its value
6230 * is returned in param_num_val.
6231 * Otherwise, the param is a string and its pointer is returned in param_str_val.
6232 */
6233 static u32 qed_read_param(u32 *dump_buf,
6234 const char **param_name,
6235 const char **param_str_val, u32 *param_num_val)
6236 {
6237 char *char_buf = (char *)dump_buf;
6238 size_t offset = 0;
6239
6240 /* Extract param name */
6241 *param_name = char_buf;
6242 offset += strlen(*param_name) + 1;
6243
6244 /* Check param type */
6245 if (*(char_buf + offset++)) {
6246 /* String param */
6247 *param_str_val = char_buf + offset;
6248 *param_num_val = 0;
6249 offset += strlen(*param_str_val) + 1;
6250 if (offset & 0x3)
6251 offset += (4 - (offset & 0x3));
6252 } else {
6253 /* Numeric param */
6254 *param_str_val = NULL;
6255 if (offset & 0x3)
6256 offset += (4 - (offset & 0x3));
6257 *param_num_val = *(u32 *)(char_buf + offset);
6258 offset += 4;
6259 }
6260
6261 return (u32)offset / 4;
6262 }
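
/* Encoding sketch (illustrative only), matching the parsing above: a param is
 * laid out as a NUL-terminated name string, a one-byte type flag, and then
 * either a NUL-terminated string value or a dword-aligned u32 value. A numeric
 * param named "size" therefore occupies 3 dwords:
 *
 *	's' 'i' 'z' 'e' '\0'  0x00  <2 pad bytes>  <u32 value>
 */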
6263
6264 /* Reads a section header from the specified buffer.
6265 * Returns the number of dwords read.
6266 */
6267 static u32 qed_read_section_hdr(u32 *dump_buf,
6268 const char **section_name,
6269 u32 *num_section_params)
6270 {
6271 const char *param_str_val;
6272
6273 return qed_read_param(dump_buf,
6274 section_name, &param_str_val, num_section_params);
6275 }
6276
6277 /* Reads section params from the specified buffer and prints them to the results
6278 * buffer. Returns the number of dwords read.
6279 */
6280 static u32 qed_print_section_params(u32 *dump_buf,
6281 u32 num_section_params,
6282 char *results_buf, u32 *num_chars_printed)
6283 {
6284 u32 i, dump_offset = 0, results_offset = 0;
6285
6286 for (i = 0; i < num_section_params; i++) {
6287 const char *param_name, *param_str_val;
6288 u32 param_num_val = 0;
6289
6290 dump_offset += qed_read_param(dump_buf + dump_offset,
6291 &param_name,
6292 &param_str_val, &param_num_val);
6293
6294 if (param_str_val)
6295 results_offset +=
6296 sprintf(qed_get_buf_ptr(results_buf,
6297 results_offset),
6298 "%s: %s\n", param_name, param_str_val);
6299 else if (strcmp(param_name, "fw-timestamp"))
6300 results_offset +=
6301 sprintf(qed_get_buf_ptr(results_buf,
6302 results_offset),
6303 "%s: %d\n", param_name, param_num_val);
6304 }
6305
6306 results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6307 "\n");
6308
6309 *num_chars_printed = results_offset;
6310
6311 return dump_offset;
6312 }
6313
6314 /* Parses the idle check rules and returns the number of characters printed.
6315 * In case of parsing error, returns 0.
6316 */
6317 static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6318 u32 *dump_buf_end,
6319 u32 num_rules,
6320 bool print_fw_idle_chk,
6321 char *results_buf,
6322 u32 *num_errors, u32 *num_warnings)
6323 {
6324 /* Offset in results_buf in bytes */
6325 u32 results_offset = 0;
6326
6327 u32 rule_idx;
6328 u16 i, j;
6329
6330 *num_errors = 0;
6331 *num_warnings = 0;
6332
6333 /* Go over dumped results */
6334 for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6335 rule_idx++) {
6336 const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6337 struct dbg_idle_chk_result_hdr *hdr;
6338 const char *parsing_str, *lsi_msg;
6339 u32 parsing_str_offset;
6340 bool has_fw_msg;
6341 u8 curr_reg_id;
6342
6343 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6344 rule_parsing_data =
6345 (const struct dbg_idle_chk_rule_parsing_data *)
6346 &s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6347 ptr[hdr->rule_id];
6348 parsing_str_offset =
6349 GET_FIELD(rule_parsing_data->data,
6350 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6351 has_fw_msg =
6352 GET_FIELD(rule_parsing_data->data,
6353 DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6354 parsing_str =
6355 &((const char *)
6356 s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6357 [parsing_str_offset];
6358 lsi_msg = parsing_str;
6359 curr_reg_id = 0;
6360
6361 if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6362 return 0;
6363
6364 /* Skip rule header */
6365 dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6366
6367 /* Update errors/warnings count */
6368 if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6369 hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6370 (*num_errors)++;
6371 else
6372 (*num_warnings)++;
6373
6374 /* Print rule severity */
6375 results_offset +=
6376 sprintf(qed_get_buf_ptr(results_buf,
6377 results_offset), "%s: ",
6378 s_idle_chk_severity_str[hdr->severity]);
6379
6380 /* Print rule message */
6381 if (has_fw_msg)
6382 parsing_str += strlen(parsing_str) + 1;
6383 results_offset +=
6384 sprintf(qed_get_buf_ptr(results_buf,
6385 results_offset), "%s.",
6386 has_fw_msg &&
6387 print_fw_idle_chk ? parsing_str : lsi_msg);
6388 parsing_str += strlen(parsing_str) + 1;
6389
6390 /* Print register values */
6391 results_offset +=
6392 sprintf(qed_get_buf_ptr(results_buf,
6393 results_offset), " Registers:");
6394 for (i = 0;
6395 i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6396 i++) {
6397 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6398 bool is_mem;
6399 u8 reg_id;
6400
6401 reg_hdr =
6402 (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6403 is_mem = GET_FIELD(reg_hdr->data,
6404 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6405 reg_id = GET_FIELD(reg_hdr->data,
6406 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6407
6408 /* Skip reg header */
6409 dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6410
6411 /* Skip register names until the required reg_id is
6412 * reached.
6413 */
6414 for (; reg_id > curr_reg_id;
6415 curr_reg_id++,
6416 parsing_str += strlen(parsing_str) + 1);
6417
6418 results_offset +=
6419 sprintf(qed_get_buf_ptr(results_buf,
6420 results_offset), " %s",
6421 parsing_str);
6422 if (i < hdr->num_dumped_cond_regs && is_mem)
6423 results_offset +=
6424 sprintf(qed_get_buf_ptr(results_buf,
6425 results_offset),
6426 "[%d]", hdr->mem_entry_id +
6427 reg_hdr->start_entry);
6428 results_offset +=
6429 sprintf(qed_get_buf_ptr(results_buf,
6430 results_offset), "=");
6431 for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6432 results_offset +=
6433 sprintf(qed_get_buf_ptr(results_buf,
6434 results_offset),
6435 "0x%x", *dump_buf);
6436 if (j < reg_hdr->size - 1)
6437 results_offset +=
6438 sprintf(qed_get_buf_ptr
6439 (results_buf,
6440 results_offset), ",");
6441 }
6442 }
6443
6444 results_offset +=
6445 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6446 }
6447
6448 /* Check if end of dump buffer was exceeded */
6449 if (dump_buf > dump_buf_end)
6450 return 0;
6451
6452 return results_offset;
6453 }
6454
6455 /* Parses an idle check dump buffer.
6456 * If result_buf is not NULL, the idle check results are printed to it.
6457 * In any case, the required results buffer size is assigned to
6458 * parsed_results_bytes.
6459 * The parsing status is returned.
6460 */
6461 static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6462 u32 num_dumped_dwords,
6463 char *results_buf,
6464 u32 *parsed_results_bytes,
6465 u32 *num_errors,
6466 u32 *num_warnings)
6467 {
6468 const char *section_name, *param_name, *param_str_val;
6469 u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6470 u32 num_section_params = 0, num_rules;
6471
6472 /* Offset in results_buf in bytes */
6473 u32 results_offset = 0;
6474
6475 *parsed_results_bytes = 0;
6476 *num_errors = 0;
6477 *num_warnings = 0;
6478
6479 if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6480 !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6481 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6482
6483 /* Read global_params section */
6484 dump_buf += qed_read_section_hdr(dump_buf,
6485 &section_name, &num_section_params);
6486 if (strcmp(section_name, "global_params"))
6487 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6488
6489 /* Print global params */
6490 dump_buf += qed_print_section_params(dump_buf,
6491 num_section_params,
6492 results_buf, &results_offset);
6493
6494 /* Read idle_chk section */
6495 dump_buf += qed_read_section_hdr(dump_buf,
6496 &section_name, &num_section_params);
6497 if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6498 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6499 dump_buf += qed_read_param(dump_buf,
6500 &param_name, &param_str_val, &num_rules);
6501 if (strcmp(param_name, "num_rules"))
6502 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6503
6504 if (num_rules) {
6505 u32 rules_print_size;
6506
6507 /* Print FW output */
6508 results_offset +=
6509 sprintf(qed_get_buf_ptr(results_buf,
6510 results_offset),
6511 "FW_IDLE_CHECK:\n");
6512 rules_print_size =
6513 qed_parse_idle_chk_dump_rules(dump_buf,
6514 dump_buf_end,
6515 num_rules,
6516 true,
6517 results_buf ?
6518 results_buf +
6519 results_offset :
6520 NULL,
6521 num_errors,
6522 num_warnings);
6523 results_offset += rules_print_size;
6524 if (!rules_print_size)
6525 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6526
6527 /* Print LSI output */
6528 results_offset +=
6529 sprintf(qed_get_buf_ptr(results_buf,
6530 results_offset),
6531 "\nLSI_IDLE_CHECK:\n");
6532 rules_print_size =
6533 qed_parse_idle_chk_dump_rules(dump_buf,
6534 dump_buf_end,
6535 num_rules,
6536 false,
6537 results_buf ?
6538 results_buf +
6539 results_offset :
6540 NULL,
6541 num_errors,
6542 num_warnings);
6543 results_offset += rules_print_size;
6544 if (!rules_print_size)
6545 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6546 }
6547
6548 /* Print errors/warnings count */
6549 if (*num_errors)
6550 results_offset +=
6551 sprintf(qed_get_buf_ptr(results_buf,
6552 results_offset),
6553 "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6554 *num_errors, *num_warnings);
6555 else if (*num_warnings)
6556 results_offset +=
6557 sprintf(qed_get_buf_ptr(results_buf,
6558 results_offset),
6559 "\nIdle Check completed successfully (with %d warnings)\n",
6560 *num_warnings);
6561 else
6562 results_offset +=
6563 sprintf(qed_get_buf_ptr(results_buf,
6564 results_offset),
6565 "\nIdle Check completed successfully\n");
6566
6567 /* Add 1 for string NULL termination */
6568 *parsed_results_bytes = results_offset + 1;
6569
6570 return DBG_STATUS_OK;
6571 }
6572
6573 /* Frees the specified MCP Trace meta data */
6574 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
6575 struct mcp_trace_meta *meta)
6576 {
6577 u32 i;
6578
6579 s_mcp_trace_meta_valid = false;
6580
6581 /* Release modules */
6582 if (meta->modules) {
6583 for (i = 0; i < meta->modules_num; i++)
6584 kfree(meta->modules[i]);
6585 kfree(meta->modules);
6586 }
6587
6588 /* Release formats */
6589 if (meta->formats) {
6590 for (i = 0; i < meta->formats_num; i++)
6591 kfree(meta->formats[i].format_str);
6592 kfree(meta->formats);
6593 }
6594 }
6595
6596 /* Allocates and fills MCP Trace meta data based on the specified meta data
6597 * dump buffer.
6598 * Returns debug status code.
6599 */
6600 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
6601 const u32 *meta_buf,
6602 struct mcp_trace_meta *meta)
6603 {
6604 u8 *meta_buf_bytes = (u8 *)meta_buf;
6605 u32 offset = 0, signature, i;
6606
6607 /* Free the previous meta before loading a new one. */
6608 if (s_mcp_trace_meta_valid)
6609 qed_mcp_trace_free_meta(p_hwfn, meta);
6610
6611 memset(meta, 0, sizeof(*meta));
6612
6613 /* Read first signature */
6614 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6615 if (signature != NVM_MAGIC_VALUE)
6616 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6617
6618 /* Read no. of modules and allocate memory for their pointers */
6619 meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6620 meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6621 GFP_KERNEL);
6622 if (!meta->modules)
6623 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6624
6625 /* Allocate and read all module strings */
6626 for (i = 0; i < meta->modules_num; i++) {
6627 u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6628
6629 *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6630 if (!(*(meta->modules + i))) {
6631 /* Update number of modules to be released */
6632 meta->modules_num = i;
6633 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6634 }
6635
6636 qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6637 *(meta->modules + i));
6638 if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6639 (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6640 }
6641
6642 /* Read second signature */
6643 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6644 if (signature != NVM_MAGIC_VALUE)
6645 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6646
6647 /* Read number of formats and allocate memory for all formats */
6648 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6649 meta->formats = kcalloc(meta->formats_num,
6650 sizeof(struct mcp_trace_format),
6651 GFP_KERNEL);
6652 if (!meta->formats)
6653 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6654
6655 /* Allocate and read all strings */
6656 for (i = 0; i < meta->formats_num; i++) {
6657 struct mcp_trace_format *format_ptr = &meta->formats[i];
6658 u8 format_len;
6659
6660 format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6661 &offset);
6662 format_len =
6663 (format_ptr->data &
6664 MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6665 format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6666 if (!format_ptr->format_str) {
6667 /* Update number of formats to be released */
6668 meta->formats_num = i;
6669 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6670 }
6671
6672 qed_read_str_from_buf(meta_buf_bytes,
6673 &offset,
6674 format_len, format_ptr->format_str);
6675 }
6676
6677 s_mcp_trace_meta_valid = true;
6678 return DBG_STATUS_OK;
6679 }
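
/* Meta buffer layout consumed above (summary for reference):
 *
 *	u32 signature (NVM_MAGIC_VALUE)
 *	u8  modules_num
 *	per module:  u8 len, then len bytes of NUL-terminated module name
 *	u32 signature (NVM_MAGIC_VALUE)
 *	u32 formats_num
 *	per format:  u32 packed data word (module/level/param sizes/string len),
 *		     then the format string (length taken from the LEN field)
 */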
6680
6681 /* Parses an MCP trace buffer. If parsed_buf is not NULL, the parsed MCP Trace
6682 * results are printed to it. The parsing status is returned.
6683 * Arguments:
6684 * trace_buf - MCP trace cyclic buffer
6685 * trace_buf_size - MCP trace cyclic buffer size in bytes
6686 * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6687 * buffer.
6688 * data_size - size in bytes of data to parse.
6689 * parsed_buf - destination buffer for parsed data.
6690 * parsed_bytes - size of parsed data in bytes.
6691 */
6692 static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
6693 u32 trace_buf_size,
6694 u32 data_offset,
6695 u32 data_size,
6696 char *parsed_buf,
6697 u32 *parsed_bytes)
6698 {
6699 u32 param_mask, param_shift;
6700 enum dbg_status status;
6701
6702 *parsed_bytes = 0;
6703
6704 if (!s_mcp_trace_meta_valid)
6705 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6706
6707 status = DBG_STATUS_OK;
6708
6709 while (data_size) {
6710 struct mcp_trace_format *format_ptr;
6711 u8 format_level, format_module;
6712 u32 params[3] = { 0, 0, 0 };
6713 u32 header, format_idx, i;
6714
6715 if (data_size < MFW_TRACE_ENTRY_SIZE)
6716 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6717
6718 header = qed_read_from_cyclic_buf(trace_buf,
6719 &data_offset,
6720 trace_buf_size,
6721 MFW_TRACE_ENTRY_SIZE);
6722 data_size -= MFW_TRACE_ENTRY_SIZE;
6723 format_idx = header & MFW_TRACE_EVENTID_MASK;
6724
6725 /* Skip message if its index doesn't exist in the meta data */
6726 if (format_idx >= s_mcp_trace_meta.formats_num) {
6727 u8 format_size =
6728 (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
6729 MFW_TRACE_PRM_SIZE_SHIFT);
6730
6731 if (data_size < format_size)
6732 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6733
6734 data_offset = qed_cyclic_add(data_offset,
6735 format_size,
6736 trace_buf_size);
6737 data_size -= format_size;
6738 continue;
6739 }
6740
6741 format_ptr = &s_mcp_trace_meta.formats[format_idx];
6742
6743 for (i = 0,
6744 param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
6745 param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6746 i < MCP_TRACE_FORMAT_MAX_PARAMS;
6747 i++,
6748 param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6749 param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6750 /* Extract param size (0..3) */
6751 u8 param_size = (u8)((format_ptr->data & param_mask) >>
6752 param_shift);
6753
6754 /* If the param size is zero, there are no other
6755 * parameters.
6756 */
6757 if (!param_size)
6758 break;
6759
6760 /* Size is encoded using 2 bits, where 3 is used to
6761 * encode 4.
6762 */
6763 if (param_size == 3)
6764 param_size = 4;
6765
6766 if (data_size < param_size)
6767 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6768
6769 params[i] = qed_read_from_cyclic_buf(trace_buf,
6770 &data_offset,
6771 trace_buf_size,
6772 param_size);
6773 data_size -= param_size;
6774 }
6775
6776 format_level = (u8)((format_ptr->data &
6777 MCP_TRACE_FORMAT_LEVEL_MASK) >>
6778 MCP_TRACE_FORMAT_LEVEL_SHIFT);
6779 format_module = (u8)((format_ptr->data &
6780 MCP_TRACE_FORMAT_MODULE_MASK) >>
6781 MCP_TRACE_FORMAT_MODULE_SHIFT);
6782 if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6783 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6784
6785 /* Print current message to results buffer */
6786 *parsed_bytes +=
6787 sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
6788 "%s %-8s: ",
6789 s_mcp_trace_level_str[format_level],
6790 s_mcp_trace_meta.modules[format_module]);
6791 *parsed_bytes +=
6792 sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
6793 format_ptr->format_str,
6794 params[0], params[1], params[2]);
6795 }
6796
6797 /* Add string NULL terminator */
6798 (*parsed_bytes)++;
6799
6800 return status;
6801 }
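
/* Worked example (illustrative only): a format data word whose P1/P2/P3 size
 * bits hold 1, 2 and 3 describes an entry with a 1-byte, a 2-byte and a 4-byte
 * parameter (the 2-bit value 3 encodes a size of 4), so 7 parameter bytes
 * follow the MFW_TRACE_ENTRY_SIZE header in the cyclic buffer.
 */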
6802
6803 /* Parses an MCP Trace dump buffer.
6804 * If parsed_buf is not NULL, the parsed MCP Trace results are printed to it.
6805 * In any case, the required results buffer size is assigned to
6806 * parsed_bytes.
6807 * The parsing status is returned.
6808 */
6809 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6810 u32 *dump_buf,
6811 char *parsed_buf,
6812 u32 *parsed_bytes)
6813 {
6814 const char *section_name, *param_name, *param_str_val;
6815 u32 data_size, trace_data_dwords, trace_meta_dwords;
6816 u32 offset, results_offset, parsed_buf_bytes;
6817 u32 param_num_val, num_section_params;
6818 struct mcp_trace *trace;
6819 enum dbg_status status;
6820 const u32 *meta_buf;
6821 u8 *trace_buf;
6822
6823 *parsed_bytes = 0;
6824
6825 /* Read global_params section */
6826 dump_buf += qed_read_section_hdr(dump_buf,
6827 &section_name, &num_section_params);
6828 if (strcmp(section_name, "global_params"))
6829 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6830
6831 /* Print global params */
6832 dump_buf += qed_print_section_params(dump_buf,
6833 num_section_params,
6834 parsed_buf, &results_offset);
6835
6836 /* Read trace_data section */
6837 dump_buf += qed_read_section_hdr(dump_buf,
6838 &section_name, &num_section_params);
6839 if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6840 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6841 dump_buf += qed_read_param(dump_buf,
6842 &param_name, &param_str_val, &param_num_val);
6843 if (strcmp(param_name, "size"))
6844 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6845 trace_data_dwords = param_num_val;
6846
6847 /* Prepare trace info */
6848 trace = (struct mcp_trace *)dump_buf;
6849 trace_buf = (u8 *)dump_buf + sizeof(*trace);
6850 offset = trace->trace_oldest;
6851 data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6852 dump_buf += trace_data_dwords;
6853
6854 /* Read meta_data section */
6855 dump_buf += qed_read_section_hdr(dump_buf,
6856 &section_name, &num_section_params);
6857 if (strcmp(section_name, "mcp_trace_meta"))
6858 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6859 dump_buf += qed_read_param(dump_buf,
6860 &param_name, &param_str_val, &param_num_val);
6861 if (strcmp(param_name, "size"))
6862 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6863 trace_meta_dwords = param_num_val;
6864
6865 /* Choose meta data buffer */
6866 if (!trace_meta_dwords) {
6867 /* Dump doesn't include meta data */
6868 if (!s_mcp_trace_meta_arr.ptr)
6869 return DBG_STATUS_MCP_TRACE_NO_META;
6870 meta_buf = s_mcp_trace_meta_arr.ptr;
6871 } else {
6872 /* Dump includes meta data */
6873 meta_buf = dump_buf;
6874 }
6875
6876 /* Allocate meta data memory */
6877 status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta);
6878 if (status != DBG_STATUS_OK)
6879 return status;
6880
6881 status = qed_parse_mcp_trace_buf(trace_buf,
6882 trace->size,
6883 offset,
6884 data_size,
6885 parsed_buf ?
6886 parsed_buf + results_offset :
6887 NULL,
6888 &parsed_buf_bytes);
6889 if (status != DBG_STATUS_OK)
6890 return status;
6891
6892 *parsed_bytes = results_offset + parsed_buf_bytes;
6893
6894 return DBG_STATUS_OK;
6895 }
6896
6897 /* Parses a Reg FIFO dump buffer.
6898 * If result_buf is not NULL, the Reg FIFO results are printed to it.
6899 * In any case, the required results buffer size is assigned to
6900 * parsed_results_bytes.
6901 * The parsing status is returned.
6902 */
6903 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6904 char *results_buf,
6905 u32 *parsed_results_bytes)
6906 {
6907 const char *section_name, *param_name, *param_str_val;
6908 u32 param_num_val, num_section_params, num_elements;
6909 struct reg_fifo_element *elements;
6910 u8 i, j, err_val, vf_val;
6911 u32 results_offset = 0;
6912 char vf_str[4];
6913
6914 /* Read global_params section */
6915 dump_buf += qed_read_section_hdr(dump_buf,
6916 &section_name, &num_section_params);
6917 if (strcmp(section_name, "global_params"))
6918 return DBG_STATUS_REG_FIFO_BAD_DATA;
6919
6920 /* Print global params */
6921 dump_buf += qed_print_section_params(dump_buf,
6922 num_section_params,
6923 results_buf, &results_offset);
6924
6925 /* Read reg_fifo_data section */
6926 dump_buf += qed_read_section_hdr(dump_buf,
6927 &section_name, &num_section_params);
6928 if (strcmp(section_name, "reg_fifo_data"))
6929 return DBG_STATUS_REG_FIFO_BAD_DATA;
6930 dump_buf += qed_read_param(dump_buf,
6931 &param_name, &param_str_val, &param_num_val);
6932 if (strcmp(param_name, "size"))
6933 return DBG_STATUS_REG_FIFO_BAD_DATA;
6934 if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6935 return DBG_STATUS_REG_FIFO_BAD_DATA;
6936 num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6937 elements = (struct reg_fifo_element *)dump_buf;
6938
6939 /* Decode elements */
6940 for (i = 0; i < num_elements; i++) {
6941 bool err_printed = false;
6942
6943 /* Discover if element belongs to a VF or a PF */
6944 vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6945 if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6946 sprintf(vf_str, "%s", "N/A");
6947 else
6948 sprintf(vf_str, "%d", vf_val);
6949
6950 /* Add parsed element to parsed buffer */
6951 results_offset +=
6952 sprintf(qed_get_buf_ptr(results_buf,
6953 results_offset),
6954 "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6955 elements[i].data,
6956 (u32)GET_FIELD(elements[i].data,
6957 REG_FIFO_ELEMENT_ADDRESS) *
6958 REG_FIFO_ELEMENT_ADDR_FACTOR,
6959 s_access_strs[GET_FIELD(elements[i].data,
6960 REG_FIFO_ELEMENT_ACCESS)],
6961 (u32)GET_FIELD(elements[i].data,
6962 REG_FIFO_ELEMENT_PF),
6963 vf_str,
6964 (u32)GET_FIELD(elements[i].data,
6965 REG_FIFO_ELEMENT_PORT),
6966 s_privilege_strs[GET_FIELD(elements[i].data,
6967 REG_FIFO_ELEMENT_PRIVILEGE)],
6968 s_protection_strs[GET_FIELD(elements[i].data,
6969 REG_FIFO_ELEMENT_PROTECTION)],
6970 s_master_strs[GET_FIELD(elements[i].data,
6971 REG_FIFO_ELEMENT_MASTER)]);
6972
6973 /* Print errors */
6974 for (j = 0,
6975 err_val = GET_FIELD(elements[i].data,
6976 REG_FIFO_ELEMENT_ERROR);
6977 j < ARRAY_SIZE(s_reg_fifo_error_strs);
6978 j++, err_val >>= 1) {
6979 if (err_val & 0x1) {
6980 if (err_printed)
6981 results_offset +=
6982 sprintf(qed_get_buf_ptr
6983 (results_buf,
6984 results_offset), ", ");
6985 results_offset +=
6986 sprintf(qed_get_buf_ptr
6987 (results_buf, results_offset), "%s",
6988 s_reg_fifo_error_strs[j]);
6989 err_printed = true;
6990 }
6991 }
6992
6993 results_offset +=
6994 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6995 }
6996
6997 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6998 results_offset),
6999 "fifo contained %d elements", num_elements);
7000
7001 /* Add 1 for string NULL termination */
7002 *parsed_results_bytes = results_offset + 1;
7003
7004 return DBG_STATUS_OK;
7005 }
7006
7007 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
7008 *element, char
7009 *results_buf,
7010 u32 *results_offset)
7011 {
7012 const struct igu_fifo_addr_data *found_addr = NULL;
7013 u8 source, err_type, i, is_cleanup;
7014 char parsed_addr_data[32];
7015 char parsed_wr_data[256];
7016 u32 wr_data, prod_cons;
7017 bool is_wr_cmd, is_pf;
7018 u16 cmd_addr;
7019 u64 dword12;
7020
7021 /* Dword12 (dword index 1 and 2) contains bits 32..95 of the
7022 * FIFO element.
7023 */
7024 dword12 = ((u64)element->dword2 << 32) | element->dword1;
7025 is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
7026 is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
7027 cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
7028 source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
7029 err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
7030
7031 if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
7032 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7033 if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
7034 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7035
7036 /* Find address data */
7037 for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
7038 const struct igu_fifo_addr_data *curr_addr =
7039 &s_igu_fifo_addr_data[i];
7040
7041 if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
7042 curr_addr->end_addr)
7043 found_addr = curr_addr;
7044 }
7045
7046 if (!found_addr)
7047 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7048
7049 /* Prepare parsed address data */
7050 switch (found_addr->type) {
7051 case IGU_ADDR_TYPE_MSIX_MEM:
7052 sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
7053 break;
7054 case IGU_ADDR_TYPE_WRITE_INT_ACK:
7055 case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
7056 sprintf(parsed_addr_data,
7057 " SB = 0x%x", cmd_addr - found_addr->start_addr);
7058 break;
7059 default:
7060 parsed_addr_data[0] = '\0';
7061 }
7062
7063 if (!is_wr_cmd) {
7064 parsed_wr_data[0] = '\0';
7065 goto out;
7066 }
7067
7068 /* Prepare parsed write data */
7069 wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7070 prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7071 is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7072
7073 if (source == IGU_SRC_ATTN) {
7074 sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7075 } else {
7076 if (is_cleanup) {
7077 u8 cleanup_val, cleanup_type;
7078
7079 cleanup_val =
7080 GET_FIELD(wr_data,
7081 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7082 cleanup_type =
7083 GET_FIELD(wr_data,
7084 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7085
7086 sprintf(parsed_wr_data,
7087 "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7088 cleanup_val ? "set" : "clear",
7089 cleanup_type);
7090 } else {
7091 u8 update_flag, en_dis_int_for_sb, segment;
7092 u8 timer_mask;
7093
7094 update_flag = GET_FIELD(wr_data,
7095 IGU_FIFO_WR_DATA_UPDATE_FLAG);
7096 en_dis_int_for_sb =
7097 GET_FIELD(wr_data,
7098 IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7099 segment = GET_FIELD(wr_data,
7100 IGU_FIFO_WR_DATA_SEGMENT);
7101 timer_mask = GET_FIELD(wr_data,
7102 IGU_FIFO_WR_DATA_TIMER_MASK);
7103
7104 sprintf(parsed_wr_data,
7105 "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7106 prod_cons,
7107 update_flag ? "update" : "nop",
7108 en_dis_int_for_sb ?
7109 (en_dis_int_for_sb == 1 ? "disable" : "nop") :
7110 "enable",
7111 segment ? "attn" : "regular",
7112 timer_mask);
7113 }
7114 }
7115 out:
7116 /* Add parsed element to parsed buffer */
7117 *results_offset += sprintf(qed_get_buf_ptr(results_buf,
7118 *results_offset),
7119 "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7120 element->dword2, element->dword1,
7121 element->dword0,
7122 is_pf ? "pf" : "vf",
7123 GET_FIELD(element->dword0,
7124 IGU_FIFO_ELEMENT_DWORD0_FID),
7125 s_igu_fifo_source_strs[source],
7126 is_wr_cmd ? "wr" : "rd",
7127 cmd_addr,
7128 (!is_pf && found_addr->vf_desc)
7129 ? found_addr->vf_desc
7130 : found_addr->desc,
7131 parsed_addr_data,
7132 parsed_wr_data,
7133 s_igu_fifo_error_strs[err_type]);
7134
7135 return DBG_STATUS_OK;
7136 }
7137
7138 /* Parses an IGU FIFO dump buffer.
7139 * If results_buf is not NULL, the IGU FIFO results are printed to it.
7140 * In any case, the required results buffer size is assigned to
7141 * parsed_results_bytes.
7142 * The parsing status is returned.
7143 */
7144 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7145 char *results_buf,
7146 u32 *parsed_results_bytes)
7147 {
7148 const char *section_name, *param_name, *param_str_val;
7149 u32 param_num_val, num_section_params, num_elements;
7150 struct igu_fifo_element *elements;
7151 enum dbg_status status;
7152 u32 results_offset = 0;
7153 u32 i;
7154
7155 /* Read global_params section */
7156 dump_buf += qed_read_section_hdr(dump_buf,
7157 &section_name, &num_section_params);
7158 if (strcmp(section_name, "global_params"))
7159 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7160
7161 /* Print global params */
7162 dump_buf += qed_print_section_params(dump_buf,
7163 num_section_params,
7164 results_buf, &results_offset);
7165
7166 /* Read igu_fifo_data section */
7167 dump_buf += qed_read_section_hdr(dump_buf,
7168 &section_name, &num_section_params);
7169 if (strcmp(section_name, "igu_fifo_data"))
7170 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7171 dump_buf += qed_read_param(dump_buf,
7172 &param_name, &param_str_val, &param_num_val);
7173 if (strcmp(param_name, "size"))
7174 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7175 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7176 return DBG_STATUS_IGU_FIFO_BAD_DATA;
7177 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7178 elements = (struct igu_fifo_element *)dump_buf;
7179
7180 /* Decode elements */
7181 for (i = 0; i < num_elements; i++) {
7182 status = qed_parse_igu_fifo_element(&elements[i],
7183 results_buf,
7184 &results_offset);
7185 if (status != DBG_STATUS_OK)
7186 return status;
7187 }
7188
7189 results_offset += sprintf(qed_get_buf_ptr(results_buf,
7190 results_offset),
7191 "fifo contained %d elements", num_elements);
7192
7193 /* Add 1 for string NULL termination */
7194 *parsed_results_bytes = results_offset + 1;
7195
7196 return DBG_STATUS_OK;
7197 }
7198
7199 static enum dbg_status
7200 qed_parse_protection_override_dump(u32 *dump_buf,
7201 char *results_buf,
7202 u32 *parsed_results_bytes)
7203 {
7204 const char *section_name, *param_name, *param_str_val;
7205 u32 param_num_val, num_section_params, num_elements;
7206 struct protection_override_element *elements;
7207 u32 results_offset = 0;
7208 u32 i;
7209
7210 /* Read global_params section */
7211 dump_buf += qed_read_section_hdr(dump_buf,
7212 &section_name, &num_section_params);
7213 if (strcmp(section_name, "global_params"))
7214 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7215
7216 /* Print global params */
7217 dump_buf += qed_print_section_params(dump_buf,
7218 num_section_params,
7219 results_buf, &results_offset);
7220
7221 /* Read protection_override_data section */
7222 dump_buf += qed_read_section_hdr(dump_buf,
7223 &section_name, &num_section_params);
7224 if (strcmp(section_name, "protection_override_data"))
7225 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7226 dump_buf += qed_read_param(dump_buf,
7227 &param_name, &param_str_val, &param_num_val);
7228 if (strcmp(param_name, "size"))
7229 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7230 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7231 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7232 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7233 elements = (struct protection_override_element *)dump_buf;
7234
7235 /* Decode elements */
7236 for (i = 0; i < num_elements; i++) {
7237 u32 address = GET_FIELD(elements[i].data,
7238 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7239 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7240
7241 results_offset +=
7242 sprintf(qed_get_buf_ptr(results_buf,
7243 results_offset),
7244 "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7245 i, address,
7246 (u32)GET_FIELD(elements[i].data,
7247 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7248 (u32)GET_FIELD(elements[i].data,
7249 PROTECTION_OVERRIDE_ELEMENT_READ),
7250 (u32)GET_FIELD(elements[i].data,
7251 PROTECTION_OVERRIDE_ELEMENT_WRITE),
7252 s_protection_strs[GET_FIELD(elements[i].data,
7253 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7254 s_protection_strs[GET_FIELD(elements[i].data,
7255 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7256 }
7257
7258 results_offset += sprintf(qed_get_buf_ptr(results_buf,
7259 results_offset),
7260 "protection override contained %d elements",
7261 num_elements);
7262
7263 /* Add 1 for string NULL termination */
7264 *parsed_results_bytes = results_offset + 1;
7265
7266 return DBG_STATUS_OK;
7267 }
7268
7269 /* Parses a FW Asserts dump buffer.
7270 * If results_buf is not NULL, the FW Asserts results are printed to it.
7271 * In any case, the required results buffer size is assigned to
7272 * parsed_results_bytes.
7273 * The parsing status is returned.
7274 */
7275 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7276 char *results_buf,
7277 u32 *parsed_results_bytes)
7278 {
7279 u32 num_section_params, param_num_val, i, results_offset = 0;
7280 const char *param_name, *param_str_val, *section_name;
7281 bool last_section_found = false;
7282
7283 *parsed_results_bytes = 0;
7284
7285 /* Read global_params section */
7286 dump_buf += qed_read_section_hdr(dump_buf,
7287 &section_name, &num_section_params);
7288 if (strcmp(section_name, "global_params"))
7289 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7290
7291 /* Print global params */
7292 dump_buf += qed_print_section_params(dump_buf,
7293 num_section_params,
7294 results_buf, &results_offset);
7295
7296 while (!last_section_found) {
7297 dump_buf += qed_read_section_hdr(dump_buf,
7298 &section_name,
7299 &num_section_params);
7300 if (!strcmp(section_name, "fw_asserts")) {
7301 /* Extract params */
7302 const char *storm_letter = NULL;
7303 u32 storm_dump_size = 0;
7304
7305 for (i = 0; i < num_section_params; i++) {
7306 dump_buf += qed_read_param(dump_buf,
7307 &param_name,
7308 &param_str_val,
7309 &param_num_val);
7310 if (!strcmp(param_name, "storm"))
7311 storm_letter = param_str_val;
7312 else if (!strcmp(param_name, "size"))
7313 storm_dump_size = param_num_val;
7314 else
7315 return
7316 DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7317 }
7318
7319 if (!storm_letter || !storm_dump_size)
7320 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7321
7322 /* Print data */
7323 results_offset +=
7324 sprintf(qed_get_buf_ptr(results_buf,
7325 results_offset),
7326 "\n%sSTORM_ASSERT: size=%d\n",
7327 storm_letter, storm_dump_size);
7328 for (i = 0; i < storm_dump_size; i++, dump_buf++)
7329 results_offset +=
7330 sprintf(qed_get_buf_ptr(results_buf,
7331 results_offset),
7332 "%08x\n", *dump_buf);
7333 } else if (!strcmp(section_name, "last")) {
7334 last_section_found = true;
7335 } else {
7336 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7337 }
7338 }
7339
7340 /* Add 1 for string NULL termination */
7341 *parsed_results_bytes = results_offset + 1;
7342
7343 return DBG_STATUS_OK;
7344 }
7345
7346 /***************************** Public Functions *******************************/
7347
7348 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7349 {
7350 struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7351 u8 buf_id;
7352
7353 /* Convert binary data to debug arrays */
7354 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7355 s_user_dbg_arrays[buf_id].ptr =
7356 (u32 *)(bin_ptr + buf_array[buf_id].offset);
7357 s_user_dbg_arrays[buf_id].size_in_dwords =
7358 BYTES_TO_DWORDS(buf_array[buf_id].length);
7359 }
7360
7361 return DBG_STATUS_OK;
7362 }
7363
7364 const char *qed_dbg_get_status_str(enum dbg_status status)
7365 {
7366 return (status <
7367 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7368 }
7369
7370 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7371 u32 *dump_buf,
7372 u32 num_dumped_dwords,
7373 u32 *results_buf_size)
7374 {
7375 u32 num_errors, num_warnings;
7376
7377 return qed_parse_idle_chk_dump(dump_buf,
7378 num_dumped_dwords,
7379 NULL,
7380 results_buf_size,
7381 &num_errors, &num_warnings);
7382 }
7383
7384 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7385 u32 *dump_buf,
7386 u32 num_dumped_dwords,
7387 char *results_buf,
7388 u32 *num_errors,
7389 u32 *num_warnings)
7390 {
7391 u32 parsed_buf_size;
7392
7393 return qed_parse_idle_chk_dump(dump_buf,
7394 num_dumped_dwords,
7395 results_buf,
7396 &parsed_buf_size,
7397 num_errors, num_warnings);
7398 }
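/* Usage sketch (illustrative only, not part of the driver): a hypothetical
 * caller that already holds a raw idle_chk dump would size the results
 * buffer first and only then format into it. The locals below (dump_buf,
 * num_dumped_dwords) are assumed to come from the caller.
 *
 *	u32 size, num_errors, num_warnings;
 *	char *results;
 *
 *	if (qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
 *					      num_dumped_dwords,
 *					      &size) != DBG_STATUS_OK)
 *		return;
 *	results = vzalloc(size);
 *	if (!results)
 *		return;
 *	if (qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
 *				       results, &num_errors,
 *				       &num_warnings) == DBG_STATUS_OK)
 *		pr_info("idle_chk: %u errors, %u warnings\n",
 *			num_errors, num_warnings);
 *	vfree(results);
 */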
7399
7400 void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
7401 {
7402 s_mcp_trace_meta_arr.ptr = data;
7403 s_mcp_trace_meta_arr.size_in_dwords = size;
7404 }
7405
7406 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7407 u32 *dump_buf,
7408 u32 num_dumped_dwords,
7409 u32 *results_buf_size)
7410 {
7411 return qed_parse_mcp_trace_dump(p_hwfn,
7412 dump_buf, NULL, results_buf_size);
7413 }
7414
7415 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7416 u32 *dump_buf,
7417 u32 num_dumped_dwords,
7418 char *results_buf)
7419 {
7420 u32 parsed_buf_size;
7421
7422 return qed_parse_mcp_trace_dump(p_hwfn,
7423 dump_buf,
7424 results_buf, &parsed_buf_size);
7425 }
7426
7427 enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf,
7428 u32 num_dumped_bytes,
7429 char *results_buf)
7430 {
7431 u32 parsed_bytes;
7432
7433 return qed_parse_mcp_trace_buf(dump_buf,
7434 num_dumped_bytes,
7435 0,
7436 num_dumped_bytes,
7437 results_buf, &parsed_bytes);
7438 }
7439
7440 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7441 u32 *dump_buf,
7442 u32 num_dumped_dwords,
7443 u32 *results_buf_size)
7444 {
7445 return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7446 }
7447
7448 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7449 u32 *dump_buf,
7450 u32 num_dumped_dwords,
7451 char *results_buf)
7452 {
7453 u32 parsed_buf_size;
7454
7455 return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7456 }
7457
7458 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7459 u32 *dump_buf,
7460 u32 num_dumped_dwords,
7461 u32 *results_buf_size)
7462 {
7463 return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7464 }
7465
7466 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7467 u32 *dump_buf,
7468 u32 num_dumped_dwords,
7469 char *results_buf)
7470 {
7471 u32 parsed_buf_size;
7472
7473 return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7474 }
7475
7476 enum dbg_status
7477 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7478 u32 *dump_buf,
7479 u32 num_dumped_dwords,
7480 u32 *results_buf_size)
7481 {
7482 return qed_parse_protection_override_dump(dump_buf,
7483 NULL, results_buf_size);
7484 }
7485
7486 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7487 u32 *dump_buf,
7488 u32 num_dumped_dwords,
7489 char *results_buf)
7490 {
7491 u32 parsed_buf_size;
7492
7493 return qed_parse_protection_override_dump(dump_buf,
7494 results_buf,
7495 &parsed_buf_size);
7496 }
7497
7498 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7499 u32 *dump_buf,
7500 u32 num_dumped_dwords,
7501 u32 *results_buf_size)
7502 {
7503 return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7504 }
7505
7506 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7507 u32 *dump_buf,
7508 u32 num_dumped_dwords,
7509 char *results_buf)
7510 {
7511 u32 parsed_buf_size;
7512
7513 return qed_parse_fw_asserts_dump(dump_buf,
7514 results_buf, &parsed_buf_size);
7515 }
7516
7517 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7518 struct dbg_attn_block_result *results)
7519 {
7520 struct user_dbg_array *block_attn, *pstrings;
7521 const u32 *block_attn_name_offsets;
7522 enum dbg_attn_type attn_type;
7523 const char *block_name;
7524 u8 num_regs, i, j;
7525
7526 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7527 attn_type = (enum dbg_attn_type)
7528 GET_FIELD(results->data,
7529 DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7530 block_name = s_block_info_arr[results->block_id].name;
7531
7532 if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7533 !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7534 !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7535 return DBG_STATUS_DBG_ARRAY_NOT_SET;
7536
7537 block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7538 block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7539
7540 /* Go over registers with a non-zero attention status */
7541 for (i = 0; i < num_regs; i++) {
7542 struct dbg_attn_bit_mapping *bit_mapping;
7543 struct dbg_attn_reg_result *reg_result;
7544 u8 num_reg_attn, bit_idx = 0;
7545
7546 reg_result = &results->reg_results[i];
7547 num_reg_attn = GET_FIELD(reg_result->data,
7548 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7549 block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7550 bit_mapping = &((struct dbg_attn_bit_mapping *)
7551 block_attn->ptr)[reg_result->block_attn_offset];
7552
7553 pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7554
7555 /* Go over attention status bits */
7556 for (j = 0; j < num_reg_attn; j++) {
7557 u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7558 DBG_ATTN_BIT_MAPPING_VAL);
7559 const char *attn_name, *attn_type_str, *masked_str;
7560 u32 attn_name_offset, sts_addr;
7561
7562 /* Check if bit mask should be advanced (due to unused
7563 * bits).
7564 */
7565 if (GET_FIELD(bit_mapping[j].data,
7566 DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7567 bit_idx += (u8)attn_idx_val;
7568 continue;
7569 }
7570
7571 /* Check current bit index */
7572 if (!(reg_result->sts_val & BIT(bit_idx))) {
7573 bit_idx++;
7574 continue;
7575 }
7576
7577 /* Find attention name */
7578 attn_name_offset =
7579 block_attn_name_offsets[attn_idx_val];
7580 attn_name = &((const char *)
7581 pstrings->ptr)[attn_name_offset];
7582 attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7583 "Interrupt" : "Parity";
7584 masked_str = reg_result->mask_val & BIT(bit_idx) ?
7585 " [masked]" : "";
7586 sts_addr = GET_FIELD(reg_result->data,
7587 DBG_ATTN_REG_RESULT_STS_ADDRESS);
7588 DP_NOTICE(p_hwfn,
7589 "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7590 block_name, attn_type_str, attn_name,
7591 sts_addr, bit_idx, masked_str);
7592
7593 bit_idx++;
7594 }
7595 }
7596
7597 return DBG_STATUS_OK;
7598 }
7599
7600 /* Wrapper for unifying the idle_chk and mcp_trace api */
7601 static enum dbg_status
7602 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7603 u32 *dump_buf,
7604 u32 num_dumped_dwords,
7605 char *results_buf)
7606 {
7607 u32 num_errors, num_warnings;
7608
7609 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7610 results_buf, &num_errors,
7611 &num_warnings);
7612 }
7613
7614 /* Feature metadata lookup table */
7615 static struct {
7616 char *name;
7617 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7618 struct qed_ptt *p_ptt, u32 *size);
7619 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7620 struct qed_ptt *p_ptt, u32 *dump_buf,
7621 u32 buf_size, u32 *dumped_dwords);
7622 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7623 u32 *dump_buf, u32 num_dumped_dwords,
7624 char *results_buf);
7625 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7626 u32 *dump_buf,
7627 u32 num_dumped_dwords,
7628 u32 *results_buf_size);
7629 } qed_features_lookup[] = {
7630 {
7631 "grc", qed_dbg_grc_get_dump_buf_size,
7632 qed_dbg_grc_dump, NULL, NULL}, {
7633 "idle_chk",
7634 qed_dbg_idle_chk_get_dump_buf_size,
7635 qed_dbg_idle_chk_dump,
7636 qed_print_idle_chk_results_wrapper,
7637 qed_get_idle_chk_results_buf_size}, {
7638 "mcp_trace",
7639 qed_dbg_mcp_trace_get_dump_buf_size,
7640 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7641 qed_get_mcp_trace_results_buf_size}, {
7642 "reg_fifo",
7643 qed_dbg_reg_fifo_get_dump_buf_size,
7644 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7645 qed_get_reg_fifo_results_buf_size}, {
7646 "igu_fifo",
7647 qed_dbg_igu_fifo_get_dump_buf_size,
7648 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7649 qed_get_igu_fifo_results_buf_size}, {
7650 "protection_override",
7651 qed_dbg_protection_override_get_dump_buf_size,
7652 qed_dbg_protection_override_dump,
7653 qed_print_protection_override_results,
7654 qed_get_protection_override_results_buf_size}, {
7655 "fw_asserts",
7656 qed_dbg_fw_asserts_get_dump_buf_size,
7657 qed_dbg_fw_asserts_dump,
7658 qed_print_fw_asserts_results,
7659 qed_get_fw_asserts_results_buf_size},};
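/* Illustrative sketch (assumptions noted, not extra driver code): the table
 * above lets the generic paths below (qed_dbg_dump() and format_feature())
 * dispatch by feature index without feature-specific branches, roughly:
 *
 *	u32 size_dwords, dumped_dwords;
 *
 *	if (qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
 *						      &size_dwords) !=
 *	    DBG_STATUS_OK)
 *		return;
 *	qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt, dump_buf,
 *						      size_dwords,
 *						      &dumped_dwords);
 *
 * feature_idx, p_hwfn, p_ptt and dump_buf are assumed to exist in the
 * caller; dump_buf must hold at least size_dwords dwords.
 */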
7660
7661 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7662 {
7663 u32 i, precision = 80;
7664
7665 if (!p_text_buf)
7666 return;
7667
7668 pr_notice("\n%.*s", precision, p_text_buf);
7669 for (i = precision; i < text_size; i += precision)
7670 pr_cont("%.*s", precision, p_text_buf + i);
7671 pr_cont("\n");
7672 }
7673
7674 #define QED_RESULTS_BUF_MIN_SIZE 16
7675 /* Generic function for decoding debug feature info */
7676 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7677 enum qed_dbg_features feature_idx)
7678 {
7679 struct qed_dbg_feature *feature =
7680 &p_hwfn->cdev->dbg_params.features[feature_idx];
7681 u32 text_size_bytes, null_char_pos, i;
7682 enum dbg_status rc;
7683 char *text_buf;
7684
7685 /* Check if feature supports formatting capability */
7686 if (!qed_features_lookup[feature_idx].results_buf_size)
7687 return DBG_STATUS_OK;
7688
7689 /* Obtain size of formatted output */
7690 rc = qed_features_lookup[feature_idx].
7691 results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7692 feature->dumped_dwords, &text_size_bytes);
7693 if (rc != DBG_STATUS_OK)
7694 return rc;
7695
7696 /* Make sure that the allocated size is a multiple of dword (4 bytes) */
7697 null_char_pos = text_size_bytes - 1;
7698 text_size_bytes = (text_size_bytes + 3) & ~0x3;
7699
7700 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7701 DP_NOTICE(p_hwfn->cdev,
7702 "formatted size of feature was too small %d. Aborting\n",
7703 text_size_bytes);
7704 return DBG_STATUS_INVALID_ARGS;
7705 }
7706
7707 /* Allocate temp text buf */
7708 text_buf = vzalloc(text_size_bytes);
7709 if (!text_buf)
7710 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7711
7712 /* Decode feature opcodes to string on temp buf */
7713 rc = qed_features_lookup[feature_idx].
7714 print_results(p_hwfn, (u32 *)feature->dump_buf,
7715 feature->dumped_dwords, text_buf);
7716 if (rc != DBG_STATUS_OK) {
7717 vfree(text_buf);
7718 return rc;
7719 }
7720
7721 /* Replace the original null character with a '\n' character.
7722 * The bytes that were added as a result of the dword alignment are also
7723 * padded with '\n' characters.
7724 */
7725 for (i = null_char_pos; i < text_size_bytes; i++)
7726 text_buf[i] = '\n';
7727
7728 /* Dump printable feature to log */
7729 if (p_hwfn->cdev->dbg_params.print_data)
7730 qed_dbg_print_feature(text_buf, text_size_bytes);
7731
7732 /* Free the old dump_buf and point the dump_buf to the newly allocated
7733 * and formatted text buffer.
7734 */
7735 vfree(feature->dump_buf);
7736 feature->dump_buf = text_buf;
7737 feature->buf_size = text_size_bytes;
7738 feature->dumped_dwords = text_size_bytes / 4;
7739 return rc;
7740 }
7741
7742 /* Generic function for performing the dump of a debug feature. */
7743 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7744 struct qed_ptt *p_ptt,
7745 enum qed_dbg_features feature_idx)
7746 {
7747 struct qed_dbg_feature *feature =
7748 &p_hwfn->cdev->dbg_params.features[feature_idx];
7749 u32 buf_size_dwords;
7750 enum dbg_status rc;
7751
7752 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7753 qed_features_lookup[feature_idx].name);
7754
7755 /* If dump_buf was already allocated, free it (this can happen if a dump
7756 * was requested but the file was never read).
7757 * We can't reuse the buffer as is, since its size may have changed.
7758 */
7759 if (feature->dump_buf) {
7760 vfree(feature->dump_buf);
7761 feature->dump_buf = NULL;
7762 }
7763
7764 /* Get buffer size from hsi, allocate accordingly, and perform the
7765 * dump.
7766 */
7767 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7768 &buf_size_dwords);
7769 if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7770 return rc;
7771 feature->buf_size = buf_size_dwords * sizeof(u32);
7772 feature->dump_buf = vmalloc(feature->buf_size);
7773 if (!feature->dump_buf)
7774 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7775
7776 rc = qed_features_lookup[feature_idx].
7777 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7778 feature->buf_size / sizeof(u32),
7779 &feature->dumped_dwords);
7780
7781 /* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7782 * In this case the buffer holds valid binary data, but we won't be able
7783 * to parse it (since parsing relies on data in NVRAM which is only
7784 * accessible when the MFW is responsive). Skip the formatting but return
7785 * success so that the binary data is provided.
7786 */
7787 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7788 return DBG_STATUS_OK;
7789
7790 if (rc != DBG_STATUS_OK)
7791 return rc;
7792
7793 /* Format output */
7794 rc = format_feature(p_hwfn, feature_idx);
7795 return rc;
7796 }
7797
7798 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7799 {
7800 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7801 }
7802
7803 int qed_dbg_grc_size(struct qed_dev *cdev)
7804 {
7805 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7806 }
7807
7808 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7809 {
7810 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7811 num_dumped_bytes);
7812 }
7813
7814 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7815 {
7816 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7817 }
7818
7819 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7820 {
7821 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7822 num_dumped_bytes);
7823 }
7824
7825 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7826 {
7827 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7828 }
7829
7830 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7831 {
7832 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7833 num_dumped_bytes);
7834 }
7835
7836 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7837 {
7838 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7839 }
7840
7841 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7842 enum qed_nvm_images image_id, u32 *length)
7843 {
7844 struct qed_nvm_image_att image_att;
7845 int rc;
7846
7847 *length = 0;
7848 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7849 if (rc)
7850 return rc;
7851
7852 *length = image_att.length;
7853
7854 return rc;
7855 }
7856
7857 static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
7858 u32 *num_dumped_bytes,
7859 enum qed_nvm_images image_id)
7860 {
7861 struct qed_hwfn *p_hwfn =
7862 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
7863 u32 len_rounded, i;
7864 __be32 val;
7865 int rc;
7866
7867 *num_dumped_bytes = 0;
7868 rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
7869 if (rc)
7870 return rc;
7871
7872 DP_NOTICE(p_hwfn->cdev,
7873 "Collecting a debug feature [\"nvram image %d\"]\n",
7874 image_id);
7875
7876 len_rounded = roundup(len_rounded, sizeof(u32));
7877 rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
7878 if (rc)
7879 return rc;
7880
7881 /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
7882 if (image_id != QED_NVM_IMAGE_NVM_META)
7883 for (i = 0; i < len_rounded; i += 4) {
7884 val = cpu_to_be32(*(u32 *)(buffer + i));
7885 *(u32 *)(buffer + i) = val;
7886 }
7887
7888 *num_dumped_bytes = len_rounded;
7889
7890 return rc;
7891 }
7892
7893 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7894 u32 *num_dumped_bytes)
7895 {
7896 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7897 num_dumped_bytes);
7898 }
7899
7900 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7901 {
7902 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7903 }
7904
7905 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7906 u32 *num_dumped_bytes)
7907 {
7908 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7909 num_dumped_bytes);
7910 }
7911
7912 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7913 {
7914 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7915 }
7916
7917 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7918 u32 *num_dumped_bytes)
7919 {
7920 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7921 num_dumped_bytes);
7922 }
7923
7924 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7925 {
7926 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7927 }
7928
7929 /* Defines the number of bytes allocated for recording the length of a
7930 * debugfs feature buffer.
7931 */
7932 #define REGDUMP_HEADER_SIZE sizeof(u32)
7933 #define REGDUMP_HEADER_FEATURE_SHIFT 24
7934 #define REGDUMP_HEADER_ENGINE_SHIFT 31
7935 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
7936 enum debug_print_features {
7937 OLD_MODE = 0,
7938 IDLE_CHK = 1,
7939 GRC_DUMP = 2,
7940 MCP_TRACE = 3,
7941 REG_FIFO = 4,
7942 PROTECTION_OVERRIDE = 5,
7943 IGU_FIFO = 6,
7944 PHY = 7,
7945 FW_ASSERTS = 8,
7946 NVM_CFG1 = 9,
7947 DEFAULT_CFG = 10,
7948 NVM_META = 11,
7949 };
7950
7951 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7952 int engine, u32 feature_size, u8 omit_engine)
7953 {
7954 /* Insert the engine, feature and omit-engine flag inside the header and
7955 * combine it with the feature size.
7956 */
7957 return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7958 (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7959 (engine << REGDUMP_HEADER_ENGINE_SHIFT);
7960 }
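/* Decoding sketch (illustrative only): a consumer of the regdump blob could
 * recover the fields packed above from each header dword. The masks are an
 * assumption of this example, derived from the shift definitions (feature
 * size is taken to occupy the low 24 bits):
 *
 *	u32 hdr = *(u32 *)((u8 *)buffer + offset);
 *	u32 feature_size = hdr & 0xffffff;
 *	u8 feature = (hdr >> REGDUMP_HEADER_FEATURE_SHIFT) & 0x3f;
 *	u8 omit_engine = (hdr >> REGDUMP_HEADER_OMIT_ENGINE_SHIFT) & 0x1;
 *	u8 engine = (hdr >> REGDUMP_HEADER_ENGINE_SHIFT) & 0x1;
 */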
7961
7962 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7963 {
7964 u8 cur_engine, omit_engine = 0, org_engine;
7965 u32 offset = 0, feature_size;
7966 int rc;
7967
7968 if (cdev->num_hwfns == 1)
7969 omit_engine = 1;
7970
7971 org_engine = qed_get_debug_engine(cdev);
7972 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7973 /* Collect idle_chk and GRC dump for each HW function */
7974 DP_VERBOSE(cdev, QED_MSG_DEBUG,
7975 "obtaining idle_chk and grcdump for current engine\n");
7976 qed_set_debug_engine(cdev, cur_engine);
7977
7978 /* First idle_chk */
7979 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7980 REGDUMP_HEADER_SIZE, &feature_size);
7981 if (!rc) {
7982 *(u32 *)((u8 *)buffer + offset) =
7983 qed_calc_regdump_header(IDLE_CHK, cur_engine,
7984 feature_size, omit_engine);
7985 offset += (feature_size + REGDUMP_HEADER_SIZE);
7986 } else {
7987 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7988 }
7989
7990 /* Second idle_chk */
7991 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7992 REGDUMP_HEADER_SIZE, &feature_size);
7993 if (!rc) {
7994 *(u32 *)((u8 *)buffer + offset) =
7995 qed_calc_regdump_header(IDLE_CHK, cur_engine,
7996 feature_size, omit_engine);
7997 offset += (feature_size + REGDUMP_HEADER_SIZE);
7998 } else {
7999 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8000 }
8001
8002 /* reg_fifo dump */
8003 rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
8004 REGDUMP_HEADER_SIZE, &feature_size);
8005 if (!rc) {
8006 *(u32 *)((u8 *)buffer + offset) =
8007 qed_calc_regdump_header(REG_FIFO, cur_engine,
8008 feature_size, omit_engine);
8009 offset += (feature_size + REGDUMP_HEADER_SIZE);
8010 } else {
8011 DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
8012 }
8013
8014 /* igu_fifo dump */
8015 rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
8016 REGDUMP_HEADER_SIZE, &feature_size);
8017 if (!rc) {
8018 *(u32 *)((u8 *)buffer + offset) =
8019 qed_calc_regdump_header(IGU_FIFO, cur_engine,
8020 feature_size, omit_engine);
8021 offset += (feature_size + REGDUMP_HEADER_SIZE);
8022 } else {
8023 DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
8024 }
8025
8026 /* protection_override dump */
8027 rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
8028 REGDUMP_HEADER_SIZE,
8029 &feature_size);
8030 if (!rc) {
8031 *(u32 *)((u8 *)buffer + offset) =
8032 qed_calc_regdump_header(PROTECTION_OVERRIDE,
8033 cur_engine,
8034 feature_size, omit_engine);
8035 offset += (feature_size + REGDUMP_HEADER_SIZE);
8036 } else {
8037 DP_ERR(cdev,
8038 "qed_dbg_protection_override failed. rc = %d\n",
8039 rc);
8040 }
8041
8042 /* fw_asserts dump */
8043 rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
8044 REGDUMP_HEADER_SIZE, &feature_size);
8045 if (!rc) {
8046 *(u32 *)((u8 *)buffer + offset) =
8047 qed_calc_regdump_header(FW_ASSERTS, cur_engine,
8048 feature_size, omit_engine);
8049 offset += (feature_size + REGDUMP_HEADER_SIZE);
8050 } else {
8051 DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
8052 rc);
8053 }
8054
8055 /* GRC dump - must be last because when the MCP is stuck it will
8056 * clutter the idle_chk, reg_fifo, ...
8057 */
8058 rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
8059 REGDUMP_HEADER_SIZE, &feature_size);
8060 if (!rc) {
8061 *(u32 *)((u8 *)buffer + offset) =
8062 qed_calc_regdump_header(GRC_DUMP, cur_engine,
8063 feature_size, omit_engine);
8064 offset += (feature_size + REGDUMP_HEADER_SIZE);
8065 } else {
8066 DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
8067 }
8068 }
8069
8070 qed_set_debug_engine(cdev, org_engine);
8071 /* mcp_trace */
8072 rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8073 REGDUMP_HEADER_SIZE, &feature_size);
8074 if (!rc) {
8075 *(u32 *)((u8 *)buffer + offset) =
8076 qed_calc_regdump_header(MCP_TRACE, cur_engine,
8077 feature_size, omit_engine);
8078 offset += (feature_size + REGDUMP_HEADER_SIZE);
8079 } else {
8080 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
8081 }
8082
8083 /* nvm cfg1 */
8084 rc = qed_dbg_nvm_image(cdev,
8085 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8086 &feature_size, QED_NVM_IMAGE_NVM_CFG1);
8087 if (!rc) {
8088 *(u32 *)((u8 *)buffer + offset) =
8089 qed_calc_regdump_header(NVM_CFG1, cur_engine,
8090 feature_size, omit_engine);
8091 offset += (feature_size + REGDUMP_HEADER_SIZE);
8092 } else if (rc != -ENOENT) {
8093 DP_ERR(cdev,
8094 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8095 QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
8096 }
8097
8098 /* nvm default */
8099 rc = qed_dbg_nvm_image(cdev,
8100 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8101 &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
8102 if (!rc) {
8103 *(u32 *)((u8 *)buffer + offset) =
8104 qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
8105 feature_size, omit_engine);
8106 offset += (feature_size + REGDUMP_HEADER_SIZE);
8107 } else if (rc != -ENOENT) {
8108 DP_ERR(cdev,
8109 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8110 QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
8111 rc);
8112 }
8113
8114 /* nvm meta */
8115 rc = qed_dbg_nvm_image(cdev,
8116 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8117 &feature_size, QED_NVM_IMAGE_NVM_META);
8118 if (!rc) {
8119 *(u32 *)((u8 *)buffer + offset) =
8120 qed_calc_regdump_header(NVM_META, cur_engine,
8121 feature_size, omit_engine);
8122 offset += (feature_size + REGDUMP_HEADER_SIZE);
8123 } else if (rc != -ENOENT) {
8124 DP_ERR(cdev,
8125 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8126 QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
8127 }
8128
8129 return 0;
8130 }
8131
8132 int qed_dbg_all_data_size(struct qed_dev *cdev)
8133 {
8134 struct qed_hwfn *p_hwfn =
8135 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8136 u32 regs_len = 0, image_len = 0;
8137 u8 cur_engine, org_engine;
8138
8139 org_engine = qed_get_debug_engine(cdev);
8140 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8141 /* Engine specific */
8142 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8143 "calculating idle_chk and grcdump register length for current engine\n");
8144 qed_set_debug_engine(cdev, cur_engine);
8145 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8146 REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8147 REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8148 REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8149 REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8150 REGDUMP_HEADER_SIZE +
8151 qed_dbg_protection_override_size(cdev) +
8152 REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8153 }
8154
8155 qed_set_debug_engine(cdev, org_engine);
8156
8157 /* Engine common */
8158 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8159 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8160 if (image_len)
8161 regs_len += REGDUMP_HEADER_SIZE + image_len;
8162 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8163 if (image_len)
8164 regs_len += REGDUMP_HEADER_SIZE + image_len;
8165 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8166 if (image_len)
8167 regs_len += REGDUMP_HEADER_SIZE + image_len;
8168
8169 return regs_len;
8170 }
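/* Usage sketch (illustrative only): a hypothetical debugfs/ethtool consumer
 * would pair the two functions above, sizing the buffer before collecting
 * the full dump. copy_to_user_or_file() is a made-up placeholder.
 *
 *	int len = qed_dbg_all_data_size(cdev);
 *	void *buf;
 *
 *	if (len <= 0)
 *		return;
 *	buf = vzalloc(len);
 *	if (!buf)
 *		return;
 *	if (!qed_dbg_all_data(cdev, buf))
 *		copy_to_user_or_file(buf, len);
 *	vfree(buf);
 */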
8171
8172 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8173 enum qed_dbg_features feature, u32 *num_dumped_bytes)
8174 {
8175 struct qed_hwfn *p_hwfn =
8176 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8177 struct qed_dbg_feature *qed_feature =
8178 &cdev->dbg_params.features[feature];
8179 enum dbg_status dbg_rc;
8180 struct qed_ptt *p_ptt;
8181 int rc = 0;
8182
8183 /* Acquire ptt */
8184 p_ptt = qed_ptt_acquire(p_hwfn);
8185 if (!p_ptt)
8186 return -EINVAL;
8187
8188 /* Get dump */
8189 dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8190 if (dbg_rc != DBG_STATUS_OK) {
8191 DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8192 qed_dbg_get_status_str(dbg_rc));
8193 *num_dumped_bytes = 0;
8194 rc = -EINVAL;
8195 goto out;
8196 }
8197
8198 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8199 "copying debugfs feature to external buffer\n");
8200 memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8201 *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
8202 4;
8203
8204 out:
8205 qed_ptt_release(p_hwfn, p_ptt);
8206 return rc;
8207 }
8208
8209 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8210 {
8211 struct qed_hwfn *p_hwfn =
8212 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8213 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8214 struct qed_dbg_feature *qed_feature =
8215 &cdev->dbg_params.features[feature];
8216 u32 buf_size_dwords;
8217 enum dbg_status rc;
8218
8219 if (!p_ptt)
8220 return -EINVAL;
8221
8222 rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8223 &buf_size_dwords);
8224 if (rc != DBG_STATUS_OK)
8225 buf_size_dwords = 0;
8226
8227 qed_ptt_release(p_hwfn, p_ptt);
8228 qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8229 return qed_feature->buf_size;
8230 }
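/* Usage sketch (illustrative only): collecting a single feature, e.g. the
 * MCP trace, follows the same size-then-dump pattern through the wrappers
 * above:
 *
 *	int len = qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
 *	u32 dumped_bytes;
 *	void *buf;
 *
 *	if (len <= 0)
 *		return;
 *	buf = vzalloc(len);
 *	if (buf &&
 *	    !qed_dbg_feature(cdev, buf, DBG_FEATURE_MCP_TRACE, &dumped_bytes))
 *		pr_info("mcp_trace: dumped %u bytes\n", dumped_bytes);
 *	vfree(buf);
 */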
8231
8232 u8 qed_get_debug_engine(struct qed_dev *cdev)
8233 {
8234 return cdev->dbg_params.engine_for_debug;
8235 }
8236
8237 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8238 {
8239 DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8240 engine_number);
8241 cdev->dbg_params.engine_for_debug = engine_number;
8242 }
8243
8244 void qed_dbg_pf_init(struct qed_dev *cdev)
8245 {
8246 const u8 *dbg_values;
8247
8248 /* Debug values are after init values.
8249 * The offset is the first dword of the file.
8250 */
8251 dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8252 qed_dbg_set_bin_ptr((u8 *)dbg_values);
8253 qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8254 }
8255
8256 void qed_dbg_pf_exit(struct qed_dev *cdev)
8257 {
8258 struct qed_dbg_feature *feature = NULL;
8259 enum qed_dbg_features feature_idx;
8260
8261 /* Debug features' buffers may be allocated if debug feature was used
8262 * but dump wasn't called.
8263 */
8264 for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8265 feature = &cdev->dbg_params.features[feature_idx];
8266 if (feature->dump_buf) {
8267 vfree(feature->dump_buf);
8268 feature->dump_buf = NULL;
8269 }
8270 }
8271 }
8272