1 /*
2 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
15 *
16 */
17
18 #include "t4_regs.h"
19 #include "cxgb4.h"
20 #include "cxgb4_cudbg.h"
21 #include "cudbg_zlib.h"
22
/* On-chip memory regions collected for CXGB4_ETH_DUMP_MEM.  Each entry
 * pairs a CUDBG_* entity code with its collection callback; the size of
 * each region is computed separately in cxgb4_get_entity_length().
 */
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
	{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
	{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
	{ CUDBG_MC0, cudbg_collect_mc0_meminfo },
	{ CUDBG_MC1, cudbg_collect_mc1_meminfo },
	{ CUDBG_HMA, cudbg_collect_hma_meminfo },
};
30
/* Hardware state entities collected for CXGB4_ETH_DUMP_HW, in the order
 * they are gathered into the dump buffer.  Each entry pairs a CUDBG_*
 * entity code with its collection callback.
 */
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
	{ CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
	{ CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
	{ CUDBG_REG_DUMP, cudbg_collect_reg_dump },
	{ CUDBG_CIM_LA, cudbg_collect_cim_la },
	{ CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la },
	{ CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg },
	{ CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
	{ CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
	{ CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
	{ CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
	{ CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
	{ CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
	{ CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
	{ CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
	{ CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
	{ CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
	{ CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
	{ CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
	{ CUDBG_RSS, cudbg_collect_rss },
	{ CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config },
	{ CUDBG_PATH_MTU, cudbg_collect_path_mtu },
	{ CUDBG_PM_STATS, cudbg_collect_pm_stats },
	{ CUDBG_HW_SCHED, cudbg_collect_hw_sched },
	{ CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
	{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
	{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
	{ CUDBG_TP_LA, cudbg_collect_tp_la },
	{ CUDBG_MEMINFO, cudbg_collect_meminfo },
	{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
	{ CUDBG_CLK, cudbg_collect_clk_info },
	{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
	{ CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
	{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
	{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
	{ CUDBG_TID_INFO, cudbg_collect_tid },
	{ CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
	{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
	{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
	{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
	{ CUDBG_LE_TCAM, cudbg_collect_le_tcam },
	{ CUDBG_CCTRL, cudbg_collect_cctrl },
	{ CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
	{ CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
	{ CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
	{ CUDBG_PBT_TABLE, cudbg_collect_pbt_tables },
	{ CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
};
79
/* cxgb4_get_entity_length - size in bytes needed to dump one debug entity.
 * @adap: the adapter whose state is being dumped
 * @entity: CUDBG_* entity code
 *
 * Returns the buffer space required for @entity, or 0 for entities that
 * are unknown or not present on this chip revision.  For memory regions
 * (EDC/MC/HMA) the size is read from the MA BAR registers and is 0 when
 * the corresponding enable bit in MA_TARGET_MEM_ENABLE_A is clear.
 */
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		/* Register map size differs by chip generation; T6 shares
		 * the T5 map size.
		 */
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		/* CIM LA entries are read in groups: 10 dwords per entry on
		 * T6, 8 dwords on earlier chips.  Round the configured size
		 * to whole groups.
		 */
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		/* All inbound queues have the same fixed size. */
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	case CUDBG_EDC0:
		/* BAR registers report memory sizes in MB. */
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		/* Total bytes of all per-chip TP indirect register arrays,
		 * converted to a count of IREG_NUM_ELEM-dword register
		 * groups, each dumped as one struct ireg_buf.
		 */
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		/* MA indirect registers exist only on T6 and later. */
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ver_hdr) +
		      sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		/* UP CIM arrays carry one extra dword per register group. */
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1. So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	default:
		break;
	}

	return len;
}
320
/* cxgb4_get_dump_length - total buffer size needed for the requested dump.
 * @adap: the adapter
 * @flag: CXGB4_ETH_DUMP_HW / CXGB4_ETH_DUMP_MEM selection mask
 *
 * Sums the per-entity sizes for every selected section.  When zlib
 * compression is available, the destination buffer is capped at
 * CUDBG_DUMP_BUFF_SIZE since a smaller buffer then suffices.
 */
u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
	u32 total = 0;
	u32 workspace;
	u32 idx;

	if (flag & CXGB4_ETH_DUMP_HW) {
		for (idx = 0; idx < ARRAY_SIZE(cxgb4_collect_hw_dump); idx++)
			total += cxgb4_get_entity_length(adap,
					cxgb4_collect_hw_dump[idx].entity);
	}

	if (flag & CXGB4_ETH_DUMP_MEM) {
		for (idx = 0; idx < ARRAY_SIZE(cxgb4_collect_mem_dump); idx++)
			total += cxgb4_get_entity_length(adap,
					cxgb4_collect_mem_dump[idx].entity);
	}

	/* If compression is enabled, a smaller destination buffer is enough */
	workspace = cudbg_get_workspace_size();
	if (workspace && total > CUDBG_DUMP_BUFF_SIZE)
		total = CUDBG_DUMP_BUFF_SIZE;

	return total;
}
348
/* cxgb4_cudbg_collect_entity - run a table of collection callbacks.
 * @pdbg_init: cudbg collection context
 * @dbg_buff: destination buffer whose offset advances per entity
 * @e_arr: table of entity/callback pairs to execute
 * @arr_size: number of entries in @e_arr
 * @buf: dump buffer holding the entity header area
 * @tot_size: accumulator; incremented by the bytes collected here
 *
 * Each entity gets its header filled in even on failure; a failed
 * collection records a zero size and releases the space it claimed,
 * and collection always continues with the next entity.
 */
static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
				       struct cudbg_buffer *dbg_buff,
				       const struct cxgb4_collect_entity *e_arr,
				       u32 arr_size, void *buf, u32 *tot_size)
{
	struct cudbg_error cudbg_err = { 0 };
	struct cudbg_entity_hdr *entity_hdr;
	u32 idx, collected = 0;
	int rc;

	for (idx = 0; idx < arr_size; idx++) {
		entity_hdr = cudbg_get_entity_hdr(buf, e_arr[idx].entity);
		entity_hdr->entity_type = e_arr[idx].entity;
		entity_hdr->start_offset = dbg_buff->offset;
		memset(&cudbg_err, 0, sizeof(struct cudbg_error));
		rc = e_arr[idx].collect_cb(pdbg_init, dbg_buff, &cudbg_err);
		if (rc) {
			/* Reclaim the space this entity would have used. */
			entity_hdr->size = 0;
			dbg_buff->offset = entity_hdr->start_offset;
		} else {
			cudbg_align_debug_buffer(dbg_buff, entity_hdr);
		}

		/* Log error and continue with next entity */
		if (cudbg_err.sys_err)
			rc = CUDBG_SYSTEM_ERROR;

		entity_hdr->hdr_flags = rc;
		entity_hdr->sys_err = cudbg_err.sys_err;
		entity_hdr->sys_warn = cudbg_err.sys_warn;
		collected += entity_hdr->size;
	}

	*tot_size += collected;
}
386
/* Allocate the intermediate compression buffer and the zlib workspace
 * as a single vzalloc() region.  Returns 0 on success, -ENOMEM on
 * allocation failure.
 */
static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
{
	u32 workspace_size;

	workspace_size = cudbg_get_workspace_size();
	pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
					   workspace_size);
	if (!pdbg_init->compress_buff)
		return -ENOMEM;

	pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
	/* NOTE(review): the workspace is placed workspace_size bytes
	 * *before* the end of the compress buffer, overlapping its tail,
	 * even though the allocation above reserves an extra
	 * workspace_size bytes past CUDBG_COMPRESS_BUFF_SIZE — confirm
	 * this placement is intentional.
	 */
	pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
			       CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
	return 0;
}
402
cudbg_free_compress_buff(struct cudbg_init * pdbg_init)403 static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
404 {
405 if (pdbg_init->compress_buff)
406 vfree(pdbg_init->compress_buff);
407 }
408
/* cxgb4_cudbg_collect - collect a cudbg debug dump into @buf.
 * @adap: the adapter to dump
 * @buf: destination buffer
 * @buf_size: in: size of @buf; out: number of valid bytes produced
 * @flag: CXGB4_ETH_DUMP_HW / CXGB4_ETH_DUMP_MEM selection mask
 *
 * Lays out a cudbg header followed by one entity header per possible
 * entity, then runs the selected collection tables.  Uses zlib
 * compression when a workspace is available, silently falling back to
 * uncompressed collection if the compression buffer cannot be
 * allocated.  Returns 0 on success, -ENOMEM if @buf cannot hold the
 * header area.
 */
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
			u32 flag)
{
	struct cudbg_buffer dbg_buff = { 0 };
	u32 size, min_size, total_size = 0;
	struct cudbg_init cudbg_init;
	struct cudbg_hdr *cudbg_hdr;
	int rc;

	size = *buf_size;

	memset(&cudbg_init, 0, sizeof(struct cudbg_init));
	cudbg_init.adap = adap;
	cudbg_init.outbuf = buf;
	cudbg_init.outbuf_size = size;

	dbg_buff.data = buf;
	dbg_buff.size = size;
	dbg_buff.offset = 0;

	/* NOTE(review): the header fields below are written before the
	 * min_size check; assumes every caller provides at least
	 * sizeof(struct cudbg_hdr) bytes — confirm against callers.
	 */
	cudbg_hdr = (struct cudbg_hdr *)buf;
	cudbg_hdr->signature = CUDBG_SIGNATURE;
	cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
	cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
	cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
	cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
	cudbg_hdr->chip_ver = adap->params.chip;
	cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;

	/* The buffer must at least hold the cudbg header and the full
	 * table of entity headers.
	 */
	min_size = sizeof(struct cudbg_hdr) +
		   sizeof(struct cudbg_entity_hdr) *
		   cudbg_hdr->max_entities;
	if (size < min_size)
		return -ENOMEM;

	rc = cudbg_get_workspace_size();
	if (rc) {
		/* Zlib available. So, use zlib deflate */
		cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
		rc = cudbg_alloc_compress_buff(&cudbg_init);
		if (rc) {
			/* Ignore error and continue without compression. */
			dev_warn(adap->pdev_dev,
				 "Fail allocating compression buffer ret: %d. Continuing without compression.\n",
				 rc);
			cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
			rc = 0;
		}
	} else {
		cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
	}

	cudbg_hdr->compress_type = cudbg_init.compress_type;
	/* Entity data starts right after the header area. */
	dbg_buff.offset += min_size;
	total_size = dbg_buff.offset;

	if (flag & CXGB4_ETH_DUMP_HW)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_hw_dump,
					   ARRAY_SIZE(cxgb4_collect_hw_dump),
					   buf,
					   &total_size);

	if (flag & CXGB4_ETH_DUMP_MEM)
		cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
					   cxgb4_collect_mem_dump,
					   ARRAY_SIZE(cxgb4_collect_mem_dump),
					   buf,
					   &total_size);

	cudbg_free_compress_buff(&cudbg_init);
	cudbg_hdr->data_len = total_size;
	/* With compression, compressed data may occupy space beyond
	 * data_len in the output buffer, so report the whole buffer.
	 */
	if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
		*buf_size = size;
	else
		*buf_size = total_size;
	return 0;
}
487
cxgb4_init_ethtool_dump(struct adapter * adapter)488 void cxgb4_init_ethtool_dump(struct adapter *adapter)
489 {
490 adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
491 adapter->eth_dump.version = adapter->params.fw_vers;
492 adapter->eth_dump.len = 0;
493 }
494
cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data * data,void * buf)495 static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf)
496 {
497 struct adapter *adap = container_of(data, struct adapter, vmcoredd);
498 u32 len = data->size;
499
500 return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL);
501 }
502
cxgb4_cudbg_vmcore_add_dump(struct adapter * adap)503 int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap)
504 {
505 struct vmcoredd_data *data = &adap->vmcoredd;
506 u32 len;
507
508 len = sizeof(struct cudbg_hdr) +
509 sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
510 len += CUDBG_DUMP_BUFF_SIZE;
511
512 data->size = len;
513 snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s",
514 cxgb4_driver_name, adap->name);
515 data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect;
516
517 return vmcore_add_device_dump(data);
518 }
519