1 // SPDX-License-Identifier: GPL-2.0+
2 /* Copyright (c) 2018-2019 Hisilicon Limited. */
3
4 #include <linux/device.h>
5
6 #include "hclge_debugfs.h"
7 #include "hclge_err.h"
8 #include "hclge_main.h"
9 #include "hclge_tm.h"
10 #include "hnae3.h"
11
12 static const char * const state_str[] = { "off", "on" };
13 static const char * const hclge_mac_state_str[] = {
14 "TO_ADD", "TO_DEL", "ACTIVE"
15 };
16
17 static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
18
19 static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
20 { .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
21 .dfx_msg = &hclge_dbg_bios_common_reg[0],
22 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
23 .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
24 .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
25 { .cmd = HNAE3_DBG_CMD_REG_SSU,
26 .dfx_msg = &hclge_dbg_ssu_reg_0[0],
27 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
28 .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
29 .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
30 { .cmd = HNAE3_DBG_CMD_REG_SSU,
31 .dfx_msg = &hclge_dbg_ssu_reg_1[0],
32 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
33 .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
34 .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
35 { .cmd = HNAE3_DBG_CMD_REG_SSU,
36 .dfx_msg = &hclge_dbg_ssu_reg_2[0],
37 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
38 .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
39 .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
40 { .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
41 .dfx_msg = &hclge_dbg_igu_egu_reg[0],
42 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
43 .offset = HCLGE_DBG_DFX_IGU_OFFSET,
44 .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
45 { .cmd = HNAE3_DBG_CMD_REG_RPU,
46 .dfx_msg = &hclge_dbg_rpu_reg_0[0],
47 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
48 .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
49 .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
50 { .cmd = HNAE3_DBG_CMD_REG_RPU,
51 .dfx_msg = &hclge_dbg_rpu_reg_1[0],
52 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
53 .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
54 .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
55 { .cmd = HNAE3_DBG_CMD_REG_NCSI,
56 .dfx_msg = &hclge_dbg_ncsi_reg[0],
57 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
58 .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
59 .cmd = HCLGE_OPC_DFX_NCSI_REG } },
60 { .cmd = HNAE3_DBG_CMD_REG_RTC,
61 .dfx_msg = &hclge_dbg_rtc_reg[0],
62 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
63 .offset = HCLGE_DBG_DFX_RTC_OFFSET,
64 .cmd = HCLGE_OPC_DFX_RTC_REG } },
65 { .cmd = HNAE3_DBG_CMD_REG_PPP,
66 .dfx_msg = &hclge_dbg_ppp_reg[0],
67 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
68 .offset = HCLGE_DBG_DFX_PPP_OFFSET,
69 .cmd = HCLGE_OPC_DFX_PPP_REG } },
70 { .cmd = HNAE3_DBG_CMD_REG_RCB,
71 .dfx_msg = &hclge_dbg_rcb_reg[0],
72 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
73 .offset = HCLGE_DBG_DFX_RCB_OFFSET,
74 .cmd = HCLGE_OPC_DFX_RCB_REG } },
75 { .cmd = HNAE3_DBG_CMD_REG_TQP,
76 .dfx_msg = &hclge_dbg_tqp_reg[0],
77 .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
78 .offset = HCLGE_DBG_DFX_TQP_OFFSET,
79 .cmd = HCLGE_OPC_DFX_TQP_REG } },
80 };
81
82 /* make sure: len(name) + interval >= maxlen(item data) + 2,
83  * for example, name = "pkt_num" (len: 7), the item data is a u32 printed
84  * with "%u" (maxlen: 10), so the interval should be at least 5.
85  */
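/* Build one space-padded row: column names from @items when @result is NULL,
 * otherwise the strings in @result, terminated with '\n' and '\0'.
 */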
86 static void hclge_dbg_fill_content(char *content, u16 len,
87 const struct hclge_dbg_item *items,
88 const char **result, u16 size)
89 {
90 char *pos = content;
91 u16 i;
92
93 memset(content, ' ', len);
94 for (i = 0; i < size; i++) {
95 if (result)
96 strncpy(pos, result[i], strlen(result[i]));
97 else
98 strncpy(pos, items[i].name, strlen(items[i].name));
99 pos += strlen(items[i].name) + items[i].interval;
100 }
101 *pos++ = '\n';
102 *pos++ = '\0';
103 }
104
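/* Format a function id for display: id 0 is the PF, id N (N > 0) is vf(N - 1). */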
105 static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
106 {
107 if (id)
108 sprintf(buf, "vf%u", id - 1U);
109 else
110 sprintf(buf, "pf");
111
112 return buf;
113 }
114
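/* Query how many command buffer descriptors (BDs) are needed to read the DFX
 * register block selected by @offset; the per-block BD counts are returned
 * packed across HCLGE_GET_DFX_REG_TYPE_CNT descriptors.
 */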
115 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
116 u32 *bd_num)
117 {
118 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
119 int entries_per_desc;
120 int index;
121 int ret;
122
123 ret = hclge_query_bd_num_cmd_send(hdev, desc);
124 if (ret) {
125 dev_err(&hdev->pdev->dev,
126 "failed to get dfx bd_num, offset = %d, ret = %d\n",
127 offset, ret);
128 return ret;
129 }
130
131 entries_per_desc = ARRAY_SIZE(desc[0].data);
132 index = offset % entries_per_desc;
133
134 *bd_num = le32_to_cpu(desc[offset / entries_per_desc].data[index]);
135 if (!(*bd_num)) {
136 dev_err(&hdev->pdev->dev, "The value of dfx bd_num is 0!\n");
137 return -EINVAL;
138 }
139
140 return 0;
141 }
142
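/* Chain @bd_num descriptors with the NEXT flag and send them as one query;
 * data[0] of the first descriptor carries the index being queried.
 */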
143 static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
144 struct hclge_desc *desc_src,
145 int index, int bd_num,
146 enum hclge_opcode_type cmd)
147 {
148 struct hclge_desc *desc = desc_src;
149 int ret, i;
150
151 hclge_cmd_setup_basic_desc(desc, cmd, true);
152 desc->data[0] = cpu_to_le32(index);
153
154 for (i = 1; i < bd_num; i++) {
155 desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
156 desc++;
157 hclge_cmd_setup_basic_desc(desc, cmd, true);
158 }
159
160 ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
161 if (ret)
162 dev_err(&hdev->pdev->dev,
163 "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
164 return ret;
165 }
166
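/* Dump per-TQP DFX registers: print a legend mapping each "item%u" to its
 * register name, then one row of register values per allocated TQP.
 */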
167 static int
168 hclge_dbg_dump_reg_tqp(struct hclge_dev *hdev,
169 const struct hclge_dbg_reg_type_info *reg_info,
170 char *buf, int len, int *pos)
171 {
172 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
173 const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
174 struct hclge_desc *desc_src;
175 u32 index, entry, i, cnt;
176 int bd_num, min_num, ret;
177 struct hclge_desc *desc;
178
179 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
180 if (ret)
181 return ret;
182
183 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
184 if (!desc_src)
185 return -ENOMEM;
186
187 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
188
189 for (i = 0, cnt = 0; i < min_num; i++, dfx_message++)
190 *pos += scnprintf(buf + *pos, len - *pos, "item%u = %s\n",
191 cnt++, dfx_message->message);
192
193 for (i = 0; i < cnt; i++)
194 *pos += scnprintf(buf + *pos, len - *pos, "item%u\t", i);
195
196 *pos += scnprintf(buf + *pos, len - *pos, "\n");
197
198 for (index = 0; index < hdev->vport[0].alloc_tqps; index++) {
199 dfx_message = reg_info->dfx_msg;
200 desc = desc_src;
201 ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num,
202 reg_msg->cmd);
203 if (ret)
204 break;
205
206 for (i = 0; i < min_num; i++, dfx_message++) {
207 entry = i % HCLGE_DESC_DATA_LEN;
208 if (i > 0 && !entry)
209 desc++;
210
211 *pos += scnprintf(buf + *pos, len - *pos, "%#x\t",
212 le32_to_cpu(desc->data[entry]));
213 }
214 *pos += scnprintf(buf + *pos, len - *pos, "\n");
215 }
216
217 kfree(desc_src);
218 return ret;
219 }
220
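/* Dump a common (non-TQP) DFX register block: one "name: value" line for each
 * item whose flag is set in the DFX message table.
 */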
221 static int
222 hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
223 const struct hclge_dbg_reg_type_info *reg_info,
224 char *buf, int len, int *pos)
225 {
226 const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
227 const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
228 struct hclge_desc *desc_src;
229 int bd_num, min_num, ret;
230 struct hclge_desc *desc;
231 u32 entry, i;
232
233 ret = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset, &bd_num);
234 if (ret)
235 return ret;
236
237 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
238 if (!desc_src)
239 return -ENOMEM;
240
241 desc = desc_src;
242
243 ret = hclge_dbg_cmd_send(hdev, desc, 0, bd_num, reg_msg->cmd);
244 if (ret) {
245 kfree(desc);
246 return ret;
247 }
248
249 min_num = min_t(int, bd_num * HCLGE_DESC_DATA_LEN, reg_msg->msg_num);
250
251 for (i = 0; i < min_num; i++, dfx_message++) {
252 entry = i % HCLGE_DESC_DATA_LEN;
253 if (i > 0 && !entry)
254 desc++;
255 if (!dfx_message->flag)
256 continue;
257
258 *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
259 dfx_message->message,
260 le32_to_cpu(desc->data[entry]));
261 }
262
263 kfree(desc_src);
264 return 0;
265 }
266
267 static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
268 {HCLGE_MAC_TX_EN_B, "mac_trans_en"},
269 {HCLGE_MAC_RX_EN_B, "mac_rcv_en"},
270 {HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
271 {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
272 {HCLGE_MAC_1588_TX_B, "1588_trans_en"},
273 {HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
274 {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"},
275 {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
276 {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"},
277 {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
278 {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
279 {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
280 {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
281 {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
282 };
283
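/* Read HCLGE_OPC_CONFIG_MAC_MODE and print every enable bit listed in
 * hclge_dbg_mac_en_status[].
 */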
284 static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
285 int len, int *pos)
286 {
287 struct hclge_config_mac_mode_cmd *req;
288 struct hclge_desc desc;
289 u32 loop_en, i, offset;
290 int ret;
291
292 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
293
294 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
295 if (ret) {
296 dev_err(&hdev->pdev->dev,
297 "failed to dump mac enable status, ret = %d\n", ret);
298 return ret;
299 }
300
301 req = (struct hclge_config_mac_mode_cmd *)desc.data;
302 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
303
304 for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
305 offset = hclge_dbg_mac_en_status[i].offset;
306 *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
307 hclge_dbg_mac_en_status[i].message,
308 hnae3_get_bit(loop_en, offset));
309 }
310
311 return 0;
312 }
313
314 static int hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev, char *buf,
315 int len, int *pos)
316 {
317 struct hclge_config_max_frm_size_cmd *req;
318 struct hclge_desc desc;
319 int ret;
320
321 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
322
323 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
324 if (ret) {
325 dev_err(&hdev->pdev->dev,
326 "failed to dump mac frame size, ret = %d\n", ret);
327 return ret;
328 }
329
330 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
331
332 *pos += scnprintf(buf + *pos, len - *pos, "max_frame_size: %u\n",
333 le16_to_cpu(req->max_frm_size));
334 *pos += scnprintf(buf + *pos, len - *pos, "min_frame_size: %u\n",
335 req->min_frm_size);
336
337 return 0;
338 }
339
340 static int hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev, char *buf,
341 int len, int *pos)
342 {
343 #define HCLGE_MAC_SPEED_SHIFT 0
344 #define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
345 #define HCLGE_MAC_DUPLEX_SHIFT 7
346
347 struct hclge_config_mac_speed_dup_cmd *req;
348 struct hclge_desc desc;
349 int ret;
350
351 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
352
353 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
354 if (ret) {
355 dev_err(&hdev->pdev->dev,
356 "failed to dump mac speed duplex, ret = %d\n", ret);
357 return ret;
358 }
359
360 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
361
362 *pos += scnprintf(buf + *pos, len - *pos, "speed: %#lx\n",
363 hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
364 HCLGE_MAC_SPEED_SHIFT));
365 *pos += scnprintf(buf + *pos, len - *pos, "duplex: %#x\n",
366 hnae3_get_bit(req->speed_dup,
367 HCLGE_MAC_DUPLEX_SHIFT));
368 return 0;
369 }
370
371 static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
372 {
373 int pos = 0;
374 int ret;
375
376 ret = hclge_dbg_dump_mac_enable_status(hdev, buf, len, &pos);
377 if (ret)
378 return ret;
379
380 ret = hclge_dbg_dump_mac_frame_size(hdev, buf, len, &pos);
381 if (ret)
382 return ret;
383
384 return hclge_dbg_dump_mac_speed_duplex(hdev, buf, len, &pos);
385 }
386
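/* Dump per-qset DFX status: bit0..bit3 of the returned bitmap are printed as
 * the roce_qset_mask, nic_qset_mask, qset_shaping_pass and qset_bp_status
 * columns.
 */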
387 static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
388 int *pos)
389 {
390 struct hclge_dbg_bitmap_cmd req;
391 struct hclge_desc desc;
392 u16 qset_id, qset_num;
393 int ret;
394
395 ret = hclge_tm_get_qset_num(hdev, &qset_num);
396 if (ret)
397 return ret;
398
399 *pos += scnprintf(buf + *pos, len - *pos,
400 "qset_id roce_qset_mask nic_qset_mask qset_shaping_pass qset_bp_status\n");
401 for (qset_id = 0; qset_id < qset_num; qset_id++) {
402 ret = hclge_dbg_cmd_send(hdev, &desc, qset_id, 1,
403 HCLGE_OPC_QSET_DFX_STS);
404 if (ret)
405 return ret;
406
407 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
408
409 *pos += scnprintf(buf + *pos, len - *pos,
410 "%04u %#x %#x %#x %#x\n",
411 qset_id, req.bit0, req.bit1, req.bit2,
412 req.bit3);
413 }
414
415 return 0;
416 }
417
418 static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
419 int *pos)
420 {
421 struct hclge_dbg_bitmap_cmd req;
422 struct hclge_desc desc;
423 u8 pri_id, pri_num;
424 int ret;
425
426 ret = hclge_tm_get_pri_num(hdev, &pri_num);
427 if (ret)
428 return ret;
429
430 *pos += scnprintf(buf + *pos, len - *pos,
431 "pri_id pri_mask pri_cshaping_pass pri_pshaping_pass\n");
432 for (pri_id = 0; pri_id < pri_num; pri_id++) {
433 ret = hclge_dbg_cmd_send(hdev, &desc, pri_id, 1,
434 HCLGE_OPC_PRI_DFX_STS);
435 if (ret)
436 return ret;
437
438 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
439
440 *pos += scnprintf(buf + *pos, len - *pos,
441 "%03u %#x %#x %#x\n",
442 pri_id, req.bit0, req.bit1, req.bit2);
443 }
444
445 return 0;
446 }
447
448 static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
449 int *pos)
450 {
451 struct hclge_dbg_bitmap_cmd req;
452 struct hclge_desc desc;
453 u8 pg_id;
454 int ret;
455
456 *pos += scnprintf(buf + *pos, len - *pos,
457 "pg_id pg_mask pg_cshaping_pass pg_pshaping_pass\n");
458 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
459 ret = hclge_dbg_cmd_send(hdev, &desc, pg_id, 1,
460 HCLGE_OPC_PG_DFX_STS);
461 if (ret)
462 return ret;
463
464 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
465
466 *pos += scnprintf(buf + *pos, len - *pos,
467 "%03u %#x %#x %#x\n",
468 pg_id, req.bit0, req.bit1, req.bit2);
469 }
470
471 return 0;
472 }
473
474 static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
475 int *pos)
476 {
477 struct hclge_desc desc;
478 u16 nq_id;
479 int ret;
480
481 *pos += scnprintf(buf + *pos, len - *pos,
482 "nq_id sch_nic_queue_cnt sch_roce_queue_cnt\n");
483 for (nq_id = 0; nq_id < hdev->num_tqps; nq_id++) {
484 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
485 HCLGE_OPC_SCH_NQ_CNT);
486 if (ret)
487 return ret;
488
489 *pos += scnprintf(buf + *pos, len - *pos, "%04u %#x",
490 nq_id, le32_to_cpu(desc.data[1]));
491
492 ret = hclge_dbg_cmd_send(hdev, &desc, nq_id, 1,
493 HCLGE_OPC_SCH_RQ_CNT);
494 if (ret)
495 return ret;
496
497 *pos += scnprintf(buf + *pos, len - *pos,
498 " %#x\n",
499 le32_to_cpu(desc.data[1]));
500 }
501
502 return 0;
503 }
504
505 static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
506 int *pos)
507 {
508 struct hclge_dbg_bitmap_cmd req;
509 struct hclge_desc desc;
510 u8 port_id = 0;
511 int ret;
512
513 ret = hclge_dbg_cmd_send(hdev, &desc, port_id, 1,
514 HCLGE_OPC_PORT_DFX_STS);
515 if (ret)
516 return ret;
517
518 req.bitmap = (u8)le32_to_cpu(desc.data[1]);
519
520 *pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
521 req.bit0);
522 *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
523 req.bit1);
524
525 return 0;
526 }
527
528 static int hclge_dbg_dump_dcb_tm(struct hclge_dev *hdev, char *buf, int len,
529 int *pos)
530 {
531 struct hclge_desc desc[2];
532 u8 port_id = 0;
533 int ret;
534
535 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
536 HCLGE_OPC_TM_INTERNAL_CNT);
537 if (ret)
538 return ret;
539
540 *pos += scnprintf(buf + *pos, len - *pos, "SCH_NIC_NUM: %#x\n",
541 le32_to_cpu(desc[0].data[1]));
542 *pos += scnprintf(buf + *pos, len - *pos, "SCH_ROCE_NUM: %#x\n",
543 le32_to_cpu(desc[0].data[2]));
544
545 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 2,
546 HCLGE_OPC_TM_INTERNAL_STS);
547 if (ret)
548 return ret;
549
550 *pos += scnprintf(buf + *pos, len - *pos, "pri_bp: %#x\n",
551 le32_to_cpu(desc[0].data[1]));
552 *pos += scnprintf(buf + *pos, len - *pos, "fifo_dfx_info: %#x\n",
553 le32_to_cpu(desc[0].data[2]));
554 *pos += scnprintf(buf + *pos, len - *pos,
555 "sch_roce_fifo_afull_gap: %#x\n",
556 le32_to_cpu(desc[0].data[3]));
557 *pos += scnprintf(buf + *pos, len - *pos,
558 "tx_private_waterline: %#x\n",
559 le32_to_cpu(desc[0].data[4]));
560 *pos += scnprintf(buf + *pos, len - *pos, "tm_bypass_en: %#x\n",
561 le32_to_cpu(desc[0].data[5]));
562 *pos += scnprintf(buf + *pos, len - *pos, "SSU_TM_BYPASS_EN: %#x\n",
563 le32_to_cpu(desc[1].data[0]));
564 *pos += scnprintf(buf + *pos, len - *pos, "SSU_RESERVE_CFG: %#x\n",
565 le32_to_cpu(desc[1].data[1]));
566
567 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER)
568 return 0;
569
570 ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
571 HCLGE_OPC_TM_INTERNAL_STS_1);
572 if (ret)
573 return ret;
574
575 *pos += scnprintf(buf + *pos, len - *pos, "TC_MAP_SEL: %#x\n",
576 le32_to_cpu(desc[0].data[1]));
577 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PFC_PRI_EN: %#x\n",
578 le32_to_cpu(desc[0].data[2]));
579 *pos += scnprintf(buf + *pos, len - *pos, "MAC_PFC_PRI_EN: %#x\n",
580 le32_to_cpu(desc[0].data[3]));
581 *pos += scnprintf(buf + *pos, len - *pos, "IGU_PRI_MAP_TC_CFG: %#x\n",
582 le32_to_cpu(desc[0].data[4]));
583 *pos += scnprintf(buf + *pos, len - *pos,
584 "IGU_TX_PRI_MAP_TC_CFG: %#x\n",
585 le32_to_cpu(desc[0].data[5]));
586
587 return 0;
588 }
589
590 static int hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *buf, int len)
591 {
592 int pos = 0;
593 int ret;
594
595 ret = hclge_dbg_dump_dcb_qset(hdev, buf, len, &pos);
596 if (ret)
597 return ret;
598
599 ret = hclge_dbg_dump_dcb_pri(hdev, buf, len, &pos);
600 if (ret)
601 return ret;
602
603 ret = hclge_dbg_dump_dcb_pg(hdev, buf, len, &pos);
604 if (ret)
605 return ret;
606
607 ret = hclge_dbg_dump_dcb_queue(hdev, buf, len, &pos);
608 if (ret)
609 return ret;
610
611 ret = hclge_dbg_dump_dcb_port(hdev, buf, len, &pos);
612 if (ret)
613 return ret;
614
615 return hclge_dbg_dump_dcb_tm(hdev, buf, len, &pos);
616 }
617
618 static int hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev,
619 enum hnae3_dbg_cmd cmd, char *buf, int len)
620 {
621 const struct hclge_dbg_reg_type_info *reg_info;
622 int pos = 0, ret = 0;
623 int i;
624
625 for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
626 reg_info = &hclge_dbg_reg_info[i];
627 if (cmd == reg_info->cmd) {
628 if (cmd == HNAE3_DBG_CMD_REG_TQP)
629 return hclge_dbg_dump_reg_tqp(hdev, reg_info,
630 buf, len, &pos);
631
632 ret = hclge_dbg_dump_reg_common(hdev, reg_info, buf,
633 len, &pos);
634 if (ret)
635 break;
636 }
637 }
638
639 return ret;
640 }
641
642 static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len)
643 {
644 struct hclge_ets_tc_weight_cmd *ets_weight;
645 struct hclge_desc desc;
646 char *sch_mode_str;
647 int pos = 0;
648 int ret;
649 u8 i;
650
651 if (!hnae3_dev_dcb_supported(hdev)) {
652 dev_err(&hdev->pdev->dev,
653 "Only DCB-supported dev supports tc\n");
654 return -EOPNOTSUPP;
655 }
656
657 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
658 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
659 if (ret) {
660 dev_err(&hdev->pdev->dev, "failed to get tc weight, ret = %d\n",
661 ret);
662 return ret;
663 }
664
665 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
666
667 pos += scnprintf(buf + pos, len - pos, "enabled tc number: %u\n",
668 hdev->tm_info.num_tc);
669 pos += scnprintf(buf + pos, len - pos, "weight_offset: %u\n",
670 ets_weight->weight_offset);
671
672 pos += scnprintf(buf + pos, len - pos, "TC MODE WEIGHT\n");
673 for (i = 0; i < HNAE3_MAX_TC; i++) {
674 sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp";
675 pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n",
676 i, sch_mode_str,
677 hdev->tm_info.pg_info[0].tc_dwrr[i]);
678 }
679
680 return 0;
681 }
682
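/* Column headers for the tm_pg dump; the second field is the number of
 * trailing spaces hclge_dbg_fill_content() leaves after each column.
 */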
683 static const struct hclge_dbg_item tm_pg_items[] = {
684 { "ID", 2 },
685 { "PRI_MAP", 2 },
686 { "MODE", 2 },
687 { "DWRR", 2 },
688 { "C_IR_B", 2 },
689 { "C_IR_U", 2 },
690 { "C_IR_S", 2 },
691 { "C_BS_B", 2 },
692 { "C_BS_S", 2 },
693 { "C_FLAG", 2 },
694 { "C_RATE(Mbps)", 2 },
695 { "P_IR_B", 2 },
696 { "P_IR_U", 2 },
697 { "P_IR_S", 2 },
698 { "P_BS_B", 2 },
699 { "P_BS_S", 2 },
700 { "P_FLAG", 2 },
701 { "P_RATE(Mbps)", 0 }
702 };
703
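/* Fill the seven shaper columns (IR_B, IR_U, IR_S, BS_B, BS_S, FLAG, RATE)
 * into consecutive result slots, advancing *index past them.
 */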
704 static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
705 char **result, u8 *index)
706 {
707 sprintf(result[(*index)++], "%3u", para->ir_b);
708 sprintf(result[(*index)++], "%3u", para->ir_u);
709 sprintf(result[(*index)++], "%3u", para->ir_s);
710 sprintf(result[(*index)++], "%3u", para->bs_b);
711 sprintf(result[(*index)++], "%3u", para->bs_s);
712 sprintf(result[(*index)++], "%3u", para->flag);
713 sprintf(result[(*index)++], "%6u", para->rate);
714 }
715
716 static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
717 char *buf, int len)
718 {
719 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
720 char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
721 u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
722 char content[HCLGE_DBG_TM_INFO_LEN];
723 int pos = 0;
724 int ret;
725
726 for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
727 result[i] = data_str;
728 data_str += HCLGE_DBG_DATA_STR_LEN;
729 }
730
731 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
732 NULL, ARRAY_SIZE(tm_pg_items));
733 pos += scnprintf(buf + pos, len - pos, "%s", content);
734
735 for (pg_id = 0; pg_id < hdev->tm_info.num_pg; pg_id++) {
736 ret = hclge_tm_get_pg_to_pri_map(hdev, pg_id, &pri_bit_map);
737 if (ret)
738 return ret;
739
740 ret = hclge_tm_get_pg_sch_mode(hdev, pg_id, &sch_mode);
741 if (ret)
742 return ret;
743
744 ret = hclge_tm_get_pg_weight(hdev, pg_id, &weight);
745 if (ret)
746 return ret;
747
748 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
749 HCLGE_OPC_TM_PG_C_SHAPPING,
750 &c_shaper_para);
751 if (ret)
752 return ret;
753
754 ret = hclge_tm_get_pg_shaper(hdev, pg_id,
755 HCLGE_OPC_TM_PG_P_SHAPPING,
756 &p_shaper_para);
757 if (ret)
758 return ret;
759
760 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
761 "sp";
762
763 j = 0;
764 sprintf(result[j++], "%02u", pg_id);
765 sprintf(result[j++], "0x%02x", pri_bit_map);
766 sprintf(result[j++], "%4s", sch_mode_str);
767 sprintf(result[j++], "%3u", weight);
768 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
769 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
770
771 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
772 (const char **)result,
773 ARRAY_SIZE(tm_pg_items));
774 pos += scnprintf(buf + pos, len - pos, "%s", content);
775 }
776
777 return 0;
778 }
779
780 static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
781 {
782 char *data_str;
783 int ret;
784
785 data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
786 HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
787 if (!data_str)
788 return -ENOMEM;
789
790 ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
791
792 kfree(data_str);
793
794 return ret;
795 }
796
797 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
798 {
799 struct hclge_tm_shaper_para shaper_para;
800 int pos = 0;
801 int ret;
802
803 ret = hclge_tm_get_port_shaper(hdev, &shaper_para);
804 if (ret)
805 return ret;
806
807 pos += scnprintf(buf + pos, len - pos,
808 "IR_B IR_U IR_S BS_B BS_S FLAG RATE(Mbps)\n");
809 pos += scnprintf(buf + pos, len - pos,
810 "%3u %3u %3u %3u %3u %1u %6u\n",
811 shaper_para.ir_b, shaper_para.ir_u, shaper_para.ir_s,
812 shaper_para.bs_b, shaper_para.bs_s, shaper_para.flag,
813 shaper_para.rate);
814
815 return 0;
816 }
817
818 static int hclge_dbg_dump_tm_bp_qset_map(struct hclge_dev *hdev, u8 tc_id,
819 char *buf, int len)
820 {
821 u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
822 struct hclge_bp_to_qs_map_cmd *map;
823 struct hclge_desc desc;
824 int pos = 0;
825 u8 group_id;
826 u8 grp_num;
827 u16 i = 0;
828 int ret;
829
830 grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
831 HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
832 map = (struct hclge_bp_to_qs_map_cmd *)desc.data;
833 for (group_id = 0; group_id < grp_num; group_id++) {
834 hclge_cmd_setup_basic_desc(&desc,
835 HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
836 true);
837 map->tc_id = tc_id;
838 map->qs_group_id = group_id;
839 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
840 if (ret) {
841 dev_err(&hdev->pdev->dev,
842 "failed to get bp to qset map, ret = %d\n",
843 ret);
844 return ret;
845 }
846
847 qset_mapping[group_id] = le32_to_cpu(map->qs_bit_map);
848 }
849
850 pos += scnprintf(buf + pos, len - pos, "INDEX | TM BP QSET MAPPING:\n");
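/* Each line covers 8 mapping groups of 32 qsets (256 qsets in total),
 * printed from the highest-numbered group down to the lowest.
 */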
851 for (group_id = 0; group_id < grp_num / 8; group_id++) {
852 pos += scnprintf(buf + pos, len - pos,
853 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
854 group_id * 256, qset_mapping[i + 7],
855 qset_mapping[i + 6], qset_mapping[i + 5],
856 qset_mapping[i + 4], qset_mapping[i + 3],
857 qset_mapping[i + 2], qset_mapping[i + 1],
858 qset_mapping[i]);
859 i += 8;
860 }
861
862 return pos;
863 }
864
865 static int hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *buf, int len)
866 {
867 u16 queue_id;
868 u16 qset_id;
869 u8 link_vld;
870 int pos = 0;
871 u8 pri_id;
872 u8 tc_id;
873 int ret;
874
875 for (queue_id = 0; queue_id < hdev->num_tqps; queue_id++) {
876 ret = hclge_tm_get_q_to_qs_map(hdev, queue_id, &qset_id);
877 if (ret)
878 return ret;
879
880 ret = hclge_tm_get_qset_map_pri(hdev, qset_id, &pri_id,
881 &link_vld);
882 if (ret)
883 return ret;
884
885 ret = hclge_tm_get_q_to_tc(hdev, queue_id, &tc_id);
886 if (ret)
887 return ret;
888
889 pos += scnprintf(buf + pos, len - pos,
890 "QUEUE_ID QSET_ID PRI_ID TC_ID\n");
891 pos += scnprintf(buf + pos, len - pos,
892 "%04u %4u %3u %2u\n",
893 queue_id, qset_id, pri_id, tc_id);
894
895 if (!hnae3_dev_dcb_supported(hdev))
896 continue;
897
898 ret = hclge_dbg_dump_tm_bp_qset_map(hdev, tc_id, buf + pos,
899 len - pos);
900 if (ret < 0)
901 return ret;
902 pos += ret;
903
904 pos += scnprintf(buf + pos, len - pos, "\n");
905 }
906
907 return 0;
908 }
909
910 static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
911 {
912 struct hclge_tm_nodes_cmd *nodes;
913 struct hclge_desc desc;
914 int pos = 0;
915 int ret;
916
917 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
918 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
919 if (ret) {
920 dev_err(&hdev->pdev->dev,
921 "failed to dump tm nodes, ret = %d\n", ret);
922 return ret;
923 }
924
925 nodes = (struct hclge_tm_nodes_cmd *)desc.data;
926
927 pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
928 pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
929 nodes->pg_base_id, nodes->pg_num);
930 pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
931 nodes->pri_base_id, nodes->pri_num);
932 pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
933 le16_to_cpu(nodes->qset_base_id),
934 le16_to_cpu(nodes->qset_num));
935 pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
936 le16_to_cpu(nodes->queue_base_id),
937 le16_to_cpu(nodes->queue_num));
938
939 return 0;
940 }
941
942 static const struct hclge_dbg_item tm_pri_items[] = {
943 { "ID", 4 },
944 { "MODE", 2 },
945 { "DWRR", 2 },
946 { "C_IR_B", 2 },
947 { "C_IR_U", 2 },
948 { "C_IR_S", 2 },
949 { "C_BS_B", 2 },
950 { "C_BS_S", 2 },
951 { "C_FLAG", 2 },
952 { "C_RATE(Mbps)", 2 },
953 { "P_IR_B", 2 },
954 { "P_IR_U", 2 },
955 { "P_IR_S", 2 },
956 { "P_BS_B", 2 },
957 { "P_BS_S", 2 },
958 { "P_FLAG", 2 },
959 { "P_RATE(Mbps)", 0 }
960 };
961
962 static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
963 {
964 char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN];
965 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
966 char *result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str;
967 char content[HCLGE_DBG_TM_INFO_LEN];
968 u8 pri_num, sch_mode, weight, i, j;
969 int pos, ret;
970
971 ret = hclge_tm_get_pri_num(hdev, &pri_num);
972 if (ret)
973 return ret;
974
975 for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++)
976 result[i] = &data_str[i][0];
977
978 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
979 NULL, ARRAY_SIZE(tm_pri_items));
980 pos = scnprintf(buf, len, "%s", content);
981
982 for (i = 0; i < pri_num; i++) {
983 ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
984 if (ret)
985 return ret;
986
987 ret = hclge_tm_get_pri_weight(hdev, i, &weight);
988 if (ret)
989 return ret;
990
991 ret = hclge_tm_get_pri_shaper(hdev, i,
992 HCLGE_OPC_TM_PRI_C_SHAPPING,
993 &c_shaper_para);
994 if (ret)
995 return ret;
996
997 ret = hclge_tm_get_pri_shaper(hdev, i,
998 HCLGE_OPC_TM_PRI_P_SHAPPING,
999 &p_shaper_para);
1000 if (ret)
1001 return ret;
1002
1003 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1004 "sp";
1005
1006 j = 0;
1007 sprintf(result[j++], "%04u", i);
1008 sprintf(result[j++], "%4s", sch_mode_str);
1009 sprintf(result[j++], "%3u", weight);
1010 hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j);
1011 hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j);
1012 hclge_dbg_fill_content(content, sizeof(content), tm_pri_items,
1013 (const char **)result,
1014 ARRAY_SIZE(tm_pri_items));
1015 pos += scnprintf(buf + pos, len - pos, "%s", content);
1016 }
1017
1018 return 0;
1019 }
1020
1021 static const struct hclge_dbg_item tm_qset_items[] = {
1022 { "ID", 4 },
1023 { "MAP_PRI", 2 },
1024 { "LINK_VLD", 2 },
1025 { "MODE", 2 },
1026 { "DWRR", 2 },
1027 { "IR_B", 2 },
1028 { "IR_U", 2 },
1029 { "IR_S", 2 },
1030 { "BS_B", 2 },
1031 { "BS_S", 2 },
1032 { "FLAG", 2 },
1033 { "RATE(Mbps)", 0 }
1034 };
1035
1036 static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
1037 {
1038 char data_str[ARRAY_SIZE(tm_qset_items)][HCLGE_DBG_DATA_STR_LEN];
1039 char *result[ARRAY_SIZE(tm_qset_items)], *sch_mode_str;
1040 u8 priority, link_vld, sch_mode, weight;
1041 struct hclge_tm_shaper_para shaper_para;
1042 char content[HCLGE_DBG_TM_INFO_LEN];
1043 u16 qset_num, i;
1044 int ret, pos;
1045 u8 j;
1046
1047 ret = hclge_tm_get_qset_num(hdev, &qset_num);
1048 if (ret)
1049 return ret;
1050
1051 for (i = 0; i < ARRAY_SIZE(tm_qset_items); i++)
1052 result[i] = &data_str[i][0];
1053
1054 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1055 NULL, ARRAY_SIZE(tm_qset_items));
1056 pos = scnprintf(buf, len, "%s", content);
1057
1058 for (i = 0; i < qset_num; i++) {
1059 ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
1060 if (ret)
1061 return ret;
1062
1063 ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
1064 if (ret)
1065 return ret;
1066
1067 ret = hclge_tm_get_qset_weight(hdev, i, &weight);
1068 if (ret)
1069 return ret;
1070
1071 ret = hclge_tm_get_qset_shaper(hdev, i, &shaper_para);
1072 if (ret)
1073 return ret;
1074
1075 sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
1076 "sp";
1077
1078 j = 0;
1079 sprintf(result[j++], "%04u", i);
1080 sprintf(result[j++], "%4u", priority);
1081 sprintf(result[j++], "%4u", link_vld);
1082 sprintf(result[j++], "%4s", sch_mode_str);
1083 sprintf(result[j++], "%3u", weight);
1084 hclge_dbg_fill_shaper_content(&shaper_para, result, &j);
1085
1086 hclge_dbg_fill_content(content, sizeof(content), tm_qset_items,
1087 (const char **)result,
1088 ARRAY_SIZE(tm_qset_items));
1089 pos += scnprintf(buf + pos, len - pos, "%s", content);
1090 }
1091
1092 return 0;
1093 }
1094
1095 static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
1096 int len)
1097 {
1098 struct hclge_cfg_pause_param_cmd *pause_param;
1099 struct hclge_desc desc;
1100 int pos = 0;
1101 int ret;
1102
1103 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
1104 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1105 if (ret) {
1106 dev_err(&hdev->pdev->dev,
1107 "failed to dump qos pause, ret = %d\n", ret);
1108 return ret;
1109 }
1110
1111 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
1112
1113 pos += scnprintf(buf + pos, len - pos, "pause_trans_gap: 0x%x\n",
1114 pause_param->pause_trans_gap);
1115 pos += scnprintf(buf + pos, len - pos, "pause_trans_time: 0x%x\n",
1116 le16_to_cpu(pause_param->pause_trans_time));
1117 return 0;
1118 }
1119
1120 #define HCLGE_DBG_TC_MASK 0x0F
1121
1122 static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
1123 int len)
1124 {
1125 #define HCLGE_DBG_TC_BIT_WIDTH 4
1126
1127 struct hclge_qos_pri_map_cmd *pri_map;
1128 struct hclge_desc desc;
1129 int pos = 0;
1130 u8 *pri_tc;
1131 u8 tc, i;
1132 int ret;
1133
1134 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
1135 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1136 if (ret) {
1137 dev_err(&hdev->pdev->dev,
1138 "failed to dump qos pri map, ret = %d\n", ret);
1139 return ret;
1140 }
1141
1142 pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
1143
1144 pos += scnprintf(buf + pos, len - pos, "vlan_to_pri: 0x%x\n",
1145 pri_map->vlan_pri);
1146 pos += scnprintf(buf + pos, len - pos, "PRI TC\n");
1147
1148 pri_tc = (u8 *)pri_map;
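/* Each byte holds two 4-bit priority-to-TC entries: even priorities in the
 * low nibble, odd priorities in the high nibble.
 */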
1149 for (i = 0; i < HNAE3_MAX_TC; i++) {
1150 tc = pri_tc[i >> 1] >> ((i & 1) * HCLGE_DBG_TC_BIT_WIDTH);
1151 tc &= HCLGE_DBG_TC_MASK;
1152 pos += scnprintf(buf + pos, len - pos, "%u %u\n", i, tc);
1153 }
1154
1155 return 0;
1156 }
1157
1158 static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
1159 int len)
1160 {
1161 struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
1162 struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
1163 u8 *req0 = (u8 *)desc[0].data;
1164 u8 *req1 = (u8 *)desc[1].data;
1165 u8 dscp_tc[HNAE3_MAX_DSCP];
1166 int pos, ret;
1167 u8 i, j;
1168
1169 pos = scnprintf(buf, len, "tc map mode: %s\n",
1170 tc_map_mode_str[kinfo->tc_map_mode]);
1171
1172 if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
1173 return 0;
1174
1175 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
1176 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1177 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
1178 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
1179 if (ret) {
1180 dev_err(&hdev->pdev->dev,
1181 "failed to dump qos dscp map, ret = %d\n", ret);
1182 return ret;
1183 }
1184
1185 pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
1186
1187 /* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
1188 for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
1189 j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
1190 /* Each dscp setting has 4 bits, so each byte stores two dscp
1191  * settings
1192  */
1193 dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1194 dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
1195 dscp_tc[i] &= HCLGE_DBG_TC_MASK;
1196 dscp_tc[j] &= HCLGE_DBG_TC_MASK;
1197 }
1198
1199 for (i = 0; i < HNAE3_MAX_DSCP; i++) {
1200 if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
1201 continue;
1202
1203 pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
1204 i, kinfo->dscp_prio[i], dscp_tc[i]);
1205 }
1206
1207 return 0;
1208 }
1209
1210 static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
1211 {
1212 struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
1213 struct hclge_desc desc;
1214 int pos = 0;
1215 int i, ret;
1216
1217 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
1218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1219 if (ret) {
1220 dev_err(&hdev->pdev->dev,
1221 "failed to dump tx buf, ret = %d\n", ret);
1222 return ret;
1223 }
1224
1225 tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1226 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1227 pos += scnprintf(buf + pos, len - pos,
1228 "tx_packet_buf_tc_%d: 0x%x\n", i,
1229 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
1230
1231 return pos;
1232 }
1233
1234 static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev, char *buf,
1235 int len)
1236 {
1237 struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
1238 struct hclge_desc desc;
1239 int pos = 0;
1240 int i, ret;
1241
1242 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
1243 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1244 if (ret) {
1245 dev_err(&hdev->pdev->dev,
1246 "failed to dump rx priv buf, ret = %d\n", ret);
1247 return ret;
1248 }
1249
1250 pos += scnprintf(buf + pos, len - pos, "\n");
1251
1252 rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
1253 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1254 pos += scnprintf(buf + pos, len - pos,
1255 "rx_packet_buf_tc_%d: 0x%x\n", i,
1256 le16_to_cpu(rx_buf_cmd->buf_num[i]));
1257
1258 pos += scnprintf(buf + pos, len - pos, "rx_share_buf: 0x%x\n",
1259 le16_to_cpu(rx_buf_cmd->shared_buf));
1260
1261 return pos;
1262 }
1263
1264 static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev, char *buf,
1265 int len)
1266 {
1267 struct hclge_rx_com_wl *rx_com_wl;
1268 struct hclge_desc desc;
1269 int pos = 0;
1270 int ret;
1271
1272 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
1273 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1274 if (ret) {
1275 dev_err(&hdev->pdev->dev,
1276 "failed to dump rx common wl, ret = %d\n", ret);
1277 return ret;
1278 }
1279
1280 rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
1281 pos += scnprintf(buf + pos, len - pos, "\n");
1282 pos += scnprintf(buf + pos, len - pos,
1283 "rx_com_wl: high: 0x%x, low: 0x%x\n",
1284 le16_to_cpu(rx_com_wl->com_wl.high),
1285 le16_to_cpu(rx_com_wl->com_wl.low));
1286
1287 return pos;
1288 }
1289
1290 static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev, char *buf,
1291 int len)
1292 {
1293 struct hclge_rx_com_wl *rx_packet_cnt;
1294 struct hclge_desc desc;
1295 int pos = 0;
1296 int ret;
1297
1298 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
1299 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1300 if (ret) {
1301 dev_err(&hdev->pdev->dev,
1302 "failed to dump rx global pkt cnt, ret = %d\n", ret);
1303 return ret;
1304 }
1305
1306 rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
1307 pos += scnprintf(buf + pos, len - pos,
1308 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
1309 le16_to_cpu(rx_packet_cnt->com_wl.high),
1310 le16_to_cpu(rx_packet_cnt->com_wl.low));
1311
1312 return pos;
1313 }
1314
1315 static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
1316 int len)
1317 {
1318 struct hclge_rx_priv_wl_buf *rx_priv_wl;
1319 struct hclge_desc desc[2];
1320 int pos = 0;
1321 int i, ret;
1322
1323 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1324 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1325 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
1326 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1327 if (ret) {
1328 dev_err(&hdev->pdev->dev,
1329 "failed to dump rx priv wl buf, ret = %d\n", ret);
1330 return ret;
1331 }
1332
1333 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
1334 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1335 pos += scnprintf(buf + pos, len - pos,
1336 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
1337 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1338 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1339
1340 rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
1341 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1342 pos += scnprintf(buf + pos, len - pos,
1343 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
1344 i + HCLGE_TC_NUM_ONE_DESC,
1345 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
1346 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
1347
1348 return pos;
1349 }
1350
1351 static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
1352 char *buf, int len)
1353 {
1354 struct hclge_rx_com_thrd *rx_com_thrd;
1355 struct hclge_desc desc[2];
1356 int pos = 0;
1357 int i, ret;
1358
1359 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1360 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1361 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
1362 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1363 if (ret) {
1364 dev_err(&hdev->pdev->dev,
1365 "failed to dump rx common threshold, ret = %d\n", ret);
1366 return ret;
1367 }
1368
1369 pos += scnprintf(buf + pos, len - pos, "\n");
1370 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
1371 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1372 pos += scnprintf(buf + pos, len - pos,
1373 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
1374 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1375 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1376
1377 rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
1378 for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
1379 pos += scnprintf(buf + pos, len - pos,
1380 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
1381 i + HCLGE_TC_NUM_ONE_DESC,
1382 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
1383 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
1384
1385 return pos;
1386 }
1387
1388 static int hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev, char *buf,
1389 int len)
1390 {
1391 int pos = 0;
1392 int ret;
1393
1394 ret = hclge_dbg_dump_tx_buf_cfg(hdev, buf + pos, len - pos);
1395 if (ret < 0)
1396 return ret;
1397 pos += ret;
1398
1399 ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev, buf + pos, len - pos);
1400 if (ret < 0)
1401 return ret;
1402 pos += ret;
1403
1404 ret = hclge_dbg_dump_rx_common_wl_cfg(hdev, buf + pos, len - pos);
1405 if (ret < 0)
1406 return ret;
1407 pos += ret;
1408
1409 ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev, buf + pos, len - pos);
1410 if (ret < 0)
1411 return ret;
1412 pos += ret;
1413
1414 pos += scnprintf(buf + pos, len - pos, "\n");
1415 if (!hnae3_dev_dcb_supported(hdev))
1416 return 0;
1417
1418 ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev, buf + pos, len - pos);
1419 if (ret < 0)
1420 return ret;
1421 pos += ret;
1422
1423 ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev, buf + pos,
1424 len - pos);
1425 if (ret < 0)
1426 return ret;
1427
1428 return 0;
1429 }
1430
1431 static int hclge_dbg_dump_mng_table(struct hclge_dev *hdev, char *buf, int len)
1432 {
1433 struct hclge_mac_ethertype_idx_rd_cmd *req0;
1434 struct hclge_desc desc;
1435 u32 msg_egress_port;
1436 int pos = 0;
1437 int ret, i;
1438
1439 pos += scnprintf(buf + pos, len - pos,
1440 "entry mac_addr mask ether ");
1441 pos += scnprintf(buf + pos, len - pos,
1442 "mask vlan mask i_map i_dir e_type ");
1443 pos += scnprintf(buf + pos, len - pos, "pf_id vf_id q_id drop\n");
1444
1445 for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
1446 hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
1447 true);
1448 req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
1449 req0->index = cpu_to_le16(i);
1450
1451 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1452 if (ret) {
1453 dev_err(&hdev->pdev->dev,
1454 "failed to dump manage table, ret = %d\n", ret);
1455 return ret;
1456 }
1457
1458 if (!req0->resp_code)
1459 continue;
1460
1461 pos += scnprintf(buf + pos, len - pos, "%02u %pM ",
1462 le16_to_cpu(req0->index), req0->mac_addr);
1463
1464 pos += scnprintf(buf + pos, len - pos,
1465 "%x %04x %x %04x ",
1466 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
1467 le16_to_cpu(req0->ethter_type),
1468 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
1469 le16_to_cpu(req0->vlan_tag) &
1470 HCLGE_DBG_MNG_VLAN_TAG);
1471
1472 pos += scnprintf(buf + pos, len - pos,
1473 "%x %02x %02x ",
1474 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
1475 req0->i_port_bitmap, req0->i_port_direction);
1476
1477 msg_egress_port = le16_to_cpu(req0->egress_port);
1478 pos += scnprintf(buf + pos, len - pos,
1479 "%x %x %02x %04x %x\n",
1480 !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
1481 msg_egress_port & HCLGE_DBG_MNG_PF_ID,
1482 (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
1483 le16_to_cpu(req0->egress_queue),
1484 !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
1485 }
1486
1487 return 0;
1488 }
1489
1490 #define HCLGE_DBG_TCAM_BUF_SIZE 256
1491
1492 static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
1493 char *tcam_buf,
1494 struct hclge_dbg_tcam_msg tcam_msg)
1495 {
1496 struct hclge_fd_tcam_config_1_cmd *req1;
1497 struct hclge_fd_tcam_config_2_cmd *req2;
1498 struct hclge_fd_tcam_config_3_cmd *req3;
1499 struct hclge_desc desc[3];
1500 int pos = 0;
1501 int ret, i;
1502 u32 *req;
1503
1504 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
1505 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1506 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
1507 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1508 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
1509
1510 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
1511 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
1512 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
1513
1514 req1->stage = tcam_msg.stage;
1515 req1->xy_sel = sel_x ? 1 : 0;
1516 req1->index = cpu_to_le32(tcam_msg.loc);
1517
1518 ret = hclge_cmd_send(&hdev->hw, desc, 3);
1519 if (ret)
1520 return ret;
1521
1522 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1523 "read result tcam key %s(%u):\n", sel_x ? "x" : "y",
1524 tcam_msg.loc);
1525
1526 /* tcam_data0 ~ tcam_data1 */
1527 req = (u32 *)req1->tcam_data;
1528 for (i = 0; i < 2; i++)
1529 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1530 "%08x\n", *req++);
1531
1532 /* tcam_data2 ~ tcam_data7 */
1533 req = (u32 *)req2->tcam_data;
1534 for (i = 0; i < 6; i++)
1535 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1536 "%08x\n", *req++);
1537
1538 /* tcam_data8 ~ tcam_data12 */
1539 req = (u32 *)req3->tcam_data;
1540 for (i = 0; i < 5; i++)
1541 pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
1542 "%08x\n", *req++);
1543
1544 return ret;
1545 }
1546
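/* Snapshot the locations of all flow director rules under fd_rule_lock;
 * return the number of rules found, or -EINVAL if none were found or the
 * count does not match hdev->hclge_fd_rule_num.
 */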
1547 static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
1548 {
1549 struct hclge_fd_rule *rule;
1550 struct hlist_node *node;
1551 int cnt = 0;
1552
1553 spin_lock_bh(&hdev->fd_rule_lock);
1554 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
1555 rule_locs[cnt] = rule->location;
1556 cnt++;
1557 }
1558 spin_unlock_bh(&hdev->fd_rule_lock);
1559
1560 if (cnt != hdev->hclge_fd_rule_num || cnt == 0)
1561 return -EINVAL;
1562
1563 return cnt;
1564 }
1565
1566 static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
1567 {
1568 u32 rule_num = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
1569 struct hclge_dbg_tcam_msg tcam_msg;
1570 int i, ret, rule_cnt;
1571 u16 *rule_locs;
1572 char *tcam_buf;
1573 int pos = 0;
1574
1575 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
1576 dev_err(&hdev->pdev->dev,
1577 "Only FD-supported dev supports dump fd tcam\n");
1578 return -EOPNOTSUPP;
1579 }
1580
1581 if (!hdev->hclge_fd_rule_num || !rule_num)
1582 return 0;
1583
1584 rule_locs = kcalloc(rule_num, sizeof(u16), GFP_KERNEL);
1585 if (!rule_locs)
1586 return -ENOMEM;
1587
1588 tcam_buf = kzalloc(HCLGE_DBG_TCAM_BUF_SIZE, GFP_KERNEL);
1589 if (!tcam_buf) {
1590 kfree(rule_locs);
1591 return -ENOMEM;
1592 }
1593
1594 rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
1595 if (rule_cnt < 0) {
1596 ret = rule_cnt;
1597 dev_err(&hdev->pdev->dev,
1598 "failed to get rule number, ret = %d\n", ret);
1599 goto out;
1600 }
1601
1602 ret = 0;
1603 for (i = 0; i < rule_cnt; i++) {
1604 tcam_msg.stage = HCLGE_FD_STAGE_1;
1605 tcam_msg.loc = rule_locs[i];
1606
1607 ret = hclge_dbg_fd_tcam_read(hdev, true, tcam_buf, tcam_msg);
1608 if (ret) {
1609 dev_err(&hdev->pdev->dev,
1610 "failed to get fd tcam key x, ret = %d\n", ret);
1611 goto out;
1612 }
1613
1614 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1615
1616 ret = hclge_dbg_fd_tcam_read(hdev, false, tcam_buf, tcam_msg);
1617 if (ret) {
1618 dev_err(&hdev->pdev->dev,
1619 "failed to get fd tcam key y, ret = %d\n", ret);
1620 goto out;
1621 }
1622
1623 pos += scnprintf(buf + pos, len - pos, "%s", tcam_buf);
1624 }
1625
1626 out:
1627 kfree(tcam_buf);
1628 kfree(rule_locs);
1629 return ret;
1630 }
1631
1632 static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
1633 {
1634 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
1635 struct hclge_fd_ad_cnt_read_cmd *req;
1636 char str_id[HCLGE_DBG_ID_LEN];
1637 struct hclge_desc desc;
1638 int pos = 0;
1639 int ret;
1640 u64 cnt;
1641 u8 i;
1642
1643 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
1644 return -EOPNOTSUPP;
1645
1646 pos += scnprintf(buf + pos, len - pos,
1647 "func_id\thit_times\n");
1648
1649 for (i = 0; i < func_num; i++) {
1650 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_CNT_OP, true);
1651 req = (struct hclge_fd_ad_cnt_read_cmd *)desc.data;
1652 req->index = cpu_to_le16(i);
1653 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1654 if (ret) {
1655 dev_err(&hdev->pdev->dev, "failed to get fd counter, ret = %d\n",
1656 ret);
1657 return ret;
1658 }
1659 cnt = le64_to_cpu(req->cnt);
1660 hclge_dbg_get_func_id_str(str_id, i);
1661 pos += scnprintf(buf + pos, len - pos,
1662 "%s\t%llu\n", str_id, cnt);
1663 }
1664
1665 return 0;
1666 }
1667
1668 static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
1669 {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
1670 {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"},
1671 {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"},
1672 {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
1673 {HCLGE_GLOBAL_RESET_REG, "hardware reset status"},
1674 {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
1675 {HCLGE_FUN_RST_ING, "function reset status"}
1676 };
1677
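/* Dump the reset statistics plus the raw values of the reset-related
 * registers listed in hclge_dbg_rst_info[].
 */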
1678 int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
1679 {
1680 u32 i, offset;
1681 int pos = 0;
1682
1683 pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
1684 hdev->rst_stats.pf_rst_cnt);
1685 pos += scnprintf(buf + pos, len - pos, "FLR reset count: %u\n",
1686 hdev->rst_stats.flr_rst_cnt);
1687 pos += scnprintf(buf + pos, len - pos, "GLOBAL reset count: %u\n",
1688 hdev->rst_stats.global_rst_cnt);
1689 pos += scnprintf(buf + pos, len - pos, "IMP reset count: %u\n",
1690 hdev->rst_stats.imp_rst_cnt);
1691 pos += scnprintf(buf + pos, len - pos, "reset done count: %u\n",
1692 hdev->rst_stats.reset_done_cnt);
1693 pos += scnprintf(buf + pos, len - pos, "HW reset done count: %u\n",
1694 hdev->rst_stats.hw_reset_done_cnt);
1695 pos += scnprintf(buf + pos, len - pos, "reset count: %u\n",
1696 hdev->rst_stats.reset_cnt);
1697 pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
1698 hdev->rst_stats.reset_fail_cnt);
1699
1700 for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
1701 offset = hclge_dbg_rst_info[i].offset;
1702 pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
1703 hclge_dbg_rst_info[i].message,
1704 hclge_read_dev(&hdev->hw, offset));
1705 }
1706
1707 pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
1708 hdev->state);
1709
1710 return 0;
1711 }
1712
1713 static int hclge_dbg_dump_serv_info(struct hclge_dev *hdev, char *buf, int len)
1714 {
1715 unsigned long rem_nsec;
1716 int pos = 0;
1717 u64 lc;
1718
1719 lc = local_clock();
1720 rem_nsec = do_div(lc, HCLGE_BILLION_NANO_SECONDS);
1721
1722 pos += scnprintf(buf + pos, len - pos, "local_clock: [%5lu.%06lu]\n",
1723 (unsigned long)lc, rem_nsec / 1000);
1724 pos += scnprintf(buf + pos, len - pos, "delta: %u(ms)\n",
1725 jiffies_to_msecs(jiffies - hdev->last_serv_processed));
1726 pos += scnprintf(buf + pos, len - pos,
1727 "last_service_task_processed: %lu(jiffies)\n",
1728 hdev->last_serv_processed);
1729 pos += scnprintf(buf + pos, len - pos, "last_service_task_cnt: %lu\n",
1730 hdev->serv_processed_cnt);
1731
1732 return 0;
1733 }
1734
1735 static int hclge_dbg_dump_interrupt(struct hclge_dev *hdev, char *buf, int len)
1736 {
1737 int pos = 0;
1738
1739 pos += scnprintf(buf + pos, len - pos, "num_nic_msi: %u\n",
1740 hdev->num_nic_msi);
1741 pos += scnprintf(buf + pos, len - pos, "num_roce_msi: %u\n",
1742 hdev->num_roce_msi);
1743 pos += scnprintf(buf + pos, len - pos, "num_msi_used: %u\n",
1744 hdev->num_msi_used);
1745 pos += scnprintf(buf + pos, len - pos, "num_msi_left: %u\n",
1746 hdev->num_msi_left);
1747
1748 return 0;
1749 }
1750
1751 static void hclge_dbg_imp_info_data_print(struct hclge_desc *desc_src,
1752 char *buf, int len, u32 bd_num)
1753 {
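/* Two 32-bit words are printed per row, so the displayed byte offset advances
 * by HCLGE_DBG_IMP_INFO_PRINT_OFFSET * sizeof(u32) per row.
 */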
1754 #define HCLGE_DBG_IMP_INFO_PRINT_OFFSET 0x2
1755
1756 struct hclge_desc *desc_index = desc_src;
1757 u32 offset = 0;
1758 int pos = 0;
1759 u32 i, j;
1760
1761 pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1762
1763 for (i = 0; i < bd_num; i++) {
1764 j = 0;
1765 while (j < HCLGE_DESC_DATA_LEN - 1) {
1766 pos += scnprintf(buf + pos, len - pos, "0x%04x | ",
1767 offset);
1768 pos += scnprintf(buf + pos, len - pos, "0x%08x ",
1769 le32_to_cpu(desc_index->data[j++]));
1770 pos += scnprintf(buf + pos, len - pos, "0x%08x\n",
1771 le32_to_cpu(desc_index->data[j++]));
1772 offset += sizeof(u32) * HCLGE_DBG_IMP_INFO_PRINT_OFFSET;
1773 }
1774 desc_index++;
1775 }
1776 }
1777
1778 static int
1779 hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
1780 {
1781 struct hclge_get_imp_bd_cmd *req;
1782 struct hclge_desc *desc_src;
1783 struct hclge_desc desc;
1784 u32 bd_num;
1785 int ret;
1786
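/* Query how many buffer descriptors (BDs) the IMP statistics occupy. */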
1787 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_STATS_BD, true);
1788
1789 req = (struct hclge_get_imp_bd_cmd *)desc.data;
1790 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1791 if (ret) {
1792 dev_err(&hdev->pdev->dev,
1793 "failed to get imp statistics bd number, ret = %d\n",
1794 ret);
1795 return ret;
1796 }
1797
1798 bd_num = le32_to_cpu(req->bd_num);
1799 if (!bd_num) {
1800 dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
1801 return -EINVAL;
1802 }
1803
1804 desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
1805 if (!desc_src)
1806 return -ENOMEM;
1807
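/* Read all IMP statistics descriptors in one command and dump their data. */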
1808 ret = hclge_dbg_cmd_send(hdev, desc_src, 0, bd_num,
1809 HCLGE_OPC_IMP_STATS_INFO);
1810 if (ret) {
1811 kfree(desc_src);
1812 dev_err(&hdev->pdev->dev,
1813 "failed to get imp statistics, ret = %d\n", ret);
1814 return ret;
1815 }
1816
1817 hclge_dbg_imp_info_data_print(desc_src, buf, len, bd_num);
1818
1819 kfree(desc_src);
1820
1821 return 0;
1822 }
1823
1824 #define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
1825 #define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
1826
1827 static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
1828 char *buf, int len, int *pos)
1829 {
1830 #define HCLGE_CMD_DATA_NUM 6
1831
1832 int offset = HCLGE_MAX_NCL_CONFIG_LENGTH - *index;
1833 int i, j;
1834
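/* The first word of the first descriptor carries the offset/length field of
 * the request rather than NCL config data, so it is skipped below.
 */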
1835 for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
1836 for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
1837 if (i == 0 && j == 0)
1838 continue;
1839
1840 *pos += scnprintf(buf + *pos, len - *pos,
1841 "0x%04x | 0x%08x\n", offset,
1842 le32_to_cpu(desc[i].data[j]));
1843
1844 offset += sizeof(u32);
1845 *index -= sizeof(u32);
1846
1847 if (*index <= 0)
1848 return;
1849 }
1850 }
1851 }
1852
1853 static int
1854 hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
1855 {
1856 #define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
1857
1858 struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
1859 int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
1860 int index = HCLGE_MAX_NCL_CONFIG_LENGTH;
1861 int pos = 0;
1862 u32 data0;
1863 int ret;
1864
1865 pos += scnprintf(buf + pos, len - pos, "offset | data\n");
1866
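/* data0 encodes the request: bits 0-15 hold the byte offset to read from,
 * bits 16-31 the number of bytes to read in this batch.
 */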
1867 while (index > 0) {
1868 data0 = HCLGE_MAX_NCL_CONFIG_LENGTH - index;
1869 if (index >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
1870 data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
1871 else
1872 data0 |= (u32)index << 16;
1873 ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
1874 HCLGE_OPC_QUERY_NCL_CONFIG);
1875 if (ret)
1876 return ret;
1877
1878 hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
1879 }
1880
1881 return 0;
1882 }
1883
1884 static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len)
1885 {
1886 struct phy_device *phydev = hdev->hw.mac.phydev;
1887 struct hclge_config_mac_mode_cmd *req_app;
1888 struct hclge_common_lb_cmd *req_common;
1889 struct hclge_desc desc;
1890 u8 loopback_en;
1891 int pos = 0;
1892 int ret;
1893
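/* req_app and req_common alias desc.data; the same descriptor is reused for
 * the app loopback query first and the common (serdes/phy) loopback query
 * afterwards.
 */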
1894 req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
1895 req_common = (struct hclge_common_lb_cmd *)desc.data;
1896
1897 pos += scnprintf(buf + pos, len - pos, "mac id: %u\n",
1898 hdev->hw.mac.mac_id);
1899
1900 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
1901 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1902 if (ret) {
1903 dev_err(&hdev->pdev->dev,
1904 "failed to dump app loopback status, ret = %d\n", ret);
1905 return ret;
1906 }
1907
1908 loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
1909 HCLGE_MAC_APP_LP_B);
1910 pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n",
1911 state_str[loopback_en]);
1912
1913 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true);
1914 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1915 if (ret) {
1916 dev_err(&hdev->pdev->dev,
1917 "failed to dump common loopback status, ret = %d\n",
1918 ret);
1919 return ret;
1920 }
1921
1922 loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
1923 pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n",
1924 state_str[loopback_en]);
1925
1926 loopback_en = req_common->enable &
1927 HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0;
1928 pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n",
1929 state_str[loopback_en]);
1930
1931 if (phydev) {
1932 loopback_en = phydev->loopback_enabled;
1933 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1934 state_str[loopback_en]);
1935 } else if (hnae3_dev_phy_imp_supported(hdev)) {
1936 loopback_en = req_common->enable &
1937 HCLGE_CMD_GE_PHY_INNER_LOOP_B;
1938 pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n",
1939 state_str[loopback_en]);
1940 }
1941
1942 return 0;
1943 }
1944
1945 /* hclge_dbg_dump_mac_tnl_status: print messages about mac tnl interrupts
1946 * @hdev: pointer to struct hclge_dev
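 * @buf: buffer the mac tnl status messages are written into
 * @len: size of @buf in bytes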
1947 */
1948 static int
1949 hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev, char *buf, int len)
1950 {
1951 struct hclge_mac_tnl_stats stats;
1952 unsigned long rem_nsec;
1953 int pos = 0;
1954
1955 pos += scnprintf(buf + pos, len - pos,
1956 "Recently generated mac tnl interruption:\n");
1957
1958 while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
1959 rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
1960
1961 pos += scnprintf(buf + pos, len - pos,
1962 "[%07lu.%03lu] status = 0x%x\n",
1963 (unsigned long)stats.time, rem_nsec / 1000,
1964 stats.status);
1965 }
1966
1967 return 0;
1968 }
1969
1971 static const struct hclge_dbg_item mac_list_items[] = {
1972 { "FUNC_ID", 2 },
1973 { "MAC_ADDR", 12 },
1974 { "STATE", 2 },
1975 };
1976
1977 static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
1978 bool is_unicast)
1979 {
1980 char data_str[ARRAY_SIZE(mac_list_items)][HCLGE_DBG_DATA_STR_LEN];
1981 char content[HCLGE_DBG_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
1982 char *result[ARRAY_SIZE(mac_list_items)];
1983 struct hclge_mac_node *mac_node, *tmp;
1984 struct hclge_vport *vport;
1985 struct list_head *list;
1986 u32 func_id;
1987 int pos = 0;
1988 int i;
1989
1990 for (i = 0; i < ARRAY_SIZE(mac_list_items); i++)
1991 result[i] = &data_str[i][0];
1992
1993 pos += scnprintf(buf + pos, len - pos, "%s MAC_LIST:\n",
1994 is_unicast ? "UC" : "MC");
1995 hclge_dbg_fill_content(content, sizeof(content), mac_list_items,
1996 NULL, ARRAY_SIZE(mac_list_items));
1997 pos += scnprintf(buf + pos, len - pos, "%s", content);
1998
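/* Walk each vport's unicast or multicast MAC list under mac_list_lock and
 * emit one formatted row per MAC node.
 */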
1999 for (func_id = 0; func_id < hdev->num_alloc_vport; func_id++) {
2000 vport = &hdev->vport[func_id];
2001 list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
2002 spin_lock_bh(&vport->mac_list_lock);
2003 list_for_each_entry_safe(mac_node, tmp, list, node) {
2004 i = 0;
2005 result[i++] = hclge_dbg_get_func_id_str(str_id,
2006 func_id);
2007 sprintf(result[i++], "%pM", mac_node->mac_addr);
2008 sprintf(result[i++], "%5s",
2009 hclge_mac_state_str[mac_node->state]);
2010 hclge_dbg_fill_content(content, sizeof(content),
2011 mac_list_items,
2012 (const char **)result,
2013 ARRAY_SIZE(mac_list_items));
2014 pos += scnprintf(buf + pos, len - pos, "%s", content);
2015 }
2016 spin_unlock_bh(&vport->mac_list_lock);
2017 }
2018 }
2019
2020 static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
2021 {
2022 u8 func_num = pci_num_vf(hdev->pdev) + 1;
2023 struct hclge_vport *vport;
2024 int pos = 0;
2025 u8 i;
2026
2027 pos += scnprintf(buf, len, "num_alloc_vport : %u\n",
2028 hdev->num_alloc_vport);
2029 pos += scnprintf(buf + pos, len - pos, "max_umv_size : %u\n",
2030 hdev->max_umv_size);
2031 pos += scnprintf(buf + pos, len - pos, "wanted_umv_size : %u\n",
2032 hdev->wanted_umv_size);
2033 pos += scnprintf(buf + pos, len - pos, "priv_umv_size : %u\n",
2034 hdev->priv_umv_size);
2035
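/* share_umv_size and each vport's used_umv_num are read under vport_lock. */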
2036 mutex_lock(&hdev->vport_lock);
2037 pos += scnprintf(buf + pos, len - pos, "share_umv_size : %u\n",
2038 hdev->share_umv_size);
2039 for (i = 0; i < func_num; i++) {
2040 vport = &hdev->vport[i];
2041 pos += scnprintf(buf + pos, len - pos,
2042 "vport(%u) used_umv_num : %u\n",
2043 i, vport->used_umv_num);
2044 }
2045 mutex_unlock(&hdev->vport_lock);
2046
2047 pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
2048 hdev->used_mc_mac_num);
2049
2050 return 0;
2051 }
2052
2053 static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2054 struct hclge_dbg_vlan_cfg *vlan_cfg)
2055 {
2056 struct hclge_vport_vtag_rx_cfg_cmd *req;
2057 struct hclge_desc desc;
2058 u16 bmap_index;
2059 u8 rx_cfg;
2060 int ret;
2061
2062 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
2063
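/* Address the target function: vf_offset selects which group of functions
 * the command covers, and the bitmap bit selects this function within it.
 */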
2064 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
2065 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2066 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2067 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2068
2069 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2070 if (ret) {
2071 dev_err(&hdev->pdev->dev,
2072 "failed to get vport%u rxvlan cfg, ret = %d\n",
2073 vf_id, ret);
2074 return ret;
2075 }
2076
2077 rx_cfg = req->vport_vlan_cfg;
2078 vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
2079 vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
2080 vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
2081 vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
2082 vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
2083 vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
2084
2085 return 0;
2086 }
2087
2088 static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
2089 struct hclge_dbg_vlan_cfg *vlan_cfg)
2090 {
2091 struct hclge_vport_vtag_tx_cfg_cmd *req;
2092 struct hclge_desc desc;
2093 u16 bmap_index;
2094 u8 tx_cfg;
2095 int ret;
2096
2097 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
2098 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
2099 req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
2100 bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
2101 req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
2102
2103 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2104 if (ret) {
2105 dev_err(&hdev->pdev->dev,
2106 "failed to get vport%u txvlan cfg, ret = %d\n",
2107 vf_id, ret);
2108 return ret;
2109 }
2110
2111 tx_cfg = req->vport_vlan_cfg;
2112 vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
2113
2114 vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
2115 vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
2116 vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
2117 vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
2118 vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
2119 vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
2120 vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
2121
2122 return 0;
2123 }
2124
2125 static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
2126 u8 vlan_type, u8 vf_id,
2127 struct hclge_desc *desc)
2128 {
2129 struct hclge_vlan_filter_ctrl_cmd *req;
2130 int ret;
2131
2132 hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
2133 req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
2134 req->vlan_type = vlan_type;
2135 req->vf_id = vf_id;
2136
2137 ret = hclge_cmd_send(&hdev->hw, desc, 1);
2138 if (ret)
2139 dev_err(&hdev->pdev->dev,
2140 "failed to get vport%u vlan filter config, ret = %d.\n",
2141 vf_id, ret);
2142
2143 return ret;
2144 }
2145
2146 static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
2147 u8 vf_id, u8 *vlan_fe)
2148 {
2149 struct hclge_vlan_filter_ctrl_cmd *req;
2150 struct hclge_desc desc;
2151 int ret;
2152
2153 ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
2154 if (ret)
2155 return ret;
2156
2157 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
2158 *vlan_fe = req->vlan_fe;
2159
2160 return 0;
2161 }
2162
2163 static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
2164 u8 vf_id, u8 *bypass_en)
2165 {
2166 struct hclge_port_vlan_filter_bypass_cmd *req;
2167 struct hclge_desc desc;
2168 int ret;
2169
2170 if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
2171 return 0;
2172
2173 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
2174 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
2175 req->vf_id = vf_id;
2176
2177 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2178 if (ret) {
2179 dev_err(&hdev->pdev->dev,
2180 "failed to get vport%u port vlan filter bypass state, ret = %d.\n",
2181 vf_id, ret);
2182 return ret;
2183 }
2184
2185 *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
2186
2187 return 0;
2188 }
2189
2190 static const struct hclge_dbg_item vlan_filter_items[] = {
2191 { "FUNC_ID", 2 },
2192 { "I_VF_VLAN_FILTER", 2 },
2193 { "E_VF_VLAN_FILTER", 2 },
2194 { "PORT_VLAN_FILTER_BYPASS", 0 }
2195 };
2196
2197 static const struct hclge_dbg_item vlan_offload_items[] = {
2198 { "FUNC_ID", 2 },
2199 { "PVID", 4 },
2200 { "ACCEPT_TAG1", 2 },
2201 { "ACCEPT_TAG2", 2 },
2202 { "ACCEPT_UNTAG1", 2 },
2203 { "ACCEPT_UNTAG2", 2 },
2204 { "INSERT_TAG1", 2 },
2205 { "INSERT_TAG2", 2 },
2206 { "SHIFT_TAG", 2 },
2207 { "STRIP_TAG1", 2 },
2208 { "STRIP_TAG2", 2 },
2209 { "DROP_TAG1", 2 },
2210 { "DROP_TAG2", 2 },
2211 { "PRI_ONLY_TAG1", 2 },
2212 { "PRI_ONLY_TAG2", 0 }
2213 };
2214
2215 static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
2216 int len, int *pos)
2217 {
2218 char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
2219 const char *result[ARRAY_SIZE(vlan_filter_items)];
2220 u8 i, j, vlan_fe, bypass, ingress, egress;
2221 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2222 int ret;
2223
2224 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
2225 &vlan_fe);
2226 if (ret)
2227 return ret;
2228 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2229 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2230
2231 *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n",
2232 state_str[ingress]);
2233 *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
2234 state_str[egress]);
2235
2236 hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
2237 NULL, ARRAY_SIZE(vlan_filter_items));
2238 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2239
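/* Dump the ingress/egress VLAN filter state for the PF and every enabled VF,
 * plus the port VLAN filter bypass state when the device supports it.
 */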
2240 for (i = 0; i < func_num; i++) {
2241 ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
2242 &vlan_fe);
2243 if (ret)
2244 return ret;
2245
2246 ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
2247 egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
2248 ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
2249 if (ret)
2250 return ret;
2251 j = 0;
2252 result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2253 result[j++] = state_str[ingress];
2254 result[j++] = state_str[egress];
2255 result[j++] =
2256 test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
2257 hdev->ae_dev->caps) ? state_str[bypass] : "NA";
2258 hclge_dbg_fill_content(content, sizeof(content),
2259 vlan_filter_items, result,
2260 ARRAY_SIZE(vlan_filter_items));
2261 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2262 }
2263 *pos += scnprintf(buf + *pos, len - *pos, "\n");
2264
2265 return 0;
2266 }
2267
2268 static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
2269 int len, int *pos)
2270 {
2271 char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
2272 const char *result[ARRAY_SIZE(vlan_offload_items)];
2273 char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
2274 u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
2275 struct hclge_dbg_vlan_cfg vlan_cfg;
2276 int ret;
2277 u8 i, j;
2278
2279 hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
2280 NULL, ARRAY_SIZE(vlan_offload_items));
2281 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2282
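/* Build one row per function: TX offload fields come from the TX vtag config
 * query, RX fields (strip/drop/pri-only) from the RX vtag config query.
 */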
2283 for (i = 0; i < func_num; i++) {
2284 ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
2285 if (ret)
2286 return ret;
2287
2288 ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
2289 if (ret)
2290 return ret;
2291
2292 sprintf(str_pvid, "%u", vlan_cfg.pvid);
2293 j = 0;
2294 result[j++] = hclge_dbg_get_func_id_str(str_id, i);
2295 result[j++] = str_pvid;
2296 result[j++] = state_str[vlan_cfg.accept_tag1];
2297 result[j++] = state_str[vlan_cfg.accept_tag2];
2298 result[j++] = state_str[vlan_cfg.accept_untag1];
2299 result[j++] = state_str[vlan_cfg.accept_untag2];
2300 result[j++] = state_str[vlan_cfg.insert_tag1];
2301 result[j++] = state_str[vlan_cfg.insert_tag2];
2302 result[j++] = state_str[vlan_cfg.shift_tag];
2303 result[j++] = state_str[vlan_cfg.strip_tag1];
2304 result[j++] = state_str[vlan_cfg.strip_tag2];
2305 result[j++] = state_str[vlan_cfg.drop_tag1];
2306 result[j++] = state_str[vlan_cfg.drop_tag2];
2307 result[j++] = state_str[vlan_cfg.pri_only1];
2308 result[j++] = state_str[vlan_cfg.pri_only2];
2309
2310 hclge_dbg_fill_content(content, sizeof(content),
2311 vlan_offload_items, result,
2312 ARRAY_SIZE(vlan_offload_items));
2313 *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
2314 }
2315
2316 return 0;
2317 }
2318
2319 static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
2320 int len)
2321 {
2322 int pos = 0;
2323 int ret;
2324
2325 ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
2326 if (ret)
2327 return ret;
2328
2329 return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
2330 }
2331
2332 static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
2333 {
2334 struct hclge_ptp *ptp = hdev->ptp;
2335 u32 sw_cfg = ptp->ptp_cfg;
2336 unsigned int tx_start;
2337 unsigned int last_rx;
2338 int pos = 0;
2339 u32 hw_cfg;
2340 int ret;
2341
2342 pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
2343 ptp->info.name);
2344 pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
2345 test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
2346 "yes" : "no");
2347 pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
2348 test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
2349 "yes" : "no");
2350 pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
2351 test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
2352 "yes" : "no");
2353
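/* last_rx and tx_start are recorded in jiffies; convert to milliseconds and
 * print as seconds.milliseconds.
 */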
2354 last_rx = jiffies_to_msecs(ptp->last_rx);
2355 pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
2356 last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
2357 pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
2358
2359 tx_start = jiffies_to_msecs(ptp->tx_start);
2360 pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
2361 tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
2362 pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
2363 pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
2364 ptp->tx_skipped);
2365 pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
2366 ptp->tx_timeout);
2367 pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
2368 ptp->last_tx_seqid);
2369
2370 ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
2371 if (ret)
2372 return ret;
2373
2374 pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
2375 sw_cfg, hw_cfg);
2376
2377 pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
2378 ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
2379
2380 return 0;
2381 }
2382
2383 static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
2384 {
2385 hclge_dbg_dump_mac_list(hdev, buf, len, true);
2386
2387 return 0;
2388 }
2389
2390 static int hclge_dbg_dump_mac_mc(struct hclge_dev *hdev, char *buf, int len)
2391 {
2392 hclge_dbg_dump_mac_list(hdev, buf, len, false);
2393
2394 return 0;
2395 }
2396
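/* Map each debugfs command to its dump handler; register dumps use
 * dbg_dump_reg, everything else uses dbg_dump.
 */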
2397 static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
2398 {
2399 .cmd = HNAE3_DBG_CMD_TM_NODES,
2400 .dbg_dump = hclge_dbg_dump_tm_nodes,
2401 },
2402 {
2403 .cmd = HNAE3_DBG_CMD_TM_PRI,
2404 .dbg_dump = hclge_dbg_dump_tm_pri,
2405 },
2406 {
2407 .cmd = HNAE3_DBG_CMD_TM_QSET,
2408 .dbg_dump = hclge_dbg_dump_tm_qset,
2409 },
2410 {
2411 .cmd = HNAE3_DBG_CMD_TM_MAP,
2412 .dbg_dump = hclge_dbg_dump_tm_map,
2413 },
2414 {
2415 .cmd = HNAE3_DBG_CMD_TM_PG,
2416 .dbg_dump = hclge_dbg_dump_tm_pg,
2417 },
2418 {
2419 .cmd = HNAE3_DBG_CMD_TM_PORT,
2420 .dbg_dump = hclge_dbg_dump_tm_port,
2421 },
2422 {
2423 .cmd = HNAE3_DBG_CMD_TC_SCH_INFO,
2424 .dbg_dump = hclge_dbg_dump_tc,
2425 },
2426 {
2427 .cmd = HNAE3_DBG_CMD_QOS_PAUSE_CFG,
2428 .dbg_dump = hclge_dbg_dump_qos_pause_cfg,
2429 },
2430 {
2431 .cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
2432 .dbg_dump = hclge_dbg_dump_qos_pri_map,
2433 },
2434 {
2435 .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
2436 .dbg_dump = hclge_dbg_dump_qos_dscp_map,
2437 },
2438 {
2439 .cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
2440 .dbg_dump = hclge_dbg_dump_qos_buf_cfg,
2441 },
2442 {
2443 .cmd = HNAE3_DBG_CMD_MAC_UC,
2444 .dbg_dump = hclge_dbg_dump_mac_uc,
2445 },
2446 {
2447 .cmd = HNAE3_DBG_CMD_MAC_MC,
2448 .dbg_dump = hclge_dbg_dump_mac_mc,
2449 },
2450 {
2451 .cmd = HNAE3_DBG_CMD_MNG_TBL,
2452 .dbg_dump = hclge_dbg_dump_mng_table,
2453 },
2454 {
2455 .cmd = HNAE3_DBG_CMD_LOOPBACK,
2456 .dbg_dump = hclge_dbg_dump_loopback,
2457 },
2458 {
2459 .cmd = HNAE3_DBG_CMD_PTP_INFO,
2460 .dbg_dump = hclge_dbg_dump_ptp_info,
2461 },
2462 {
2463 .cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
2464 .dbg_dump = hclge_dbg_dump_interrupt,
2465 },
2466 {
2467 .cmd = HNAE3_DBG_CMD_RESET_INFO,
2468 .dbg_dump = hclge_dbg_dump_rst_info,
2469 },
2470 {
2471 .cmd = HNAE3_DBG_CMD_IMP_INFO,
2472 .dbg_dump = hclge_dbg_get_imp_stats_info,
2473 },
2474 {
2475 .cmd = HNAE3_DBG_CMD_NCL_CONFIG,
2476 .dbg_dump = hclge_dbg_dump_ncl_config,
2477 },
2478 {
2479 .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
2480 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2481 },
2482 {
2483 .cmd = HNAE3_DBG_CMD_REG_SSU,
2484 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2485 },
2486 {
2487 .cmd = HNAE3_DBG_CMD_REG_IGU_EGU,
2488 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2489 },
2490 {
2491 .cmd = HNAE3_DBG_CMD_REG_RPU,
2492 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2493 },
2494 {
2495 .cmd = HNAE3_DBG_CMD_REG_NCSI,
2496 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2497 },
2498 {
2499 .cmd = HNAE3_DBG_CMD_REG_RTC,
2500 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2501 },
2502 {
2503 .cmd = HNAE3_DBG_CMD_REG_PPP,
2504 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2505 },
2506 {
2507 .cmd = HNAE3_DBG_CMD_REG_RCB,
2508 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2509 },
2510 {
2511 .cmd = HNAE3_DBG_CMD_REG_TQP,
2512 .dbg_dump_reg = hclge_dbg_dump_reg_cmd,
2513 },
2514 {
2515 .cmd = HNAE3_DBG_CMD_REG_MAC,
2516 .dbg_dump = hclge_dbg_dump_mac,
2517 },
2518 {
2519 .cmd = HNAE3_DBG_CMD_REG_DCB,
2520 .dbg_dump = hclge_dbg_dump_dcb,
2521 },
2522 {
2523 .cmd = HNAE3_DBG_CMD_FD_TCAM,
2524 .dbg_dump = hclge_dbg_dump_fd_tcam,
2525 },
2526 {
2527 .cmd = HNAE3_DBG_CMD_MAC_TNL_STATUS,
2528 .dbg_dump = hclge_dbg_dump_mac_tnl_status,
2529 },
2530 {
2531 .cmd = HNAE3_DBG_CMD_SERV_INFO,
2532 .dbg_dump = hclge_dbg_dump_serv_info,
2533 },
2534 {
2535 .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
2536 .dbg_dump = hclge_dbg_dump_vlan_config,
2537 },
2538 {
2539 .cmd = HNAE3_DBG_CMD_FD_COUNTER,
2540 .dbg_dump = hclge_dbg_dump_fd_counter,
2541 },
2542 {
2543 .cmd = HNAE3_DBG_CMD_UMV_INFO,
2544 .dbg_dump = hclge_dbg_dump_umv_info,
2545 },
2546 };
2547
2548 int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
2549 char *buf, int len)
2550 {
2551 struct hclge_vport *vport = hclge_get_vport(handle);
2552 const struct hclge_dbg_func *cmd_func;
2553 struct hclge_dev *hdev = vport->back;
2554 u32 i;
2555
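/* Find the handler registered for this command; dbg_dump handlers take only
 * the buffer, dbg_dump_reg handlers also need the command id to pick the
 * register set.
 */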
2556 for (i = 0; i < ARRAY_SIZE(hclge_dbg_cmd_func); i++) {
2557 if (cmd == hclge_dbg_cmd_func[i].cmd) {
2558 cmd_func = &hclge_dbg_cmd_func[i];
2559 if (cmd_func->dbg_dump)
2560 return cmd_func->dbg_dump(hdev, buf, len);
2561 else
2562 return cmd_func->dbg_dump_reg(hdev, cmd, buf,
2563 len);
2564 }
2565 }
2566
2567 dev_err(&hdev->pdev->dev, "invalid command(%d)\n", cmd);
2568 return -EINVAL;
2569 }
2570