1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3 *
4 * Copyright (C) 2019 Marvell.
5 *
6 */
7
8 #ifdef CONFIG_DEBUG_FS
9
10 #include <linux/fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14
15 #include "rvu_struct.h"
16 #include "rvu_reg.h"
17 #include "rvu.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 #include "npc.h"
21
22 #define DEBUGFS_DIR_NAME "octeontx2"
23
/* Generic per-MAC statistics counter indices.  These index the
 * cgx_rx_stats_fields[] / cgx_tx_stats_fields[] description tables below;
 * the meaning of each index differs between the RX and TX tables.
 */
enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};
45
/* NIX TX stats: per-LF transmit statistics register offsets */
enum nix_stat_lf_tx {
	TX_UCAST	= 0x0,	/* Unicast packets sent */
	TX_BCAST	= 0x1,	/* Broadcast packets sent */
	TX_MCAST	= 0x2,	/* Multicast packets sent */
	TX_DROP		= 0x3,	/* Packets dropped on transmit */
	TX_OCTS		= 0x4,	/* Total octets sent */
	TX_STATS_ENUM_LAST,
};
55
/* NIX RX stats: per-LF receive statistics register offsets */
enum nix_stat_lf_rx {
	RX_OCTS		= 0x0,	/* Total octets received */
	RX_UCAST	= 0x1,	/* Unicast packets received */
	RX_BCAST	= 0x2,	/* Broadcast packets received */
	RX_MCAST	= 0x3,	/* Multicast packets received */
	RX_DROP		= 0x4,	/* Packets dropped */
	RX_DROP_OCTS	= 0x5,	/* Octets of dropped packets */
	RX_FCS		= 0x6,	/* Packets with FCS errors */
	RX_ERR		= 0x7,	/* Errored packets */
	RX_DRP_BCAST	= 0x8,	/* Dropped broadcast packets */
	RX_DRP_MCAST	= 0x9,	/* Dropped multicast packets */
	RX_DRP_L3BCAST	= 0xa,	/* Dropped L3 broadcast packets */
	RX_DRP_L3MCAST	= 0xb,	/* Dropped L3 multicast packets */
	RX_STATS_ENUM_LAST,
};
72
/* Human-readable names for the CGX RX statistics counters, indexed by the
 * CGX_STATx enum; printed by the cgx/lmac debugfs stats files.
 */
static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};
88
89 static char *cgx_tx_stats_fields[] = {
90 [CGX_STAT0] = "Packets dropped due to excessive collisions",
91 [CGX_STAT1] = "Packets dropped due to excessive deferral",
92 [CGX_STAT2] = "Multiple collisions before successful transmission",
93 [CGX_STAT3] = "Single collisions before successful transmission",
94 [CGX_STAT4] = "Total octets sent on the interface",
95 [CGX_STAT5] = "Total frames sent on the interface",
96 [CGX_STAT6] = "Packets sent with an octet count < 64",
97 [CGX_STAT7] = "Packets sent with an octet count == 64",
98 [CGX_STAT8] = "Packets sent with an octet count of 65–127",
99 [CGX_STAT9] = "Packets sent with an octet count of 128-255",
100 [CGX_STAT10] = "Packets sent with an octet count of 256-511",
101 [CGX_STAT11] = "Packets sent with an octet count of 512-1023",
102 [CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
103 [CGX_STAT13] = "Packets sent with an octet count of > 1518",
104 [CGX_STAT14] = "Packets sent to a broadcast DMAC",
105 [CGX_STAT15] = "Packets sent to the multicast DMAC",
106 [CGX_STAT16] = "Transmit underflow and were truncated",
107 [CGX_STAT17] = "Control/PAUSE packets sent",
108 };
109
/* Human-readable names for the RPM (CN10K MAC) RX statistics counters,
 * indexed positionally to match the hardware counter order.
 * The "65-127" bucket label had been mojibake-corrupted ("65â127");
 * restored to plain ASCII.
 */
static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
	"Octets of received packets with out error",
	"Received packets with alignment errors",
	"Control/PAUSE packets received",
	"Packets received with Frame too long Errors",
	"Packets received with a1nrange length Errors",
	"Received packets",
	"Packets received with FrameCheckSequenceErrors",
	"Packets received with VLAN header",
	"Error packets",
	"Packets received with unicast DMAC",
	"Packets received with multicast DMAC",
	"Packets received with broadcast DMAC",
	"Dropped packets",
	"Total frames received on interface",
	"Packets received with an octet count < 64",
	"Packets received with an octet count == 64",
	"Packets received with an octet count of 65-127",
	"Packets received with an octet count of 128-255",
	"Packets received with an octet count of 256-511",
	"Packets received with an octet count of 512-1023",
	"Packets received with an octet count of 1024-1518",
	"Packets received with an octet count of > 1518",
	"Oversized Packets",
	"Jabber Packets",
	"Fragmented Packets",
	"CBFC(class based flow control) pause frames received for class 0",
	"CBFC pause frames received for class 1",
	"CBFC pause frames received for class 2",
	"CBFC pause frames received for class 3",
	"CBFC pause frames received for class 4",
	"CBFC pause frames received for class 5",
	"CBFC pause frames received for class 6",
	"CBFC pause frames received for class 7",
	"CBFC pause frames received for class 8",
	"CBFC pause frames received for class 9",
	"CBFC pause frames received for class 10",
	"CBFC pause frames received for class 11",
	"CBFC pause frames received for class 12",
	"CBFC pause frames received for class 13",
	"CBFC pause frames received for class 14",
	"CBFC pause frames received for class 15",
	"MAC control packets received",
};
155
/* Human-readable names for the RPM (CN10K MAC) TX statistics counters,
 * indexed positionally to match the hardware counter order.
 * The "65-127" bucket label had been mojibake-corrupted ("65â127");
 * restored to plain ASCII.
 */
static char *rpm_tx_stats_fields[] = {
	"Total octets sent on the interface",
	"Total octets transmitted OK",
	"Control/Pause frames sent",
	"Total frames transmitted OK",
	"Total frames sent with VLAN header",
	"Error Packets",
	"Packets sent to unicast DMAC",
	"Packets sent to the multicast DMAC",
	"Packets sent to a broadcast DMAC",
	"Packets sent with an octet count == 64",
	"Packets sent with an octet count of 65-127",
	"Packets sent with an octet count of 128-255",
	"Packets sent with an octet count of 256-511",
	"Packets sent with an octet count of 512-1023",
	"Packets sent with an octet count of 1024-1518",
	"Packets sent with an octet count of > 1518",
	"CBFC(class based flow control) pause frames transmitted for class 0",
	"CBFC pause frames transmitted for class 1",
	"CBFC pause frames transmitted for class 2",
	"CBFC pause frames transmitted for class 3",
	"CBFC pause frames transmitted for class 4",
	"CBFC pause frames transmitted for class 5",
	"CBFC pause frames transmitted for class 6",
	"CBFC pause frames transmitted for class 7",
	"CBFC pause frames transmitted for class 8",
	"CBFC pause frames transmitted for class 9",
	"CBFC pause frames transmitted for class 10",
	"CBFC pause frames transmitted for class 11",
	"CBFC pause frames transmitted for class 12",
	"CBFC pause frames transmitted for class 13",
	"CBFC pause frames transmitted for class 14",
	"CBFC pause frames transmitted for class 15",
	"MAC control packets sent",
	"Total frames sent on the interface"
};
192
/* CPT (crypto accelerator) engine classes */
enum cpt_eng_type {
	CPT_AE_TYPE = 1,	/* Asymmetric engine */
	CPT_SE_TYPE = 2,	/* Symmetric engine */
	CPT_IE_TYPE = 3,	/* IPsec engine */
};
198
/* Number of banks implemented by the NDC at @blk_addr (low byte of
 * NDC_AF_CONST).
 */
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
						blk_addr, NDC_AF_CONST) & 0xFF)

/* Placeholders so the FOPS macros below can be instantiated with "NULL"
 * for an absent read or write handler.
 */
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL

/* Define a seq_file-based debugfs file: generates the open handler and a
 * file_operations named rvu_dbg_<name>_fops wired to rvu_dbg_<read_op> /
 * rvu_dbg_<write_op>.
 */
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op)	\
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
	return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner		= THIS_MODULE, \
	.open		= rvu_dbg_open_##name, \
	.read		= seq_read, \
	.write		= rvu_dbg_##write_op, \
	.llseek		= seq_lseek, \
	.release	= single_release, \
}

/* Define a raw (non-seq_file) debugfs file with direct read/write ops */
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = simple_open, \
	.read = rvu_dbg_##read_op, \
	.write = rvu_dbg_##write_op \
}
226
227 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
228
/* Render the set of @block LFs attached to @pcifunc into @lfs as a
 * comma-separated list with consecutive LFs collapsed into ranges,
 * e.g. "0-3,6".  @lfs must be large enough for the worst case (see
 * get_max_column_width()).
 */
static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	/* prev_lf starts one past the valid range so the first match can
	 * never look like the continuation of a run.
	 */
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		/* Skip LFs owned by a different PF/VF */
		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			/* Consecutive LF: extend the current run, print later */
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			/* Close the pending run and open a new entry */
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			/* Lone LF; prefix a comma unless it is the first */
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	/* A run may still be open when the bitmap is exhausted */
	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
262
get_max_column_width(struct rvu * rvu)263 static int get_max_column_width(struct rvu *rvu)
264 {
265 int index, pf, vf, lf_str_size = 12, buf_size = 256;
266 struct rvu_block block;
267 u16 pcifunc;
268 char *buf;
269
270 buf = kzalloc(buf_size, GFP_KERNEL);
271 if (!buf)
272 return -ENOMEM;
273
274 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
275 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
276 pcifunc = pf << 10 | vf;
277 if (!pcifunc)
278 continue;
279
280 for (index = 0; index < BLK_COUNT; index++) {
281 block = rvu->hw->block[index];
282 if (!strlen(block.name))
283 continue;
284
285 get_lf_str_list(block, pcifunc, buf);
286 if (lf_str_size <= strlen(buf))
287 lf_str_size = strlen(buf) + 1;
288 }
289 }
290 }
291
292 kfree(buf);
293 return lf_str_size;
294 }
295
296 /* Dumps current provisioning status of all RVU block LFs */
rvu_dbg_rsrc_attach_status(struct file * filp,char __user * buffer,size_t count,loff_t * ppos)297 static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
298 char __user *buffer,
299 size_t count, loff_t *ppos)
300 {
301 int index, off = 0, flag = 0, len = 0, i = 0;
302 struct rvu *rvu = filp->private_data;
303 int bytes_not_copied = 0;
304 struct rvu_block block;
305 int pf, vf, pcifunc;
306 int buf_size = 2048;
307 int lf_str_size;
308 char *lfs;
309 char *buf;
310
311 /* don't allow partial reads */
312 if (*ppos != 0)
313 return 0;
314
315 buf = kzalloc(buf_size, GFP_KERNEL);
316 if (!buf)
317 return -ENOSPC;
318
319 /* Get the maximum width of a column */
320 lf_str_size = get_max_column_width(rvu);
321
322 lfs = kzalloc(lf_str_size, GFP_KERNEL);
323 if (!lfs) {
324 kfree(buf);
325 return -ENOMEM;
326 }
327 off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
328 "pcifunc");
329 for (index = 0; index < BLK_COUNT; index++)
330 if (strlen(rvu->hw->block[index].name)) {
331 off += scnprintf(&buf[off], buf_size - 1 - off,
332 "%-*s", lf_str_size,
333 rvu->hw->block[index].name);
334 }
335
336 off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
337 bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
338 if (bytes_not_copied)
339 goto out;
340
341 i++;
342 *ppos += off;
343 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
344 for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
345 off = 0;
346 flag = 0;
347 pcifunc = pf << 10 | vf;
348 if (!pcifunc)
349 continue;
350
351 if (vf) {
352 sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
353 off = scnprintf(&buf[off],
354 buf_size - 1 - off,
355 "%-*s", lf_str_size, lfs);
356 } else {
357 sprintf(lfs, "PF%d", pf);
358 off = scnprintf(&buf[off],
359 buf_size - 1 - off,
360 "%-*s", lf_str_size, lfs);
361 }
362
363 for (index = 0; index < BLK_COUNT; index++) {
364 block = rvu->hw->block[index];
365 if (!strlen(block.name))
366 continue;
367 len = 0;
368 lfs[len] = '\0';
369 get_lf_str_list(block, pcifunc, lfs);
370 if (strlen(lfs))
371 flag = 1;
372
373 off += scnprintf(&buf[off], buf_size - 1 - off,
374 "%-*s", lf_str_size, lfs);
375 }
376 if (flag) {
377 off += scnprintf(&buf[off],
378 buf_size - 1 - off, "\n");
379 bytes_not_copied = copy_to_user(buffer +
380 (i * off),
381 buf, off);
382 if (bytes_not_copied)
383 goto out;
384
385 i++;
386 *ppos += off;
387 }
388 }
389 }
390
391 out:
392 kfree(lfs);
393 kfree(buf);
394 if (bytes_not_copied)
395 return -EFAULT;
396
397 return *ppos;
398 }
399
400 RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
401
rvu_dbg_rvu_pf_cgx_map_display(struct seq_file * filp,void * unused)402 static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
403 {
404 struct rvu *rvu = filp->private;
405 struct pci_dev *pdev = NULL;
406 struct mac_ops *mac_ops;
407 char cgx[10], lmac[10];
408 struct rvu_pfvf *pfvf;
409 int pf, domain, blkid;
410 u8 cgx_id, lmac_id;
411 u16 pcifunc;
412
413 domain = 2;
414 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
415 /* There can be no CGX devices at all */
416 if (!mac_ops)
417 return 0;
418 seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
419 mac_ops->name);
420 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
421 if (!is_pf_cgxmapped(rvu, pf))
422 continue;
423
424 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
425 if (!pdev)
426 continue;
427
428 cgx[0] = 0;
429 lmac[0] = 0;
430 pcifunc = pf << 10;
431 pfvf = rvu_get_pfvf(rvu, pcifunc);
432
433 if (pfvf->nix_blkaddr == BLKADDR_NIX0)
434 blkid = 0;
435 else
436 blkid = 1;
437
438 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
439 &lmac_id);
440 sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
441 sprintf(lmac, "LMAC%d", lmac_id);
442 seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
443 dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
444 }
445 return 0;
446 }
447
448 RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
449
rvu_dbg_is_valid_lf(struct rvu * rvu,int blkaddr,int lf,u16 * pcifunc)450 static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
451 u16 *pcifunc)
452 {
453 struct rvu_block *block;
454 struct rvu_hwinfo *hw;
455
456 hw = rvu->hw;
457 block = &hw->block[blkaddr];
458
459 if (lf < 0 || lf >= block->lf.max) {
460 dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
461 block->lf.max - 1);
462 return false;
463 }
464
465 *pcifunc = block->fn_map[lf];
466 if (!*pcifunc) {
467 dev_warn(rvu->dev,
468 "This LF is not attached to any RVU PFFUNC\n");
469 return false;
470 }
471 return true;
472 }
473
print_npa_qsize(struct seq_file * m,struct rvu_pfvf * pfvf)474 static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
475 {
476 char *buf;
477
478 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
479 if (!buf)
480 return;
481
482 if (!pfvf->aura_ctx) {
483 seq_puts(m, "Aura context is not initialized\n");
484 } else {
485 bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
486 pfvf->aura_ctx->qsize);
487 seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
488 seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
489 }
490
491 if (!pfvf->pool_ctx) {
492 seq_puts(m, "Pool context is not initialized\n");
493 } else {
494 bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
495 pfvf->pool_ctx->qsize);
496 seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
497 seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
498 }
499 kfree(buf);
500 }
501
/* The 'qsize' entry dumps current Aura/Pool context Qsize
 * and each context's current enable/disable status in a bitmap.
 * The LF to dump is the one most recently selected through the
 * corresponding qsize write handler (npa_qsize_id / nix_qsize_id).
 */
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
				 int blktype)
{
	/* Block-type specific dump routine, selected below */
	void (*print_qsize)(struct seq_file *filp,
			    struct rvu_pfvf *pfvf) = NULL;
	struct dentry *current_dir;
	struct rvu_pfvf *pfvf;
	struct rvu *rvu;
	int qsize_id;
	u16 pcifunc;
	int blkaddr;

	rvu = filp->private;
	switch (blktype) {
	case BLKTYPE_NPA:
		qsize_id = rvu->rvu_dbg.npa_qsize_id;
		print_qsize = print_npa_qsize;
		break;

	case BLKTYPE_NIX:
		qsize_id = rvu->rvu_dbg.nix_qsize_id;
		print_qsize = print_nix_qsize;
		break;

	default:
		return -EINVAL;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		/* Distinguish NIX0/NIX1 by the debugfs parent dir name */
		current_dir = filp->file->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
			   BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	/* Validate the saved LF index and resolve its owner */
	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(filp, pfvf);

	return 0;
}
549
rvu_dbg_qsize_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int blktype)550 static ssize_t rvu_dbg_qsize_write(struct file *filp,
551 const char __user *buffer, size_t count,
552 loff_t *ppos, int blktype)
553 {
554 char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
555 struct seq_file *seqfile = filp->private_data;
556 char *cmd_buf, *cmd_buf_tmp, *subtoken;
557 struct rvu *rvu = seqfile->private;
558 struct dentry *current_dir;
559 int blkaddr;
560 u16 pcifunc;
561 int ret, lf;
562
563 cmd_buf = memdup_user(buffer, count + 1);
564 if (IS_ERR(cmd_buf))
565 return -ENOMEM;
566
567 cmd_buf[count] = '\0';
568
569 cmd_buf_tmp = strchr(cmd_buf, '\n');
570 if (cmd_buf_tmp) {
571 *cmd_buf_tmp = '\0';
572 count = cmd_buf_tmp - cmd_buf + 1;
573 }
574
575 cmd_buf_tmp = cmd_buf;
576 subtoken = strsep(&cmd_buf, " ");
577 ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
578 if (cmd_buf)
579 ret = -EINVAL;
580
581 if (ret < 0 || !strncmp(subtoken, "help", 4)) {
582 dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
583 goto qsize_write_done;
584 }
585
586 if (blktype == BLKTYPE_NPA) {
587 blkaddr = BLKADDR_NPA;
588 } else {
589 current_dir = filp->f_path.dentry->d_parent;
590 blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
591 BLKADDR_NIX1 : BLKADDR_NIX0);
592 }
593
594 if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
595 ret = -EINVAL;
596 goto qsize_write_done;
597 }
598 if (blktype == BLKTYPE_NPA)
599 rvu->rvu_dbg.npa_qsize_id = lf;
600 else
601 rvu->rvu_dbg.nix_qsize_id = lf;
602
603 qsize_write_done:
604 kfree(cmd_buf_tmp);
605 return ret ? ret : count;
606 }
607
/* NPA-flavoured wrapper for the shared qsize write handler */
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NPA);
}
615
/* NPA-flavoured wrapper for the shared qsize display handler */
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}
620
621 RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
622
/* Dumps given NPA Aura's context, one "Wn: field" line per context word
 * of the npa_aura_s structure returned by the admin queue.
 */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	/* fc_be / fc_msh_dst exist only on CN10K (non-OcteonTx2) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
670
/* Dumps given NPA Pool's context, one "Wn: field" line per context word
 * of the npa_pool_s structure returned by the admin queue.
 */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	/* fc_be / fc_msh_dst exist only on CN10K (non-OcteonTx2) silicon */
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
717
/* Reads aura/pool's ctx from admin queue and dumps it.  Which LF and
 * which context id (or "all") were previously selected through the
 * corresponding write handler (rvu->rvu_dbg.npa_{aura,pool}_ctx).
 */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	/* Fetch the selection saved by the matching write handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	/* Build an admin-queue READ request for the selected context type */
	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	/* "all" dumps ids 0..max_id-1, otherwise just the single id */
	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;
		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			   aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}
798
/* Validate and record the LF/id/all selection for a subsequent
 * aura_ctx/pool_ctx read.  Returns 0 on success or -EINVAL when the LF
 * is invalid, the context is uninitialized, or @id is out of range.
 */
static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int qsize = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Pick the qsize of the selected context type; it bounds @id */
	if (ctype == NPA_AQ_CTYPE_AURA) {
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		qsize = pfvf->aura_ctx->qsize;
	} else if (ctype == NPA_AQ_CTYPE_POOL) {
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		qsize = pfvf->pool_ctx->qsize;
	}

	if (id < 0 || id >= qsize) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			 qsize - 1);
		return -EINVAL;
	}

	/* Remember the selection for the matching display handler */
	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
849
/* Parse "<npalf> [<id>|all]" from the user buffer into @npalf, @id and
 * @all.  @cmd_buf must hold at least *count + 1 bytes.  Returns 0 on
 * success, -EFAULT on copy failure, -EINVAL on malformed input.
 */
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	char *newline, *token;
	int not_copied;
	int err;

	not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';

	/* Trim at the first newline and shrink the consumed byte count */
	newline = strchr(cmd_buf, '\n');
	if (newline) {
		*newline = '\0';
		*count = newline - cmd_buf + 1;
	}

	/* First token: the NPA LF number */
	token = strsep(&cmd_buf, " ");
	err = token ? kstrtoint(token, 10, npalf) : -EINVAL;
	if (err < 0)
		return err;

	/* Second token: either a context id or the literal "all" */
	token = strsep(&cmd_buf, " ");
	if (token && strcmp(token, "all") == 0) {
		*all = true;
	} else {
		err = token ? kstrtoint(token, 10, id) : -EINVAL;
		if (err < 0)
			return err;
	}

	/* Anything left over is malformed input */
	if (cmd_buf)
		return -EINVAL;
	return err;
}
887
rvu_dbg_npa_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)888 static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
889 const char __user *buffer,
890 size_t count, loff_t *ppos, int ctype)
891 {
892 char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
893 "aura" : "pool";
894 struct seq_file *seqfp = filp->private_data;
895 struct rvu *rvu = seqfp->private;
896 int npalf, id = 0, ret;
897 bool all = false;
898
899 if ((*ppos != 0) || !count)
900 return -EINVAL;
901
902 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
903 if (!cmd_buf)
904 return count;
905 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
906 &npalf, &id, &all);
907 if (ret < 0) {
908 dev_info(rvu->dev,
909 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
910 ctype_string, ctype_string);
911 goto done;
912 } else {
913 ret = write_npa_ctx(rvu, all, npalf, id, ctype);
914 }
915 done:
916 kfree(cmd_buf);
917 return ret ? ret : count;
918 }
919
/* Aura-flavoured wrapper for the shared NPA ctx write handler */
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}
927
/* Aura-flavoured wrapper for the shared NPA ctx display handler */
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}
932
933 RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
934
/* Pool-flavoured wrapper for the shared NPA ctx write handler */
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}
942
/* Pool-flavoured wrapper for the shared NPA ctx display handler */
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}
947
948 RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
949
ndc_cache_stats(struct seq_file * s,int blk_addr,int ctype,int transaction)950 static void ndc_cache_stats(struct seq_file *s, int blk_addr,
951 int ctype, int transaction)
952 {
953 u64 req, out_req, lat, cant_alloc;
954 struct nix_hw *nix_hw;
955 struct rvu *rvu;
956 int port;
957
958 if (blk_addr == BLKADDR_NDC_NPA0) {
959 rvu = s->private;
960 } else {
961 nix_hw = s->private;
962 rvu = nix_hw->rvu;
963 }
964
965 for (port = 0; port < NDC_MAX_PORT; port++) {
966 req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
967 (port, ctype, transaction));
968 lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
969 (port, ctype, transaction));
970 out_req = rvu_read64(rvu, blk_addr,
971 NDC_AF_PORTX_RTX_RWX_OSTDN_PC
972 (port, ctype, transaction));
973 cant_alloc = rvu_read64(rvu, blk_addr,
974 NDC_AF_PORTX_RTX_CANT_ALLOC_PC
975 (port, transaction));
976 seq_printf(s, "\nPort:%d\n", port);
977 seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
978 seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
979 seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
980 seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
981 seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
982 }
983 }
984
ndc_blk_cache_stats(struct seq_file * s,int idx,int blk_addr)985 static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
986 {
987 seq_puts(s, "\n***** CACHE mode read stats *****\n");
988 ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
989 seq_puts(s, "\n***** CACHE mode write stats *****\n");
990 ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
991 seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
992 ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
993 seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
994 ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
995 return 0;
996 }
997
/* NPA has a single NDC (NPA0), unlike NIX which has one per block */
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}
1002
1003 RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
1004
ndc_blk_hits_miss_stats(struct seq_file * s,int idx,int blk_addr)1005 static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
1006 {
1007 struct nix_hw *nix_hw;
1008 struct rvu *rvu;
1009 int bank, max_bank;
1010
1011 if (blk_addr == BLKADDR_NDC_NPA0) {
1012 rvu = s->private;
1013 } else {
1014 nix_hw = s->private;
1015 rvu = nix_hw->rvu;
1016 }
1017
1018 max_bank = NDC_MAX_BANK(rvu, blk_addr);
1019 for (bank = 0; bank < max_bank; bank++) {
1020 seq_printf(s, "BANK:%d\n", bank);
1021 seq_printf(s, "\tHits:\t%lld\n",
1022 (u64)rvu_read64(rvu, blk_addr,
1023 NDC_AF_BANKX_HIT_PC(bank)));
1024 seq_printf(s, "\tMiss:\t%lld\n",
1025 (u64)rvu_read64(rvu, blk_addr,
1026 NDC_AF_BANKX_MISS_PC(bank)));
1027 }
1028 return 0;
1029 }
1030
rvu_dbg_nix_ndc_rx_cache_display(struct seq_file * filp,void * unused)1031 static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
1032 {
1033 struct nix_hw *nix_hw = filp->private;
1034 int blkaddr = 0;
1035 int ndc_idx = 0;
1036
1037 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1038 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1039 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
1040
1041 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1042 }
1043
1044 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
1045
rvu_dbg_nix_ndc_tx_cache_display(struct seq_file * filp,void * unused)1046 static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
1047 {
1048 struct nix_hw *nix_hw = filp->private;
1049 int blkaddr = 0;
1050 int ndc_idx = 0;
1051
1052 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1053 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1054 ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
1055
1056 return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
1057 }
1058
1059 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
1060
/* debugfs read handler: dump per-bank NDC hit/miss counters for the
 * NPA0 NDC block.
 */
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
1068
rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file * filp,void * unused)1069 static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
1070 void *unused)
1071 {
1072 struct nix_hw *nix_hw = filp->private;
1073 int ndc_idx = NPA0_U;
1074 int blkaddr = 0;
1075
1076 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1077 BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
1078
1079 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1080 }
1081
1082 RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
1083
rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file * filp,void * unused)1084 static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
1085 void *unused)
1086 {
1087 struct nix_hw *nix_hw = filp->private;
1088 int ndc_idx = NPA0_U;
1089 int blkaddr = 0;
1090
1091 blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
1092 BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
1093
1094 return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
1095 }
1096
1097 RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
1098
print_nix_cn10k_sq_ctx(struct seq_file * m,struct nix_cn10k_sq_ctx_s * sq_ctx)1099 static void print_nix_cn10k_sq_ctx(struct seq_file *m,
1100 struct nix_cn10k_sq_ctx_s *sq_ctx)
1101 {
1102 seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
1103 sq_ctx->ena, sq_ctx->qint_idx);
1104 seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
1105 sq_ctx->substream, sq_ctx->sdp_mcast);
1106 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
1107 sq_ctx->cq, sq_ctx->sqe_way_mask);
1108
1109 seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
1110 sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
1111 seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
1112 sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
1113 seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
1114 sq_ctx->default_chan, sq_ctx->sqb_count);
1115
1116 seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
1117 seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
1118 seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
1119 sq_ctx->sqb_aura, sq_ctx->sq_int);
1120 seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
1121 sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
1122
1123 seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
1124 sq_ctx->max_sqe_size, sq_ctx->cq_limit);
1125 seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
1126 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1127 seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
1128 sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
1129 seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
1130 sq_ctx->tail_offset, sq_ctx->smenq_offset);
1131 seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
1132 sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
1133
1134 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1135 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1136 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1137 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1138 sq_ctx->smenq_next_sqb);
1139
1140 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1141
1142 seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
1143 seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
1144 sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
1145 seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
1146 sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
1147 seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
1148 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1149
1150 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1151 (u64)sq_ctx->scm_lso_rem);
1152 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1153 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1154 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1155 (u64)sq_ctx->dropped_octs);
1156 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1157 (u64)sq_ctx->dropped_pkts);
1158 }
1159
/* Dump the given NIX SQ context to the seq_file.  Dispatches to the
 * CN10K-specific printer when the silicon is not OcteonTx2, since the
 * SQ context layout differs between the two generations.
 */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K parts use a different SQ context layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
		return;
	}
	seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
		   sq_ctx->sqe_way_mask, sq_ctx->cq);
	seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   sq_ctx->sdp_mcast, sq_ctx->substream);
	seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
		   sq_ctx->qint_idx, sq_ctx->ena);

	seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
		   sq_ctx->sqb_count, sq_ctx->default_chan);
	seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
		   sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
	seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
		   sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);

	seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
		   sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
	seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
		   sq_ctx->sq_int, sq_ctx->sqb_aura);
	seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
		   sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
	seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
		   sq_ctx->smenq_offset, sq_ctx->tail_offset);
	seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
		   sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
	seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
		   sq_ctx->mnq_dis, sq_ctx->lmt_dis);
	seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
		   sq_ctx->cq_limit, sq_ctx->max_sqe_size);

	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
		   sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
	seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
		   sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
	seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);

	seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
		   (u64)sq_ctx->scm_lso_rem);
	seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
	seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
	seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_octs);
	seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
		   (u64)sq_ctx->dropped_pkts);
}
1229
print_nix_cn10k_rq_ctx(struct seq_file * m,struct nix_cn10k_rq_ctx_s * rq_ctx)1230 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1231 struct nix_cn10k_rq_ctx_s *rq_ctx)
1232 {
1233 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1234 rq_ctx->ena, rq_ctx->sso_ena);
1235 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1236 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1237 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1238 rq_ctx->cq, rq_ctx->lenerr_dis);
1239 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1240 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1241 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1242 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1243 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1244 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1245 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1246
1247 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1248 rq_ctx->spb_aura, rq_ctx->lpb_aura);
1249 seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
1250 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1251 rq_ctx->sso_grp, rq_ctx->sso_tt);
1252 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1253 rq_ctx->pb_caching, rq_ctx->wqe_caching);
1254 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1255 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1256 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1257 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1258 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1259 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1260
1261 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1262 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1263 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1264 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
1265 rq_ctx->wqe_skip, rq_ctx->spb_ena);
1266 seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1267 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1268 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1269 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1270 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1271 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1272
1273 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1274 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1275 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1276 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1277 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1278 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1279 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1280 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1281
1282 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
1283 rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
1284 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
1285 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1286 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1287 rq_ctx->rq_int, rq_ctx->rq_int_ena);
1288 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1289
1290 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1291 rq_ctx->ltag, rq_ctx->good_utag);
1292 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1293 rq_ctx->bad_utag, rq_ctx->flow_tagw);
1294 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1295 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1296 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1297 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1298 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1299
1300 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1301 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1302 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1303 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1304 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1305 }
1306
/* Dump the given NIX RQ context to the seq_file.  Dispatches to the
 * CN10K-specific printer when the silicon is not OcteonTx2, since the
 * RQ context layout differs between the two generations.
 */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
	struct nix_hw *nix_hw = m->private;
	struct rvu *rvu = nix_hw->rvu;

	/* CN10K parts use a different RQ context layout */
	if (!is_rvu_otx2(rvu)) {
		print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
		return;
	}

	seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
		   rq_ctx->wqe_aura, rq_ctx->substream);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
		   rq_ctx->cq, rq_ctx->ena_wqwd);
	seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
		   rq_ctx->ipsech_ena, rq_ctx->sso_ena);
	seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);

	seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
		   rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
	seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
		   rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
	seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
		   rq_ctx->pb_caching, rq_ctx->sso_tt);
	seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
		   rq_ctx->sso_grp, rq_ctx->lpb_aura);
	seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);

	seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
		   rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
	seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
		   rq_ctx->xqe_imm_size, rq_ctx->later_skip);
	seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
		   rq_ctx->first_skip, rq_ctx->lpb_sizem1);
	seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
		   rq_ctx->spb_ena, rq_ctx->wqe_skip);
	seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);

	seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
		   rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
	seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
		   rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
	seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
		   rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
	seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
		   rq_ctx->xqe_pass, rq_ctx->xqe_drop);

	seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
		   rq_ctx->qint_idx, rq_ctx->rq_int_ena);
	seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
		   rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
	seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
		   rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
	seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);

	seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
		   rq_ctx->flow_tagw, rq_ctx->bad_utag);
	seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
		   rq_ctx->good_utag, rq_ctx->ltag);

	seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
	seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
	seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
	seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
	seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
1375
/* Dump the given NIX CQ context to the seq_file (the CQ context layout
 * is common to all supported silicon, so no CN10K variant exists).
 */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
	struct nix_cq_ctx_s *cq_ctx = &rsp->cq;

	seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);

	seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
	seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
		   cq_ctx->avg_con, cq_ctx->cint_idx);
	seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
		   cq_ctx->cq_err, cq_ctx->qint_idx);
	seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
		   cq_ctx->bpid, cq_ctx->bp_ena);

	seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
		   cq_ctx->update_time, cq_ctx->avg_level);
	seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
		   cq_ctx->head, cq_ctx->tail);

	seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
		   cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
	seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
		   cq_ctx->qsize, cq_ctx->caching);
	seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
		   cq_ctx->substream, cq_ctx->ena);
	seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
		   cq_ctx->drop_ena, cq_ctx->drop);
	seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
1406
/* Common read handler for the nix sq/rq/cq debugfs context files.
 * Dumps the context(s) previously selected through the corresponding
 * write handler (LF, queue id or "all") by issuing AQ read requests
 * and pretty-printing each response.
 */
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
					 void *unused, int ctype)
{
	void (*print_nix_ctx)(struct seq_file *filp,
			      struct nix_aq_enq_rsp *rsp) = NULL;
	struct nix_hw *nix_hw = filp->private;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_aq_enq_req aq_req;
	struct nix_aq_enq_rsp rsp;
	char *ctype_string = NULL;
	int qidx, rc, max_id = 0;
	struct rvu_pfvf *pfvf;
	int nixlf, id, all;
	u16 pcifunc;

	/* Fetch the selection recorded by write_nix_queue_ctx() */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
		id = rvu->rvu_dbg.nix_cq_ctx.id;
		all = rvu->rvu_dbg.nix_cq_ctx.all;
		break;

	case NIX_AQ_CTYPE_SQ:
		nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
		id = rvu->rvu_dbg.nix_sq_ctx.id;
		all = rvu->rvu_dbg.nix_sq_ctx.all;
		break;

	case NIX_AQ_CTYPE_RQ:
		nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
		id = rvu->rvu_dbg.nix_rq_ctx.id;
		all = rvu->rvu_dbg.nix_rq_ctx.all;
		break;

	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
		seq_puts(filp, "SQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
		seq_puts(filp, "RQ context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
		seq_puts(filp, "CQ context is not initialized\n");
		return -EINVAL;
	}

	/* Pick the per-ctype printer and queue count */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		max_id = pfvf->sq_ctx->qsize;
		ctype_string = "sq";
		print_nix_ctx = print_nix_sq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		max_id = pfvf->rq_ctx->qsize;
		ctype_string = "rq";
		print_nix_ctx = print_nix_rq_ctx;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		max_id = pfvf->cq_ctx->qsize;
		ctype_string = "cq";
		print_nix_ctx = print_nix_cq_ctx;
	}

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NIX_AQ_INSTOP_READ;
	/* "all" dumps queues 0..qsize-1; otherwise only the selected id */
	if (all)
		id = 0;
	else
		max_id = id + 1;
	for (qidx = id; qidx < max_id; qidx++) {
		aq_req.qidx = qidx;
		seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
			   ctype_string, nixlf, aq_req.qidx);
		rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(filp, "Failed to read the context\n");
			return -EINVAL;
		}
		print_nix_ctx(filp, &rsp);
	}
	return 0;
}
1495
/* Validate and record the LF/queue selection written to a nix *_ctx
 * debugfs file.  The recorded values are consumed later by
 * rvu_dbg_nix_queue_ctx_display() when the file is read.
 * Returns 0 on success, -EINVAL on a bad LF, uninitialized context or
 * out-of-range queue id.
 */
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
			       int id, int ctype, char *ctype_string,
			       struct seq_file *m)
{
	struct nix_hw *nix_hw = m->private;
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	/* Reject the request if the LF's context of this type was never
	 * allocated; otherwise pick up its queue count for range checking.
	 */
	if (ctype == NIX_AQ_CTYPE_SQ) {
		if (!pfvf->sq_ctx) {
			dev_warn(rvu->dev, "SQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->sq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_RQ) {
		if (!pfvf->rq_ctx) {
			dev_warn(rvu->dev, "RQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->rq_ctx->qsize;
	} else if (ctype == NIX_AQ_CTYPE_CQ) {
		if (!pfvf->cq_ctx) {
			dev_warn(rvu->dev, "CQ context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->cq_ctx->qsize;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
			 ctype_string, max_id - 1);
		return -EINVAL;
	}
	/* Store the selection for the matching read handler */
	switch (ctype) {
	case NIX_AQ_CTYPE_CQ:
		rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_cq_ctx.id = id;
		rvu->rvu_dbg.nix_cq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_SQ:
		rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_sq_ctx.id = id;
		rvu->rvu_dbg.nix_sq_ctx.all = all;
		break;

	case NIX_AQ_CTYPE_RQ:
		rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
		rvu->rvu_dbg.nix_rq_ctx.id = id;
		rvu->rvu_dbg.nix_rq_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
1558
rvu_dbg_nix_queue_ctx_write(struct file * filp,const char __user * buffer,size_t count,loff_t * ppos,int ctype)1559 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
1560 const char __user *buffer,
1561 size_t count, loff_t *ppos,
1562 int ctype)
1563 {
1564 struct seq_file *m = filp->private_data;
1565 struct nix_hw *nix_hw = m->private;
1566 struct rvu *rvu = nix_hw->rvu;
1567 char *cmd_buf, *ctype_string;
1568 int nixlf, id = 0, ret;
1569 bool all = false;
1570
1571 if ((*ppos != 0) || !count)
1572 return -EINVAL;
1573
1574 switch (ctype) {
1575 case NIX_AQ_CTYPE_SQ:
1576 ctype_string = "sq";
1577 break;
1578 case NIX_AQ_CTYPE_RQ:
1579 ctype_string = "rq";
1580 break;
1581 case NIX_AQ_CTYPE_CQ:
1582 ctype_string = "cq";
1583 break;
1584 default:
1585 return -EINVAL;
1586 }
1587
1588 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1589
1590 if (!cmd_buf)
1591 return count;
1592
1593 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
1594 &nixlf, &id, &all);
1595 if (ret < 0) {
1596 dev_info(rvu->dev,
1597 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
1598 ctype_string, ctype_string);
1599 goto done;
1600 } else {
1601 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
1602 ctype_string, m);
1603 }
1604 done:
1605 kfree(cmd_buf);
1606 return ret ? ret : count;
1607 }
1608
/* debugfs write handler for the nix sq_ctx file: select which SQ
 * context(s) to dump on the next read.
 */
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_SQ);
}

/* debugfs read handler for the nix sq_ctx file */
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}

RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
1623
/* debugfs write handler for the nix rq_ctx file: select which RQ
 * context(s) to dump on the next read.
 */
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_RQ);
}

/* debugfs read handler for the nix rq_ctx file */
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}

RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
1638
/* debugfs write handler for the nix cq_ctx file: select which CQ
 * context(s) to dump on the next read.
 */
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
					const char __user *buffer,
					size_t count, loff_t *ppos)
{
	return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
					   NIX_AQ_CTYPE_CQ);
}

/* debugfs read handler for the nix cq_ctx file */
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}

RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
1653
print_nix_qctx_qsize(struct seq_file * filp,int qsize,unsigned long * bmap,char * qtype)1654 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
1655 unsigned long *bmap, char *qtype)
1656 {
1657 char *buf;
1658
1659 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1660 if (!buf)
1661 return;
1662
1663 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
1664 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
1665 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
1666 qtype, buf);
1667 kfree(buf);
1668 }
1669
print_nix_qsize(struct seq_file * filp,struct rvu_pfvf * pfvf)1670 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
1671 {
1672 if (!pfvf->cq_ctx)
1673 seq_puts(filp, "cq context is not initialized\n");
1674 else
1675 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
1676 "cq");
1677
1678 if (!pfvf->rq_ctx)
1679 seq_puts(filp, "rq context is not initialized\n");
1680 else
1681 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
1682 "rq");
1683
1684 if (!pfvf->sq_ctx)
1685 seq_puts(filp, "sq context is not initialized\n");
1686 else
1687 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
1688 "sq");
1689 }
1690
/* debugfs write handler for the nix qsize file: forwards to the common
 * qsize write helper with the NIX block type.
 */
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NIX);
}

/* debugfs read handler for the nix qsize file */
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}

RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
1705
print_band_prof_ctx(struct seq_file * m,struct nix_bandprof_s * prof)1706 static void print_band_prof_ctx(struct seq_file *m,
1707 struct nix_bandprof_s *prof)
1708 {
1709 char *str;
1710
1711 switch (prof->pc_mode) {
1712 case NIX_RX_PC_MODE_VLAN:
1713 str = "VLAN";
1714 break;
1715 case NIX_RX_PC_MODE_DSCP:
1716 str = "DSCP";
1717 break;
1718 case NIX_RX_PC_MODE_GEN:
1719 str = "Generic";
1720 break;
1721 case NIX_RX_PC_MODE_RSVD:
1722 str = "Reserved";
1723 break;
1724 }
1725 seq_printf(m, "W0: pc_mode\t\t%s\n", str);
1726 str = (prof->icolor == 3) ? "Color blind" :
1727 (prof->icolor == 0) ? "Green" :
1728 (prof->icolor == 1) ? "Yellow" : "Red";
1729 seq_printf(m, "W0: icolor\t\t%s\n", str);
1730 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
1731 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
1732 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
1733 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
1734 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
1735 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
1736 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
1737 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
1738
1739 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
1740 str = (prof->lmode == 0) ? "byte" : "packet";
1741 seq_printf(m, "W1: lmode\t\t%s\n", str);
1742 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
1743 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
1744 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
1745 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
1746 str = (prof->gc_action == 0) ? "PASS" :
1747 (prof->gc_action == 1) ? "DROP" : "RED";
1748 seq_printf(m, "W1: gc_action\t\t%s\n", str);
1749 str = (prof->yc_action == 0) ? "PASS" :
1750 (prof->yc_action == 1) ? "DROP" : "RED";
1751 seq_printf(m, "W1: yc_action\t\t%s\n", str);
1752 str = (prof->rc_action == 0) ? "PASS" :
1753 (prof->rc_action == 1) ? "DROP" : "RED";
1754 seq_printf(m, "W1: rc_action\t\t%s\n", str);
1755 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
1756 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
1757 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
1758
1759 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
1760 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
1761 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
1762 seq_printf(m, "W4: green_pkt_pass\t%lld\n",
1763 (u64)prof->green_pkt_pass);
1764 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
1765 (u64)prof->yellow_pkt_pass);
1766 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
1767 seq_printf(m, "W7: green_octs_pass\t%lld\n",
1768 (u64)prof->green_octs_pass);
1769 seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
1770 (u64)prof->yellow_octs_pass);
1771 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
1772 seq_printf(m, "W10: green_pkt_drop\t%lld\n",
1773 (u64)prof->green_pkt_drop);
1774 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
1775 (u64)prof->yellow_pkt_drop);
1776 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
1777 seq_printf(m, "W13: green_octs_drop\t%lld\n",
1778 (u64)prof->green_octs_drop);
1779 seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
1780 (u64)prof->yellow_octs_drop);
1781 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
1782 seq_puts(m, "==============================\n");
1783 }
1784
/* debugfs read handler: dump every allocated ingress bandwidth profile
 * of each policer layer (leaf/mid/top), including its owning PF/VF.
 */
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
	struct nix_hw *nix_hw = m->private;
	struct nix_cn10k_aq_enq_req aq_req;
	struct nix_cn10k_aq_enq_rsp aq_rsp;
	struct rvu *rvu = nix_hw->rvu;
	struct nix_ipolicer *ipolicer;
	int layer, prof_idx, idx, rc;
	u16 pcifunc;
	char *str;

	/* Ingress policers do not exist on all platforms */
	if (!nix_hw->ipolicer)
		return 0;

	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
		if (layer == BAND_PROF_INVAL_LAYER)
			continue;
		str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
			(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";

		seq_printf(m, "\n%s bandwidth profiles\n", str);
		seq_puts(m, "=======================\n");

		ipolicer = &nix_hw->ipolicer[layer];

		for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
			/* Only dump profiles that are currently allocated */
			if (is_rsrc_free(&ipolicer->band_prof, idx))
				continue;

			/* AQ profile index: low 14 bits select the profile,
			 * bits 14+ select the layer.
			 */
			prof_idx = (idx & 0x3FFF) | (layer << 14);
			rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
						 0x00, NIX_AQ_CTYPE_BANDPROF,
						 prof_idx);
			if (rc) {
				dev_err(rvu->dev,
					"%s: Failed to fetch context of %s profile %d, err %d\n",
					__func__, str, idx, rc);
				return 0;
			}
			seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
			pcifunc = ipolicer->pfvf_map[idx];
			if (!(pcifunc & RVU_PFVF_FUNC_MASK))
				seq_printf(m, "Allocated to :: PF %d\n",
					   rvu_get_pf(pcifunc));
			else
				seq_printf(m, "Allocated to :: PF %d VF %d\n",
					   rvu_get_pf(pcifunc),
					   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
			print_band_prof_ctx(m, &aq_rsp.prof);
		}
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
1841
rvu_dbg_nix_band_prof_rsrc_display(struct seq_file * m,void * unused)1842 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
1843 {
1844 struct nix_hw *nix_hw = m->private;
1845 struct nix_ipolicer *ipolicer;
1846 int layer;
1847 char *str;
1848
1849 /* Ingress policers do not exist on all platforms */
1850 if (!nix_hw->ipolicer)
1851 return 0;
1852
1853 seq_puts(m, "\nBandwidth profile resource free count\n");
1854 seq_puts(m, "=====================================\n");
1855 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
1856 if (layer == BAND_PROF_INVAL_LAYER)
1857 continue;
1858 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
1859 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
1860
1861 ipolicer = &nix_hw->ipolicer[layer];
1862 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
1863 ipolicer->band_prof.max,
1864 rvu_rsrc_free_count(&ipolicer->band_prof));
1865 }
1866 seq_puts(m, "=====================================\n");
1867
1868 return 0;
1869 }
1870
1871 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
1872
rvu_dbg_nix_init(struct rvu * rvu,int blkaddr)1873 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
1874 {
1875 struct nix_hw *nix_hw;
1876
1877 if (!is_block_implemented(rvu->hw, blkaddr))
1878 return;
1879
1880 if (blkaddr == BLKADDR_NIX0) {
1881 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
1882 nix_hw = &rvu->hw->nix[0];
1883 } else {
1884 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
1885 rvu->rvu_dbg.root);
1886 nix_hw = &rvu->hw->nix[1];
1887 }
1888
1889 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1890 &rvu_dbg_nix_sq_ctx_fops);
1891 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1892 &rvu_dbg_nix_rq_ctx_fops);
1893 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1894 &rvu_dbg_nix_cq_ctx_fops);
1895 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1896 &rvu_dbg_nix_ndc_tx_cache_fops);
1897 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
1898 &rvu_dbg_nix_ndc_rx_cache_fops);
1899 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1900 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
1901 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
1902 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
1903 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
1904 &rvu_dbg_nix_qsize_fops);
1905 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
1906 &rvu_dbg_nix_band_prof_ctx_fops);
1907 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
1908 &rvu_dbg_nix_band_prof_rsrc_fops);
1909 }
1910
rvu_dbg_npa_init(struct rvu * rvu)1911 static void rvu_dbg_npa_init(struct rvu *rvu)
1912 {
1913 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
1914
1915 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
1916 &rvu_dbg_npa_qsize_fops);
1917 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1918 &rvu_dbg_npa_aura_ctx_fops);
1919 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
1920 &rvu_dbg_npa_pool_ctx_fops);
1921 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
1922 &rvu_dbg_npa_ndc_cache_fops);
1923 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
1924 &rvu_dbg_npa_ndc_hits_miss_fops);
1925 }
1926
/* Fetch and print one cumulative NIX RX stat of the NIX LFs mapped to
 * this CGX/RPM LMAC.  Statement expression: evaluates to the fetched
 * counter value and sets 'err' (expected in the caller's scope) to the
 * rvu_cgx_nix_cuml_stats() return code.  NOTE: on error 'cnt' is
 * yielded uninitialized — callers must check 'err' before using it.
 */
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_RX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})

/* TX counterpart of PRINT_CGX_CUML_NIXRX_STATUS; same contract,
 * same 'err' side effect and same uninitialized-on-error caveat.
 */
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name)				\
({									\
	u64 cnt;							\
	err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx),		\
				     NIX_STATS_TX, &(cnt));		\
	if (!err)							\
		seq_printf(s, "%s: %llu\n", name, cnt);			\
	cnt;								\
})
1946
cgx_print_stats(struct seq_file * s,int lmac_id)1947 static int cgx_print_stats(struct seq_file *s, int lmac_id)
1948 {
1949 struct cgx_link_user_info linfo;
1950 struct mac_ops *mac_ops;
1951 void *cgxd = s->private;
1952 u64 ucast, mcast, bcast;
1953 int stat = 0, err = 0;
1954 u64 tx_stat, rx_stat;
1955 struct rvu *rvu;
1956
1957 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
1958 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
1959 if (!rvu)
1960 return -ENODEV;
1961
1962 mac_ops = get_mac_ops(cgxd);
1963
1964 if (!mac_ops)
1965 return 0;
1966
1967 /* Link status */
1968 seq_puts(s, "\n=======Link Status======\n\n");
1969 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
1970 if (err)
1971 seq_puts(s, "Failed to read link status\n");
1972 seq_printf(s, "\nLink is %s %d Mbps\n\n",
1973 linfo.link_up ? "UP" : "DOWN", linfo.speed);
1974
1975 /* Rx stats */
1976 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
1977 mac_ops->name);
1978 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
1979 if (err)
1980 return err;
1981 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
1982 if (err)
1983 return err;
1984 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
1985 if (err)
1986 return err;
1987 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
1988 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
1989 if (err)
1990 return err;
1991 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
1992 if (err)
1993 return err;
1994 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
1995 if (err)
1996 return err;
1997
1998 /* Tx stats */
1999 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2000 mac_ops->name);
2001 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2002 if (err)
2003 return err;
2004 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2005 if (err)
2006 return err;
2007 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2008 if (err)
2009 return err;
2010 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2011 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2012 if (err)
2013 return err;
2014 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2015 if (err)
2016 return err;
2017
2018 /* Rx stats */
2019 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2020 while (stat < mac_ops->rx_stats_cnt) {
2021 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2022 if (err)
2023 return err;
2024 if (is_rvu_otx2(rvu))
2025 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2026 rx_stat);
2027 else
2028 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2029 rx_stat);
2030 stat++;
2031 }
2032
2033 /* Tx stats */
2034 stat = 0;
2035 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2036 while (stat < mac_ops->tx_stats_cnt) {
2037 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2038 if (err)
2039 return err;
2040
2041 if (is_rvu_otx2(rvu))
2042 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2043 tx_stat);
2044 else
2045 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2046 tx_stat);
2047 stat++;
2048 }
2049
2050 return err;
2051 }
2052
rvu_dbg_derive_lmacid(struct seq_file * filp,int * lmac_id)2053 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2054 {
2055 struct dentry *current_dir;
2056 char *buf;
2057
2058 current_dir = filp->file->f_path.dentry->d_parent;
2059 buf = strrchr(current_dir->d_name.name, 'c');
2060 if (!buf)
2061 return -EINVAL;
2062
2063 return kstrtoint(buf + 1, 10, lmac_id);
2064 }
2065
/* debugfs 'stats' show: resolve the LMAC id from the directory name
 * and dump its statistics.
 */
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int ret;

	ret = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (ret)
		return ret;

	return cgx_print_stats(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2078
cgx_print_dmac_flt(struct seq_file * s,int lmac_id)2079 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2080 {
2081 struct pci_dev *pdev = NULL;
2082 void *cgxd = s->private;
2083 char *bcast, *mcast;
2084 u16 index, domain;
2085 u8 dmac[ETH_ALEN];
2086 struct rvu *rvu;
2087 u64 cfg, mac;
2088 int pf;
2089
2090 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2091 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2092 if (!rvu)
2093 return -ENODEV;
2094
2095 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2096 domain = 2;
2097
2098 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2099 if (!pdev)
2100 return 0;
2101
2102 cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2103 bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2104 mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2105
2106 seq_puts(s,
2107 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
2108 seq_printf(s, "%s PF%d %9s %9s",
2109 dev_name(&pdev->dev), pf, bcast, mcast);
2110 if (cfg & CGX_DMAC_CAM_ACCEPT)
2111 seq_printf(s, "%12s\n\n", "UNICAST");
2112 else
2113 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2114
2115 seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
2116
2117 for (index = 0 ; index < 32 ; index++) {
2118 cfg = cgx_read_dmac_entry(cgxd, index);
2119 /* Display enabled dmac entries associated with current lmac */
2120 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2121 FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2122 mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2123 u64_to_ether_addr(mac, dmac);
2124 seq_printf(s, "%7d %pM\n", index, dmac);
2125 }
2126 }
2127
2128 return 0;
2129 }
2130
/* debugfs 'mac_filter' show: resolve the LMAC id from the directory
 * name and dump its DMAC filter state.
 */
static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
{
	int lmac_id;
	int ret;

	ret = rvu_dbg_derive_lmacid(filp, &lmac_id);
	if (ret)
		return ret;

	return cgx_print_dmac_flt(filp, lmac_id);
}

RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2143
rvu_dbg_cgx_init(struct rvu * rvu)2144 static void rvu_dbg_cgx_init(struct rvu *rvu)
2145 {
2146 struct mac_ops *mac_ops;
2147 unsigned long lmac_bmap;
2148 int i, lmac_id;
2149 char dname[20];
2150 void *cgx;
2151
2152 if (!cgx_get_cgxcnt_max())
2153 return;
2154
2155 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2156 if (!mac_ops)
2157 return;
2158
2159 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2160 rvu->rvu_dbg.root);
2161
2162 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2163 cgx = rvu_cgx_pdata(i, rvu);
2164 if (!cgx)
2165 continue;
2166 lmac_bmap = cgx_get_lmac_bmap(cgx);
2167 /* cgx debugfs dir */
2168 sprintf(dname, "%s%d", mac_ops->name, i);
2169 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2170 rvu->rvu_dbg.cgx_root);
2171
2172 for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
2173 /* lmac debugfs dir */
2174 sprintf(dname, "lmac%d", lmac_id);
2175 rvu->rvu_dbg.lmac =
2176 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2177
2178 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2179 cgx, &rvu_dbg_cgx_stat_fops);
2180 debugfs_create_file("mac_filter", 0600,
2181 rvu->rvu_dbg.lmac, cgx,
2182 &rvu_dbg_cgx_dmac_flt_fops);
2183 }
2184 }
2185 }
2186
2187 /* NPC debugfs APIs */
/* Print the MCAM entry/counter allocation summary of one PF or VF.
 * Prints nothing when the function owns neither entries nor counters.
 */
static void rvu_print_npc_mcam_info(struct seq_file *s,
				    u16 pcifunc, int blkaddr)
{
	struct rvu *rvu = s->private;
	int entries_alloc, entries_ena;
	int cntrs_alloc, cntrs_ena;

	rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
					  &entries_alloc, &entries_ena);
	rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
					    &cntrs_alloc, &cntrs_ena);
	if (!entries_alloc && !cntrs_alloc)
		return;

	if (pcifunc & RVU_PFVF_FUNC_MASK)
		seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
			   rvu_get_pf(pcifunc),
			   (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
	else
		seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
			   rvu_get_pf(pcifunc));

	if (entries_alloc) {
		seq_printf(s, "\t\t Entries allocated \t: %d\n", entries_alloc);
		seq_printf(s, "\t\t Entries enabled \t: %d\n", entries_ena);
	}
	if (cntrs_alloc) {
		seq_printf(s, "\t\t Counters allocated \t: %d\n", cntrs_alloc);
		seq_printf(s, "\t\t Counters enabled \t: %d\n", cntrs_ena);
	}
}
2219
/* debugfs 'mcam_info': dump NPC MCAM key widths, entry/counter totals
 * and, when anything is allocated, the per-PF/VF allocation breakdown.
 */
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
{
	struct rvu *rvu = filp->private;
	int pf, vf, numvfs, blkaddr;
	struct npc_mcam *mcam;
	u16 pcifunc, counters;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return -ENODEV;

	mcam = &rvu->hw->mcam;
	counters = rvu->hw->npc_counters;

	seq_puts(filp, "\nNPC MCAM info:\n");
	/* MCAM keywidth on receive and transmit sides */
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));
	cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
	cfg = (cfg >> 32) & 0x07;
	seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
		   "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
		   "224bits" : "448bits"));

	/* mcam->lock guards the bitmap counts and per-PF/VF allocations */
	mutex_lock(&mcam->lock);
	/* MCAM entries */
	seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   mcam->total_entries - mcam->bmap_entries);
	seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);

	/* MCAM counters */
	seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
	seq_printf(filp, "\t\t Reserved \t: %d\n",
		   counters - mcam->counters.max);
	seq_printf(filp, "\t\t Available \t: %d\n",
		   rvu_rsrc_free_count(&mcam->counters));

	/* All entries free => nothing allocated, skip the breakdown */
	if (mcam->bmap_entries == mcam->bmap_fcnt) {
		mutex_unlock(&mcam->lock);
		return 0;
	}

	seq_puts(filp, "\n\t\t Current allocation\n");
	seq_puts(filp, "\t\t====================\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		pcifunc = (pf << RVU_PFVF_PF_SHIFT);
		rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);

		/* Number of VFs enabled on this PF (bits[19:12] of PFX_CFG) */
		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
		numvfs = (cfg >> 12) & 0xFF;
		for (vf = 0; vf < numvfs; vf++) {
			pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
			rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
		}
	}

	mutex_unlock(&mcam->lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2286
rvu_dbg_npc_rx_miss_stats_display(struct seq_file * filp,void * unused)2287 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2288 void *unused)
2289 {
2290 struct rvu *rvu = filp->private;
2291 struct npc_mcam *mcam;
2292 int blkaddr;
2293
2294 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2295 if (blkaddr < 0)
2296 return -ENODEV;
2297
2298 mcam = &rvu->hw->mcam;
2299
2300 seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2301 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2302 rvu_read64(rvu, blkaddr,
2303 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2304
2305 return 0;
2306 }
2307
2308 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2309
/* Print, for one MCAM rule, every match field set in rule->features
 * together with its value and mask.  Port/etype/vlan fields are stored
 * in network byte order and converted with ntohs() for display.
 */
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
					struct rvu_npc_mcam_rule *rule)
{
	u8 bit;

	/* rule->features is a 64-bit field bitmap; iterate set bits */
	for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
		seq_printf(s, "\t%s ", npc_get_field_name(bit));
		switch (bit) {
		case NPC_DMAC:
			seq_printf(s, "%pM ", rule->packet.dmac);
			seq_printf(s, "mask %pM\n", rule->mask.dmac);
			break;
		case NPC_SMAC:
			seq_printf(s, "%pM ", rule->packet.smac);
			seq_printf(s, "mask %pM\n", rule->mask.smac);
			break;
		case NPC_ETYPE:
			seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
			break;
		case NPC_OUTER_VID:
			seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
			seq_printf(s, "mask 0x%x\n",
				   ntohs(rule->mask.vlan_tci));
			break;
		case NPC_TOS:
			seq_printf(s, "%d ", rule->packet.tos);
			seq_printf(s, "mask 0x%x\n", rule->mask.tos);
			break;
		case NPC_SIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4src);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
			break;
		case NPC_DIP_IPV4:
			seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
			seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
			break;
		case NPC_SIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6src);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
			break;
		case NPC_DIP_IPV6:
			seq_printf(s, "%pI6 ", rule->packet.ip6dst);
			seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
			break;
		case NPC_SPORT_TCP:
		case NPC_SPORT_UDP:
		case NPC_SPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.sport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
			break;
		case NPC_DPORT_TCP:
		case NPC_DPORT_UDP:
		case NPC_DPORT_SCTP:
			seq_printf(s, "%d ", ntohs(rule->packet.dport));
			seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
			break;
		default:
			/* Field name already printed; no value formatter */
			seq_puts(s, "\n");
			break;
		}
	}
}
2373
/* Print a human-readable description of one MCAM rule's action.
 * TX and RX interfaces use different action opcode sets, so the rule's
 * interface decides which action word is decoded.  Unknown opcodes
 * print nothing.
 */
static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
					 struct rvu_npc_mcam_rule *rule)
{
	if (is_npc_intf_tx(rule->intf)) {
		switch (rule->tx_action.op) {
		case NIX_TX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_DEFAULT:
			seq_puts(s, "\taction: Unicast to default channel\n");
			break;
		case NIX_TX_ACTIONOP_UCAST_CHAN:
			seq_printf(s, "\taction: Unicast to channel %d\n",
				   rule->tx_action.index);
			break;
		case NIX_TX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		case NIX_TX_ACTIONOP_DROP_VIOL:
			seq_puts(s, "\taction: Lockdown Violation Drop\n");
			break;
		default:
			break;
		}
	} else {
		switch (rule->rx_action.op) {
		case NIX_RX_ACTIONOP_DROP:
			seq_puts(s, "\taction: Drop\n");
			break;
		case NIX_RX_ACTIONOP_UCAST:
			seq_printf(s, "\taction: Direct to queue %d\n",
				   rule->rx_action.index);
			break;
		case NIX_RX_ACTIONOP_RSS:
			seq_puts(s, "\taction: RSS\n");
			break;
		case NIX_RX_ACTIONOP_UCAST_IPSEC:
			seq_puts(s, "\taction: Unicast ipsec\n");
			break;
		case NIX_RX_ACTIONOP_MCAST:
			seq_puts(s, "\taction: Multicast\n");
			break;
		default:
			break;
		}
	}
}
2421
/* Map an NPC interface id to a printable name ("unknown" if it is
 * none of the NIX0/NIX1 RX/TX interfaces).
 */
static const char *rvu_dbg_get_intf_name(int intf)
{
	if (intf == NIX_INTFX_RX(0))
		return "NIX0_RX";
	if (intf == NIX_INTFX_RX(1))
		return "NIX1_RX";
	if (intf == NIX_INTFX_TX(0))
		return "NIX0_TX";
	if (intf == NIX_INTFX_TX(1))
		return "NIX1_TX";

	return "unknown";
}
2439
/* debugfs 'mcam_rules': walk the installed MCAM rule list and print,
 * per rule, its owner, interface, match fields, action, enable state
 * and (when attached) hit counter value.
 */
static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
{
	struct rvu_npc_mcam_rule *iter;
	struct rvu *rvu = s->private;
	struct npc_mcam *mcam;
	int pf, vf = -1;
	bool enabled;
	int blkaddr;
	u16 target;
	u64 hits;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	if (blkaddr < 0)
		return 0;

	mcam = &rvu->hw->mcam;

	/* mcam->lock protects the mcam_rules list during traversal */
	mutex_lock(&mcam->lock);
	list_for_each_entry(iter, &mcam->mcam_rules, list) {
		pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
		seq_printf(s, "\n\tInstalled by: PF%d ", pf);

		if (iter->owner & RVU_PFVF_FUNC_MASK) {
			vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
			seq_printf(s, "VF%d", vf);
		}
		seq_puts(s, "\n");

		seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
						    "RX" : "TX");
		seq_printf(s, "\tinterface: %s\n",
			   rvu_dbg_get_intf_name(iter->intf));
		seq_printf(s, "\tmcam entry: %d\n", iter->entry);

		rvu_dbg_npc_mcam_show_flows(s, iter);
		/* RX rules carry a forwarding target PF/VF */
		if (is_npc_intf_rx(iter->intf)) {
			target = iter->rx_action.pf_func;
			pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
			seq_printf(s, "\tForward to: PF%d ", pf);

			if (target & RVU_PFVF_FUNC_MASK) {
				vf = (target & RVU_PFVF_FUNC_MASK) - 1;
				seq_printf(s, "VF%d", vf);
			}
			seq_puts(s, "\n");
		}

		rvu_dbg_npc_mcam_show_action(s, iter);

		enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
		seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");

		if (!iter->has_cntr)
			continue;
		seq_printf(s, "\tcounter: %d\n", iter->cntr);

		hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
		seq_printf(s, "\thits: %lld\n", hits);
	}
	mutex_unlock(&mcam->lock);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2505
rvu_dbg_npc_init(struct rvu * rvu)2506 static void rvu_dbg_npc_init(struct rvu *rvu)
2507 {
2508 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
2509
2510 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
2511 &rvu_dbg_npc_mcam_info_fops);
2512 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
2513 &rvu_dbg_npc_mcam_rules_fops);
2514 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
2515 &rvu_dbg_npc_rx_miss_act_fops);
2516 }
2517
/* Build and print 64-bit FREE/BUSY bitmasks for all CPT engines of the
 * requested type.  Engine counts per type come packed in
 * CPT_AF_CONSTANTS1 (SEs in bits[15:0], IEs in [31:16], AEs in [47:32])
 * and the global engine numbering is laid out as [SEs][IEs][AEs].
 * In CPT_AF_EXEX_STS, bit0 flags a busy engine and bit1 a free one.
 */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
	struct cpt_ctx *ctx = filp->private;
	u64 busy_sts = 0, free_sts = 0;
	u32 e_min = 0, e_max = 0, e, i;
	u16 max_ses, max_ies, max_aes;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Select the [e_min, e_max) engine number range for this type */
	switch (eng_type) {
	case CPT_AE_TYPE:
		e_min = max_ses + max_ies;
		e_max = max_ses + max_ies + max_aes;
		break;
	case CPT_SE_TYPE:
		e_min = 0;
		e_max = max_ses;
		break;
	case CPT_IE_TYPE:
		e_min = max_ses;
		e_max = max_ses + max_ies;
		break;
	default:
		return -EINVAL;
	}

	/* 'i' is the bit position within the type-relative bitmask */
	for (e = e_min, i = 0; e < e_max; e++, i++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
		if (reg & 0x1)
			busy_sts |= 1ULL << i;

		if (reg & 0x2)
			free_sts |= 1ULL << i;
	}
	seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
	seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);

	return 0;
}
2563
/* debugfs 'cpt_ae_sts': FREE/BUSY bitmasks of the AE-type engines */
static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_AE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
2570
/* debugfs 'cpt_se_sts': FREE/BUSY bitmasks of the SE-type engines */
static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_SE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
2577
/* debugfs 'cpt_ie_sts': FREE/BUSY bitmasks of the IE-type engines */
static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
{
	return cpt_eng_sts_display(filp, CPT_IE_TYPE);
}

RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
2584
rvu_dbg_cpt_engines_info_display(struct seq_file * filp,void * unused)2585 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
2586 {
2587 struct cpt_ctx *ctx = filp->private;
2588 u16 max_ses, max_ies, max_aes;
2589 struct rvu *rvu = ctx->rvu;
2590 int blkaddr = ctx->blkaddr;
2591 u32 e_max, e;
2592 u64 reg;
2593
2594 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
2595 max_ses = reg & 0xffff;
2596 max_ies = (reg >> 16) & 0xffff;
2597 max_aes = (reg >> 32) & 0xffff;
2598
2599 e_max = max_ses + max_ies + max_aes;
2600
2601 seq_puts(filp, "===========================================\n");
2602 for (e = 0; e < e_max; e++) {
2603 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
2604 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
2605 reg & 0xff);
2606 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
2607 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
2608 reg);
2609 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
2610 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
2611 reg);
2612 seq_puts(filp, "===========================================\n");
2613 }
2614 return 0;
2615 }
2616
2617 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
2618
/* debugfs 'cpt_lfs_info': print CTL/CTL2/PTR_CTL and the AF-side CFG
 * register of every CPT LF of this block.
 */
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	/* No LF bitmap means the block was never set up */
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
		/* Per-LF config register address is block-relative */
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				(lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
2651
/* debugfs 'cpt_err_info': dump the CPT AF interrupt/error status
 * registers (fault, poison, RVU/RAS interrupts, execution error info).
 */
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
2678
/* debugfs 'cpt_pc': dump the CPT AF performance counters (instruction
 * request/latency, NCB read request/latency/UC-fill, active cycles and
 * clock count).
 */
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	seq_printf(filp, "CPT instruction requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	seq_printf(filp, "CPT instruction latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	seq_printf(filp, "CPT NCB read requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	seq_printf(filp, "CPT NCB read latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
	seq_printf(filp, "CPT active cycles pc %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	seq_printf(filp, "CPT clock count pc %llu\n", reg);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
2705
rvu_dbg_cpt_init(struct rvu * rvu,int blkaddr)2706 static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
2707 {
2708 struct cpt_ctx *ctx;
2709
2710 if (!is_block_implemented(rvu->hw, blkaddr))
2711 return;
2712
2713 if (blkaddr == BLKADDR_CPT0) {
2714 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
2715 ctx = &rvu->rvu_dbg.cpt_ctx[0];
2716 ctx->blkaddr = BLKADDR_CPT0;
2717 ctx->rvu = rvu;
2718 } else {
2719 rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
2720 rvu->rvu_dbg.root);
2721 ctx = &rvu->rvu_dbg.cpt_ctx[1];
2722 ctx->blkaddr = BLKADDR_CPT1;
2723 ctx->rvu = rvu;
2724 }
2725
2726 debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
2727 &rvu_dbg_cpt_pc_fops);
2728 debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2729 &rvu_dbg_cpt_ae_sts_fops);
2730 debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2731 &rvu_dbg_cpt_se_sts_fops);
2732 debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
2733 &rvu_dbg_cpt_ie_sts_fops);
2734 debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
2735 &rvu_dbg_cpt_engines_info_fops);
2736 debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
2737 &rvu_dbg_cpt_lfs_info_fops);
2738 debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
2739 &rvu_dbg_cpt_err_info_fops);
2740 }
2741
/* Root debugfs directory name: "octeontx2" for otx2 silicon, "cn10k"
 * otherwise.
 */
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	return is_rvu_otx2(rvu) ? "octeontx2" : "cn10k";
}
2749
/* Entry point: build the whole RVU AF debugfs hierarchy under
 * "octeontx2" (or "cn10k") — resource status, PF-to-MAC map, and the
 * NPA/NIX/CGX/NPC/CPT sub-trees.
 */
void rvu_dbg_init(struct rvu *rvu)
{
	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);

	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
			    &rvu_dbg_rsrc_status_fops);

	/* PF-to-MAC map only applies when CGX/RPM blocks exist */
	if (!cgx_get_cgxcnt_max())
		goto create;

	/* Same fops; the file name reflects the MAC type (CGX vs RPM) */
	if (is_rvu_otx2(rvu))
		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
	else
		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);

create:
	rvu_dbg_npa_init(rvu);
	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);

	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
	rvu_dbg_cgx_init(rvu);
	rvu_dbg_npc_init(rvu);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
}
2777
/* Tear down the entire debugfs tree created by rvu_dbg_init() */
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}
2782
2783 #endif /* CONFIG_DEBUG_FS */
2784