// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2019 Marvell.
 *
 */

#ifdef CONFIG_DEBUG_FS

#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "npc.h"
#include "rvu_npc_hash.h"
#include "mcs.h"

#define DEBUGFS_DIR_NAME "octeontx2"

enum {
	CGX_STAT0,
	CGX_STAT1,
	CGX_STAT2,
	CGX_STAT3,
	CGX_STAT4,
	CGX_STAT5,
	CGX_STAT6,
	CGX_STAT7,
	CGX_STAT8,
	CGX_STAT9,
	CGX_STAT10,
	CGX_STAT11,
	CGX_STAT12,
	CGX_STAT13,
	CGX_STAT14,
	CGX_STAT15,
	CGX_STAT16,
	CGX_STAT17,
	CGX_STAT18,
};

/* NIX TX stats */
enum nix_stat_lf_tx {
	TX_UCAST = 0x0,
	TX_BCAST = 0x1,
	TX_MCAST = 0x2,
	TX_DROP = 0x3,
	TX_OCTS = 0x4,
	TX_STATS_ENUM_LAST,
};

/* NIX RX stats */
enum nix_stat_lf_rx {
	RX_OCTS = 0x0,
	RX_UCAST = 0x1,
	RX_BCAST = 0x2,
	RX_MCAST = 0x3,
	RX_DROP = 0x4,
	RX_DROP_OCTS = 0x5,
	RX_FCS = 0x6,
	RX_ERR = 0x7,
	RX_DRP_BCAST = 0x8,
	RX_DRP_MCAST = 0x9,
	RX_DRP_L3BCAST = 0xa,
	RX_DRP_L3MCAST = 0xb,
	RX_STATS_ENUM_LAST,
};

static char *cgx_rx_stats_fields[] = {
	[CGX_STAT0]	= "Received packets",
	[CGX_STAT1]	= "Octets of received packets",
	[CGX_STAT2]	= "Received PAUSE packets",
	[CGX_STAT3]	= "Received PAUSE and control packets",
	[CGX_STAT4]	= "Filtered DMAC0 (NIX-bound) packets",
	[CGX_STAT5]	= "Filtered DMAC0 (NIX-bound) octets",
	[CGX_STAT6]	= "Packets dropped due to RX FIFO full",
	[CGX_STAT7]	= "Octets dropped due to RX FIFO full",
	[CGX_STAT8]	= "Error packets",
	[CGX_STAT9]	= "Filtered DMAC1 (NCSI-bound) packets",
	[CGX_STAT10]	= "Filtered DMAC1 (NCSI-bound) octets",
	[CGX_STAT11]	= "NCSI-bound packets dropped",
	[CGX_STAT12]	= "NCSI-bound octets dropped",
};

static char *cgx_tx_stats_fields[] = {
	[CGX_STAT0]	= "Packets dropped due to excessive collisions",
	[CGX_STAT1]	= "Packets dropped due to excessive deferral",
	[CGX_STAT2]	= "Multiple collisions before successful transmission",
	[CGX_STAT3]	= "Single collisions before successful transmission",
	[CGX_STAT4]	= "Total octets sent on the interface",
	[CGX_STAT5]	= "Total frames sent on the interface",
	[CGX_STAT6]	= "Packets sent with an octet count < 64",
	[CGX_STAT7]	= "Packets sent with an octet count == 64",
	[CGX_STAT8]	= "Packets sent with an octet count of 65-127",
	[CGX_STAT9]	= "Packets sent with an octet count of 128-255",
	[CGX_STAT10]	= "Packets sent with an octet count of 256-511",
	[CGX_STAT11]	= "Packets sent with an octet count of 512-1023",
	[CGX_STAT12]	= "Packets sent with an octet count of 1024-1518",
	[CGX_STAT13]	= "Packets sent with an octet count of > 1518",
	[CGX_STAT14]	= "Packets sent to a broadcast DMAC",
	[CGX_STAT15]	= "Packets sent to the multicast DMAC",
	[CGX_STAT16]	= "Packets truncated due to transmit underflow",
	[CGX_STAT17]	= "Control/PAUSE packets sent",
};

static char *rpm_rx_stats_fields[] = {
	"Octets of received packets",
114 "Octets of received packets with out error",
115 "Received packets with alignment errors",
116 "Control/PAUSE packets received",
117 "Packets received with Frame too long Errors",
118 "Packets received with a1nrange length Errors",
119 "Received packets",
120 "Packets received with FrameCheckSequenceErrors",
121 "Packets received with VLAN header",
122 "Error packets",
123 "Packets received with unicast DMAC",
124 "Packets received with multicast DMAC",
125 "Packets received with broadcast DMAC",
126 "Dropped packets",
127 "Total frames received on interface",
128 "Packets received with an octet count < 64",
129 "Packets received with an octet count == 64",
130 "Packets received with an octet count of 65-127",
131 "Packets received with an octet count of 128-255",
132 "Packets received with an octet count of 256-511",
133 "Packets received with an octet count of 512-1023",
134 "Packets received with an octet count of 1024-1518",
135 "Packets received with an octet count of > 1518",
136 "Oversized Packets",
137 "Jabber Packets",
138 "Fragmented Packets",
139 "CBFC(class based flow control) pause frames received for class 0",
140 "CBFC pause frames received for class 1",
141 "CBFC pause frames received for class 2",
142 "CBFC pause frames received for class 3",
143 "CBFC pause frames received for class 4",
144 "CBFC pause frames received for class 5",
145 "CBFC pause frames received for class 6",
146 "CBFC pause frames received for class 7",
147 "CBFC pause frames received for class 8",
148 "CBFC pause frames received for class 9",
149 "CBFC pause frames received for class 10",
150 "CBFC pause frames received for class 11",
151 "CBFC pause frames received for class 12",
152 "CBFC pause frames received for class 13",
153 "CBFC pause frames received for class 14",
154 "CBFC pause frames received for class 15",
155 "MAC control packets received",
156 };
157
158 static char *rpm_tx_stats_fields[] = {
159 "Total octets sent on the interface",
160 "Total octets transmitted OK",
161 "Control/Pause frames sent",
162 "Total frames transmitted OK",
163 "Total frames sent with VLAN header",
164 "Error Packets",
165 "Packets sent to unicast DMAC",
166 "Packets sent to the multicast DMAC",
167 "Packets sent to a broadcast DMAC",
168 "Packets sent with an octet count == 64",
169 "Packets sent with an octet count of 65-127",
170 "Packets sent with an octet count of 128-255",
171 "Packets sent with an octet count of 256-511",
172 "Packets sent with an octet count of 512-1023",
173 "Packets sent with an octet count of 1024-1518",
174 "Packets sent with an octet count of > 1518",
175 "CBFC(class based flow control) pause frames transmitted for class 0",
176 "CBFC pause frames transmitted for class 1",
177 "CBFC pause frames transmitted for class 2",
178 "CBFC pause frames transmitted for class 3",
179 "CBFC pause frames transmitted for class 4",
180 "CBFC pause frames transmitted for class 5",
181 "CBFC pause frames transmitted for class 6",
182 "CBFC pause frames transmitted for class 7",
183 "CBFC pause frames transmitted for class 8",
184 "CBFC pause frames transmitted for class 9",
185 "CBFC pause frames transmitted for class 10",
186 "CBFC pause frames transmitted for class 11",
187 "CBFC pause frames transmitted for class 12",
188 "CBFC pause frames transmitted for class 13",
189 "CBFC pause frames transmitted for class 14",
190 "CBFC pause frames transmitted for class 15",
191 "MAC control packets sent",
192 "Total frames sent on the interface"
193 };
194
195 enum cpt_eng_type {
196 CPT_AE_TYPE = 1,
197 CPT_SE_TYPE = 2,
198 CPT_IE_TYPE = 3,
199 };
200
201 #define rvu_dbg_NULL NULL
202 #define rvu_dbg_open_NULL NULL
203
204 #define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
205 static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
206 { \
207 return single_open(file, rvu_dbg_##read_op, inode->i_private); \
208 } \
209 static const struct file_operations rvu_dbg_##name##_fops = { \
210 .owner = THIS_MODULE, \
211 .open = rvu_dbg_open_##name, \
212 .read = seq_read, \
213 .write = rvu_dbg_##write_op, \
214 .llseek = seq_lseek, \
215 .release = single_release, \
216 }
217
218 #define RVU_DEBUG_FOPS(name, read_op, write_op) \
219 static const struct file_operations rvu_dbg_##name##_fops = { \
220 .owner = THIS_MODULE, \
221 .open = simple_open, \
222 .read = rvu_dbg_##read_op, \
223 .write = rvu_dbg_##write_op \
224 }
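
/*
 * Illustrative expansion (not part of the driver): a hypothetical
 * RVU_DEBUG_SEQ_FOPS(foo, foo_display, NULL) emits
 *
 *	static int rvu_dbg_open_foo(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, rvu_dbg_foo_display, inode->i_private);
 *	}
 *	static const struct file_operations rvu_dbg_foo_fops = { ... };
 *
 * wired to seq_read/seq_lseek/single_release; a NULL write op resolves
 * to plain NULL through the rvu_dbg_NULL token-pasting helpers above.
 */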

static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);

static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_port_stats stats;
	int lmac;

	seq_puts(filp, "\n port stats\n");
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
		mcs_get_port_stats(mcs, &stats, lmac, dir);
		seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
		seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);

		if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
			seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
				   stats.preempt_err_cnt);
		if (dir == MCS_TX)
			seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
				   stats.sectag_insert_err_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);

static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);

static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_sa_stats stats;
	struct rsrc_bmap *map;
	int sa_id;

	if (dir == MCS_TX) {
		map = &mcs->tx.sa;
		mutex_lock(&mcs->stats_lock);
		for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
			seq_puts(filp, "\n TX SA stats\n");
			mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
			seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
				   stats.pkt_encrypt_cnt);

			seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
				   stats.pkt_protected_cnt);
		}
		mutex_unlock(&mcs->stats_lock);
		return 0;
	}

	/* RX stats */
	map = &mcs->rx.sa;
	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
		seq_puts(filp, "\n RX SA stats\n");
		mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
		seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
		seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
		seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);

static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);

static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->tx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
		seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);

		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
				   stats.octet_encrypt_cnt);
			seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
				   stats.octet_protected_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);

static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_sc_stats stats;
	struct rsrc_bmap *map;
	int sc_id;

	map = &mcs->rx.sc;
	seq_puts(filp, "\n SC stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
		mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
		seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
		seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
		seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
		seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
		seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
		seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);

		if (mcs->hw->mcs_blks > 1) {
			seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
			seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
		}
		if (mcs->hw->mcs_blks == 1) {
			seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
				   stats.octet_decrypt_cnt);
			seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
				   stats.octet_validate_cnt);
		}
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);

static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
{
	struct mcs *mcs = filp->private;
	struct mcs_flowid_stats stats;
	struct rsrc_bmap *map;
	int flow_id;

	seq_puts(filp, "\n Flowid stats\n");

	if (dir == MCS_RX)
		map = &mcs->rx.flow_ids;
	else
		map = &mcs->tx.flow_ids;

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
		mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
		seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);

static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);

static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->tx.secy;
	seq_puts(filp, "\n MCS TX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_tx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
			   stats.octet_encrypted_cnt);
		seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
			   stats.octet_protected_cnt);
		seq_printf(filp, "secy%d: Pkts with no active SA: %lld\n", secy_id,
			   stats.pkt_noactivesa_cnt);
		seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
		seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);

static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
{
	struct mcs *mcs = filp->private;
	struct mcs_secy_stats stats;
	struct rsrc_bmap *map;
	int secy_id;

	map = &mcs->rx.secy;
	seq_puts(filp, "\n MCS RX secy stats\n");

	mutex_lock(&mcs->stats_lock);
	for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
		mcs_get_rx_secy_stats(mcs, &stats, secy_id);
		seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
		seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
			   stats.ctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
		seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
			   stats.unctl_pkt_bcast_cnt);
		seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_mcast_cnt);
		seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
			   stats.unctl_pkt_ucast_cnt);
		seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
		seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
			   stats.octet_decrypted_cnt);
		seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
			   stats.octet_validated_cnt);
		seq_printf(filp, "secy%d: Pkts on disabled port: %lld\n", secy_id,
			   stats.pkt_port_disabled_cnt);
		seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
		seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
			   stats.pkt_nosa_cnt);
		seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
			   stats.pkt_nosaerror_cnt);
		seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
			   stats.pkt_tagged_ctl_cnt);
		seq_printf(filp, "secy%d: Untagged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
		seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
		if (mcs->hw->mcs_blks > 1)
			seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
				   stats.pkt_notag_cnt);
	}
	mutex_unlock(&mcs->stats_lock);
	return 0;
}

RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);

static void rvu_dbg_mcs_init(struct rvu *rvu)
{
	struct mcs *mcs;
	char dname[10];
	int i;

	if (!rvu->mcs_blk_cnt)
		return;

	rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);

	for (i = 0; i < rvu->mcs_blk_cnt; i++) {
		mcs = mcs_get_pdata(i);

		sprintf(dname, "mcs%d", i);
		rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
						      rvu->rvu_dbg.mcs_root);

		rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
				    &rvu_dbg_mcs_rx_port_stats_fops);

		rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);

		debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_flowid_stats_fops);

		debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_secy_stats_fops);

		debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sc_stats_fops);

		debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_sa_stats_fops);

		debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
				    &rvu_dbg_mcs_tx_port_stats_fops);
	}
}

#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
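/*
 * Each LMT_MAPTBL_ENTRY_SIZE (16 byte) table entry holds two 64-bit
 * words: word 0 is the LMT line base address and word 1 the map entry,
 * which is how the dump below labels its columns. Entries are laid out
 * per PF followed by that PF's VFs (see the index arithmetic below).
 */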
static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
					       char __user *buffer,
					       size_t count, loff_t *ppos)
{
	struct rvu *rvu = filp->private_data;
	u64 lmt_addr, val, tbl_base;
	int pf, vf, num_vfs, hw_vfs;
	void __iomem *lmt_map_base;
	int buf_size = 10240;
	size_t off = 0;
	int index = 0;
	char *buf;
	int ret;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);

	lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
	if (!lmt_map_base) {
		dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
		kfree(buf);
		return -ENOMEM;
	}

	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "\n\t\t\t\t\tLmtst Map Table Entries");
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "\n\t\t\t\t\t=======================");
	off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
	off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "Lmtline Base (word 0)\t\t");
	off += scnprintf(&buf[off], buf_size - 1 - off,
			 "Lmt Map Entry (word 1)");
	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
				 pf);

		index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
				 (tbl_base + index));
		lmt_addr = readq(lmt_map_base + index);
		off += scnprintf(&buf[off], buf_size - 1 - off,
				 " 0x%016llx\t\t", lmt_addr);
		index += 8;
		val = readq(lmt_map_base + index);
		off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
				 val);
		/* Reading num of VFs per PF */
		rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
		for (vf = 0; vf < num_vfs; vf++) {
			index = (pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE) +
				((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "PF%d:VF%d \t\t", pf, vf);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 " 0x%llx\t\t", (tbl_base + index));
			lmt_addr = readq(lmt_map_base + index);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 " 0x%016llx\t\t", lmt_addr);
			index += 8;
			val = readq(lmt_map_base + index);
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 " 0x%016llx\n", val);
		}
	}
	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");

	ret = min(off, count);
	if (copy_to_user(buffer, buf, ret))
		ret = -EFAULT;
	kfree(buf);

	iounmap(lmt_map_base);
	if (ret < 0)
		return ret;

	*ppos = ret;
	return ret;
}

RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);

static void get_lf_str_list(struct rvu_block block, int pcifunc,
			    char *lfs)
{
	int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;

	for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
		if (lf >= block.lf.max)
			break;

		if (block.fn_map[lf] != pcifunc)
			continue;

		if (lf == prev_lf + 1) {
			prev_lf = lf;
			seq = 1;
			continue;
		}

		if (seq)
			len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
		else
			len += (len ? sprintf(lfs + len, ",%d", lf) :
				      sprintf(lfs + len, "%d", lf));

		prev_lf = lf;
		seq = 0;
	}

	if (seq)
		len += sprintf(lfs + len, "-%d", prev_lf);

	lfs[len] = '\0';
}
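
/*
 * Example (illustrative): if LFs 2, 3, 4 and 9 of a block belong to
 * the given pcifunc, get_lf_str_list() renders them as "2-4,9" -
 * consecutive LFs are collapsed into a range, gaps become commas.
 */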

static int get_max_column_width(struct rvu *rvu)
{
	int index, pf, vf, lf_str_size = 12, buf_size = 256;
	struct rvu_block block;
	u16 pcifunc;
	char *buf;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;

				get_lf_str_list(block, pcifunc, buf);
				if (lf_str_size <= strlen(buf))
					lf_str_size = strlen(buf) + 1;
			}
		}
	}

	kfree(buf);
	return lf_str_size;
}

/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
					  char __user *buffer,
					  size_t count, loff_t *ppos)
{
	int index, off = 0, flag = 0, len = 0;
	struct rvu *rvu = filp->private_data;
	int bytes_not_copied = 0;
	struct rvu_block block;
	int pf, vf, pcifunc;
	int buf_size = 2048;
	int lf_str_size;
	char *lfs;
	char *buf;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Get the maximum width of a column */
	lf_str_size = get_max_column_width(rvu);
	if (lf_str_size < 0) {
		kfree(buf);
		return lf_str_size;
	}

	lfs = kzalloc(lf_str_size, GFP_KERNEL);
	if (!lfs) {
		kfree(buf);
		return -ENOMEM;
	}
	off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
			 "pcifunc");
	for (index = 0; index < BLK_COUNT; index++)
		if (strlen(rvu->hw->block[index].name)) {
			off += scnprintf(&buf[off], buf_size - 1 - off,
					 "%-*s", lf_str_size,
					 rvu->hw->block[index].name);
		}

	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
	bytes_not_copied = copy_to_user(buffer + *ppos, buf, off);
	if (bytes_not_copied)
		goto out;

	*ppos += off;
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
			off = 0;
			flag = 0;
			pcifunc = pf << 10 | vf;
			if (!pcifunc)
				continue;

			if (vf) {
				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			} else {
				sprintf(lfs, "PF%d", pf);
				off = scnprintf(&buf[off],
						buf_size - 1 - off,
						"%-*s", lf_str_size, lfs);
			}

			for (index = 0; index < BLK_COUNT; index++) {
				block = rvu->hw->block[index];
				if (!strlen(block.name))
					continue;
				len = 0;
				lfs[len] = '\0';
				get_lf_str_list(block, pcifunc, lfs);
				if (strlen(lfs))
					flag = 1;

				off += scnprintf(&buf[off], buf_size - 1 - off,
						 "%-*s", lf_str_size, lfs);
			}
			if (flag) {
				off += scnprintf(&buf[off],
						 buf_size - 1 - off, "\n");
				bytes_not_copied = copy_to_user(buffer + *ppos,
								buf, off);
				if (bytes_not_copied)
					goto out;

				*ppos += off;
			}
		}
	}

out:
	kfree(lfs);
	kfree(buf);
	if (bytes_not_copied)
		return -EFAULT;

	return *ppos;
}

RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
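
/*
 * Sample output shape (hypothetical names/values), one fixed-width
 * column per RVU block:
 *
 *	pcifunc      NPA         NIX0        ...
 *	PF0          0           0
 *	PF0:VF0      1           1-2
 */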

static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
	struct rvu *rvu = filp->private;
	struct pci_dev *pdev = NULL;
	struct mac_ops *mac_ops;
	char cgx[10], lmac[10];
	struct rvu_pfvf *pfvf;
	int pf, domain, blkid;
	u8 cgx_id, lmac_id;
	u16 pcifunc;

	domain = 2;
	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	/* There can be no CGX devices at all */
	if (!mac_ops)
		return 0;
	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
		   mac_ops->name);
	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!is_pf_cgxmapped(rvu, pf))
			continue;

		pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
		if (!pdev)
			continue;

		cgx[0] = 0;
		lmac[0] = 0;
		pcifunc = pf << 10;
		pfvf = rvu_get_pfvf(rvu, pcifunc);

		if (pfvf->nix_blkaddr == BLKADDR_NIX0)
			blkid = 0;
		else
			blkid = 1;

		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
				    &lmac_id);
		sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
		sprintf(lmac, "LMAC%d", lmac_id);
		seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
			   dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);

		pci_dev_put(pdev);
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);

static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
				u16 *pcifunc)
{
	struct rvu_block *block;
	struct rvu_hwinfo *hw;

	hw = rvu->hw;
	block = &hw->block[blkaddr];

	if (lf < 0 || lf >= block->lf.max) {
		dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
			 block->lf.max - 1);
		return false;
	}

	*pcifunc = block->fn_map[lf];
	if (!*pcifunc) {
		dev_warn(rvu->dev,
			 "This LF is not attached to any RVU PFFUNC\n");
		return false;
	}
	return true;
}

static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
{
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	if (!pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
					pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
		seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
	}

	if (!pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
	} else {
		bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
					pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
		seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
	}
	kfree(buf);
}

/* The 'qsize' entry dumps current Aura/Pool context Qsize
 * and each context's current enable/disable status in a bitmap.
 */
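/*
 * Example usage (debugfs path assumed from DEBUGFS_DIR_NAME, not
 * verified here):
 *
 *	echo 0 > /sys/kernel/debug/octeontx2/npa/qsize   # select NPA LF 0
 *	cat /sys/kernel/debug/octeontx2/npa/qsize        # dump its qsize info
 */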
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unused,
				 int blktype)
{
	void (*print_qsize)(struct seq_file *filp,
			    struct rvu_pfvf *pfvf) = NULL;
	struct dentry *current_dir;
	struct rvu_pfvf *pfvf;
	struct rvu *rvu;
	int qsize_id;
	u16 pcifunc;
	int blkaddr;

	rvu = filp->private;
	switch (blktype) {
	case BLKTYPE_NPA:
		qsize_id = rvu->rvu_dbg.npa_qsize_id;
		print_qsize = print_npa_qsize;
		break;

	case BLKTYPE_NIX:
		qsize_id = rvu->rvu_dbg.nix_qsize_id;
		print_qsize = print_nix_qsize;
		break;

	default:
		return -EINVAL;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		current_dir = filp->file->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
			   BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	print_qsize(filp, pfvf);

	return 0;
}

static ssize_t rvu_dbg_qsize_write(struct file *filp,
				   const char __user *buffer, size_t count,
				   loff_t *ppos, int blktype)
{
	char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
	struct seq_file *seqfile = filp->private_data;
	char *cmd_buf, *cmd_buf_tmp, *subtoken;
	struct rvu *rvu = seqfile->private;
	struct dentry *current_dir;
	int blkaddr;
	u16 pcifunc;
	int ret, lf;

	cmd_buf = memdup_user_nul(buffer, count);
	if (IS_ERR(cmd_buf))
		return PTR_ERR(cmd_buf);

	cmd_buf_tmp = strchr(cmd_buf, '\n');
	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		count = cmd_buf_tmp - cmd_buf + 1;
	}

	cmd_buf_tmp = cmd_buf;
	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
	if (cmd_buf)
		ret = -EINVAL;

	if (ret < 0 || !strncmp(subtoken, "help", 4)) {
		dev_info(rvu->dev, "Usage: echo <%s-lf> > qsize\n", blk_string);
		goto qsize_write_done;
	}

	if (blktype == BLKTYPE_NPA) {
		blkaddr = BLKADDR_NPA;
	} else {
		current_dir = filp->f_path.dentry->d_parent;
		blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
			   BLKADDR_NIX1 : BLKADDR_NIX0);
	}

	if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
		ret = -EINVAL;
		goto qsize_write_done;
	}
	if (blktype == BLKTYPE_NPA)
		rvu->rvu_dbg.npa_qsize_id = lf;
	else
		rvu->rvu_dbg.nix_qsize_id = lf;

qsize_write_done:
	kfree(cmd_buf_tmp);
	return ret ? ret : count;
}

static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return rvu_dbg_qsize_write(filp, buffer, count, ppos,
				   BLKTYPE_NPA);
}

static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}

RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);

/* Dumps given NPA Aura's context */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_aura_s *aura = &rsp->aura;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);

	seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
		   aura->ena, aura->pool_caching);
	seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
		   aura->pool_way_mask, aura->avg_con);
	seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
		   aura->pool_drop_ena, aura->aura_drop_ena);
	seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
		   aura->bp_ena, aura->aura_drop);
	seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
		   aura->shift, aura->avg_level);

	seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
		   (u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);

	seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
		   (u64)aura->limit, aura->bp, aura->fc_ena);

	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
	seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
		   aura->fc_up_crossing, aura->fc_stype);
	seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);

	seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);

	seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
		   aura->pool_drop, aura->update_time);
	seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
		   aura->err_int, aura->err_int_ena);
	seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
		   aura->thresh_int, aura->thresh_int_ena);
	seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
		   aura->thresh_up, aura->thresh_qint_idx);
	seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);

	seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}

/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
	struct npa_pool_s *pool = &rsp->pool;
	struct rvu *rvu = m->private;

	seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);

	seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
		   pool->ena, pool->nat_align);
	seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
		   pool->stack_caching, pool->stack_way_mask);
	seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
		   pool->buf_offset, pool->buf_size);

	seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
		   pool->stack_max_pages, pool->stack_pages);

	seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);

	seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
		   pool->stack_offset, pool->shift, pool->avg_level);
	seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
		   pool->avg_con, pool->fc_ena, pool->fc_stype);
	seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
		   pool->fc_hyst_bits, pool->fc_up_crossing);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
	seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);

	seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);

	seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);

	seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);

	seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
		   pool->err_int, pool->err_int_ena);
	seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
	seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
		   pool->thresh_int_ena, pool->thresh_up);
	seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
		   pool->thresh_qint_idx, pool->err_qint_idx);
	if (!is_rvu_otx2(rvu))
		seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}

/* Reads aura/pool's ctx from admin queue */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
	void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
	struct npa_aq_enq_req aq_req;
	struct npa_aq_enq_rsp rsp;
	struct rvu_pfvf *pfvf;
	int aura, rc, max_id;
	int npalf, id, all;
	struct rvu *rvu;
	u16 pcifunc;

	rvu = m->private;

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
		id = rvu->rvu_dbg.npa_aura_ctx.id;
		all = rvu->rvu_dbg.npa_aura_ctx.all;
		break;

	case NPA_AQ_CTYPE_POOL:
		npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
		id = rvu->rvu_dbg.npa_pool_ctx.id;
		all = rvu->rvu_dbg.npa_pool_ctx.all;
		break;
	default:
		return -EINVAL;
	}

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
		seq_puts(m, "Aura context is not initialized\n");
		return -EINVAL;
	} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
		seq_puts(m, "Pool context is not initialized\n");
		return -EINVAL;
	}

	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = pcifunc;
	aq_req.ctype = ctype;
	aq_req.op = NPA_AQ_INSTOP_READ;
	if (ctype == NPA_AQ_CTYPE_AURA) {
		max_id = pfvf->aura_ctx->qsize;
		print_npa_ctx = print_npa_aura_ctx;
	} else {
		max_id = pfvf->pool_ctx->qsize;
		print_npa_ctx = print_npa_pool_ctx;
	}

	if (id < 0 || id >= max_id) {
		seq_printf(m, "Invalid %s, valid range is 0-%d\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			   max_id - 1);
		return -EINVAL;
	}

	if (all)
		id = 0;
	else
		max_id = id + 1;

	for (aura = id; aura < max_id; aura++) {
		aq_req.aura_id = aura;

		/* Skip if queue is uninitialized */
		if (ctype == NPA_AQ_CTYPE_POOL && !test_bit(aura, pfvf->pool_bmap))
			continue;

		seq_printf(m, "======%s : %d=======\n",
			   (ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
			   aq_req.aura_id);
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
		if (rc) {
			seq_puts(m, "Failed to read context\n");
			return -EINVAL;
		}
		print_npa_ctx(m, &rsp);
	}
	return 0;
}

static int write_npa_ctx(struct rvu *rvu, bool all,
			 int npalf, int id, int ctype)
{
	struct rvu_pfvf *pfvf;
	int max_id = 0;
	u16 pcifunc;

	if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
		return -EINVAL;

	pfvf = rvu_get_pfvf(rvu, pcifunc);

	if (ctype == NPA_AQ_CTYPE_AURA) {
		if (!pfvf->aura_ctx) {
			dev_warn(rvu->dev, "Aura context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->aura_ctx->qsize;
	} else if (ctype == NPA_AQ_CTYPE_POOL) {
		if (!pfvf->pool_ctx) {
			dev_warn(rvu->dev, "Pool context is not initialized\n");
			return -EINVAL;
		}
		max_id = pfvf->pool_ctx->qsize;
	}

	if (id < 0 || id >= max_id) {
		dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
			 (ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
			 max_id - 1);
		return -EINVAL;
	}

	switch (ctype) {
	case NPA_AQ_CTYPE_AURA:
		rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
		rvu->rvu_dbg.npa_aura_ctx.id = id;
		rvu->rvu_dbg.npa_aura_ctx.all = all;
		break;

	case NPA_AQ_CTYPE_POOL:
		rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
		rvu->rvu_dbg.npa_pool_ctx.id = id;
		rvu->rvu_dbg.npa_pool_ctx.all = all;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
				const char __user *buffer, int *npalf,
				int *id, bool *all)
{
	int bytes_not_copied;
	char *cmd_buf_tmp;
	char *subtoken;
	int ret;

	bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
	if (bytes_not_copied)
		return -EFAULT;

	cmd_buf[*count] = '\0';
	cmd_buf_tmp = strchr(cmd_buf, '\n');

	if (cmd_buf_tmp) {
		*cmd_buf_tmp = '\0';
		*count = cmd_buf_tmp - cmd_buf + 1;
	}

	subtoken = strsep(&cmd_buf, " ");
	ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
	if (ret < 0)
		return ret;
	subtoken = strsep(&cmd_buf, " ");
	if (subtoken && strcmp(subtoken, "all") == 0) {
		*all = true;
	} else {
		ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
		if (ret < 0)
			return ret;
	}
	if (cmd_buf)
		return -EINVAL;
	return ret;
}
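
/*
 * Accepted input forms (illustrative):
 *	"0 5"   -> select context 5 of NPA LF 0
 *	"0 all" -> select all initialized contexts of NPA LF 0
 * Anything left over after the two tokens is rejected with -EINVAL.
 */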

static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
				     const char __user *buffer,
				     size_t count, loff_t *ppos, int ctype)
{
	char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
					"aura" : "pool";
	struct seq_file *seqfp = filp->private_data;
	struct rvu *rvu = seqfp->private;
	int npalf, id = 0, ret;
	bool all = false;

	if ((*ppos != 0) || !count)
		return -EINVAL;

	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd_buf)
		return -ENOMEM;
	ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
				   &npalf, &id, &all);
	if (ret < 0) {
		dev_info(rvu->dev,
			 "Usage: echo <npalf> [%s number/all] > %s_ctx\n",
			 ctype_string, ctype_string);
		goto done;
	} else {
		ret = write_npa_ctx(rvu, all, npalf, id, ctype);
	}
done:
	kfree(cmd_buf);
	return ret ? ret : count;
}

static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_AURA);
}

static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}

RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);

static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
					  const char __user *buffer,
					  size_t count, loff_t *ppos)
{
	return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
				     NPA_AQ_CTYPE_POOL);
}

static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
	return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}

RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);

static void ndc_cache_stats(struct seq_file *s, int blk_addr,
			    int ctype, int transaction)
{
	u64 req, out_req, lat, cant_alloc;
	struct nix_hw *nix_hw;
	struct rvu *rvu;
	int port;

	if (blk_addr == BLKADDR_NDC_NPA0) {
		rvu = s->private;
	} else {
		nix_hw = s->private;
		rvu = nix_hw->rvu;
	}

	for (port = 0; port < NDC_MAX_PORT; port++) {
		req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
						(port, ctype, transaction));
		lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
						(port, ctype, transaction));
		out_req = rvu_read64(rvu, blk_addr,
				     NDC_AF_PORTX_RTX_RWX_OSTDN_PC
				     (port, ctype, transaction));
		cant_alloc = rvu_read64(rvu, blk_addr,
					NDC_AF_PORTX_RTX_CANT_ALLOC_PC
					(port, transaction));
		seq_printf(s, "\nPort:%d\n", port);
		seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
		seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
		seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n",
			   req ? lat / req : 0);
		seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
		seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
	}
}

static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
{
	seq_puts(s, "\n***** CACHE mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
	seq_puts(s, "\n***** CACHE mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
	seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
	seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
	ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
	return 0;
}

static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
	return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);

static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
{
	struct nix_hw *nix_hw;
	struct rvu *rvu;
	int bank, max_bank;
	u64 ndc_af_const;

	if (blk_addr == BLKADDR_NDC_NPA0) {
		rvu = s->private;
	} else {
		nix_hw = s->private;
		rvu = nix_hw->rvu;
	}

	ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
	for (bank = 0; bank < max_bank; bank++) {
		seq_printf(s, "BANK:%d\n", bank);
		seq_printf(s, "\tHits:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
			   NDC_AF_BANKX_HIT_PC(bank)));
		seq_printf(s, "\tMiss:\t%lld\n",
			   (u64)rvu_read64(rvu, blk_addr,
			   NDC_AF_BANKX_MISS_PC(bank)));
	}
	return 0;
}

static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int blkaddr = 0;
	int ndc_idx = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);

static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int blkaddr = 0;
	int ndc_idx = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
	ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);

	return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);

static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
					     void *unused)
{
	return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}

RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);

static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);

static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
						void *unused)
{
	struct nix_hw *nix_hw = filp->private;
	int ndc_idx = NPA0_U;
	int blkaddr = 0;

	blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
		   BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);

	return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}

RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);

static void print_nix_cn10k_sq_ctx(struct seq_file *m,
				   struct nix_cn10k_sq_ctx_s *sq_ctx)
{
	seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
		   sq_ctx->ena, sq_ctx->qint_idx);
	seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
		   sq_ctx->substream, sq_ctx->sdp_mcast);
	seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
		   sq_ctx->cq, sq_ctx->sqe_way_mask);

	seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
		   sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
	seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
		   sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
	seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
		   sq_ctx->default_chan, sq_ctx->sqb_count);

	seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
	seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
	seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
		   sq_ctx->sqb_aura, sq_ctx->sq_int);
	seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
		   sq_ctx->sq_int_ena, sq_ctx->sqe_stype);

	seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
		   sq_ctx->max_sqe_size, sq_ctx->cq_limit);
	seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
		   sq_ctx->lmt_dis, sq_ctx->mnq_dis);
	seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
		   sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
	seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
		   sq_ctx->tail_offset, sq_ctx->smenq_offset);
	seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
		   sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);

	seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
		   sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
	seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
	seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
	seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
	seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
		   sq_ctx->smenq_next_sqb);

	seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);

	seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
	seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
		   sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
	seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
		   sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
	seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
		   sq_ctx->vfi_lso_vlan1_ins_ena, sq_ctx->vfi_lso_vld);
1597
1598 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1599 (u64)sq_ctx->scm_lso_rem);
1600 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1601 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1602 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1603 (u64)sq_ctx->dropped_octs);
1604 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1605 (u64)sq_ctx->dropped_pkts);
1606 }
1607
1608 /* Dumps given nix_sq's context */
print_nix_sq_ctx(struct seq_file * m,struct nix_aq_enq_rsp * rsp)1609 static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1610 {
1611 struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
1612 struct nix_hw *nix_hw = m->private;
1613 struct rvu *rvu = nix_hw->rvu;
1614
1615 if (!is_rvu_otx2(rvu)) {
1616 print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
1617 return;
1618 }
1619 seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
1620 sq_ctx->sqe_way_mask, sq_ctx->cq);
1621 seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1622 sq_ctx->sdp_mcast, sq_ctx->substream);
1623 seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
1624 sq_ctx->qint_idx, sq_ctx->ena);
1625
1626 seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
1627 sq_ctx->sqb_count, sq_ctx->default_chan);
1628 seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
1629 sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
1630 seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
1631 sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
1632
1633 seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
1634 sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
1635 seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
1636 sq_ctx->sq_int, sq_ctx->sqb_aura);
1637 seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
1638
1639 seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
1640 sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
1641 seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
1642 sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
1643 seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
1644 sq_ctx->smenq_offset, sq_ctx->tail_offset);
1645 seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
1646 sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
1647 seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
1648 sq_ctx->mnq_dis, sq_ctx->lmt_dis);
1649 seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
1650 sq_ctx->cq_limit, sq_ctx->max_sqe_size);
1651
1652 seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
1653 seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
1654 seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
1655 seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
1656 sq_ctx->smenq_next_sqb);
1657
1658 seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
1659
1660 seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
1661 sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
1662 seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
1663 sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
1664 seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
1665 sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
1666 seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
1667
1668 seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
1669 (u64)sq_ctx->scm_lso_rem);
1670 seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
1671 seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
1672 seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
1673 (u64)sq_ctx->dropped_octs);
1674 seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
1675 (u64)sq_ctx->dropped_pkts);
1676 }
1677
1678 static void print_nix_cn10k_rq_ctx(struct seq_file *m,
1679 struct nix_cn10k_rq_ctx_s *rq_ctx)
1680 {
1681 seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1682 rq_ctx->ena, rq_ctx->sso_ena);
1683 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1684 rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
1685 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
1686 rq_ctx->cq, rq_ctx->lenerr_dis);
1687 seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
1688 rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
1689 seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
1690 rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
1691 seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
1692 rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
1693 seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
1694
1695 seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1696 rq_ctx->spb_aura, rq_ctx->lpb_aura);
1698 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1699 rq_ctx->sso_grp, rq_ctx->sso_tt);
1700 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
1701 rq_ctx->pb_caching, rq_ctx->wqe_caching);
1702 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1703 rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
1704 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
1705 rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
1706 seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
1707 rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
1708
1709 seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
1710 seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
1711 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
1712 seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: spb_ena \t\t\t%d\n",
1713 rq_ctx->wqe_skip, rq_ctx->spb_ena);
1714 seq_printf(m, "W2: lpb_sizem1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
1715 rq_ctx->lpb_sizem1, rq_ctx->first_skip);
1716 seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
1717 rq_ctx->later_skip, rq_ctx->xqe_imm_size);
1718 seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
1719 rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
1720
1721 seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
1722 rq_ctx->xqe_drop, rq_ctx->xqe_pass);
1723 seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
1724 rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
1725 seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
1726 rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
1727 seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
1728 rq_ctx->spb_aura_drop, rq_ctx->spb_aura_pass);
1729
1730 seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1731 rq_ctx->lpb_aura_drop, rq_ctx->lpb_aura_pass);
1732 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1733 rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
1734 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
1735 rq_ctx->rq_int, rq_ctx->rq_int_ena);
1736 seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
1737
1738 seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
1739 rq_ctx->ltag, rq_ctx->good_utag);
1740 seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
1741 rq_ctx->bad_utag, rq_ctx->flow_tagw);
1742 seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
1743 rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
1744 seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
1745 rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
1746 seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
1747
1748 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1749 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1750 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1751 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1752 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1753 }
1754
1755 /* Dumps given nix_rq's context */
1756 static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1757 {
1758 struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
1759 struct nix_hw *nix_hw = m->private;
1760 struct rvu *rvu = nix_hw->rvu;
1761
1762 if (!is_rvu_otx2(rvu)) {
1763 print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
1764 return;
1765 }
1766
1767 seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
1768 rq_ctx->wqe_aura, rq_ctx->substream);
1769 seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
1770 rq_ctx->cq, rq_ctx->ena_wqwd);
1771 seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
1772 rq_ctx->ipsech_ena, rq_ctx->sso_ena);
1773 seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
1774
1775 seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
1776 rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
1777 seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
1778 rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
1779 seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
1780 rq_ctx->pb_caching, rq_ctx->sso_tt);
1781 seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
1782 rq_ctx->sso_grp, rq_ctx->lpb_aura);
1783 seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
1784
1785 seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
1786 rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
1787 seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
1788 rq_ctx->xqe_imm_size, rq_ctx->later_skip);
1789 seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
1790 rq_ctx->first_skip, rq_ctx->lpb_sizem1);
1791 seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
1792 rq_ctx->spb_ena, rq_ctx->wqe_skip);
1793 seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
1794
1795 seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
1796 rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
1797 seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
1798 rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
1799 seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
1800 rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
1801 seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
1802 rq_ctx->xqe_pass, rq_ctx->xqe_drop);
1803
1804 seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
1805 rq_ctx->qint_idx, rq_ctx->rq_int_ena);
1806 seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
1807 rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
1808 seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
1809 rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
1810 seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
1811
1812 seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
1813 rq_ctx->flow_tagw, rq_ctx->bad_utag);
1814 seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
1815 rq_ctx->good_utag, rq_ctx->ltag);
1816
1817 seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
1818 seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
1819 seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
1820 seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
1821 seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
1822 }
1823
1824 /* Dumps given nix_cq's context */
1825 static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
1826 {
1827 struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
1828
1829 seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
1830
1831 seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
1832 seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
1833 cq_ctx->avg_con, cq_ctx->cint_idx);
1834 seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
1835 cq_ctx->cq_err, cq_ctx->qint_idx);
1836 seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
1837 cq_ctx->bpid, cq_ctx->bp_ena);
1838
1839 seq_printf(m, "W2: update_time \t\t%d\nW2: avg_level \t\t\t%d\n",
1840 cq_ctx->update_time, cq_ctx->avg_level);
1841 seq_printf(m, "W2: head \t\t\t%d\nW2: tail \t\t\t%d\n\n",
1842 cq_ctx->head, cq_ctx->tail);
1843
1844 seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d\n",
1845 cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
1846 seq_printf(m, "W3: qsize \t\t\t%d\nW3: caching \t\t\t%d\n",
1847 cq_ctx->qsize, cq_ctx->caching);
1848 seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
1849 cq_ctx->substream, cq_ctx->ena);
1850 seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
1851 cq_ctx->drop_ena, cq_ctx->drop);
1852 seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
1853 }
1854
1855 static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
1856 void *unused, int ctype)
1857 {
1858 void (*print_nix_ctx)(struct seq_file *filp,
1859 struct nix_aq_enq_rsp *rsp) = NULL;
1860 struct nix_hw *nix_hw = filp->private;
1861 struct rvu *rvu = nix_hw->rvu;
1862 struct nix_aq_enq_req aq_req;
1863 struct nix_aq_enq_rsp rsp;
1864 char *ctype_string = NULL;
1865 int qidx, rc, max_id = 0;
1866 struct rvu_pfvf *pfvf;
1867 int nixlf, id, all;
1868 u16 pcifunc;
1869
1870 switch (ctype) {
1871 case NIX_AQ_CTYPE_CQ:
1872 nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
1873 id = rvu->rvu_dbg.nix_cq_ctx.id;
1874 all = rvu->rvu_dbg.nix_cq_ctx.all;
1875 break;
1876
1877 case NIX_AQ_CTYPE_SQ:
1878 nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
1879 id = rvu->rvu_dbg.nix_sq_ctx.id;
1880 all = rvu->rvu_dbg.nix_sq_ctx.all;
1881 break;
1882
1883 case NIX_AQ_CTYPE_RQ:
1884 nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
1885 id = rvu->rvu_dbg.nix_rq_ctx.id;
1886 all = rvu->rvu_dbg.nix_rq_ctx.all;
1887 break;
1888
1889 default:
1890 return -EINVAL;
1891 }
1892
1893 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1894 return -EINVAL;
1895
1896 pfvf = rvu_get_pfvf(rvu, pcifunc);
1897 if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
1898 seq_puts(filp, "SQ context is not initialized\n");
1899 return -EINVAL;
1900 } else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
1901 seq_puts(filp, "RQ context is not initialized\n");
1902 return -EINVAL;
1903 } else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
1904 seq_puts(filp, "CQ context is not initialized\n");
1905 return -EINVAL;
1906 }
1907
1908 if (ctype == NIX_AQ_CTYPE_SQ) {
1909 max_id = pfvf->sq_ctx->qsize;
1910 ctype_string = "sq";
1911 print_nix_ctx = print_nix_sq_ctx;
1912 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1913 max_id = pfvf->rq_ctx->qsize;
1914 ctype_string = "rq";
1915 print_nix_ctx = print_nix_rq_ctx;
1916 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1917 max_id = pfvf->cq_ctx->qsize;
1918 ctype_string = "cq";
1919 print_nix_ctx = print_nix_cq_ctx;
1920 }
1921
1922 memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1923 aq_req.hdr.pcifunc = pcifunc;
1924 aq_req.ctype = ctype;
1925 aq_req.op = NIX_AQ_INSTOP_READ;
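/* Dump either the single context selected via the corresponding *_ctx
 * write handler, or every context up to the LF's qsize when "all" was
 * requested.
 */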
1926 if (all)
1927 id = 0;
1928 else
1929 max_id = id + 1;
1930 for (qidx = id; qidx < max_id; qidx++) {
1931 aq_req.qidx = qidx;
1932 seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d=====\n",
1933 ctype_string, nixlf, aq_req.qidx);
1934 rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
1935 if (rc) {
1936 seq_puts(filp, "Failed to read the context\n");
1937 return -EINVAL;
1938 }
1939 print_nix_ctx(filp, &rsp);
1940 }
1941 return 0;
1942 }
1943
1944 static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
1945 int id, int ctype, char *ctype_string,
1946 struct seq_file *m)
1947 {
1948 struct nix_hw *nix_hw = m->private;
1949 struct rvu_pfvf *pfvf;
1950 int max_id = 0;
1951 u16 pcifunc;
1952
1953 if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
1954 return -EINVAL;
1955
1956 pfvf = rvu_get_pfvf(rvu, pcifunc);
1957
1958 if (ctype == NIX_AQ_CTYPE_SQ) {
1959 if (!pfvf->sq_ctx) {
1960 dev_warn(rvu->dev, "SQ context is not initialized\n");
1961 return -EINVAL;
1962 }
1963 max_id = pfvf->sq_ctx->qsize;
1964 } else if (ctype == NIX_AQ_CTYPE_RQ) {
1965 if (!pfvf->rq_ctx) {
1966 dev_warn(rvu->dev, "RQ context is not initialized\n");
1967 return -EINVAL;
1968 }
1969 max_id = pfvf->rq_ctx->qsize;
1970 } else if (ctype == NIX_AQ_CTYPE_CQ) {
1971 if (!pfvf->cq_ctx) {
1972 dev_warn(rvu->dev, "CQ context is not initialized\n");
1973 return -EINVAL;
1974 }
1975 max_id = pfvf->cq_ctx->qsize;
1976 }
1977
1978 if (id < 0 || id >= max_id) {
1979 dev_warn(rvu->dev, "Invalid %s_ctx index, valid range is 0-%d\n",
1980 ctype_string, max_id - 1);
1981 return -EINVAL;
1982 }
1983 switch (ctype) {
1984 case NIX_AQ_CTYPE_CQ:
1985 rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
1986 rvu->rvu_dbg.nix_cq_ctx.id = id;
1987 rvu->rvu_dbg.nix_cq_ctx.all = all;
1988 break;
1989
1990 case NIX_AQ_CTYPE_SQ:
1991 rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
1992 rvu->rvu_dbg.nix_sq_ctx.id = id;
1993 rvu->rvu_dbg.nix_sq_ctx.all = all;
1994 break;
1995
1996 case NIX_AQ_CTYPE_RQ:
1997 rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
1998 rvu->rvu_dbg.nix_rq_ctx.id = id;
1999 rvu->rvu_dbg.nix_rq_ctx.all = all;
2000 break;
2001 default:
2002 return -EINVAL;
2003 }
2004 return 0;
2005 }
2006
2007 static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
2008 const char __user *buffer,
2009 size_t count, loff_t *ppos,
2010 int ctype)
2011 {
2012 struct seq_file *m = filp->private_data;
2013 struct nix_hw *nix_hw = m->private;
2014 struct rvu *rvu = nix_hw->rvu;
2015 char *cmd_buf, *ctype_string;
2016 int nixlf, id = 0, ret;
2017 bool all = false;
2018
2019 if ((*ppos != 0) || !count)
2020 return -EINVAL;
2021
2022 switch (ctype) {
2023 case NIX_AQ_CTYPE_SQ:
2024 ctype_string = "sq";
2025 break;
2026 case NIX_AQ_CTYPE_RQ:
2027 ctype_string = "rq";
2028 break;
2029 case NIX_AQ_CTYPE_CQ:
2030 ctype_string = "cq";
2031 break;
2032 default:
2033 return -EINVAL;
2034 }
2035
2036 cmd_buf = kzalloc(count + 1, GFP_KERNEL);
2037 if (!cmd_buf)
2038 return -ENOMEM;
2040
2041 ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
2042 &nixlf, &id, &all);
2043 if (ret < 0) {
2044 dev_info(rvu->dev,
2045 "Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
2046 ctype_string, ctype_string);
2047 goto done;
2048 } else {
2049 ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
2050 ctype_string, m);
2051 }
2052 done:
2053 kfree(cmd_buf);
2054 return ret ? ret : count;
2055 }
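/* Example usage from a shell (hypothetical values, assuming debugfs is
 * mounted at /sys/kernel/debug; the files are created under
 * <debugfs>/octeontx2/nix/ by rvu_dbg_nix_init() below, and the exact
 * token format is handled by parse_cmd_buffer_ctx()):
 *
 *   echo "0 5" > /sys/kernel/debug/octeontx2/nix/sq_ctx    # select SQ 5 of NIX LF 0
 *   echo "0 all" > /sys/kernel/debug/octeontx2/nix/sq_ctx  # or select all SQs
 *   cat /sys/kernel/debug/octeontx2/nix/sq_ctx             # dump the selection
 */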
2056
2057 static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
2058 const char __user *buffer,
2059 size_t count, loff_t *ppos)
2060 {
2061 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2062 NIX_AQ_CTYPE_SQ);
2063 }
2064
2065 static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
2066 {
2067 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
2068 }
2069
2070 RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
2071
2072 static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
2073 const char __user *buffer,
2074 size_t count, loff_t *ppos)
2075 {
2076 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2077 NIX_AQ_CTYPE_RQ);
2078 }
2079
2080 static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
2081 {
2082 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
2083 }
2084
2085 RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
2086
2087 static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
2088 const char __user *buffer,
2089 size_t count, loff_t *ppos)
2090 {
2091 return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
2092 NIX_AQ_CTYPE_CQ);
2093 }
2094
2095 static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
2096 {
2097 return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
2098 }
2099
2100 RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
2101
2102 static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
2103 unsigned long *bmap, char *qtype)
2104 {
2105 char *buf;
2106
2107 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2108 if (!buf)
2109 return;
2110
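/* With its first argument false, bitmap_print_to_pagebuf() renders the
 * bitmap as a hex mask rather than a "0-3,8"-style range list, so the
 * ena/dis line below is a hex snapshot of which contexts are enabled.
 */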
2111 bitmap_print_to_pagebuf(false, buf, bmap, qsize);
2112 seq_printf(filp, "%s context count : %d\n", qtype, qsize);
2113 seq_printf(filp, "%s context ena/dis bitmap : %s\n",
2114 qtype, buf);
2115 kfree(buf);
2116 }
2117
2118 static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
2119 {
2120 if (!pfvf->cq_ctx)
2121 seq_puts(filp, "cq context is not initialized\n");
2122 else
2123 print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
2124 "cq");
2125
2126 if (!pfvf->rq_ctx)
2127 seq_puts(filp, "rq context is not initialized\n");
2128 else
2129 print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
2130 "rq");
2131
2132 if (!pfvf->sq_ctx)
2133 seq_puts(filp, "sq context is not initialized\n");
2134 else
2135 print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
2136 "sq");
2137 }
2138
2139 static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
2140 const char __user *buffer,
2141 size_t count, loff_t *ppos)
2142 {
2143 return rvu_dbg_qsize_write(filp, buffer, count, ppos,
2144 BLKTYPE_NIX);
2145 }
2146
2147 static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
2148 {
2149 return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
2150 }
2151
2152 RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
2153
2154 static void print_band_prof_ctx(struct seq_file *m,
2155 struct nix_bandprof_s *prof)
2156 {
2157 char *str;
2158
2159 switch (prof->pc_mode) {
2160 case NIX_RX_PC_MODE_VLAN:
2161 str = "VLAN";
2162 break;
2163 case NIX_RX_PC_MODE_DSCP:
2164 str = "DSCP";
2165 break;
2166 case NIX_RX_PC_MODE_GEN:
2167 str = "Generic";
2168 break;
2169 case NIX_RX_PC_MODE_RSVD:
2170 str = "Reserved";
2171 break;
2172 }
2173 seq_printf(m, "W0: pc_mode\t\t%s\n", str);
2174 str = (prof->icolor == 3) ? "Color blind" :
2175 (prof->icolor == 0) ? "Green" :
2176 (prof->icolor == 1) ? "Yellow" : "Red";
2177 seq_printf(m, "W0: icolor\t\t%s\n", str);
2178 seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
2179 seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
2180 seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
2181 seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
2182 seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
2183 seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
2184 seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
2185 seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
2186
2187 seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
2188 str = (prof->lmode == 0) ? "byte" : "packet";
2189 seq_printf(m, "W1: lmode\t\t%s\n", str);
2190 seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
2191 seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
2192 seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
2193 seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
2194 str = (prof->gc_action == 0) ? "PASS" :
2195 (prof->gc_action == 1) ? "DROP" : "RED";
2196 seq_printf(m, "W1: gc_action\t\t%s\n", str);
2197 str = (prof->yc_action == 0) ? "PASS" :
2198 (prof->yc_action == 1) ? "DROP" : "RED";
2199 seq_printf(m, "W1: yc_action\t\t%s\n", str);
2200 str = (prof->rc_action == 0) ? "PASS" :
2201 (prof->rc_action == 1) ? "DROP" : "RED";
2202 seq_printf(m, "W1: rc_action\t\t%s\n", str);
2203 seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
2204 seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
2205 seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
2206
2207 seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
2208 seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
2209 seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
2210 seq_printf(m, "W4: green_pkt_pass\t%lld\n",
2211 (u64)prof->green_pkt_pass);
2212 seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
2213 (u64)prof->yellow_pkt_pass);
2214 seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
2215 seq_printf(m, "W7: green_octs_pass\t%lld\n",
2216 (u64)prof->green_octs_pass);
2217 seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
2218 (u64)prof->yellow_octs_pass);
2219 seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
2220 seq_printf(m, "W10: green_pkt_drop\t%lld\n",
2221 (u64)prof->green_pkt_drop);
2222 seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
2223 (u64)prof->yellow_pkt_drop);
2224 seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
2225 seq_printf(m, "W13: green_octs_drop\t%lld\n",
2226 (u64)prof->green_octs_drop);
2227 seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
2228 (u64)prof->yellow_octs_drop);
2229 seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
2230 seq_puts(m, "==============================\n");
2231 }
2232
2233 static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
2234 {
2235 struct nix_hw *nix_hw = m->private;
2236 struct nix_cn10k_aq_enq_req aq_req;
2237 struct nix_cn10k_aq_enq_rsp aq_rsp;
2238 struct rvu *rvu = nix_hw->rvu;
2239 struct nix_ipolicer *ipolicer;
2240 int layer, prof_idx, idx, rc;
2241 u16 pcifunc;
2242 char *str;
2243
2244 /* Ingress policers do not exist on all platforms */
2245 if (!nix_hw->ipolicer)
2246 return 0;
2247
2248 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2249 if (layer == BAND_PROF_INVAL_LAYER)
2250 continue;
2251 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2252 (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
2253
2254 seq_printf(m, "\n%s bandwidth profiles\n", str);
2255 seq_puts(m, "=======================\n");
2256
2257 ipolicer = &nix_hw->ipolicer[layer];
2258
2259 for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
2260 if (is_rsrc_free(&ipolicer->band_prof, idx))
2261 continue;
2262
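/* Bandwidth profile AQ index encoding (inferred from the masks used
 * here): bits [13:0] select the profile within the layer and bits
 * [15:14] select the layer itself.
 */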
2263 prof_idx = (idx & 0x3FFF) | (layer << 14);
2264 rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
2265 0x00, NIX_AQ_CTYPE_BANDPROF,
2266 prof_idx);
2267 if (rc) {
2268 dev_err(rvu->dev,
2269 "%s: Failed to fetch context of %s profile %d, err %d\n",
2270 __func__, str, idx, rc);
2271 return 0;
2272 }
2273 seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
2274 pcifunc = ipolicer->pfvf_map[idx];
2275 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2276 seq_printf(m, "Allocated to :: PF %d\n",
2277 rvu_get_pf(pcifunc));
2278 else
2279 seq_printf(m, "Allocated to :: PF %d VF %d\n",
2280 rvu_get_pf(pcifunc),
2281 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2282 print_band_prof_ctx(m, &aq_rsp.prof);
2283 }
2284 }
2285 return 0;
2286 }
2287
2288 RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
2289
2290 static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
2291 {
2292 struct nix_hw *nix_hw = m->private;
2293 struct nix_ipolicer *ipolicer;
2294 int layer;
2295 char *str;
2296
2297 /* Ingress policers do not exist on all platforms */
2298 if (!nix_hw->ipolicer)
2299 return 0;
2300
2301 seq_puts(m, "\nBandwidth profile resource free count\n");
2302 seq_puts(m, "=====================================\n");
2303 for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
2304 if (layer == BAND_PROF_INVAL_LAYER)
2305 continue;
2306 str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
2307 (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
2308
2309 ipolicer = &nix_hw->ipolicer[layer];
2310 seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
2311 ipolicer->band_prof.max,
2312 rvu_rsrc_free_count(&ipolicer->band_prof));
2313 }
2314 seq_puts(m, "=====================================\n");
2315
2316 return 0;
2317 }
2318
2319 RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
2320
2321 static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
2322 {
2323 struct nix_hw *nix_hw;
2324
2325 if (!is_block_implemented(rvu->hw, blkaddr))
2326 return;
2327
2328 if (blkaddr == BLKADDR_NIX0) {
2329 rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
2330 nix_hw = &rvu->hw->nix[0];
2331 } else {
2332 rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
2333 rvu->rvu_dbg.root);
2334 nix_hw = &rvu->hw->nix[1];
2335 }
2336
2337 debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2338 &rvu_dbg_nix_sq_ctx_fops);
2339 debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2340 &rvu_dbg_nix_rq_ctx_fops);
2341 debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2342 &rvu_dbg_nix_cq_ctx_fops);
2343 debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2344 &rvu_dbg_nix_ndc_tx_cache_fops);
2345 debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
2346 &rvu_dbg_nix_ndc_rx_cache_fops);
2347 debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2348 &rvu_dbg_nix_ndc_tx_hits_miss_fops);
2349 debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
2350 &rvu_dbg_nix_ndc_rx_hits_miss_fops);
2351 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
2352 &rvu_dbg_nix_qsize_fops);
2353 debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
2354 &rvu_dbg_nix_band_prof_ctx_fops);
2355 debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
2356 &rvu_dbg_nix_band_prof_rsrc_fops);
2357 }
2358
2359 static void rvu_dbg_npa_init(struct rvu *rvu)
2360 {
2361 rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
2362
2363 debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
2364 &rvu_dbg_npa_qsize_fops);
2365 debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2366 &rvu_dbg_npa_aura_ctx_fops);
2367 debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
2368 &rvu_dbg_npa_pool_ctx_fops);
2369 debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
2370 &rvu_dbg_npa_ndc_cache_fops);
2371 debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
2372 &rvu_dbg_npa_ndc_hits_miss_fops);
2373 }
2374
2375 #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
2376 ({ \
2377 u64 cnt; \
2378 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2379 NIX_STATS_RX, &(cnt)); \
2380 if (!err) \
2381 seq_printf(s, "%s: %llu\n", name, cnt); \
2382 cnt; \
2383 })
2384
2385 #define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
2386 ({ \
2387 u64 cnt; \
2388 err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
2389 NIX_STATS_TX, &(cnt)); \
2390 if (!err) \
2391 seq_printf(s, "%s: %llu\n", name, cnt); \
2392 cnt; \
2393 })
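/* Both macros above are GNU C statement expressions: each evaluates to the
 * fetched counter so call sites can accumulate ucast/mcast/bcast totals,
 * while also updating 'err' in the caller's scope, which is why every use
 * below is followed by an explicit 'if (err) return err;' check.
 */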
2394
2395 static int cgx_print_stats(struct seq_file *s, int lmac_id)
2396 {
2397 struct cgx_link_user_info linfo;
2398 struct mac_ops *mac_ops;
2399 void *cgxd = s->private;
2400 u64 ucast, mcast, bcast;
2401 int stat = 0, err = 0;
2402 u64 tx_stat, rx_stat;
2403 struct rvu *rvu;
2404
2405 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2406 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2407 if (!rvu)
2408 return -ENODEV;
2409
2410 mac_ops = get_mac_ops(cgxd);
2411 /* The platform may have no CGX/RPM devices at all */
2412 if (!mac_ops)
2413 return 0;
2414
2415 /* Link status */
2416 seq_puts(s, "\n=======Link Status======\n\n");
2417 err = cgx_get_link_info(cgxd, lmac_id, &linfo);
2418 if (err)
2419 seq_puts(s, "Failed to read link status\n");
2420 seq_printf(s, "\nLink is %s %d Mbps\n\n",
2421 linfo.link_up ? "UP" : "DOWN", linfo.speed);
2422
2423 /* Rx stats */
2424 seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
2425 mac_ops->name);
2426 ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
2427 if (err)
2428 return err;
2429 mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
2430 if (err)
2431 return err;
2432 bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
2433 if (err)
2434 return err;
2435 seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
2436 PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
2437 if (err)
2438 return err;
2439 PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
2440 if (err)
2441 return err;
2442 PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
2443 if (err)
2444 return err;
2445
2446 /* Tx stats */
2447 seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
2448 mac_ops->name);
2449 ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
2450 if (err)
2451 return err;
2452 mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
2453 if (err)
2454 return err;
2455 bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
2456 if (err)
2457 return err;
2458 seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
2459 PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
2460 if (err)
2461 return err;
2462 PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
2463 if (err)
2464 return err;
2465
2466 /* Rx stats */
2467 seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
2468 while (stat < mac_ops->rx_stats_cnt) {
2469 err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
2470 if (err)
2471 return err;
2472 if (is_rvu_otx2(rvu))
2473 seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
2474 rx_stat);
2475 else
2476 seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
2477 rx_stat);
2478 stat++;
2479 }
2480
2481 /* Tx stats */
2482 stat = 0;
2483 seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
2484 while (stat < mac_ops->tx_stats_cnt) {
2485 err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
2486 if (err)
2487 return err;
2488
2489 if (is_rvu_otx2(rvu))
2490 seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
2491 tx_stat);
2492 else
2493 seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
2494 tx_stat);
2495 stat++;
2496 }
2497
2498 return err;
2499 }
2500
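/* The per-LMAC debugfs files live in a directory named "lmac<N>" (created
 * in rvu_dbg_cgx_init() below), so the LMAC id can be recovered from the
 * parent dentry name: find the last 'c' and parse the digits after it.
 */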
2501 static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
2502 {
2503 struct dentry *current_dir;
2504 char *buf;
2505
2506 current_dir = filp->file->f_path.dentry->d_parent;
2507 buf = strrchr(current_dir->d_name.name, 'c');
2508 if (!buf)
2509 return -EINVAL;
2510
2511 return kstrtoint(buf + 1, 10, lmac_id);
2512 }
2513
2514 static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
2515 {
2516 int lmac_id, err;
2517
2518 err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2519 if (!err)
2520 return cgx_print_stats(filp, lmac_id);
2521
2522 return err;
2523 }
2524
2525 RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
2526
2527 static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
2528 {
2529 struct pci_dev *pdev = NULL;
2530 void *cgxd = s->private;
2531 char *bcast, *mcast;
2532 u16 index, domain;
2533 u8 dmac[ETH_ALEN];
2534 struct rvu *rvu;
2535 u64 cfg, mac;
2536 int pf;
2537
2538 rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
2539 PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
2540 if (!rvu)
2541 return -ENODEV;
2542
2543 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
2544 domain = 2;
2545
2546 pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
2547 if (!pdev)
2548 return 0;
2549
2550 cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
2551 bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
2552 mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
2553
2554 seq_puts(s,
2555 "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
2556 seq_printf(s, "%s PF%d %9s %9s",
2557 dev_name(&pdev->dev), pf, bcast, mcast);
2558 if (cfg & CGX_DMAC_CAM_ACCEPT)
2559 seq_printf(s, "%12s\n\n", "UNICAST");
2560 else
2561 seq_printf(s, "%16s\n\n", "PROMISCUOUS");
2562
2563 seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
2564
2565 for (index = 0; index < 32; index++) {
2566 cfg = cgx_read_dmac_entry(cgxd, index);
2567 /* Display enabled dmac entries associated with current lmac */
2568 if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
2569 FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
2570 mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
2571 u64_to_ether_addr(mac, dmac);
2572 seq_printf(s, "%7d %pM\n", index, dmac);
2573 }
2574 }
2575
2576 pci_dev_put(pdev);
2577 return 0;
2578 }
2579
2580 static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
2581 {
2582 int err, lmac_id;
2583
2584 err = rvu_dbg_derive_lmacid(filp, &lmac_id);
2585 if (!err)
2586 return cgx_print_dmac_flt(filp, lmac_id);
2587
2588 return err;
2589 }
2590
2591 RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
2592
2593 static void rvu_dbg_cgx_init(struct rvu *rvu)
2594 {
2595 struct mac_ops *mac_ops;
2596 unsigned long lmac_bmap;
2597 int i, lmac_id;
2598 char dname[20];
2599 void *cgx;
2600
2601 if (!cgx_get_cgxcnt_max())
2602 return;
2603
2604 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
2605 if (!mac_ops)
2606 return;
2607
2608 rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
2609 rvu->rvu_dbg.root);
2610
2611 for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
2612 cgx = rvu_cgx_pdata(i, rvu);
2613 if (!cgx)
2614 continue;
2615 lmac_bmap = cgx_get_lmac_bmap(cgx);
2616 /* cgx debugfs dir */
2617 sprintf(dname, "%s%d", mac_ops->name, i);
2618 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
2619 rvu->rvu_dbg.cgx_root);
2620
2621 for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
2622 /* lmac debugfs dir */
2623 sprintf(dname, "lmac%d", lmac_id);
2624 rvu->rvu_dbg.lmac =
2625 debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
2626
2627 debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
2628 cgx, &rvu_dbg_cgx_stat_fops);
2629 debugfs_create_file("mac_filter", 0600,
2630 rvu->rvu_dbg.lmac, cgx,
2631 &rvu_dbg_cgx_dmac_flt_fops);
2632 }
2633 }
2634 }
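/* Resulting debugfs layout (hypothetical example for one RPM with two
 * LMACs, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   /sys/kernel/debug/octeontx2/rpm/rpm0/lmac0/{stats,mac_filter}
 *   /sys/kernel/debug/octeontx2/rpm/rpm0/lmac1/{stats,mac_filter}
 */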
2635
2636 /* NPC debugfs APIs */
2637 static void rvu_print_npc_mcam_info(struct seq_file *s,
2638 u16 pcifunc, int blkaddr)
2639 {
2640 struct rvu *rvu = s->private;
2641 int entry_acnt, entry_ecnt;
2642 int cntr_acnt, cntr_ecnt;
2643
2644 rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
2645 &entry_acnt, &entry_ecnt);
2646 rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
2647 &cntr_acnt, &cntr_ecnt);
2648 if (!entry_acnt && !cntr_acnt)
2649 return;
2650
2651 if (!(pcifunc & RVU_PFVF_FUNC_MASK))
2652 seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
2653 rvu_get_pf(pcifunc));
2654 else
2655 seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
2656 rvu_get_pf(pcifunc),
2657 (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
2658
2659 if (entry_acnt) {
2660 seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
2661 seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
2662 }
2663 if (cntr_acnt) {
2664 seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
2665 seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
2666 }
2667 }
2668
2669 static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
2670 {
2671 struct rvu *rvu = filp->private;
2672 int pf, vf, numvfs, blkaddr;
2673 struct npc_mcam *mcam;
2674 u16 pcifunc, counters;
2675 u64 cfg;
2676
2677 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2678 if (blkaddr < 0)
2679 return -ENODEV;
2680
2681 mcam = &rvu->hw->mcam;
2682 counters = rvu->hw->npc_counters;
2683
2684 seq_puts(filp, "\nNPC MCAM info:\n");
2685 /* MCAM keywidth on receive and transmit sides */
2686 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
2687 cfg = (cfg >> 32) & 0x07;
2688 seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2689 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2690 "224bits" : "448bits"));
2691 cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
2692 cfg = (cfg >> 32) & 0x07;
2693 seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
2694 "112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
2695 "224bits" : "448bits"));
2696
2697 mutex_lock(&mcam->lock);
2698 /* MCAM entries */
2699 seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
2700 seq_printf(filp, "\t\t Reserved \t: %d\n",
2701 mcam->total_entries - mcam->bmap_entries);
2702 seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
2703
2704 /* MCAM counters */
2705 seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
2706 seq_printf(filp, "\t\t Reserved \t: %d\n",
2707 counters - mcam->counters.max);
2708 seq_printf(filp, "\t\t Available \t: %d\n",
2709 rvu_rsrc_free_count(&mcam->counters));
2710
2711 if (mcam->bmap_entries == mcam->bmap_fcnt) {
2712 mutex_unlock(&mcam->lock);
2713 return 0;
2714 }
2715
2716 seq_puts(filp, "\n\t\t Current allocation\n");
2717 seq_puts(filp, "\t\t====================\n");
2718 for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
2719 pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2720 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2721
2722 cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2723 numvfs = (cfg >> 12) & 0xFF;
2724 for (vf = 0; vf < numvfs; vf++) {
2725 pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
2726 rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
2727 }
2728 }
2729
2730 mutex_unlock(&mcam->lock);
2731 return 0;
2732 }
2733
2734 RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
2735
2736 static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
2737 void *unused)
2738 {
2739 struct rvu *rvu = filp->private;
2740 struct npc_mcam *mcam;
2741 int blkaddr;
2742
2743 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2744 if (blkaddr < 0)
2745 return -ENODEV;
2746
2747 mcam = &rvu->hw->mcam;
2748
2749 seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
2750 seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
2751 rvu_read64(rvu, blkaddr,
2752 NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
2753
2754 return 0;
2755 }
2756
2757 RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
2758
2759 static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
2760 struct rvu_npc_mcam_rule *rule)
2761 {
2762 u8 bit;
2763
2764 for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
2765 seq_printf(s, "\t%s ", npc_get_field_name(bit));
2766 switch (bit) {
2767 case NPC_LXMB:
2768 if (rule->lxmb == 1)
2769 seq_puts(s, "\tL2M nibble is set\n");
2770 else
2771 seq_puts(s, "\tL2B nibble is set\n");
2772 break;
2773 case NPC_DMAC:
2774 seq_printf(s, "%pM ", rule->packet.dmac);
2775 seq_printf(s, "mask %pM\n", rule->mask.dmac);
2776 break;
2777 case NPC_SMAC:
2778 seq_printf(s, "%pM ", rule->packet.smac);
2779 seq_printf(s, "mask %pM\n", rule->mask.smac);
2780 break;
2781 case NPC_ETYPE:
2782 seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
2783 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
2784 break;
2785 case NPC_OUTER_VID:
2786 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_tci));
2787 seq_printf(s, "mask 0x%x\n",
2788 ntohs(rule->mask.vlan_tci));
2789 break;
2790 case NPC_INNER_VID:
2791 seq_printf(s, "0x%x ", ntohs(rule->packet.vlan_itci));
2792 seq_printf(s, "mask 0x%x\n",
2793 ntohs(rule->mask.vlan_itci));
2794 break;
2795 case NPC_TOS:
2796 seq_printf(s, "%d ", rule->packet.tos);
2797 seq_printf(s, "mask 0x%x\n", rule->mask.tos);
2798 break;
2799 case NPC_SIP_IPV4:
2800 seq_printf(s, "%pI4 ", &rule->packet.ip4src);
2801 seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
2802 break;
2803 case NPC_DIP_IPV4:
2804 seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
2805 seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
2806 break;
2807 case NPC_SIP_IPV6:
2808 seq_printf(s, "%pI6 ", rule->packet.ip6src);
2809 seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
2810 break;
2811 case NPC_DIP_IPV6:
2812 seq_printf(s, "%pI6 ", rule->packet.ip6dst);
2813 seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
2814 break;
2815 case NPC_IPFRAG_IPV6:
2816 seq_printf(s, "0x%x ", rule->packet.next_header);
2817 seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
2818 break;
2819 case NPC_IPFRAG_IPV4:
2820 seq_printf(s, "0x%x ", rule->packet.ip_flag);
2821 seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
2822 break;
2823 case NPC_SPORT_TCP:
2824 case NPC_SPORT_UDP:
2825 case NPC_SPORT_SCTP:
2826 seq_printf(s, "%d ", ntohs(rule->packet.sport));
2827 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
2828 break;
2829 case NPC_DPORT_TCP:
2830 case NPC_DPORT_UDP:
2831 case NPC_DPORT_SCTP:
2832 seq_printf(s, "%d ", ntohs(rule->packet.dport));
2833 seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
2834 break;
2835 case NPC_IPSEC_SPI:
2836 seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
2837 seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
2838 break;
2839 default:
2840 seq_puts(s, "\n");
2841 break;
2842 }
2843 }
2844 }
2845
2846 static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
2847 struct rvu_npc_mcam_rule *rule)
2848 {
2849 if (is_npc_intf_tx(rule->intf)) {
2850 switch (rule->tx_action.op) {
2851 case NIX_TX_ACTIONOP_DROP:
2852 seq_puts(s, "\taction: Drop\n");
2853 break;
2854 case NIX_TX_ACTIONOP_UCAST_DEFAULT:
2855 seq_puts(s, "\taction: Unicast to default channel\n");
2856 break;
2857 case NIX_TX_ACTIONOP_UCAST_CHAN:
2858 seq_printf(s, "\taction: Unicast to channel %d\n",
2859 rule->tx_action.index);
2860 break;
2861 case NIX_TX_ACTIONOP_MCAST:
2862 seq_puts(s, "\taction: Multicast\n");
2863 break;
2864 case NIX_TX_ACTIONOP_DROP_VIOL:
2865 seq_puts(s, "\taction: Lockdown Violation Drop\n");
2866 break;
2867 default:
2868 break;
2869 }
2870 } else {
2871 switch (rule->rx_action.op) {
2872 case NIX_RX_ACTIONOP_DROP:
2873 seq_puts(s, "\taction: Drop\n");
2874 break;
2875 case NIX_RX_ACTIONOP_UCAST:
2876 seq_printf(s, "\taction: Direct to queue %d\n",
2877 rule->rx_action.index);
2878 break;
2879 case NIX_RX_ACTIONOP_RSS:
2880 seq_puts(s, "\taction: RSS\n");
2881 break;
2882 case NIX_RX_ACTIONOP_UCAST_IPSEC:
2883 seq_puts(s, "\taction: Unicast ipsec\n");
2884 break;
2885 case NIX_RX_ACTIONOP_MCAST:
2886 seq_puts(s, "\taction: Multicast\n");
2887 break;
2888 default:
2889 break;
2890 }
2891 }
2892 }
2893
2894 static const char *rvu_dbg_get_intf_name(int intf)
2895 {
2896 switch (intf) {
2897 case NIX_INTFX_RX(0):
2898 return "NIX0_RX";
2899 case NIX_INTFX_RX(1):
2900 return "NIX1_RX";
2901 case NIX_INTFX_TX(0):
2902 return "NIX0_TX";
2903 case NIX_INTFX_TX(1):
2904 return "NIX1_TX";
2905 default:
2906 break;
2907 }
2908
2909 return "unknown";
2910 }
2911
2912 static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
2913 {
2914 struct rvu_npc_mcam_rule *iter;
2915 struct rvu *rvu = s->private;
2916 struct npc_mcam *mcam;
2917 int pf, vf = -1;
2918 bool enabled;
2919 int blkaddr;
2920 u16 target;
2921 u64 hits;
2922
2923 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2924 if (blkaddr < 0)
2925 return 0;
2926
2927 mcam = &rvu->hw->mcam;
2928
2929 mutex_lock(&mcam->lock);
2930 list_for_each_entry(iter, &mcam->mcam_rules, list) {
2931 pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2932 seq_printf(s, "\n\tInstalled by: PF%d ", pf);
2933
2934 if (iter->owner & RVU_PFVF_FUNC_MASK) {
2935 vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
2936 seq_printf(s, "VF%d", vf);
2937 }
2938 seq_puts(s, "\n");
2939
2940 seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
2941 "RX" : "TX");
2942 seq_printf(s, "\tinterface: %s\n",
2943 rvu_dbg_get_intf_name(iter->intf));
2944 seq_printf(s, "\tmcam entry: %d\n", iter->entry);
2945
2946 rvu_dbg_npc_mcam_show_flows(s, iter);
2947 if (is_npc_intf_rx(iter->intf)) {
2948 target = iter->rx_action.pf_func;
2949 pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
2950 seq_printf(s, "\tForward to: PF%d ", pf);
2951
2952 if (target & RVU_PFVF_FUNC_MASK) {
2953 vf = (target & RVU_PFVF_FUNC_MASK) - 1;
2954 seq_printf(s, "VF%d", vf);
2955 }
2956 seq_puts(s, "\n");
2957 seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
2958 seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
2959 }
2960
2961 rvu_dbg_npc_mcam_show_action(s, iter);
2962
2963 enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
2964 seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
2965
2966 if (!iter->has_cntr)
2967 continue;
2968 seq_printf(s, "\tcounter: %d\n", iter->cntr);
2969
2970 hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
2971 seq_printf(s, "\thits: %lld\n", hits);
2972 }
2973 mutex_unlock(&mcam->lock);
2974
2975 return 0;
2976 }
2977
2978 RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
2979
2980 static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
2981 {
2982 struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
2983 struct npc_exact_table_entry *cam_entry;
2984 struct npc_exact_table *table;
2985 struct rvu *rvu = s->private;
2986 int i, j;
2987 u8 bitmap = 0;
2989
2990 table = rvu->hw->table;
2991
2992 mutex_lock(&table->lock);
2993
2994 /* Check if there is at least one entry in mem table */
2995 if (!table->mem_tbl_entry_cnt)
2996 goto dump_cam_table;
2997
2998 /* Print table headers */
2999 seq_puts(s, "\n\tExact Match MEM Table\n");
3000 seq_puts(s, "Index\t");
3001
3002 for (i = 0; i < table->mem_table.ways; i++) {
3003 mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
3004 struct npc_exact_table_entry, list);
3005
3006 seq_printf(s, "Way-%d\t\t\t\t\t", i);
3007 }
3008
3009 seq_puts(s, "\n");
3010 for (i = 0; i < table->mem_table.ways; i++)
3011 seq_puts(s, "\tChan MAC \t");
3012
3013 seq_puts(s, "\n\n");
3014
3015 /* Print mem table entries */
3016 for (i = 0; i < table->mem_table.depth; i++) {
3017 bitmap = 0;
3018 for (j = 0; j < table->mem_table.ways; j++) {
3019 if (!mem_entry[j])
3020 continue;
3021
3022 if (mem_entry[j]->index != i)
3023 continue;
3024
3025 bitmap |= BIT(j);
3026 }
3027
3028 /* No valid entries */
3029 if (!bitmap)
3030 continue;
3031
3032 seq_printf(s, "%d\t", i);
3033 for (j = 0; j < table->mem_table.ways; j++) {
3034 if (!(bitmap & BIT(j))) {
3035 seq_puts(s, "nil\t\t\t\t\t");
3036 continue;
3037 }
3038
3039 seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
3040 mem_entry[j]->mac);
3041 mem_entry[j] = list_next_entry(mem_entry[j], list);
3042 }
3043 seq_puts(s, "\n");
3044 }
3045
3046 dump_cam_table:
3047
3048 if (!table->cam_tbl_entry_cnt)
3049 goto done;
3050
3051 seq_puts(s, "\n\tExact Match CAM Table\n");
3052 seq_puts(s, "index\tchan\tMAC\n");
3053
3054 /* Traverse cam table entries */
3055 list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
3056 seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
3057 cam_entry->mac);
3058 }
3059
3060 done:
3061 mutex_unlock(&table->lock);
3062 return 0;
3063 }
3064
3065 RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
3066
3067 static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
3068 {
3069 struct npc_exact_table *table;
3070 struct rvu *rvu = s->private;
3071 int i;
3072
3073 table = rvu->hw->table;
3074
3075 seq_puts(s, "\n\tExact Table Info\n");
3076 seq_printf(s, "Exact Match Feature : %s\n",
3077 rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disabled");
3078 if (!rvu->hw->cap.npc_exact_match_enabled)
3079 return 0;
3080
3081 seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
3082 for (i = 0; i < table->num_drop_rules; i++)
3083 seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
3084
3085 seq_puts(s, "\nMcam Index\tPromisc Mode Status\n");
3086 for (i = 0; i < table->num_drop_rules; i++)
3087 seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
3088
3089 seq_puts(s, "\n\tMEM Table Info\n");
3090 seq_printf(s, "Ways : %d\n", table->mem_table.ways);
3091 seq_printf(s, "Depth : %d\n", table->mem_table.depth);
3092 seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
3093 seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
3094 seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
3095
3096 seq_puts(s, "\n\tCAM Table Info\n");
3097 seq_printf(s, "Depth : %d\n", table->cam_table.depth);
3098
3099 return 0;
3100 }
3101
3102 RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
3103
3104 static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
3105 {
3106 struct npc_exact_table *table;
3107 struct rvu *rvu = s->private;
3108 struct npc_key_field *field;
3109 u16 chan, pcifunc;
3110 int blkaddr, i;
3111 u64 cfg, cam1;
3112 char *str;
3113
3114 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3115 table = rvu->hw->table;
3116
3117 field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
3118
3119 seq_puts(s, "\n\t Exact Hit on drop status\n");
3120 seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
3121
3122 for (i = 0; i < table->num_drop_rules; i++) {
3123 pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
3124 cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
3125
3126 /* The channel is always present in keyword 0 */
3127 cam1 = rvu_read64(rvu, blkaddr,
3128 NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
3129 chan = field->kw_mask[0] & cam1;
3130
3131 str = (cfg & 1) ? "enabled" : "disabled";
3132
3133 seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
3134 rvu_read64(rvu, blkaddr,
3135 NPC_AF_MATCH_STATX(table->counter_idx[i])),
3136 chan, str);
3137 }
3138
3139 return 0;
3140 }
3141
3142 RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
3143
3144 static void rvu_dbg_npc_init(struct rvu *rvu)
3145 {
3146 rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
3147
3148 debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
3149 &rvu_dbg_npc_mcam_info_fops);
3150 debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
3151 &rvu_dbg_npc_mcam_rules_fops);
3152
3153 debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
3154 &rvu_dbg_npc_rx_miss_act_fops);
3155
3156 if (!rvu->hw->cap.npc_exact_match_enabled)
3157 return;
3158
3159 debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
3160 &rvu_dbg_npc_exact_entries_fops);
3161
3162 debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
3163 &rvu_dbg_npc_exact_info_fops);
3164
3165 debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
3166 &rvu_dbg_npc_exact_drop_cnt_fops);
3168 }
3169
3170 static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
3171 {
3172 struct cpt_ctx *ctx = filp->private;
3173 u64 busy_sts = 0, free_sts = 0;
3174 u32 e_min = 0, e_max = 0, e, i;
3175 u16 max_ses, max_ies, max_aes;
3176 struct rvu *rvu = ctx->rvu;
3177 int blkaddr = ctx->blkaddr;
3178 u64 reg;
3179
3180 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3181 max_ses = reg & 0xffff;
3182 max_ies = (reg >> 16) & 0xffff;
3183 max_aes = (reg >> 32) & 0xffff;
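/* Engine numbering packs the three types back to back: SEs occupy
 * [0, max_ses), IEs [max_ses, max_ses + max_ies), and AEs the rest,
 * which yields the contiguous [e_min, e_max) ranges computed below.
 */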
3184
3185 switch (eng_type) {
3186 case CPT_AE_TYPE:
3187 e_min = max_ses + max_ies;
3188 e_max = max_ses + max_ies + max_aes;
3189 break;
3190 case CPT_SE_TYPE:
3191 e_min = 0;
3192 e_max = max_ses;
3193 break;
3194 case CPT_IE_TYPE:
3195 e_min = max_ses;
3196 e_max = max_ses + max_ies;
3197 break;
3198 default:
3199 return -EINVAL;
3200 }
3201
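/* Per CPT_AF_EXEX_STS as used here, bit 0 flags a busy engine and
 * bit 1 a free one; fold them into per-type bitmasks for display.
 */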
3202 for (e = e_min, i = 0; e < e_max; e++, i++) {
3203 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
3204 if (reg & 0x1)
3205 busy_sts |= 1ULL << i;
3206
3207 if (reg & 0x2)
3208 free_sts |= 1ULL << i;
3209 }
3210 seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
3211 seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
3212
3213 return 0;
3214 }
3215
3216 static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
3217 {
3218 return cpt_eng_sts_display(filp, CPT_AE_TYPE);
3219 }
3220
3221 RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
3222
3223 static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
3224 {
3225 return cpt_eng_sts_display(filp, CPT_SE_TYPE);
3226 }
3227
3228 RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
3229
3230 static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
3231 {
3232 return cpt_eng_sts_display(filp, CPT_IE_TYPE);
3233 }
3234
3235 RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
3236
3237 static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
3238 {
3239 struct cpt_ctx *ctx = filp->private;
3240 u16 max_ses, max_ies, max_aes;
3241 struct rvu *rvu = ctx->rvu;
3242 int blkaddr = ctx->blkaddr;
3243 u32 e_max, e;
3244 u64 reg;
3245
3246 reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
3247 max_ses = reg & 0xffff;
3248 max_ies = (reg >> 16) & 0xffff;
3249 max_aes = (reg >> 32) & 0xffff;
3250
3251 e_max = max_ses + max_ies + max_aes;
3252
3253 seq_puts(filp, "===========================================\n");
3254 for (e = 0; e < e_max; e++) {
3255 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
3256 seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
3257 reg & 0xff);
3258 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
3259 seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
3260 reg);
3261 reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
3262 seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
3263 reg);
3264 seq_puts(filp, "===========================================\n");
3265 }
3266 return 0;
3267 }
3268
3269 RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
3270
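/*
 * Dump the control registers of every CPT local function (LF).  Bails
 * out with -ENODEV if the block's LF bitmap was never allocated, i.e.
 * the block has no provisioned LFs to report.
 */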
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	int blkaddr = ctx->blkaddr;
	struct rvu *rvu = ctx->rvu;
	struct rvu_block *block;
	struct rvu_hwinfo *hw;
	u64 reg;
	u32 lf;

	hw = rvu->hw;
	block = &hw->block[blkaddr];
	if (!block->lf.bmap)
		return -ENODEV;

	seq_puts(filp, "===========================================\n");
	for (lf = 0; lf < block->lf.max; lf++) {
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
		seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
		seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
		reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
				 (lf << block->lfshift));
		seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
		seq_puts(filp, "===========================================\n");
	}
	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);

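/*
 * Dump the CPT error/interrupt summary registers: the fault (FLT) and
 * poison (PSN) interrupt registers, the RVU and RAS interrupt status,
 * and the engine error info register.
 */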
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg0, reg1;

	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
	seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
	reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
	seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
	seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
	seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
	seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
	reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);

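/*
 * Dump the CPT performance counters.  Since the *_LATENCY_PC registers
 * accumulate total latency while the *_REQ_PC registers count requests,
 * a rough per-request average can presumably be derived from a single
 * snapshot, e.g.:
 *
 *	avg inst latency ~= CPT_AF_INST_LATENCY_PC / CPT_AF_INST_REQ_PC
 *
 * (That derivation is an assumption based on the counter names; this
 * file only prints the raw values.)
 */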
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
	struct cpt_ctx *ctx = filp->private;
	struct rvu *rvu = ctx->rvu;
	int blkaddr = ctx->blkaddr;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	seq_printf(filp, "CPT instruction requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	seq_printf(filp, "CPT instruction latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	seq_printf(filp, "CPT NCB read requests %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	seq_printf(filp, "CPT NCB read latency %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
	seq_printf(filp, "CPT active cycles pc %llu\n", reg);
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	seq_printf(filp, "CPT clock count pc %llu\n", reg);

	return 0;
}

RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);

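/*
 * Create the per-block CPT debugfs directory ("cpt" for CPT0, "cpt1"
 * for CPT1) and its entries.  Unlike the 0444 NPC entries, these are
 * 0600, i.e. root-only.  A sketch of expected usage, assuming the
 * conventional debugfs mount point (the printed values are purely
 * illustrative):
 *
 *	# cat /sys/kernel/debug/octeontx2/cpt/cpt_ae_sts
 *	FREE STS : 0x0000000000000003
 *	BUSY STS : 0x0000000000000000
 */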
static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
{
	struct cpt_ctx *ctx;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;

	if (blkaddr == BLKADDR_CPT0) {
		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
		ctx = &rvu->rvu_dbg.cpt_ctx[0];
		ctx->blkaddr = BLKADDR_CPT0;
		ctx->rvu = rvu;
	} else {
		rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
						      rvu->rvu_dbg.root);
		ctx = &rvu->rvu_dbg.cpt_ctx[1];
		ctx->blkaddr = BLKADDR_CPT1;
		ctx->rvu = rvu;
	}

	debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_pc_fops);
	debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_ae_sts_fops);
	debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_se_sts_fops);
	debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_ie_sts_fops);
	debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_engines_info_fops);
	debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_lfs_info_fops);
	debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
			    &rvu_dbg_cpt_err_info_fops);
}

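/* Top-level debugfs directory name: "octeontx2" on OcteonTX2 silicon,
 * "cn10k" otherwise.
 */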
static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
{
	if (!is_rvu_otx2(rvu))
		return "cn10k";

	return "octeontx2";
}

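/*
 * rvu_dbg_init() - build the whole debugfs tree for the AF.
 *
 * Rooted at /sys/kernel/debug/<octeontx2|cn10k>/ (assuming the
 * conventional debugfs mount point): top-level files first (rsrc_alloc,
 * the CN10K-only lmtst_map_table, and the PF-to-CGX/RPM map when CGX
 * links exist), then one subtree per hardware block via the
 * rvu_dbg_*_init() helpers.
 */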
void rvu_dbg_init(struct rvu *rvu)
{
	rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);

	debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
			    &rvu_dbg_rsrc_status_fops);

	if (!is_rvu_otx2(rvu))
		debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_lmtst_map_table_fops);

	if (!cgx_get_cgxcnt_max())
		goto create;

	if (is_rvu_otx2(rvu))
		debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
	else
		debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
				    rvu, &rvu_dbg_rvu_pf_cgx_map_fops);

create:
	rvu_dbg_npa_init(rvu);
	rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
	rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
	rvu_dbg_cgx_init(rvu);
	rvu_dbg_npc_init(rvu);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
	rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
	rvu_dbg_mcs_init(rvu);
}

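/* Tear down the entire tree created in rvu_dbg_init(); the recursive
 * removal covers every per-block subdirectory, so the individual
 * entries need no separate cleanup.
 */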
void rvu_dbg_exit(struct rvu *rvu)
{
	debugfs_remove_recursive(rvu->rvu_dbg.root);
}

#endif /* CONFIG_DEBUG_FS */