/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"

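/*
 * Each counter below is described by a counter_desc: the name reported by
 * "ethtool -S" and the byte offset of the backing field. MLX5E_DECLARE_STAT()
 * (see en_stats.h) expands to the field name as the format string plus
 * offsetof() into the given stats structure.
 */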
static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },

#ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)

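/*
 * Every stats group implements the same trio of helpers: one reporting how
 * many counters it exposes, one filling the ethtool string table (one name
 * per ETH_GSTRING_LEN slot), and one filling the u64 data array. The idx
 * cursor is threaded through all groups so their output packs back to back.
 */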
static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_SW_COUNTERS;
}

static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
        int i;

        for (i = 0; i < NUM_SW_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
        int i;

        for (i = 0; i < NUM_SW_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
        return idx;
}

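/*
 * Fold the per-channel (and per-XDP/XSK queue) counters into the single
 * mlx5e_sw_stats snapshot. The sums run over priv->max_nch channels and
 * priv->max_opened_tc TCs rather than only the currently active ones, so
 * counters accumulated on queues that have since been closed are kept.
 */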
static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        int i;

        memset(s, 0, sizeof(*s));

        for (i = 0; i < priv->max_nch; i++) {
                struct mlx5e_channel_stats *channel_stats =
                        &priv->channel_stats[i];
                struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
                struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
                struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
                struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
                struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
                struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
                int j;

                s->rx_packets += rq_stats->packets;
                s->rx_bytes += rq_stats->bytes;
                s->rx_lro_packets += rq_stats->lro_packets;
                s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_ecn_mark += rq_stats->ecn_mark;
                s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
                s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
                s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
                s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_xdp_drop += rq_stats->xdp_drop;
                s->rx_xdp_redirect += rq_stats->xdp_redirect;
                s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
                s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
                s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
                s->rx_xdp_tx_nops += xdpsq_stats->nops;
                s->rx_xdp_tx_full += xdpsq_stats->full;
                s->rx_xdp_tx_err += xdpsq_stats->err;
                s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
                s->rx_wqe_err += rq_stats->wqe_err;
                s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
                s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
                s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
                s->rx_cache_reuse += rq_stats->cache_reuse;
                s->rx_cache_full += rq_stats->cache_full;
                s->rx_cache_empty += rq_stats->cache_empty;
                s->rx_cache_busy += rq_stats->cache_busy;
                s->rx_cache_waive += rq_stats->cache_waive;
                s->rx_congst_umr += rq_stats->congst_umr;
                s->rx_arfs_err += rq_stats->arfs_err;
                s->rx_recover += rq_stats->recover;
                s->ch_events += ch_stats->events;
                s->ch_poll += ch_stats->poll;
                s->ch_arm += ch_stats->arm;
                s->ch_aff_change += ch_stats->aff_change;
                s->ch_force_irq += ch_stats->force_irq;
                s->ch_eq_rearm += ch_stats->eq_rearm;
                /* xdp redirect */
                s->tx_xdp_xmit += xdpsq_red_stats->xmit;
                s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
                s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
                s->tx_xdp_nops += xdpsq_red_stats->nops;
                s->tx_xdp_full += xdpsq_red_stats->full;
                s->tx_xdp_err += xdpsq_red_stats->err;
                s->tx_xdp_cqes += xdpsq_red_stats->cqes;
                /* AF_XDP zero-copy */
                s->rx_xsk_packets += xskrq_stats->packets;
                s->rx_xsk_bytes += xskrq_stats->bytes;
                s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
                s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
                s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
                s->rx_xsk_csum_none += xskrq_stats->csum_none;
                s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
                s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
                s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
                s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
                s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
                s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
                s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
                s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
                s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
                s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
                s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
                s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
                s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
                s->tx_xsk_xmit += xsksq_stats->xmit;
                s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
                s->tx_xsk_inlnw += xsksq_stats->inlnw;
                s->tx_xsk_full += xsksq_stats->full;
                s->tx_xsk_err += xsksq_stats->err;
                s->tx_xsk_cqes += xsksq_stats->cqes;

                for (j = 0; j < priv->max_opened_tc; j++) {
                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

                        s->tx_packets += sq_stats->packets;
                        s->tx_bytes += sq_stats->bytes;
                        s->tx_tso_packets += sq_stats->tso_packets;
                        s->tx_tso_bytes += sq_stats->tso_bytes;
                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
                        s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
                        s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
                        s->tx_nop += sq_stats->nop;
                        s->tx_queue_stopped += sq_stats->stopped;
                        s->tx_queue_wake += sq_stats->wake;
                        s->tx_queue_dropped += sq_stats->dropped;
                        s->tx_cqe_err += sq_stats->cqe_err;
                        s->tx_recover += sq_stats->recover;
                        s->tx_xmit_more += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        s->tx_csum_none += sq_stats->csum_none;
                        s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
                        s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
                        s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
                        s->tx_tls_ctx += sq_stats->tls_ctx;
                        s->tx_tls_ooo += sq_stats->tls_ooo;
                        s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
                        s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
                        s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
                        s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
                        s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
                        s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
                        s->tx_cqes += sq_stats->cqes;
                }
        }
}

static const struct counter_desc q_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)

static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
{
        int num_stats = 0;

        if (priv->q_counter)
                num_stats += NUM_Q_COUNTERS;

        if (priv->drop_rq_q_counter)
                num_stats += NUM_DROP_RQ_COUNTERS;

        return num_stats;
}

static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
        int i;

        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       q_stats_desc[i].format);

        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       drop_rq_stats_desc[i].format);

        return idx;
}

static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
        int i;

        for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
                                                   q_stats_desc, i);
        for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
                data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
                                                   drop_rq_stats_desc, i);
        return idx;
}

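/*
 * Queue counters live in firmware and are read with QUERY_Q_COUNTER. Both
 * reads below fetch the out_of_buffer field: the drop RQ absorbs traffic
 * while the channels are closed, so its out-of-buffer count is what gets
 * reported as rx_if_down_packets.
 */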
static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
        u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];

        if (priv->q_counter &&
            !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
                                       sizeof(out)))
                qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
                                                  out, out_of_buffer);
        if (priv->drop_rq_q_counter &&
            !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
                                       out, sizeof(out)))
                qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
                                                    out_of_buffer);
}

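/*
 * VNIC environment counters are read with the QUERY_VNIC_ENV command into a
 * big-endian output buffer; the descriptors below index into it by byte
 * offset. Each counter is exposed only when the matching general capability
 * bit is set.
 */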
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
        { "rx_steer_missed_packets",
          VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
        { "dev_internal_queue_oob",
          VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
        (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
         ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
        (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
         ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
               NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                           int idx)
{
        int i;

        for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       vnic_env_stats_steer_desc[i].format);

        for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       vnic_env_stats_dev_oob_desc[i].format);
        return idx;
}

static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                         int idx)
{
        int i;

        for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
                                                  vnic_env_stats_steer_desc, i);

        for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
                data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
                                                  vnic_env_stats_dev_oob_desc, i);
        return idx;
}

static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
        u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
        int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
        u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
        struct mlx5_core_dev *mdev = priv->mdev;

        if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
                return;

        MLX5_SET(query_vnic_env_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VNIC_ENV);
        MLX5_SET(query_vnic_env_in, in, op_mod, 0);
        MLX5_SET(query_vnic_env_in, in, other_vport, 0);
        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

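/*
 * vport counters are fetched with the QUERY_VPORT_COUNTER command for the
 * local vport; all fields are 64-bit big-endian, hence MLX5E_READ_CTR64_BE
 * in the fill helper below.
 */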
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
        { "rx_vport_unicast_packets",
          VPORT_COUNTER_OFF(received_eth_unicast.packets) },
        { "rx_vport_unicast_bytes",
          VPORT_COUNTER_OFF(received_eth_unicast.octets) },
        { "tx_vport_unicast_packets",
          VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
        { "tx_vport_unicast_bytes",
          VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
        { "rx_vport_multicast_packets",
          VPORT_COUNTER_OFF(received_eth_multicast.packets) },
        { "rx_vport_multicast_bytes",
          VPORT_COUNTER_OFF(received_eth_multicast.octets) },
        { "tx_vport_multicast_packets",
          VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
        { "tx_vport_multicast_bytes",
          VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
        { "rx_vport_broadcast_packets",
          VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
        { "rx_vport_broadcast_bytes",
          VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
        { "tx_vport_broadcast_packets",
          VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
        { "tx_vport_broadcast_bytes",
          VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
        { "rx_vport_rdma_unicast_packets",
          VPORT_COUNTER_OFF(received_ib_unicast.packets) },
        { "rx_vport_rdma_unicast_bytes",
          VPORT_COUNTER_OFF(received_ib_unicast.octets) },
        { "tx_vport_rdma_unicast_packets",
          VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
        { "tx_vport_rdma_unicast_bytes",
          VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
        { "rx_vport_rdma_multicast_packets",
          VPORT_COUNTER_OFF(received_ib_multicast.packets) },
        { "rx_vport_rdma_multicast_bytes",
          VPORT_COUNTER_OFF(received_ib_multicast.octets) },
        { "tx_vport_rdma_multicast_packets",
          VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
        { "tx_vport_rdma_multicast_bytes",
          VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)

static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_VPORT_COUNTERS;
}

static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                        int idx)
{
        int i;

        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                      int idx)
{
        int i;

        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
                                                  vport_stats_desc, i);
        return idx;
}

static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
{
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
        struct mlx5_core_dev *mdev = priv->mdev;

        MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);
        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

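/*
 * Physical port counters are read through the PPCNT access register, one
 * counter group per query. 64-bit counters are laid out as <name>_high /
 * <name>_low 32-bit halves, so taking MLX5_BYTE_OFF() of the _high half
 * yields the offset of the full big-endian 64-bit value.
 */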
#define PPORT_802_3_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
        { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
        { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
        { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
        { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
        { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
        { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
        { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
        { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
        { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
        { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
        { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
        { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
        { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
        { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
        { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
        { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
        { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
        { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)

static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_PPORT_802_3_COUNTERS;
}

static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                        int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                      int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
                                                  pport_802_3_stats_desc, i);
        return idx;
}

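/* PPCNT is assumed present when there is no PCAM register to say otherwise. */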
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
        (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->IEEE_802_3_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2863_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
        { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
        { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
        { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)

static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_PPORT_2863_COUNTERS;
}

static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                       int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                     int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
                                                  pport_2863_stats_desc, i);
        return idx;
}

static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
        { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
        { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
        { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
        { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
        { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
        { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
        { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
        { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
        { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
        { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
        { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
        { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
        { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)

static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_PPORT_2819_COUNTERS;
}

static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                       int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                     int idx)
{
        int i;

        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
                                                  pport_2819_stats_desc, i);
        return idx;
}

static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

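/*
 * Physical layer statistical counters (PCS symbol errors and FEC corrected
 * bits) are only present when the PCAM ppcnt_statistical_group feature bit
 * is set; the per-lane variants additionally require per_lane_error_counters.
 */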
#define PPORT_PHY_STATISTICAL_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
        { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
        { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
        { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
        { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
        { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
        { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
        ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
        ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int num_stats;

        /* "1" for link_down_events special counter */
        num_stats = 1;

        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
                     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

        num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
                     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

        return num_stats;
}

static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                      int idx)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int i;

        strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
                return idx;

        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pport_phy_statistical_stats_desc[i].format);

        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pport_phy_statistical_err_lanes_stats_desc[i].format);

        return idx;
}

static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int i;

        /* link_down_events_phy has special handling since it is not stored in __be64 format */
        data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
                               counter_set.phys_layer_cntrs.link_down_events);

        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
                return idx;

        for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
                data[idx++] =
                        MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
                                            pport_phy_statistical_stats_desc, i);

        if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
                for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
                                                    pport_phy_statistical_err_lanes_stats_desc,
                                                    i);
        return idx;
}

static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->phy_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
                return;

        out = pstats->phy_statistical_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_ETH_EXT_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
        { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)

static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
{
        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
                return NUM_PPORT_ETH_EXT_COUNTERS;

        return 0;
}

static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                          int idx)
{
        int i;

        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pport_eth_ext_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                        int idx)
{
        int i;

        if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
                for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
                                                    pport_eth_ext_stats_desc, i);
        return idx;
}

static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;

        if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        out = pstats->eth_ext_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

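/*
 * PCIe performance counters come from the MPCNT access register rather than
 * PPCNT; availability of each subset is advertised through MCAM feature
 * bits, and the counters are a mix of 32-bit and 64-bit big-endian fields.
 */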
#define PCIE_PERF_OFF(c) \
        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
        { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
        { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
        MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
        { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
        { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
        { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
        { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
        { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)

static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
{
        int num_stats = 0;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
                num_stats += NUM_PCIE_PERF_COUNTERS;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
                num_stats += NUM_PCIE_PERF_COUNTERS64;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

        return num_stats;
}

static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                       int idx)
{
        int i;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pcie_perf_stats_desc[i].format);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pcie_perf_stats_desc64[i].format);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
                        strcpy(data + (idx++) * ETH_GSTRING_LEN,
                               pcie_perf_stall_stats_desc[i].format);
        return idx;
}

static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                     int idx)
{
        int i;

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
                                                    pcie_perf_stats_desc, i);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
                for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
                                                    pcie_perf_stats_desc64, i);

        if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
                for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
                                                    pcie_perf_stall_stats_desc, i);
        return idx;
}

static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
        void *out;

        if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
                return;

        out = pcie_stats->pcie_perf_counters;
        MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

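/*
 * Per-TC buffer and congestion counters are queried once per priority by
 * rewriting the prio_tc field between PPCNT accesses; both groups are gated
 * on the sbcam_reg capability.
 */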
#define PPORT_PER_TC_PRIO_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
        { "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
        { "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
        { "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
        ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return 0;

        return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv,
                                                          u8 *data, int idx)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int i, prio;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return idx;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_tc_prio_stats_desc[i].format, prio);
                for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_tc_congest_prio_stats_desc[i].format, prio);
        }

        return idx;
}

static int mlx5e_grp_per_port_buffer_congest_fill_stats(struct mlx5e_priv *priv,
                                                        u64 *data, int idx)
{
        struct mlx5e_pport_stats *pport = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        int i, prio;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return idx;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
                                                    pport_per_tc_prio_stats_desc, i);
                for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
                                                    pport_per_tc_congest_prio_stats_desc, i);
        }

        return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;
        int prio;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return;

        MLX5_SET(ppcnt_reg, in, pnat, 2);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_tc_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
        }
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return 0;

        return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        void *out;
        int prio;

        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return;

        MLX5_SET(ppcnt_reg, in, pnat, 2);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_tc_congest_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
        }
}

static int mlx5e_grp_per_port_buffer_congest_get_num_stats(struct mlx5e_priv *priv)
{
        return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
               mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv)
{
        mlx5e_grp_per_tc_prio_update_stats(priv);
        mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

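/*
 * Per-priority traffic counters are always exposed for all priorities. PFC
 * pause counters, by contrast, are reported only for priorities on which PFC
 * is currently enabled, plus a "global" set (read from the prio 0 page) when
 * plain global pause is active.
 */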
#define PPORT_PER_PRIO_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
        { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
        { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
        { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
        { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
        return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
                                                   u8 *data,
                                                   int idx)
{
        int i, prio;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_traffic_stats_desc[i].format, prio);
        }

        return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
                                                 u64 *data,
                                                 int idx)
{
        int i, prio;

        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
                                                    pport_per_prio_traffic_stats_desc, i);
        }

        return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
        /* %s is "global" or "prio{i}" */
        { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
        { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
        { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
        { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
        { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
        { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
        { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
                                            MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
                                            MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

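/*
 * Returns a bitmap of priorities with PFC enabled in either direction, so
 * hweight8() of the result counts the priorities that get PFC counters.
 */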
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 pfc_en_tx;
        u8 pfc_en_rx;
        int err;

        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return 0;

        err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

        return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 rx_pause;
        u32 tx_pause;
        int err;

        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return false;

        err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

        return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
        return (mlx5e_query_global_pause_combined(priv) +
                hweight8(mlx5e_query_pfc_combined(priv))) *
                NUM_PPORT_PER_PRIO_PFC_COUNTERS +
                NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
                                               u8 *data,
                                               int idx)
{
        unsigned long pfc_combined;
        int i, prio;

        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        char pfc_string[ETH_GSTRING_LEN];

                        snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_pfc_stats_desc[i].format, pfc_string);
                }
        }

        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
                                pport_per_prio_pfc_stats_desc[i].format, "global");
                }
        }

        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pport_pfc_stall_stats_desc[i].format);

        return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
                                             u64 *data,
                                             int idx)
{
        unsigned long pfc_combined;
        int i, prio;

        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
                                                    pport_per_prio_pfc_stats_desc, i);
                }
        }

        if (mlx5e_query_global_pause_combined(priv)) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
                        data[idx++] =
                                MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
                                                    pport_per_prio_pfc_stats_desc, i);
                }
        }

        for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
                                                  pport_pfc_stall_stats_desc, i);

        return idx;
}

static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
{
        return mlx5e_grp_per_prio_traffic_get_num_stats() +
               mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                           int idx)
{
        idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
        idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
        return idx;
}

static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                         int idx)
{
        idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
        idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
        return idx;
}

static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int prio;
        void *out;

        if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
                return;

        MLX5_SET(ppcnt_reg, in, local_port, 1);
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz,
                                     MLX5_REG_PPCNT, 0, 0);
        }
}

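/*
 * Port module event (PME) counters are maintained per event type by the core
 * driver; the descriptor offset is simply the event enum value scaled by
 * sizeof(u64) into the status/error counter arrays.
 */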
1339 static const struct counter_desc mlx5e_pme_status_desc[] = {
1340 { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
1341 };
1342
1343 static const struct counter_desc mlx5e_pme_error_desc[] = {
1344 { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1345 { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1346 { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
1347 };
1348
1349 #define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
1350 #define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
1351
static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
				    int idx)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

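/* The IPsec and TLS groups only delegate to the en_accel helpers. When
 * the corresponding Kconfig option is off, those helpers are expected
 * to be stubbed out and to report zero counters, so the groups simply
 * vanish from the ethtool output.
 */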
static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_ipsec_get_count(priv);
}

static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	return idx + mlx5e_ipsec_get_strings(priv,
					     data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}

static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_update_stats(priv);
}

static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_tls_get_count(priv);
}

static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

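/* Per-ring descriptors. The MLX5E_DECLARE_*_STAT() macros pair a
 * printf-style format string carrying a ring-index placeholder
 * (e.g. "rx%d_packets") with the field offset inside the stats struct;
 * mlx5e_grp_channels_fill_strings() below substitutes the channel (or
 * txq) number into that format for every ring.
 */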
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

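/* The XSK TX ring reuses struct mlx5e_xdpsq_stats, but only a subset of
 * its fields is exported here (note there is no "nops" entry, unlike
 * the XDP SQ descriptors above).
 */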
static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)

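/* The channels group is sized for the maximum number of channels and
 * TCs ever opened (max_nch, max_opened_tc) rather than the current
 * configuration, so the string set stays stable across reconfiguration.
 * The bool priv->xsk.ever_used gates the XSK counters the same way: it
 * multiplies out to zero entries until an XSK socket has been used.
 */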
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
	int max_nch = priv->max_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}

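/* fill_strings() and fill_stats() must walk the rings in exactly the
 * same order and emit exactly get_num_stats() entries, since ethtool
 * matches counter names to values purely by position.
 */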
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					priv->channel_tc2txq[i][tc]);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}

static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

/* The stats groups are listed here in the opposite order to their
 * update_stats() calls.
 */
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
	{
		.get_num_stats = mlx5e_grp_sw_get_num_stats,
		.fill_strings = mlx5e_grp_sw_fill_strings,
		.fill_stats = mlx5e_grp_sw_fill_stats,
		.update_stats = mlx5e_grp_sw_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_q_get_num_stats,
		.fill_strings = mlx5e_grp_q_fill_strings,
		.fill_stats = mlx5e_grp_q_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_q_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
		.fill_strings = mlx5e_grp_vnic_env_fill_strings,
		.fill_stats = mlx5e_grp_vnic_env_fill_stats,
		.update_stats = mlx5e_grp_vnic_env_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_vport_get_num_stats,
		.fill_strings = mlx5e_grp_vport_fill_strings,
		.fill_stats = mlx5e_grp_vport_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_vport_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_802_3_get_num_stats,
		.fill_strings = mlx5e_grp_802_3_fill_strings,
		.fill_stats = mlx5e_grp_802_3_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_802_3_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2863_get_num_stats,
		.fill_strings = mlx5e_grp_2863_fill_strings,
		.fill_stats = mlx5e_grp_2863_fill_stats,
		.update_stats = mlx5e_grp_2863_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2819_get_num_stats,
		.fill_strings = mlx5e_grp_2819_fill_strings,
		.fill_stats = mlx5e_grp_2819_fill_stats,
		.update_stats = mlx5e_grp_2819_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_phy_get_num_stats,
		.fill_strings = mlx5e_grp_phy_fill_strings,
		.fill_stats = mlx5e_grp_phy_fill_stats,
		.update_stats = mlx5e_grp_phy_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
		.fill_strings = mlx5e_grp_eth_ext_fill_strings,
		.fill_stats = mlx5e_grp_eth_ext_fill_stats,
		.update_stats = mlx5e_grp_eth_ext_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pcie_get_num_stats,
		.fill_strings = mlx5e_grp_pcie_fill_strings,
		.fill_stats = mlx5e_grp_pcie_fill_stats,
		.update_stats = mlx5e_grp_pcie_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_per_prio_get_num_stats,
		.fill_strings = mlx5e_grp_per_prio_fill_strings,
		.fill_stats = mlx5e_grp_per_prio_fill_stats,
		.update_stats = mlx5e_grp_per_prio_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pme_get_num_stats,
		.fill_strings = mlx5e_grp_pme_fill_strings,
		.fill_stats = mlx5e_grp_pme_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
		.fill_strings = mlx5e_grp_ipsec_fill_strings,
		.fill_stats = mlx5e_grp_ipsec_fill_stats,
		.update_stats = mlx5e_grp_ipsec_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_tls_get_num_stats,
		.fill_strings = mlx5e_grp_tls_fill_strings,
		.fill_stats = mlx5e_grp_tls_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_channels_get_num_stats,
		.fill_strings = mlx5e_grp_channels_fill_strings,
		.fill_stats = mlx5e_grp_channels_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_per_port_buffer_congest_get_num_stats,
		.fill_strings = mlx5e_grp_per_port_buffer_congest_fill_strings,
		.fill_stats = mlx5e_grp_per_port_buffer_congest_fill_stats,
		.update_stats = mlx5e_grp_per_port_buffer_congest_update_stats,
	},
};

const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);

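/* Usage sketch (illustrative only, not part of this file): the ethtool
 * glue is expected to size, name and fill the counters by walking the
 * table above in order, e.g.:
 *
 *	int i, idx = 0, num = 0;
 *
 *	for (i = 0; i < mlx5e_num_stats_grps; i++)
 *		num += mlx5e_stats_grps[i].get_num_stats(priv);
 *
 *	for (i = 0; i < mlx5e_num_stats_grps; i++)
 *		idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
 *
 * Because every fill callback returns the updated index, the groups
 * pack their counters back to back into one flat array.
 */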