1 /*
2  * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include "lib/mlx5.h"
34 #include "en.h"
35 #include "en_accel/tls.h"
36 #include "en_accel/en_accel.h"
37 #include "en/ptp.h"
38 
stats_grps_num(struct mlx5e_priv * priv)39 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
40 {
41 	return !priv->profile->stats_grps_num ? 0 :
42 		priv->profile->stats_grps_num(priv);
43 }
44 
mlx5e_stats_total_num(struct mlx5e_priv * priv)45 unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
46 {
47 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
48 	const unsigned int num_stats_grps = stats_grps_num(priv);
49 	unsigned int total = 0;
50 	int i;
51 
52 	for (i = 0; i < num_stats_grps; i++)
53 		total += stats_grps[i]->get_num_stats(priv);
54 
55 	return total;
56 }
57 
mlx5e_stats_update_ndo_stats(struct mlx5e_priv * priv)58 void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
59 {
60 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
61 	const unsigned int num_stats_grps = stats_grps_num(priv);
62 	int i;
63 
64 	for (i = num_stats_grps - 1; i >= 0; i--)
65 		if (stats_grps[i]->update_stats &&
66 		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
67 			stats_grps[i]->update_stats(priv);
68 }
69 
mlx5e_stats_update(struct mlx5e_priv * priv)70 void mlx5e_stats_update(struct mlx5e_priv *priv)
71 {
72 	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
73 	const unsigned int num_stats_grps = stats_grps_num(priv);
74 	int i;
75 
76 	for (i = num_stats_grps - 1; i >= 0; i--)
77 		if (stats_grps[i]->update_stats)
78 			stats_grps[i]->update_stats(priv);
79 }
80 
/* Fill @data with counter values, letting each group advance @idx by
 * however many values it wrote.
 */
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *grps = priv->profile->stats_grps;
	const unsigned int ngrps = stats_grps_num(priv);
	unsigned int grp;

	for (grp = 0; grp < ngrps; grp++)
		idx = grps[grp]->fill_stats(priv, data, idx);
}
90 
/* Fill @data with the ethtool string names of all counters; each group
 * advances the running string index.
 */
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *grps = priv->profile->stats_grps;
	const unsigned int ngrps = stats_grps_num(priv);
	unsigned int grp;
	int idx = 0;

	for (grp = 0; grp < ngrps; grp++)
		idx = grps[grp]->fill_strings(priv, data, idx);
}
100 
101 /* Concrete NIC Stats */
102 
/* Software counters: per-channel counters aggregated into
 * struct mlx5e_sw_stats by the sw group's update_stats callback.
 * Each entry maps an mlx5e_sw_stats field to its ethtool string.
 */
static const struct counter_desc sw_stats_desc[] = {
	/* basic RX/TX datapath */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	/* TX TLS offload */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	/* RX datapath, checksum and XDP-on-RX */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	/* TX queue state and XDP_REDIRECT xmit */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	/* RQ error/recovery and page-cache counters */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_MLX5_EN_TLS
	/* RX TLS offload */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	/* per-channel (NAPI/EQ) counters */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	/* AF_XDP zero-copy RX/TX */
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};
224 
225 #define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
226 
/* The sw group's counter count is fixed at compile time. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}
231 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)232 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
233 {
234 	int i;
235 
236 	for (i = 0; i < NUM_SW_COUNTERS; i++)
237 		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
238 	return idx;
239 }
240 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)241 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
242 {
243 	int i;
244 
245 	for (i = 0; i < NUM_SW_COUNTERS; i++)
246 		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
247 	return idx;
248 }
249 
mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats * s,struct mlx5e_xdpsq_stats * xdpsq_red_stats)250 static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
251 						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
252 {
253 	s->tx_xdp_xmit  += xdpsq_red_stats->xmit;
254 	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
255 	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
256 	s->tx_xdp_nops  += xdpsq_red_stats->nops;
257 	s->tx_xdp_full  += xdpsq_red_stats->full;
258 	s->tx_xdp_err   += xdpsq_red_stats->err;
259 	s->tx_xdp_cqes  += xdpsq_red_stats->cqes;
260 }
261 
mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats * s,struct mlx5e_xdpsq_stats * xdpsq_stats)262 static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
263 						  struct mlx5e_xdpsq_stats *xdpsq_stats)
264 {
265 	s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
266 	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
267 	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
268 	s->rx_xdp_tx_nops  += xdpsq_stats->nops;
269 	s->rx_xdp_tx_full  += xdpsq_stats->full;
270 	s->rx_xdp_tx_err   += xdpsq_stats->err;
271 	s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
272 }
273 
mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats * s,struct mlx5e_xdpsq_stats * xsksq_stats)274 static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
275 						  struct mlx5e_xdpsq_stats *xsksq_stats)
276 {
277 	s->tx_xsk_xmit  += xsksq_stats->xmit;
278 	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
279 	s->tx_xsk_inlnw += xsksq_stats->inlnw;
280 	s->tx_xsk_full  += xsksq_stats->full;
281 	s->tx_xsk_err   += xsksq_stats->err;
282 	s->tx_xsk_cqes  += xsksq_stats->cqes;
283 }
284 
mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats * s,struct mlx5e_rq_stats * xskrq_stats)285 static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
286 						  struct mlx5e_rq_stats *xskrq_stats)
287 {
288 	s->rx_xsk_packets                += xskrq_stats->packets;
289 	s->rx_xsk_bytes                  += xskrq_stats->bytes;
290 	s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
291 	s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
292 	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
293 	s->rx_xsk_csum_none              += xskrq_stats->csum_none;
294 	s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
295 	s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
296 	s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
297 	s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
298 	s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
299 	s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
300 	s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
301 	s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
302 	s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
303 	s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
304 	s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
305 	s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
306 	s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
307 }
308 
mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats * s,struct mlx5e_rq_stats * rq_stats)309 static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
310 						     struct mlx5e_rq_stats *rq_stats)
311 {
312 	s->rx_packets                 += rq_stats->packets;
313 	s->rx_bytes                   += rq_stats->bytes;
314 	s->rx_lro_packets             += rq_stats->lro_packets;
315 	s->rx_lro_bytes               += rq_stats->lro_bytes;
316 	s->rx_ecn_mark                += rq_stats->ecn_mark;
317 	s->rx_removed_vlan_packets    += rq_stats->removed_vlan_packets;
318 	s->rx_csum_none               += rq_stats->csum_none;
319 	s->rx_csum_complete           += rq_stats->csum_complete;
320 	s->rx_csum_complete_tail      += rq_stats->csum_complete_tail;
321 	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
322 	s->rx_csum_unnecessary        += rq_stats->csum_unnecessary;
323 	s->rx_csum_unnecessary_inner  += rq_stats->csum_unnecessary_inner;
324 	s->rx_xdp_drop                += rq_stats->xdp_drop;
325 	s->rx_xdp_redirect            += rq_stats->xdp_redirect;
326 	s->rx_wqe_err                 += rq_stats->wqe_err;
327 	s->rx_mpwqe_filler_cqes       += rq_stats->mpwqe_filler_cqes;
328 	s->rx_mpwqe_filler_strides    += rq_stats->mpwqe_filler_strides;
329 	s->rx_oversize_pkts_sw_drop   += rq_stats->oversize_pkts_sw_drop;
330 	s->rx_buff_alloc_err          += rq_stats->buff_alloc_err;
331 	s->rx_cqe_compress_blks       += rq_stats->cqe_compress_blks;
332 	s->rx_cqe_compress_pkts       += rq_stats->cqe_compress_pkts;
333 	s->rx_cache_reuse             += rq_stats->cache_reuse;
334 	s->rx_cache_full              += rq_stats->cache_full;
335 	s->rx_cache_empty             += rq_stats->cache_empty;
336 	s->rx_cache_busy              += rq_stats->cache_busy;
337 	s->rx_cache_waive             += rq_stats->cache_waive;
338 	s->rx_congst_umr              += rq_stats->congst_umr;
339 	s->rx_arfs_err                += rq_stats->arfs_err;
340 	s->rx_recover                 += rq_stats->recover;
341 #ifdef CONFIG_MLX5_EN_TLS
342 	s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
343 	s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
344 	s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
345 	s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
346 	s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
347 	s->rx_tls_resync_req_skip     += rq_stats->tls_resync_req_skip;
348 	s->rx_tls_resync_res_ok       += rq_stats->tls_resync_res_ok;
349 	s->rx_tls_resync_res_retry    += rq_stats->tls_resync_res_retry;
350 	s->rx_tls_resync_res_skip     += rq_stats->tls_resync_res_skip;
351 	s->rx_tls_err                 += rq_stats->tls_err;
352 #endif
353 }
354 
mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats * s,struct mlx5e_ch_stats * ch_stats)355 static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
356 						     struct mlx5e_ch_stats *ch_stats)
357 {
358 	s->ch_events      += ch_stats->events;
359 	s->ch_poll        += ch_stats->poll;
360 	s->ch_arm         += ch_stats->arm;
361 	s->ch_aff_change  += ch_stats->aff_change;
362 	s->ch_force_irq   += ch_stats->force_irq;
363 	s->ch_eq_rearm    += ch_stats->eq_rearm;
364 }
365 
mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats * s,struct mlx5e_sq_stats * sq_stats)366 static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
367 					       struct mlx5e_sq_stats *sq_stats)
368 {
369 	s->tx_packets               += sq_stats->packets;
370 	s->tx_bytes                 += sq_stats->bytes;
371 	s->tx_tso_packets           += sq_stats->tso_packets;
372 	s->tx_tso_bytes             += sq_stats->tso_bytes;
373 	s->tx_tso_inner_packets     += sq_stats->tso_inner_packets;
374 	s->tx_tso_inner_bytes       += sq_stats->tso_inner_bytes;
375 	s->tx_added_vlan_packets    += sq_stats->added_vlan_packets;
376 	s->tx_nop                   += sq_stats->nop;
377 	s->tx_mpwqe_blks            += sq_stats->mpwqe_blks;
378 	s->tx_mpwqe_pkts            += sq_stats->mpwqe_pkts;
379 	s->tx_queue_stopped         += sq_stats->stopped;
380 	s->tx_queue_wake            += sq_stats->wake;
381 	s->tx_queue_dropped         += sq_stats->dropped;
382 	s->tx_cqe_err               += sq_stats->cqe_err;
383 	s->tx_recover               += sq_stats->recover;
384 	s->tx_xmit_more             += sq_stats->xmit_more;
385 	s->tx_csum_partial_inner    += sq_stats->csum_partial_inner;
386 	s->tx_csum_none             += sq_stats->csum_none;
387 	s->tx_csum_partial          += sq_stats->csum_partial;
388 #ifdef CONFIG_MLX5_EN_TLS
389 	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
390 	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
391 	s->tx_tls_ooo               += sq_stats->tls_ooo;
392 	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
393 	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
394 	s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
395 	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
396 	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
397 	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
398 #endif
399 	s->tx_cqes                  += sq_stats->cqes;
400 }
401 
mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv * priv,struct mlx5e_sw_stats * s)402 static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
403 						struct mlx5e_sw_stats *s)
404 {
405 	int i;
406 
407 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
408 		return;
409 
410 	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);
411 
412 	if (priv->tx_ptp_opened) {
413 		for (i = 0; i < priv->max_opened_tc; i++) {
414 			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);
415 
416 			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
417 			barrier();
418 		}
419 	}
420 	if (priv->rx_ptp_opened) {
421 		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);
422 
423 		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
424 		barrier();
425 	}
426 }
427 
/* Fold the HTB (QoS) SQ stats into @s.
 *
 * The stats array can grow concurrently: max_qos_sqs must be read
 * before the pointer array so that every entry counted here was
 * already published by the writer. The load/store ordering below
 * guarantees that.
 */
static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}
446 
/* Rebuild the aggregated sw stats from scratch by summing all
 * per-channel counters, then the PTP and QoS queues.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	/* Totals are recomputed fully on each update, not incrementally. */
	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}
478 
/* Queue counters read via the QUERY_Q_COUNTER command (q_counter set). */
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};
482 
/* Queue counters read for the drop RQ's counter set. */
static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};
486 
487 #define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
488 #define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)
489 
MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)490 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
491 {
492 	int num_stats = 0;
493 
494 	if (priv->q_counter)
495 		num_stats += NUM_Q_COUNTERS;
496 
497 	if (priv->drop_rq_q_counter)
498 		num_stats += NUM_DROP_RQ_COUNTERS;
499 
500 	return num_stats;
501 }
502 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)503 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
504 {
505 	int i;
506 
507 	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
508 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
509 		       q_stats_desc[i].format);
510 
511 	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
512 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
513 		       drop_rq_stats_desc[i].format);
514 
515 	return idx;
516 }
517 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)518 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
519 {
520 	int i;
521 
522 	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
523 		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
524 						   q_stats_desc, i);
525 	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
526 		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
527 						   drop_rq_stats_desc, i);
528 	return idx;
529 }
530 
MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)531 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
532 {
533 	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
534 	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
535 	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
536 	int ret;
537 
538 	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
539 
540 	if (priv->q_counter) {
541 		MLX5_SET(query_q_counter_in, in, counter_set_id,
542 			 priv->q_counter);
543 		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
544 		if (!ret)
545 			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
546 							  out, out_of_buffer);
547 	}
548 
549 	if (priv->drop_rq_q_counter) {
550 		MLX5_SET(query_q_counter_in, in, counter_set_id,
551 			 priv->drop_rq_q_counter);
552 		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
553 		if (!ret)
554 			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
555 							    out, out_of_buffer);
556 	}
557 }
558 
559 #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
/* vNIC environment counter: packets discarded by receive steering. */
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};
564 
/* vNIC environment counter: internal RQ out-of-buffer events. */
static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};
569 
570 #define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
571 	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
572 	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
573 #define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
574 	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
575 	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
576 
/* Each vnic_env counter is exposed only when its capability bit is set. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
		NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}
582 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)583 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
584 {
585 	int i;
586 
587 	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
588 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
589 		       vnic_env_stats_steer_desc[i].format);
590 
591 	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
592 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
593 		       vnic_env_stats_dev_oob_desc[i].format);
594 	return idx;
595 }
596 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)597 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
598 {
599 	int i;
600 
601 	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
602 		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
603 						  vnic_env_stats_steer_desc, i);
604 
605 	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
606 		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
607 						  vnic_env_stats_dev_oob_desc, i);
608 	return idx;
609 }
610 
MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)611 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
612 {
613 	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
614 	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
615 	struct mlx5_core_dev *mdev = priv->mdev;
616 
617 	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
618 		return;
619 
620 	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
621 	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
622 }
623 
624 #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
/* Per-vport counters read via QUERY_VPORT_COUNTER; offsets are byte
 * offsets into the command's output layout.
 */
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
667 
668 #define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
669 
/* The vport group's counter count is fixed at compile time. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS;
}
674 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)675 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
676 {
677 	int i;
678 
679 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
680 		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
681 	return idx;
682 }
683 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)684 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
685 {
686 	int i;
687 
688 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
689 		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
690 						  vport_stats_desc, i);
691 	return idx;
692 }
693 
/* Refresh the cached vport counters from firmware. Best effort: the
 * command status is ignored and the previous snapshot is kept on failure.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}
703 
/* Byte offset of the high dword of a 64-bit counter inside the PPCNT
 * IEEE 802.3 counter-set layout.
 */
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
/* ethtool name -> PPCNT offset map for the IEEE 802.3 counter group. */
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
729 
/* Number of ethtool statistics exposed by the IEEE 802.3 group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}
734 
735 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
736 {
737 	int i;
738 
739 	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
740 		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
741 	return idx;
742 }
743 
744 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
745 {
746 	int i;
747 
748 	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
749 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
750 						  pport_802_3_stats_desc, i);
751 	return idx;
752 }
753 
/* Basic PPCNT groups are available either when the device has no PCAM
 * (older devices, assumed supported) or when the PCAM advertises the
 * ppcnt register.
 */
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
756 
/* Snapshot the IEEE 802.3 PPCNT group for local port 1 into the cached
 * pport stats. No-op when basic PPCNT groups are unsupported.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
773 
/* Read a 64-bit big-endian counter out of a raw PPCNT buffer, addressed
 * by counter-set layout name and field name.
 */
#define MLX5E_READ_CTR64_BE_F(ptr, set, c)		\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		MLX5_BYTE_OFF(ppcnt_reg,		\
			      counter_set.set.c##_high)))
778 
/* Query the IEEE 802.3 PPCNT group for local port 1 into
 * @ppcnt_ieee_802_3 (caller-provided ppcnt_reg-sized buffer).
 * Returns 0 on success, -EOPNOTSUPP when basic PPCNT groups are
 * unsupported, or the access_reg error.
 */
static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}
793 
/* Fill ethtool standard pause statistics from the IEEE 802.3 PPCNT
 * group. On query failure the fields are left untouched, which ethtool
 * reports as "not supported".
 */
void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}
812 
/* Fill ethtool standard PHY statistics from the IEEE 802.3 PPCNT group.
 * Fields are left untouched on query failure.
 */
void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}
827 
/* Fill ethtool standard MAC statistics from the IEEE 802.3 PPCNT group.
 * Fields are left untouched on query failure.
 */
void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

/* Shorthand: read one 802.3 counter from the snapshot just queried. */
#define RD(name)							\
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,				\
			      eth_802_3_cntrs_grp_data_layout,		\
			      name)

	mac_stats->FramesTransmittedOK	= RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK	= RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK	= RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK	= RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors	= RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors	= RD(a_frame_too_long_errors);
#undef RD
}
856 
/* Fill ethtool standard MAC-control statistics from the IEEE 802.3
 * PPCNT group. Fields are left untouched on query failure.
 */
void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}
879 
/* Byte offset of the high dword of a 64-bit counter inside the PPCNT
 * RFC 2863 counter-set layout.
 */
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
/* ethtool name -> PPCNT offset map for the RFC 2863 counter group. */
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
890 
/* Number of ethtool statistics exposed by the RFC 2863 group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}
895 
896 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
897 {
898 	int i;
899 
900 	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
901 		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
902 	return idx;
903 }
904 
905 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
906 {
907 	int i;
908 
909 	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
910 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
911 						  pport_2863_stats_desc, i);
912 	return idx;
913 }
914 
/* Snapshot the RFC 2863 PPCNT group for local port 1 into the cached
 * pport stats.
 * NOTE(review): unlike the 802_3/2819 update paths this does not gate on
 * MLX5_BASIC_PPCNT_SUPPORTED — confirm this group is always queryable.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
928 
/* Byte offset of the high dword of a 64-bit counter inside the PPCNT
 * RFC 2819 counter-set layout.
 */
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
/* ethtool name -> PPCNT offset map for the RFC 2819 (RMON) counter group. */
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
949 
/* Number of ethtool statistics exposed by the RFC 2819 group. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}
954 
955 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
956 {
957 	int i;
958 
959 	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
960 		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
961 	return idx;
962 }
963 
964 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
965 {
966 	int i;
967 
968 	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
969 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
970 						  pport_2819_stats_desc, i);
971 	return idx;
972 }
973 
/* Snapshot the RFC 2819 PPCNT group for local port 1 into the cached
 * pport stats. No-op when basic PPCNT groups are unsupported.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
990 
/* Packet-size buckets backing the RMON histogram below; must stay in
 * the same order as the hist[] assignments in mlx5e_stats_rmon_get().
 * Terminated by an empty entry as the ethtool API requires.
 */
static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{    0,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519,  2047 },
	{ 2048,  4095 },
	{ 4096,  8191 },
	{ 8192, 10239 },
	{}
};
1004 
/* Fill ethtool standard RMON statistics from a fresh RFC 2819 PPCNT
 * query (local port 1). On query failure everything is left untouched,
 * including *ranges, so ethtool reports the group as unsupported.
 */
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

/* Shorthand: read one RFC 2819 counter from the snapshot just queried. */
#define RD(name)						\
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters,		\
			      eth_2819_cntrs_grp_data_layout,	\
			      name)

	rmon->undersize_pkts	= RD(ether_stats_undersize_pkts);
	rmon->fragments		= RD(ether_stats_fragments);
	rmon->jabbers		= RD(ether_stats_jabbers);

	/* hist[] indices must match mlx5e_rmon_ranges order. */
	rmon->hist[0]		= RD(ether_stats_pkts64octets);
	rmon->hist[1]		= RD(ether_stats_pkts65to127octets);
	rmon->hist[2]		= RD(ether_stats_pkts128to255octets);
	rmon->hist[3]		= RD(ether_stats_pkts256to511octets);
	rmon->hist[4]		= RD(ether_stats_pkts512to1023octets);
	rmon->hist[5]		= RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6]		= RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7]		= RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8]		= RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9]		= RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}
1043 
/* Byte offset of the high dword of a 64-bit counter inside the PPCNT
 * physical-layer statistical counter-set layout.
 */
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
/* Counters present whenever the statistical group is supported. */
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

/* Per-lane corrected-bit counters; only exposed when the device also
 * advertises per_lane_error_counters.
 */
static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
1064 
/* Number of ethtool statistics exposed by the phy group; varies with
 * the device's PCAM feature bits.
 */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}
1081 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)1082 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
1083 {
1084 	struct mlx5_core_dev *mdev = priv->mdev;
1085 	int i;
1086 
1087 	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
1088 
1089 	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1090 		return idx;
1091 
1092 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1093 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
1094 		       pport_phy_statistical_stats_desc[i].format);
1095 
1096 	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1097 		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1098 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1099 			       pport_phy_statistical_err_lanes_stats_desc[i].format);
1100 
1101 	return idx;
1102 }
1103 
/* Copy the cached phy-group counters into the data array; returns the
 * next free index. Gating must mirror num_stats/fill_strs exactly.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}
1129 
/* Snapshot the physical-layer PPCNT group, plus the statistical group
 * when the device supports it, into the cached pport stats.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	/* Reuse the same request, only switching the group id. */
	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
1150 
/* Fill ethtool standard FEC statistics from a fresh physical-layer
 * statistical PPCNT query. Fields are left untouched when the group is
 * unsupported or the query fails.
 */
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats)
{
	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

	fec_stats->corrected_bits.total =
		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
				      phys_layer_statistical_cntrs,
				      phy_corrected_bits);
}
1173 
/* Byte offset of the high dword of a 64-bit counter inside the PPCNT
 * ethernet-extended counter-set layout.
 */
#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
/* ethtool name -> PPCNT offset map for the extended counter group. */
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)
1182 
/* Extended counters exist only when the device advertises
 * rx_buffer_fullness_counters.
 */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}
1190 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)1191 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1192 {
1193 	int i;
1194 
1195 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1196 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1197 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1198 			       pport_eth_ext_stats_desc[i].format);
1199 	return idx;
1200 }
1201 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)1202 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1203 {
1204 	int i;
1205 
1206 	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1207 		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1208 			data[idx++] =
1209 				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
1210 						    pport_eth_ext_stats_desc, i);
1211 	return idx;
1212 }
1213 
/* Snapshot the ethernet-extended PPCNT group into the cached pport
 * stats. No-op when the device lacks rx_buffer_fullness_counters.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
1230 
/* Byte offset of a 32-bit counter inside the MPCNT PCIe performance
 * counter-set layout.
 */
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
/* 32-bit PCIe performance counters (pcie_performance_group MCAM bit). */
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

/* Byte offset of the high dword of a 64-bit PCIe performance counter. */
#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
/* 64-bit PCIe performance counters (tx_overflow_buffer_pkt MCAM bit). */
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

/* PCIe stall counters (pcie_outbound_stalled MCAM bit). */
static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)
1254 
/* Number of PCIe ethtool statistics; each sub-group counts only when
 * its MCAM feature bit is set.
 */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}
1270 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)1271 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1272 {
1273 	int i;
1274 
1275 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1276 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1277 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1278 			       pcie_perf_stats_desc[i].format);
1279 
1280 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1281 		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1282 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1283 			       pcie_perf_stats_desc64[i].format);
1284 
1285 	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1286 		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1287 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
1288 			       pcie_perf_stall_stats_desc[i].format);
1289 	return idx;
1290 }
1291 
/* Copy cached PCIe counters into the data array; note the mix of 32-bit
 * and 64-bit reads, matching each sub-group's descriptor table.
 * Returns the next free index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}
1315 
/* Snapshot the PCIe performance MPCNT group into the cached pcie stats.
 * One query covers all three descriptor tables; gated only on the base
 * pcie_performance_group bit.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}
1331 
/* Byte offset of the high dword of a 64-bit counter inside the PPCNT
 * per-traffic-class counter-set layout.
 */
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

/* Per-TC counters; "%d" in the format is substituted with the priority. */
static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

/* Byte offset of the high dword of a 64-bit counter inside the PPCNT
 * per-traffic-class congestion counter-set layout.
 */
#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

/* Per-TC congestion counters; "%d" is substituted with the priority. */
static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
1353 
/* Per-TC counters are exposed only when the device has the sbcam
 * register; one entry per (counter, priority) pair.
 */
static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
1363 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)1364 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1365 {
1366 	struct mlx5_core_dev *mdev = priv->mdev;
1367 	int i, prio;
1368 
1369 	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1370 		return idx;
1371 
1372 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1373 		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1374 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1375 				pport_per_tc_prio_stats_desc[i].format, prio);
1376 		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1377 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1378 				pport_per_tc_congest_prio_stats_desc[i].format, prio);
1379 	}
1380 
1381 	return idx;
1382 }
1383 
/* Copy cached per-priority buffer/congestion counters into the data
 * array, in the same (prio, counter) order as fill_strs. Returns the
 * next free index.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}
1406 
/* Snapshot the per-traffic-class PPCNT group, one query per priority,
 * into the cached per-TC counters. No-op without the sbcam register.
 */
static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}
1427 
/* Per-TC congestion counters are exposed only when the device has the
 * sbcam register; one entry per (counter, priority) pair.
 */
static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}
1437 
/* Snapshot the per-traffic-class congestion PPCNT group, one query per
 * priority, into the cached counters. No-op without the sbcam register.
 */
static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}
1458 
/* Total per-priority buffer/congestion stats: both sub-groups combined. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}
1464 
/* Refresh both per-TC sub-groups backing this ethtool stats group. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}
1470 
/* Byte offset of the high dword of a 64-bit counter inside the PPCNT
 * per-priority counter-set layout.
 */
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
/* Per-priority traffic counters; "%d" is substituted with the priority. */
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
1483 
/* One stat per (counter, priority) pair; unconditional on this device. */
static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}
1488 
/* Emit one ethtool string per (priority, traffic counter) pair, with
 * the priority substituted into the "%d" of the format. Returns the
 * next free index.
 */
static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int prio, n;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++)
		for (n = 0; n < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; n++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[n].format,
				prio);

	return idx;
}
1503 
static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	int prio, ctr;

	/* Values must follow the exact order used by ..._fill_strings(). */
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++)
		for (ctr = 0; ctr < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; ctr++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc,
						    ctr);

	return idx;
}
1519 
/* Per-priority PFC counters; "%s" is "global" or "prio{i}". */
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

/* Pause-storm (stall watermark) counters; plain names, no format args. */
static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* Stall counters only exist when both PCAM pfcc_mask and stall_detect are set. */
#define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1538 
mlx5e_query_pfc_combined(struct mlx5e_priv * priv)1539 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1540 {
1541 	struct mlx5_core_dev *mdev = priv->mdev;
1542 	u8 pfc_en_tx;
1543 	u8 pfc_en_rx;
1544 	int err;
1545 
1546 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1547 		return 0;
1548 
1549 	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1550 
1551 	return err ? 0 : pfc_en_tx | pfc_en_rx;
1552 }
1553 
mlx5e_query_global_pause_combined(struct mlx5e_priv * priv)1554 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1555 {
1556 	struct mlx5_core_dev *mdev = priv->mdev;
1557 	u32 rx_pause;
1558 	u32 tx_pause;
1559 	int err;
1560 
1561 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1562 		return false;
1563 
1564 	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1565 
1566 	return err ? false : rx_pause | tx_pause;
1567 }
1568 
mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv * priv)1569 static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1570 {
1571 	return (mlx5e_query_global_pause_combined(priv) +
1572 		hweight8(mlx5e_query_pfc_combined(priv))) *
1573 		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1574 		NUM_PPORT_PFC_STALL_COUNTERS(priv);
1575 }
1576 
static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_prios;
	int ctr, prio;

	pfc_prios = mlx5e_query_pfc_combined(priv);

	/* One set of names per PFC-enabled priority. */
	for_each_set_bit(prio, &pfc_prios, NUM_PPORT_PRIO) {
		for (ctr = 0; ctr < NUM_PPORT_PER_PRIO_PFC_COUNTERS; ctr++) {
			char prio_str[ETH_GSTRING_LEN];

			snprintf(prio_str, sizeof(prio_str), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[ctr].format,
				prio_str);
		}
	}

	/* One "global" set when global pause is enabled. */
	if (mlx5e_query_global_pause_combined(priv))
		for (ctr = 0; ctr < NUM_PPORT_PER_PRIO_PFC_COUNTERS; ctr++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[ctr].format,
				"global");

	/* Pause-storm stall counters, when the device supports them. */
	for (ctr = 0; ctr < NUM_PPORT_PFC_STALL_COUNTERS(priv); ctr++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[ctr].format);

	return idx;
}
1608 
static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	unsigned long pfc_prios;
	int ctr, prio;

	pfc_prios = mlx5e_query_pfc_combined(priv);

	/* Must mirror the ordering of mlx5e_grp_per_prio_pfc_fill_strings(). */
	for_each_set_bit(prio, &pfc_prios, NUM_PPORT_PRIO)
		for (ctr = 0; ctr < NUM_PPORT_PER_PRIO_PFC_COUNTERS; ctr++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc,
						    ctr);

	/* Global pause counters live in the prio 0 counter set. */
	if (mlx5e_query_global_pause_combined(priv))
		for (ctr = 0; ctr < NUM_PPORT_PER_PRIO_PFC_COUNTERS; ctr++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc,
						    ctr);

	for (ctr = 0; ctr < NUM_PPORT_PFC_STALL_COUNTERS(priv); ctr++)
		data[idx++] = MLX5E_READ_CTR64_BE(&pport->per_prio_counters[0],
						  pport_pfc_stall_stats_desc,
						  ctr);

	return idx;
}
1639 
MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)1640 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
1641 {
1642 	return mlx5e_grp_per_prio_traffic_get_num_stats() +
1643 		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1644 }
1645 
/* Names: traffic section first, then PFC (same order as fill_stats). */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}
1652 
/* Values: traffic section first, then PFC (same order as fill_strs). */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}
1659 
/* Read the per-priority PPCNT counter sets from firmware into the cached
 * pport stats — one PPCNT register access per priority.
 */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		/* Only prio_tc varies between iterations of the query. */
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}
1681 
/* Port-module-event counters; the "offset" here is an index (scaled by
 * sizeof(u64)) into the mlx5_pme_stats arrays, not a register offset.
 */
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
1694 
/* PME group size: status counters followed by error counters. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}
1699 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)1700 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
1701 {
1702 	int i;
1703 
1704 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1705 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1706 
1707 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
1708 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1709 
1710 	return idx;
1711 }
1712 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)1713 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
1714 {
1715 	struct mlx5_pme_stats pme_stats;
1716 	int i;
1717 
1718 	mlx5_get_pme_stats(priv->mdev, &pme_stats);
1719 
1720 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1721 		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
1722 						   mlx5e_pme_status_desc, i);
1723 
1724 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
1725 		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
1726 						   mlx5e_pme_error_desc, i);
1727 
1728 	return idx;
1729 }
1730 
/* PME counters are read from the core on demand in fill_stats; no caching. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
1732 
/* Number of TLS offload counters, delegated to the TLS accel layer. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_tls_get_count(priv);
}
1737 
/* TLS names are written by the accel layer; it returns how many it wrote. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}
1742 
/* TLS values are written by the accel layer; it returns how many it wrote. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}
1747 
/* TLS counters are gathered on demand in fill_stats; nothing to refresh. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1749 
/* Per-channel RQ software counters ("rx%d_*" names). */
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};

/* Per-TC, per-channel SQ software counters ("tx%d_*" names). */
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

/* XDP-TX SQ counters for the XDP SQ attached to an RQ (XDP_TX path). */
static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* XDP SQ counters for the dedicated XDP_REDIRECT SQ. */
static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* AF_XDP RQ counters (subset of the regular RQ counters). */
static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

/* AF_XDP SQ counters (subset of the XDP SQ counters). */
static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

/* Per-channel (NAPI) event counters. */
static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

/* SQ counters for the dedicated PTP channel (one set per TC). */
static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

/* Channel counters for the PTP channel; names carry no format args. */
static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

/* Port-timestamping CQ counters for the PTP channel (one set per TC). */
static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};

/* RQ counters for the PTP channel (single RQ). */
static const struct counter_desc ptp_rq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};

/* SQ counters for HTB (QoS) queues; "%d" is the QoS queue id. */
static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS                ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)
1997 
/* One full set of SQ counters per allocated QoS (HTB) SQ. */
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
}
2003 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)2004 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
2005 {
2006 	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2007 	u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
2008 	int i, qid;
2009 
2010 	for (qid = 0; qid < max_qos_sqs; qid++)
2011 		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2012 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2013 				qos_sq_stats_desc[i].format, qid);
2014 
2015 	return idx;
2016 }
2017 
/* Copy the counters of every QoS SQ into @data, in qid order. */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
	/* Loading max_qos_sqs with acquire semantics before the array pointer
	 * guarantees stats[0..max_qos_sqs-1] have been published.
	 */
	stats = READ_ONCE(priv->htb.qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
	}

	return idx;
}
2037 
/* QoS SQ counters are read live in fill_stats; nothing to refresh here. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
2039 
MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)2040 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
2041 {
2042 	int num = NUM_PTP_CH_STATS;
2043 
2044 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2045 		return 0;
2046 
2047 	if (priv->tx_ptp_opened)
2048 		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2049 	if (priv->rx_ptp_opened)
2050 		num += NUM_PTP_RQ_STATS;
2051 
2052 	return num;
2053 }
2054 
MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)2055 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
2056 {
2057 	int i, tc;
2058 
2059 	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2060 		return idx;
2061 
2062 	for (i = 0; i < NUM_PTP_CH_STATS; i++)
2063 		sprintf(data + (idx++) * ETH_GSTRING_LEN,
2064 			ptp_ch_stats_desc[i].format);
2065 
2066 	if (priv->tx_ptp_opened) {
2067 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2068 			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2069 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
2070 					ptp_sq_stats_desc[i].format, tc);
2071 
2072 		for (tc = 0; tc < priv->max_opened_tc; tc++)
2073 			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2074 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
2075 					ptp_cq_stats_desc[i].format, tc);
2076 	}
2077 	if (priv->rx_ptp_opened) {
2078 		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2079 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
2080 				ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
2081 	}
2082 	return idx;
2083 }
2084 
/* Copy PTP channel counters into @data. The ordering (CH, then per-TC SQ,
 * then per-TC CQ, then RQ) must mirror the strings filled above.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
					     ptp_ch_stats_desc, i);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
							     ptp_sq_stats_desc, i);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
							     ptp_cq_stats_desc, i);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
						     ptp_rq_stats_desc, i);
	}
	return idx;
}
2118 
/* PTP counters are read live in fill_stats; nothing to refresh here. */
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
2120 
MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)2121 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
2122 {
2123 	int max_nch = priv->stats_nch;
2124 
2125 	return (NUM_RQ_STATS * max_nch) +
2126 	       (NUM_CH_STATS * max_nch) +
2127 	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
2128 	       (NUM_RQ_XDPSQ_STATS * max_nch) +
2129 	       (NUM_XDPSQ_STATS * max_nch) +
2130 	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2131 	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
2132 }
2133 
/* Emit per-channel counter names. Section order (CH; RQ/XSKRQ/RQ-XDPSQ per
 * channel; SQ per TC; XSKSQ/XDPSQ per channel) must match fill_stats below.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		/* XSK loops degenerate to zero iterations when !is_xsk. */
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	/* SQ names are numbered i + tc * max_nch to stay unique across TCs. */
	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}
2175 
/* Copy per-channel counter values into @data, in exactly the section order
 * used by the strings op above.
 */
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);
		/* XSK loops degenerate to zero iterations when !is_xsk. */
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}
2223 
MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels)2224 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2225 
/* Instantiate the descriptor object for each stats group implemented above.
 * The second macro argument carries per-group flags; groups passing
 * MLX5E_NDO_UPDATE_STATS presumably also get refreshed from the
 * ndo_get_stats path — confirm against the MLX5E_DEFINE_STATS_GRP macro
 * definition in the stats header.
 */
MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
/* File-local groups: referenced only through the NIC table below. */
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);
2243 
/* The stats groups order is opposite to the update_stats() order calls */
/* Master table of stats groups for the plain NIC profile; its length is
 * reported by mlx5e_nic_stats_grps_num() below.  Do not reorder casually:
 * the position of each entry fixes where its strings/values appear, and the
 * IPsec entries exist only when CONFIG_MLX5_EN_IPSEC is set.
 */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_sw),
	&MLX5E_STATS_GRP(ipsec_hw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
};
2268 
mlx5e_nic_stats_grps_num(struct mlx5e_priv * priv)2269 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
2270 {
2271 	return ARRAY_SIZE(mlx5e_nic_stats_grps);
2272 }
2273