/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/en_accel.h"

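/* Stats group infrastructure: the helpers below walk the profile's stats
 * groups to count, name, fill and refresh the counters exposed via ethtool.
 */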
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

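/* Refresh only the groups that opted in via MLX5E_NDO_UPDATE_STATS;
 * groups are walked in reverse order.
 */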
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}

/* Concrete NIC Stats */

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

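/* Fold the per-channel RQ/SQ/XDP/XSK/CH counters into the global software
 * stats.
 */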
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
		struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
		struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
		int j;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_ecn_mark	+= rq_stats->ecn_mark;
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop     += rq_stats->xdp_drop;
		s->rx_xdp_redirect += rq_stats->xdp_redirect;
		s->rx_xdp_tx_xmit  += xdpsq_stats->xmit;
		s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
		s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
		s->rx_xdp_tx_nops  += xdpsq_stats->nops;
		s->rx_xdp_tx_full  += xdpsq_stats->full;
		s->rx_xdp_tx_err   += xdpsq_stats->err;
		s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full  += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy  += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;
		s->rx_congst_umr  += rq_stats->congst_umr;
		s->rx_arfs_err    += rq_stats->arfs_err;
		s->rx_recover     += rq_stats->recover;
#ifdef CONFIG_MLX5_EN_TLS
		s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
		s->rx_tls_decrypted_bytes   += rq_stats->tls_decrypted_bytes;
		s->rx_tls_ctx               += rq_stats->tls_ctx;
		s->rx_tls_del               += rq_stats->tls_del;
		s->rx_tls_resync_req_pkt    += rq_stats->tls_resync_req_pkt;
		s->rx_tls_resync_req_start  += rq_stats->tls_resync_req_start;
		s->rx_tls_resync_req_end    += rq_stats->tls_resync_req_end;
		s->rx_tls_resync_req_skip   += rq_stats->tls_resync_req_skip;
		s->rx_tls_resync_res_ok     += rq_stats->tls_resync_res_ok;
		s->rx_tls_resync_res_skip   += rq_stats->tls_resync_res_skip;
		s->rx_tls_err               += rq_stats->tls_err;
#endif
		s->ch_events      += ch_stats->events;
		s->ch_poll        += ch_stats->poll;
		s->ch_arm         += ch_stats->arm;
		s->ch_aff_change  += ch_stats->aff_change;
		s->ch_force_irq   += ch_stats->force_irq;
		s->ch_eq_rearm    += ch_stats->eq_rearm;
		/* xdp redirect */
		s->tx_xdp_xmit    += xdpsq_red_stats->xmit;
		s->tx_xdp_mpwqe   += xdpsq_red_stats->mpwqe;
		s->tx_xdp_inlnw   += xdpsq_red_stats->inlnw;
		s->tx_xdp_nops	  += xdpsq_red_stats->nops;
		s->tx_xdp_full    += xdpsq_red_stats->full;
		s->tx_xdp_err     += xdpsq_red_stats->err;
		s->tx_xdp_cqes    += xdpsq_red_stats->cqes;
		/* AF_XDP zero-copy */
		s->rx_xsk_packets                += xskrq_stats->packets;
		s->rx_xsk_bytes                  += xskrq_stats->bytes;
		s->rx_xsk_csum_complete          += xskrq_stats->csum_complete;
		s->rx_xsk_csum_unnecessary       += xskrq_stats->csum_unnecessary;
		s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
		s->rx_xsk_csum_none              += xskrq_stats->csum_none;
		s->rx_xsk_ecn_mark               += xskrq_stats->ecn_mark;
		s->rx_xsk_removed_vlan_packets   += xskrq_stats->removed_vlan_packets;
		s->rx_xsk_xdp_drop               += xskrq_stats->xdp_drop;
		s->rx_xsk_xdp_redirect           += xskrq_stats->xdp_redirect;
		s->rx_xsk_wqe_err                += xskrq_stats->wqe_err;
		s->rx_xsk_mpwqe_filler_cqes      += xskrq_stats->mpwqe_filler_cqes;
		s->rx_xsk_mpwqe_filler_strides   += xskrq_stats->mpwqe_filler_strides;
		s->rx_xsk_oversize_pkts_sw_drop  += xskrq_stats->oversize_pkts_sw_drop;
		s->rx_xsk_buff_alloc_err         += xskrq_stats->buff_alloc_err;
		s->rx_xsk_cqe_compress_blks      += xskrq_stats->cqe_compress_blks;
		s->rx_xsk_cqe_compress_pkts      += xskrq_stats->cqe_compress_pkts;
		s->rx_xsk_congst_umr             += xskrq_stats->congst_umr;
		s->rx_xsk_arfs_err               += xskrq_stats->arfs_err;
		s->tx_xsk_xmit                   += xsksq_stats->xmit;
		s->tx_xsk_mpwqe                  += xsksq_stats->mpwqe;
		s->tx_xsk_inlnw                  += xsksq_stats->inlnw;
		s->tx_xsk_full                   += xsksq_stats->full;
		s->tx_xsk_err                    += xsksq_stats->err;
		s->tx_xsk_cqes                   += xsksq_stats->cqes;

		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
			s->tx_nop               += sq_stats->nop;
			s->tx_mpwqe_blks        += sq_stats->mpwqe_blks;
			s->tx_mpwqe_pkts        += sq_stats->mpwqe_pkts;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_cqe_err		+= sq_stats->cqe_err;
			s->tx_recover		+= sq_stats->recover;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none		+= sq_stats->csum_none;
			s->tx_csum_partial	+= sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
			s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
			s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
			s->tx_tls_ctx               += sq_stats->tls_ctx;
			s->tx_tls_ooo               += sq_stats->tls_ooo;
			s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
			s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
			s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
			s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
			s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
			s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
#endif
			s->tx_cqes		+= sq_stats->cqes;

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
}

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS		ARRAY_SIZE(drop_rq_stats_desc)

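/* Q counters are exposed only when the corresponding counter set was
 * allocated (regular and/or drop RQ).
 */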
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	int ret;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	if (priv->q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
							  out, out_of_buffer);
	}

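	/* The drop RQ counter reports, via its out_of_buffer field, packets
	 * that arrived while the interface was down.
	 */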
	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
		VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
		VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
		NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

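/* Assume PPCNT is supported when the device has no PCAM register;
 * otherwise honor the PCAM ppcnt bit.
 */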
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define MLX5E_READ_CTR64_BE_F(ptr, c)			\
	be64_to_cpu(*(__be64 *)((char *)ptr +		\
		MLX5_BYTE_OFF(ppcnt_reg,		\
			counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
			     sz, MLX5_REG_PPCNT, 0, 0);

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      a_pause_mac_ctrl_frames_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS	ARRAY_SIZE(pport_eth_ext_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64	ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS	ARRAY_SIZE(pcie_perf_stall_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS	ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

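/* Per-TC and per-TC-congestion port counters are available only when the
 * device exposes the SBCAM register.
 */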
static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
		mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

1289 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1290 	/* %s is "global" or "prio{i}" */
1291 	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1292 	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1293 	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1294 	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1295 	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
1296 };
1297 
1298 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
1299 	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
1300 	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
1301 };
1302 
1303 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS		ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
1304 #define NUM_PPORT_PFC_STALL_COUNTERS(priv)	(ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1305 						 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1306 						 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
1307 
mlx5e_query_pfc_combined(struct mlx5e_priv * priv)1308 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1309 {
1310 	struct mlx5_core_dev *mdev = priv->mdev;
1311 	u8 pfc_en_tx;
1312 	u8 pfc_en_rx;
1313 	int err;
1314 
1315 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1316 		return 0;
1317 
1318 	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1319 
1320 	return err ? 0 : pfc_en_tx | pfc_en_rx;
1321 }
1322 
mlx5e_query_global_pause_combined(struct mlx5e_priv * priv)1323 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1324 {
1325 	struct mlx5_core_dev *mdev = priv->mdev;
1326 	u32 rx_pause;
1327 	u32 tx_pause;
1328 	int err;
1329 
1330 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1331 		return false;
1332 
1333 	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1334 
1335 	return err ? false : rx_pause | tx_pause;
1336 }
1337 
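/* One set of PFC counters per enabled priority, one global set when
 * global pause is enabled, plus the stall counters when supported.
 */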
1338 static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1339 {
1340 	return (mlx5e_query_global_pause_combined(priv) +
1341 		hweight8(mlx5e_query_pfc_combined(priv))) *
1342 		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1343 		NUM_PPORT_PFC_STALL_COUNTERS(priv);
1344 }
1345 
1346 static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
1347 					       u8 *data,
1348 					       int idx)
1349 {
1350 	unsigned long pfc_combined;
1351 	int i, prio;
1352 
1353 	pfc_combined = mlx5e_query_pfc_combined(priv);
1354 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1355 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1356 			char pfc_string[ETH_GSTRING_LEN];
1357 
1358 			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1359 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1360 				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
1361 		}
1362 	}
1363 
1364 	if (mlx5e_query_global_pause_combined(priv)) {
1365 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1366 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1367 				pport_per_prio_pfc_stats_desc[i].format, "global");
1368 		}
1369 	}
1370 
1371 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1372 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
1373 		       pport_pfc_stall_stats_desc[i].format);
1374 
1375 	return idx;
1376 }
1377 
1378 static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1379 					     u64 *data,
1380 					     int idx)
1381 {
1382 	unsigned long pfc_combined;
1383 	int i, prio;
1384 
1385 	pfc_combined = mlx5e_query_pfc_combined(priv);
1386 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1387 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1388 			data[idx++] =
1389 				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1390 						    pport_per_prio_pfc_stats_desc, i);
1391 		}
1392 	}
1393 
1394 	if (mlx5e_query_global_pause_combined(priv)) {
1395 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1396 			data[idx++] =
1397 				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1398 						    pport_per_prio_pfc_stats_desc, i);
1399 		}
1400 	}
1401 
1402 	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1403 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1404 						  pport_pfc_stall_stats_desc, i);
1405 
1406 	return idx;
1407 }
1408 
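/* The per_prio group is the concatenation of the per-prio traffic and
 * PFC sub-groups defined above.
 */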
1409 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
1410 {
1411 	return mlx5e_grp_per_prio_traffic_get_num_stats() +
1412 		mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1413 }
1414 
1415 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
1416 {
1417 	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
1418 	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
1419 	return idx;
1420 }
1421 
1422 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
1423 {
1424 	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
1425 	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
1426 	return idx;
1427 }
1428 
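/* Refresh the cached per-priority counters: one PPCNT register query
 * per priority.
 */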
1429 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
1430 {
1431 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1432 	struct mlx5_core_dev *mdev = priv->mdev;
1433 	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1434 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1435 	int prio;
1436 	void *out;
1437 
1438 	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1439 		return;
1440 
1441 	MLX5_SET(ppcnt_reg, in, local_port, 1);
1442 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
1443 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1444 		out = pstats->per_prio_counters[prio];
1445 		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1446 		mlx5_core_access_reg(mdev, in, sz, out, sz,
1447 				     MLX5_REG_PPCNT, 0, 0);
1448 	}
1449 }
1450 
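/* Port module event (PME) counters; each offset selects a u64 slot in
 * the status/error arrays filled by mlx5_get_pme_stats().
 */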
1451 static const struct counter_desc mlx5e_pme_status_desc[] = {
1452 	{ "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
1453 };
1454 
1455 static const struct counter_desc mlx5e_pme_error_desc[] = {
1456 	{ "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1457 	{ "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1458 	{ "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
1459 };
1460 
1461 #define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
1462 #define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
1463 
1464 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
1465 {
1466 	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
1467 }
1468 
1469 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
1470 {
1471 	int i;
1472 
1473 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1474 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1475 
1476 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
1477 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1478 
1479 	return idx;
1480 }
1481 
1482 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
1483 {
1484 	struct mlx5_pme_stats pme_stats;
1485 	int i;
1486 
1487 	mlx5_get_pme_stats(priv->mdev, &pme_stats);
1488 
1489 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1490 		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
1491 						   mlx5e_pme_status_desc, i);
1492 
1493 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
1494 		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
1495 						   mlx5e_pme_error_desc, i);
1496 
1497 	return idx;
1498 }
1499 
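/* PME counters are fetched directly in fill_stats() above, so there is
 * nothing to refresh here.
 */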
1500 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
1501 
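/* TLS offload counters are delegated to the mlx5e_tls_* helpers. */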
1502 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
1503 {
1504 	return mlx5e_tls_get_count(priv);
1505 }
1506 
1507 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
1508 {
1509 	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
1510 }
1511 
1512 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
1513 {
1514 	return idx + mlx5e_tls_get_stats(priv, data + idx);
1515 }
1516 
1517 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1518 
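/* Per-ring software counters (RQ/SQ/XDP/XSK/channel), instantiated per
 * channel by the "channels" group below.
 */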
1519 static const struct counter_desc rq_stats_desc[] = {
1520 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
1521 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
1522 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
1523 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
1524 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
1525 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1526 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1527 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
1528 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
1529 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1530 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
1531 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
1532 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
1533 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1534 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
1535 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1536 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1537 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1538 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1539 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1540 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1541 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
1542 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
1543 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
1544 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
1545 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
1546 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
1547 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
1548 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
1549 #ifdef CONFIG_MLX5_EN_TLS
1550 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
1551 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
1552 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
1553 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
1554 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
1555 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
1556 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
1557 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
1558 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
1559 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
1560 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
1561 #endif
1562 };
1563 
1564 static const struct counter_desc sq_stats_desc[] = {
1565 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
1566 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
1567 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
1568 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
1569 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
1570 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
1571 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
1572 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
1573 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
1574 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
1575 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
1576 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
1577 #ifdef CONFIG_MLX5_EN_TLS
1578 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
1579 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
1580 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
1581 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
1582 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
1583 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
1584 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
1585 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
1586 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
1587 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
1588 #endif
1589 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
1590 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
1591 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
1592 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
1593 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
1594 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
1595 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
1596 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
1597 };
1598 
1599 static const struct counter_desc rq_xdpsq_stats_desc[] = {
1600 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1601 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1602 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1603 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
1604 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1605 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1606 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
1607 };
1608 
1609 static const struct counter_desc xdpsq_stats_desc[] = {
1610 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1611 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1612 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1613 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
1614 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1615 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1616 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
1617 };
1618 
1619 static const struct counter_desc xskrq_stats_desc[] = {
1620 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
1621 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
1622 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
1623 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1624 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1625 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
1626 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
1627 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1628 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
1629 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1630 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
1631 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1632 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1633 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1634 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1635 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1636 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1637 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
1638 	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
1639 };
1640 
1641 static const struct counter_desc xsksq_stats_desc[] = {
1642 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
1643 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
1644 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
1645 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
1646 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
1647 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
1648 };
1649 
1650 static const struct counter_desc ch_stats_desc[] = {
1651 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
1652 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
1653 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
1654 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
1655 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
1656 	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
1657 };
1658 
1659 #define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
1660 #define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
1661 #define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
1662 #define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
1663 #define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
1664 #define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
1665 #define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
1666 
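/* Counts use max_nch and max_opened_tc rather than the current channel
 * configuration, so the set of exported counters does not change at
 * runtime.
 */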
1667 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
1668 {
1669 	int max_nch = priv->max_nch;
1670 
1671 	return (NUM_RQ_STATS * max_nch) +
1672 	       (NUM_CH_STATS * max_nch) +
1673 	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
1674 	       (NUM_RQ_XDPSQ_STATS * max_nch) +
1675 	       (NUM_XDPSQ_STATS * max_nch) +
1676 	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
1677 	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
1678 }
1679 
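/* String order must match the fill order in the stats callback below:
 * ch, then rq/xskrq/rq_xdpsq per channel, then sq per tc, then
 * xsksq/xdpsq per channel.
 */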
1680 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
1681 {
1682 	bool is_xsk = priv->xsk.ever_used;
1683 	int max_nch = priv->max_nch;
1684 	int i, j, tc;
1685 
1686 	for (i = 0; i < max_nch; i++)
1687 		for (j = 0; j < NUM_CH_STATS; j++)
1688 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1689 				ch_stats_desc[j].format, i);
1690 
1691 	for (i = 0; i < max_nch; i++) {
1692 		for (j = 0; j < NUM_RQ_STATS; j++)
1693 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1694 				rq_stats_desc[j].format, i);
1695 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1696 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1697 				xskrq_stats_desc[j].format, i);
1698 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1699 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1700 				rq_xdpsq_stats_desc[j].format, i);
1701 	}
1702 
1703 	for (tc = 0; tc < priv->max_opened_tc; tc++)
1704 		for (i = 0; i < max_nch; i++)
1705 			for (j = 0; j < NUM_SQ_STATS; j++)
1706 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
1707 					sq_stats_desc[j].format,
1708 					i + tc * max_nch);
1709 
1710 	for (i = 0; i < max_nch; i++) {
1711 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1712 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1713 				xsksq_stats_desc[j].format, i);
1714 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
1715 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
1716 				xdpsq_stats_desc[j].format, i);
1717 	}
1718 
1719 	return idx;
1720 }
1721 
1722 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
1723 {
1724 	bool is_xsk = priv->xsk.ever_used;
1725 	int max_nch = priv->max_nch;
1726 	int i, j, tc;
1727 
1728 	for (i = 0; i < max_nch; i++)
1729 		for (j = 0; j < NUM_CH_STATS; j++)
1730 			data[idx++] =
1731 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
1732 						     ch_stats_desc, j);
1733 
1734 	for (i = 0; i < max_nch; i++) {
1735 		for (j = 0; j < NUM_RQ_STATS; j++)
1736 			data[idx++] =
1737 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
1738 						     rq_stats_desc, j);
1739 		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
1740 			data[idx++] =
1741 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
1742 						     xskrq_stats_desc, j);
1743 		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
1744 			data[idx++] =
1745 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
1746 						     rq_xdpsq_stats_desc, j);
1747 	}
1748 
1749 	for (tc = 0; tc < priv->max_opened_tc; tc++)
1750 		for (i = 0; i < max_nch; i++)
1751 			for (j = 0; j < NUM_SQ_STATS; j++)
1752 				data[idx++] =
1753 					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
1754 							     sq_stats_desc, j);
1755 
1756 	for (i = 0; i < max_nch; i++) {
1757 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
1758 			data[idx++] =
1759 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
1760 						     xsksq_stats_desc, j);
1761 		for (j = 0; j < NUM_XDPSQ_STATS; j++)
1762 			data[idx++] =
1763 				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
1764 						     xdpsq_stats_desc, j);
1765 	}
1766 
1767 	return idx;
1768 }
1769 
1770 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
1771 
1772 MLX5E_DEFINE_STATS_GRP(sw, 0);
1773 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
1774 MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
1775 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
1776 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
1777 MLX5E_DEFINE_STATS_GRP(2863, 0);
1778 MLX5E_DEFINE_STATS_GRP(2819, 0);
1779 MLX5E_DEFINE_STATS_GRP(phy, 0);
1780 MLX5E_DEFINE_STATS_GRP(pcie, 0);
1781 MLX5E_DEFINE_STATS_GRP(per_prio, 0);
1782 MLX5E_DEFINE_STATS_GRP(pme, 0);
1783 MLX5E_DEFINE_STATS_GRP(channels, 0);
1784 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
1785 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
1786 static MLX5E_DEFINE_STATS_GRP(tls, 0);
1787 
1788 /* The stats groups are listed here in the reverse order of their update_stats() calls */
1789 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
1790 	&MLX5E_STATS_GRP(sw),
1791 	&MLX5E_STATS_GRP(qcnt),
1792 	&MLX5E_STATS_GRP(vnic_env),
1793 	&MLX5E_STATS_GRP(vport),
1794 	&MLX5E_STATS_GRP(802_3),
1795 	&MLX5E_STATS_GRP(2863),
1796 	&MLX5E_STATS_GRP(2819),
1797 	&MLX5E_STATS_GRP(phy),
1798 	&MLX5E_STATS_GRP(eth_ext),
1799 	&MLX5E_STATS_GRP(pcie),
1800 	&MLX5E_STATS_GRP(per_prio),
1801 	&MLX5E_STATS_GRP(pme),
1802 #ifdef CONFIG_MLX5_EN_IPSEC
1803 	&MLX5E_STATS_GRP(ipsec_sw),
1804 	&MLX5E_STATS_GRP(ipsec_hw),
1805 #endif
1806 	&MLX5E_STATS_GRP(tls),
1807 	&MLX5E_STATS_GRP(channels),
1808 	&MLX5E_STATS_GRP(per_port_buff_congest),
1809 };
1810 
1811 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
1812 {
1813 	return ARRAY_SIZE(mlx5e_nic_stats_grps);
1814 }
1815