// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "mana.h"

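/* Per-port counters reported via "ethtool -S"; offsets index into struct mana_ethtool_stats. */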
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} mana_eth_stats[] = {
	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
};

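/* Number of stats strings: port-wide counters plus 8 per queue (5 RX, 3 TX). */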
static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
}

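/* Emit the stats name table in the same order mana_get_ethtool_stats() fills values. */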
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	u8 *p = data;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) {
		memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
	}

	for (i = 0; i < num_queues; i++) {
		sprintf(p, "rx_%d_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_xdp_drop", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_xdp_tx", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_xdp_redirect", i);
		p += ETH_GSTRING_LEN;
	}

	for (i = 0; i < num_queues; i++) {
		sprintf(p, "tx_%d_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_xdp_xmit", i);
		p += ETH_GSTRING_LEN;
	}
}

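/* Copy out port-wide and per-queue counters; per-queue values are read under u64_stats sync. */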
static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	int q, i = 0;

	if (!apc->port_is_up)
		return;

	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
	}
}

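/* Only ETHTOOL_GRXRINGS is supported: report the number of RX rings for RSS. */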
static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
			  u32 *rules)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = apc->num_queues;
		return 0;
	}

	return -EOPNOTSUPP;
}

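/* RSS (Toeplitz) hash key size in bytes. */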
static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
	return MANA_HASH_KEY_SIZE;
}

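/* Number of entries in the RSS indirection table. */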
static u32 mana_rss_indir_size(struct net_device *ndev)
{
	return MANA_INDIRECT_TABLE_SIZE;
}

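/* Report the current RSS hash function, indirection table and hash key. */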
static int mana_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (indir) {
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
			indir[i] = apc->indir_table[i];
	}

	if (key)
		memcpy(key, apc->hashkey, MANA_HASH_KEY_SIZE);

	return 0;
}

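/* Apply a new RSS configuration; restore the previous table/key if the device rejects it. */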
static int mana_set_rxfh(struct net_device *ndev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u32 save_table[MANA_INDIRECT_TABLE_SIZE];
	u8 save_key[MANA_HASH_KEY_SIZE];
	int i, err;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (indir) {
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
			if (indir[i] >= apc->num_queues)
				return -EINVAL;

		update_table = true;
		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = indir[i];
		}
	}

	if (key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

	if (err) { /* recover to original values */
		if (update_table) {
			for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

	return err;
}

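/* Report channel limits; only combined (TX+RX) channels are exposed. */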
static void mana_get_channels(struct net_device *ndev,
			      struct ethtool_channels *channel)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	channel->max_combined = apc->max_queues;
	channel->combined_count = apc->num_queues;
}

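/* Change the queue count by detaching and re-attaching the port; roll back on failure. */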
static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err, err2;

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		return err;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (!err)
		return 0;

	netdev_err(ndev, "mana_attach failed: %d\n", err);

	/* Try to roll it back to the old configuration. */
	apc->num_queues = old_count;
	err2 = mana_attach(ndev);
	if (err2)
		netdev_err(ndev, "mana re-attach failed: %d\n", err2);

	return err;
}

const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats = mana_get_ethtool_stats,
	.get_sset_count = mana_get_sset_count,
	.get_strings = mana_get_strings,
	.get_rxnfc = mana_get_rxnfc,
	.get_rxfh_key_size = mana_get_rxfh_key_size,
	.get_rxfh_indir_size = mana_rss_indir_size,
	.get_rxfh = mana_get_rxfh,
	.set_rxfh = mana_set_rxfh,
	.get_channels = mana_get_channels,
	.set_channels = mana_set_channels,
};