// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* ethtool support for ice */

#include "ice.h"

struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

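/* Helper macro for defining an ice_stats table entry: _name is the string
 * reported by ethtool, while _type and _stat locate the counter via
 * offsetof()/FIELD_SIZEOF().
 */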
#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define ICE_VSI_STAT(_name, _stat) \
		ICE_STAT(struct ice_vsi, _name, _stat)
#define ICE_PF_STAT(_name, _stat) \
		ICE_STAT(struct ice_pf, _name, _stat)

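/**
 * ice_q_stats_len - total count of per queue statistics for a netdev
 * @netdev: network interface device structure
 *
 * Returns the number of u64 counters contributed by all allocated Tx and
 * Rx queues of the netdev's VSI.
 */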
static int ice_q_stats_len(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
		(sizeof(struct ice_q_stats) / sizeof(u64)));
}

#define ICE_PF_STATS_LEN	ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN	ARRAY_SIZE(ice_gstrings_vsi_stats)

#define ICE_ALL_STATS_LEN(n)	(ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
				 ice_q_stats_len(n))

static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they aren't. This device is capable of supporting multiple
 * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
 * netdevs whereas the PF_STATs are for the physical function that's
 * hosting these netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
static struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
	ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
	ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
	ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
	ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
	ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
	ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
	ICE_PF_STAT("tx_size_64", stats.tx_size_64),
	ICE_PF_STAT("rx_size_64", stats.rx_size_64),
	ICE_PF_STAT("tx_size_127", stats.tx_size_127),
	ICE_PF_STAT("rx_size_127", stats.rx_size_127),
	ICE_PF_STAT("tx_size_255", stats.tx_size_255),
	ICE_PF_STAT("rx_size_255", stats.rx_size_255),
	ICE_PF_STAT("tx_size_511", stats.tx_size_511),
	ICE_PF_STAT("rx_size_511", stats.rx_size_511),
	ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
	ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
	ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
	ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
	ICE_PF_STAT("tx_size_big", stats.tx_size_big),
	ICE_PF_STAT("rx_size_big", stats.rx_size_big),
	ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
	ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
	ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
	ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
	ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
	ICE_PF_STAT("rx_undersize", stats.rx_undersize),
	ICE_PF_STAT("rx_fragments", stats.rx_fragments),
	ICE_PF_STAT("rx_oversize", stats.rx_oversize),
	ICE_PF_STAT("rx_jabber", stats.rx_jabber),
	ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
	ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
	ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
	ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
	ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
	ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
	ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};

static u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
	QINT_TQCTL(0),
	QINT_RQCTL(0),
	PFINT_OICR_ENA,
	QRX_ITR(0),
};

/**
 * ice_nvm_version_str - format the NVM version string
 * @hw: ptr to the hardware info
 */
static char *ice_nvm_version_str(struct ice_hw *hw)
{
	static char buf[ICE_ETHTOOL_FWVER_LEN];
	u8 ver, patch;
	u32 full_ver;
	u16 build;

	full_ver = hw->nvm.oem_ver;
	ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
	build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
		      ICE_OEM_VER_BUILD_SHIFT);
	patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);

	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
		 (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
		 (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
		 hw->nvm.eetrack, ver, build, patch);

	return buf;
}

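/**
 * ice_get_drvinfo - report driver and device information
 * @netdev: network interface device structure
 * @drvinfo: ethtool driver info structure to fill
 */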
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

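/**
 * ice_get_regs_len - report the size of the register dump buffer
 * @netdev: network interface device structure
 */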
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
	return sizeof(ice_regs_dump_list);
}

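/**
 * ice_get_regs - read the registers in ice_regs_dump_list into a buffer
 * @netdev: network interface device structure
 * @regs: ethtool regs structure; version is set for user space
 * @p: output buffer, at least ice_get_regs_len() bytes long
 */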
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 *regs_buf = (u32 *)p;
	int i;

	regs->version = 1;

	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
}

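/**
 * ice_get_msglevel - report the driver's message level
 * @netdev: network interface device structure
 */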
static u32 ice_get_msglevel(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (pf->hw.debug_mask)
		netdev_info(netdev, "hw debug_mask: 0x%llX\n",
			    pf->hw.debug_mask);
#endif /* !CONFIG_DYNAMIC_DEBUG */

	return pf->msg_enable;
}

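/**
 * ice_set_msglevel - set the driver's message level
 * @netdev: network interface device structure
 * @data: message level bitmap; values with ICE_DBG_USER set are stored
 *        as the hardware debug mask unless CONFIG_DYNAMIC_DEBUG is set
 */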
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

#ifndef CONFIG_DYNAMIC_DEBUG
	if (ICE_DBG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
#else
	pf->msg_enable = data;
#endif /* !CONFIG_DYNAMIC_DEBUG */
}

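/**
 * ice_get_strings - write the requested string set into a buffer
 * @netdev: network interface device structure
 * @stringset: string set ID (only ETH_SS_STATS is supported)
 * @data: buffer of ETH_GSTRING_LEN sized entries to fill
 */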
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 ice_gstrings_vsi_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_alloc_txq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "tx-queue-%u.tx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_alloc_rxq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "rx-queue-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "port.%s",
				 ice_gstrings_pf_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}

		break;
	default:
		break;
	}
}

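/**
 * ice_get_sset_count - report the number of strings in a string set
 * @netdev: network interface device structure
 * @sset: string set ID
 */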
static int ice_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		/* The number (and order) of strings reported *must* remain
		 * constant for a given netdevice. This function must not
		 * report a different number based on run time parameters
		 * (such as the number of queues in use, or the setting of
		 * a private ethtool flag). This is due to the nature of the
		 * ethtool stats API.
		 *
		 * User space programs such as ethtool must make 3 separate
		 * ioctl requests, one for size, one for the strings, and
		 * finally one for the stats. Since these cross into
		 * user space, changes to the number or size could result in
		 * undefined memory access or incorrect string<->value
		 * correlations for statistics.
		 *
		 * Even if it appears to be safe, changes to the size or
		 * order of strings will suffer from race conditions and are
		 * not safe.
		 */
		return ICE_ALL_STATS_LEN(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

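/**
 * ice_get_ethtool_stats - copy VSI, per queue, and PF stats into a buffer
 * @netdev: network interface device structure
 * @stats: ethtool stats command structure (unused)
 * @data: output array; its length and layout must match the count and
 *        order reported for ETH_SS_STATS
 */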
static void
ice_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats __always_unused *stats, u64 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	unsigned int j = 0;
	int i = 0;
	char *p;

	for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
		p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
		data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate per queue stats */
	rcu_read_lock();

	ice_for_each_alloc_txq(vsi, j) {
		ring = READ_ONCE(vsi->tx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	ice_for_each_alloc_rxq(vsi, j) {
		ring = READ_ONCE(vsi->rx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	rcu_read_unlock();

	if (vsi->type != ICE_VSI_PF)
		return;

	for (j = 0; j < ICE_PF_STATS_LEN; j++) {
		p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
		data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}

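/**
 * ice_get_link_ksettings - report link speed, duplex, and media settings
 * @netdev: network interface device structure
 * @ks: ethtool ksettings structure to fill
 */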
static int
ice_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	bool link_up;

	hw_link_info = &vsi->port_info->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	ethtool_link_ksettings_add_link_mode(ks, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ks, advertising,
					     10000baseT_Full);

	/* set speed and duplex */
	if (link_up) {
		switch (hw_link_info->link_speed) {
		case ICE_AQ_LINK_SPEED_100MB:
			ks->base.speed = SPEED_100;
			break;
		case ICE_AQ_LINK_SPEED_2500MB:
			ks->base.speed = SPEED_2500;
			break;
		case ICE_AQ_LINK_SPEED_5GB:
			ks->base.speed = SPEED_5000;
			break;
		case ICE_AQ_LINK_SPEED_10GB:
			ks->base.speed = SPEED_10000;
			break;
		case ICE_AQ_LINK_SPEED_25GB:
			ks->base.speed = SPEED_25000;
			break;
		case ICE_AQ_LINK_SPEED_40GB:
			ks->base.speed = SPEED_40000;
			break;
		default:
			ks->base.speed = SPEED_UNKNOWN;
			break;
		}

		ks->base.duplex = DUPLEX_FULL;
	} else {
		ks->base.speed = SPEED_UNKNOWN;
		ks->base.duplex = DUPLEX_UNKNOWN;
	}

	/* set autoneg settings */
	ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* set media type settings */
	switch (vsi->port_info->phy.media_type) {
	case ICE_MEDIA_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
		break;
	case ICE_MEDIA_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
		break;
	case ICE_MEDIA_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Backplane);
		ks->base.port = PORT_NONE;
		break;
	case ICE_MEDIA_DA:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		break;
	default:
		ks->base.port = PORT_OTHER;
		break;
	}

	/* flow control is symmetric and always supported */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	switch (vsi->port_info->fc.req_mode) {
	case ICE_FC_FULL:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		break;
	case ICE_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_PFC:
	default:
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	}

	return 0;
}

/**
 * ice_get_rxnfc - command to get Rx flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
 *
 * Returns success if the command is supported.
 */
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 __always_unused *rule_locs)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = vsi->rss_size;
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

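/**
 * ice_get_ringparam - report the current Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill
 */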
static void
ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	ring->rx_max_pending = ICE_MAX_NUM_DESC;
	ring->tx_max_pending = ICE_MAX_NUM_DESC;
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;
	ring->rx_mini_pending = ICE_MIN_NUM_DESC;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_jumbo_pending = 0;
}

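/**
 * ice_set_ringparam - change the Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: requested ring parameters
 *
 * New rings are allocated before the old ones are freed so the change
 * can be unwound on allocation failure; if the interface is running it
 * is briefly brought down to swap the rings in.
 */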
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int i, timeout = 50, err = 0;
	u32 new_rx_cnt, new_tx_cnt;

	if (ring->tx_pending > ICE_MAX_NUM_DESC ||
	    ring->tx_pending < ICE_MIN_NUM_DESC ||
	    ring->rx_pending > ICE_MAX_NUM_DESC ||
	    ring->rx_pending < ICE_MIN_NUM_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
			   ring->tx_pending, ring->rx_pending,
			   ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
		return -EINVAL;
	}

	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);

	/* if nothing to do return success */
	if (new_tx_cnt == vsi->tx_rings[0]->count &&
	    new_rx_cnt == vsi->rx_rings[0]->count) {
		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
		return 0;
	}

	while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		for (i = 0; i < vsi->alloc_txq; i++)
			vsi->tx_rings[i]->count = new_tx_cnt;
		for (i = 0; i < vsi->alloc_rxq; i++)
			vsi->rx_rings[i]->count = new_rx_cnt;
		netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
		goto done;
	}

	if (new_tx_cnt == vsi->tx_rings[0]->count)
		goto process_rx;

	/* alloc updated Tx resources */
	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
		    vsi->tx_rings[0]->count, new_tx_cnt);

	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!tx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_txq; i++) {
		/* clone ring and setup updated count */
		tx_rings[i] = *vsi->tx_rings[i];
		tx_rings[i].count = new_tx_cnt;
		tx_rings[i].desc = NULL;
		tx_rings[i].tx_buf = NULL;
		err = ice_setup_tx_ring(&tx_rings[i]);
		if (err) {
			while (i) {
				i--;
				ice_clean_tx_ring(&tx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
			goto done;
		}
	}

process_rx:
	if (new_rx_cnt == vsi->rx_rings[0]->count)
		goto process_link;

	/* alloc updated Rx resources */
	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
		    vsi->rx_rings[0]->count, new_rx_cnt);

	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!rx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		/* clone ring and setup updated count */
		rx_rings[i] = *vsi->rx_rings[i];
		rx_rings[i].count = new_rx_cnt;
		rx_rings[i].desc = NULL;
		rx_rings[i].rx_buf = NULL;
		/* this is to allow wr32 to have something to write to
		 * during early allocation of Rx buffers
		 */
		rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;

		err = ice_setup_rx_ring(&rx_rings[i]);
		if (err)
			goto rx_unwind;

		/* allocate Rx buffers */
		err = ice_alloc_rx_bufs(&rx_rings[i],
					ICE_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
		if (err) {
			while (i) {
				i--;
				ice_free_rx_ring(&rx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
			err = -ENOMEM;
			goto free_tx;
		}
	}

process_link:
	/* Bring interface down, copy in the new ring info, then restore the
	 * interface. If the VSI is up, bring it down and then back up.
	 */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		ice_down(vsi);

		if (tx_rings) {
			for (i = 0; i < vsi->alloc_txq; i++) {
				ice_free_tx_ring(vsi->tx_rings[i]);
				*vsi->tx_rings[i] = tx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
		}

		if (rx_rings) {
			for (i = 0; i < vsi->alloc_rxq; i++) {
				ice_free_rx_ring(vsi->rx_rings[i]);
				/* copy the real tail offset */
				rx_rings[i].tail = vsi->rx_rings[i]->tail;
				/* this is to fake out the allocation routine
				 * into thinking it has to realloc everything
				 * but the recycling logic will let us re-use
				 * the buffers allocated above
				 */
				rx_rings[i].next_to_use = 0;
				rx_rings[i].next_to_clean = 0;
				rx_rings[i].next_to_alloc = 0;
				*vsi->rx_rings[i] = rx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
		}

		ice_up(vsi);
	}
	goto done;

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++)
			ice_free_tx_ring(&tx_rings[i]);
		devm_kfree(&pf->pdev->dev, tx_rings);
	}

done:
	clear_bit(__ICE_CFG_BUSY, pf->state);
	return err;
}

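/**
 * ice_nway_reset - restart autonegotiation
 * @netdev: network interface device structure
 */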
static int ice_nway_reset(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	enum ice_status status;
	bool link_up;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	status = ice_aq_set_link_restart_an(pi, link_up, NULL);
	if (status) {
		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
			    status, pi->hw->adminq.sq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * ice_get_pauseparam - Get Flow Control status
 * @netdev: network interface device structure
 * @pause: ethernet pause (flow control) parameters
 */
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_port_info *pi;

	pi = np->vsi->port_info;
	pause->autoneg =
		((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
		 AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
		pause->rx_pause = 1;
	} else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
		pause->tx_pause = 1;
	} else if (pi->fc.current_mode == ICE_FC_FULL) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

/**
 * ice_set_pauseparam - Set Flow Control parameters
 * @netdev: network interface device structure
 * @pause: requested Tx/Rx flow control parameters
 */
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vsi = np->vsi;
	struct ice_hw *hw = &pf->hw;
	struct ice_port_info *pi;
	enum ice_status status;
	u8 aq_failures;
	bool link_up;
	int err = 0;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	/* Changing the port's flow control is not supported if this isn't the
	 * PF VSI
	 */
	if (vsi->type != ICE_VSI_PF) {
		netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
		return -EOPNOTSUPP;
	}

	if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	/* If we have link and don't have autoneg */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
		/* Send message that it might not necessarily work */
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	if (pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		pi->fc.req_mode = ICE_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		pi->fc.req_mode = ICE_FC_NONE;
	else
		return -EINVAL;

	/* Tell the OS link is going down, the link will go back up when fw
	 * says it is ready asynchronously
	 */
	ice_print_link_msg(vsi, false);
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Set the FC mode and only restart AN if link is up */
	status = ice_set_fc(pi, &aq_failures, link_up);

	if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
			    status, hw->adminq.sq_last_status);
		err = -EAGAIN;
	}

	if (!test_bit(__ICE_DOWN, pf->state)) {
		/* Give it a little more time to try to come back */
		msleep(75);
		if (!test_bit(__ICE_DOWN, pf->state))
			return ice_nway_reset(netdev);
	}

	return err;
}

/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the size of the RSS hash key.
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
	return ICE_VSIQF_HKEY_ARRAY_SIZE;
}

/**
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 */
static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);

	return np->vsi->rss_table_size;
}

/**
 * ice_get_rxfh - get the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Reads the indirection table directly from the hardware.
 */
static int
ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int ret = 0, i;
	u8 *lut;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported; return an error */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
		ret = -EIO;
		goto out;
	}

	for (i = 0; i < vsi->rss_table_size; i++)
		indir[i] = (u32)(lut[i]);

out:
	devm_kfree(&pf->pdev->dev, lut);
	return ret;
}

/**
 * ice_set_rxfh - set the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function
 *
 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
 * returns 0 after programming the table.
 */
static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u8 *seed = NULL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		/* RSS not supported; return an error */
		netdev_warn(netdev, "RSS is not configured on this VSI!\n");
		return -EIO;
	}

	if (key) {
		if (!vsi->rss_hkey_user) {
			vsi->rss_hkey_user =
				devm_kzalloc(&pf->pdev->dev,
					     ICE_VSIQF_HKEY_ARRAY_SIZE,
					     GFP_KERNEL);
			if (!vsi->rss_hkey_user)
				return -ENOMEM;
		}
		memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
		seed = vsi->rss_hkey_user;
	}

	if (!vsi->rss_lut_user) {
		vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
						 vsi->rss_table_size,
						 GFP_KERNEL);
		if (!vsi->rss_lut_user)
			return -ENOMEM;
	}

	/* Each 32-bit element of 'indir' is stored as an 8-bit LUT entry */
	if (indir) {
		int i;

		for (i = 0; i < vsi->rss_table_size; i++)
			vsi->rss_lut_user[i] = (u8)(indir[i]);
	} else {
		ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
				 vsi->rss_size);
	}

	if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
		return -EIO;

	return 0;
}

static const struct ethtool_ops ice_ethtool_ops = {
	.get_link_ksettings	= ice_get_link_ksettings,
	.get_drvinfo		= ice_get_drvinfo,
	.get_regs_len		= ice_get_regs_len,
	.get_regs		= ice_get_regs,
	.get_msglevel		= ice_get_msglevel,
	.set_msglevel		= ice_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ice_get_strings,
	.get_ethtool_stats	= ice_get_ethtool_stats,
	.get_sset_count		= ice_get_sset_count,
	.get_rxnfc		= ice_get_rxnfc,
	.get_ringparam		= ice_get_ringparam,
	.set_ringparam		= ice_set_ringparam,
	.nway_reset		= ice_nway_reset,
	.get_pauseparam		= ice_get_pauseparam,
	.set_pauseparam		= ice_set_pauseparam,
	.get_rxfh_key_size	= ice_get_rxfh_key_size,
	.get_rxfh_indir_size	= ice_get_rxfh_indir_size,
	.get_rxfh		= ice_get_rxfh,
	.set_rxfh		= ice_set_rxfh,
};

/**
 * ice_set_ethtool_ops - setup netdev ethtool ops
 * @netdev: network interface device structure
 *
 * Set up netdev ethtool ops with ice-specific ops
 */
void ice_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ice_ethtool_ops;
}