/*******************************************************************************
  STMMAC Ethtool support

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/net_tstamp.h>
#include <asm/io.h>

#include "stmmac.h"
#include "dwmac_dma.h"

#define REG_SPACE_SIZE 0x1060
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"

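/*
 * Word offset (in u32 units) into the ethtool register dump where
 * stmmac_ethtool_gregs() below copies the DMA channel registers.
 */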
#define ETHTOOL_DMA_OFFSET 55

struct stmmac_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
};

#define STMMAC_STAT(m) \
        { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
        offsetof(struct stmmac_priv, xstats.m)}

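/*
 * Each entry pairs an ethtool stat string with the size and offset of the
 * corresponding xstats field in struct stmmac_priv, so that
 * stmmac_get_ethtool_stats() can copy the values out generically.
 */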
static const struct stmmac_stats stmmac_gstrings_stats[] = {
        /* Transmit errors */
        STMMAC_STAT(tx_underflow),
        STMMAC_STAT(tx_carrier),
        STMMAC_STAT(tx_losscarrier),
        STMMAC_STAT(vlan_tag),
        STMMAC_STAT(tx_deferred),
        STMMAC_STAT(tx_vlan),
        STMMAC_STAT(tx_jabber),
        STMMAC_STAT(tx_frame_flushed),
        STMMAC_STAT(tx_payload_error),
        STMMAC_STAT(tx_ip_header_error),
        /* Receive errors */
        STMMAC_STAT(rx_desc),
        STMMAC_STAT(sa_filter_fail),
        STMMAC_STAT(overflow_error),
        STMMAC_STAT(ipc_csum_error),
        STMMAC_STAT(rx_collision),
        STMMAC_STAT(rx_crc_errors),
        STMMAC_STAT(dribbling_bit),
        STMMAC_STAT(rx_length),
        STMMAC_STAT(rx_mii),
        STMMAC_STAT(rx_multicast),
        STMMAC_STAT(rx_gmac_overflow),
        STMMAC_STAT(rx_watchdog),
        STMMAC_STAT(da_rx_filter_fail),
        STMMAC_STAT(sa_rx_filter_fail),
        STMMAC_STAT(rx_missed_cntr),
        STMMAC_STAT(rx_overflow_cntr),
        STMMAC_STAT(rx_vlan),
        /* Tx/Rx IRQ error info */
        STMMAC_STAT(tx_undeflow_irq),
        STMMAC_STAT(tx_process_stopped_irq),
        STMMAC_STAT(tx_jabber_irq),
        STMMAC_STAT(rx_overflow_irq),
        STMMAC_STAT(rx_buf_unav_irq),
        STMMAC_STAT(rx_process_stopped_irq),
        STMMAC_STAT(rx_watchdog_irq),
        STMMAC_STAT(tx_early_irq),
        STMMAC_STAT(fatal_bus_error_irq),
        /* Tx/Rx IRQ Events */
        STMMAC_STAT(rx_early_irq),
        STMMAC_STAT(threshold),
        STMMAC_STAT(tx_pkt_n),
        STMMAC_STAT(rx_pkt_n),
        STMMAC_STAT(normal_irq_n),
        STMMAC_STAT(rx_normal_irq_n),
        STMMAC_STAT(napi_poll),
        STMMAC_STAT(tx_normal_irq_n),
        STMMAC_STAT(tx_clean),
        STMMAC_STAT(tx_set_ic_bit),
        STMMAC_STAT(irq_receive_pmt_irq_n),
        /* MMC info */
        STMMAC_STAT(mmc_tx_irq_n),
        STMMAC_STAT(mmc_rx_irq_n),
        STMMAC_STAT(mmc_rx_csum_offload_irq_n),
        /* EEE */
        STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
        STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
        STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
        STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
        STMMAC_STAT(phy_eee_wakeup_error_n),
        /* Extended RDES status */
        STMMAC_STAT(ip_hdr_err),
        STMMAC_STAT(ip_payload_err),
        STMMAC_STAT(ip_csum_bypassed),
        STMMAC_STAT(ipv4_pkt_rcvd),
        STMMAC_STAT(ipv6_pkt_rcvd),
        STMMAC_STAT(no_ptp_rx_msg_type_ext),
        STMMAC_STAT(ptp_rx_msg_type_sync),
        STMMAC_STAT(ptp_rx_msg_type_follow_up),
        STMMAC_STAT(ptp_rx_msg_type_delay_req),
        STMMAC_STAT(ptp_rx_msg_type_delay_resp),
        STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
        STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
        STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
        STMMAC_STAT(ptp_rx_msg_type_announce),
        STMMAC_STAT(ptp_rx_msg_type_management),
        STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
        STMMAC_STAT(ptp_frame_type),
        STMMAC_STAT(ptp_ver),
        STMMAC_STAT(timestamp_dropped),
        STMMAC_STAT(av_pkt_rcvd),
        STMMAC_STAT(av_tagged_pkt_rcvd),
        STMMAC_STAT(vlan_tag_priority_val),
        STMMAC_STAT(l3_filter_match),
        STMMAC_STAT(l4_filter_match),
        STMMAC_STAT(l3_l4_filter_no_match),
        /* PCS */
        STMMAC_STAT(irq_pcs_ane_n),
        STMMAC_STAT(irq_pcs_link_n),
        STMMAC_STAT(irq_rgmii_n),
        /* DEBUG */
        STMMAC_STAT(mtl_tx_status_fifo_full),
        STMMAC_STAT(mtl_tx_fifo_not_empty),
        STMMAC_STAT(mmtl_fifo_ctrl),
        STMMAC_STAT(mtl_tx_fifo_read_ctrl_write),
        STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait),
        STMMAC_STAT(mtl_tx_fifo_read_ctrl_read),
        STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle),
        STMMAC_STAT(mac_tx_in_pause),
        STMMAC_STAT(mac_tx_frame_ctrl_xfer),
        STMMAC_STAT(mac_tx_frame_ctrl_idle),
        STMMAC_STAT(mac_tx_frame_ctrl_wait),
        STMMAC_STAT(mac_tx_frame_ctrl_pause),
        STMMAC_STAT(mac_gmii_tx_proto_engine),
        STMMAC_STAT(mtl_rx_fifo_fill_level_full),
        STMMAC_STAT(mtl_rx_fifo_fill_above_thresh),
        STMMAC_STAT(mtl_rx_fifo_fill_below_thresh),
        STMMAC_STAT(mtl_rx_fifo_fill_level_empty),
        STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush),
        STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data),
        STMMAC_STAT(mtl_rx_fifo_read_ctrl_status),
        STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle),
        STMMAC_STAT(mtl_rx_fifo_ctrl_active),
        STMMAC_STAT(mac_rx_frame_ctrl_fifo),
        STMMAC_STAT(mac_gmii_rx_proto_engine),
        /* TSO */
        STMMAC_STAT(tx_tso_frames),
        STMMAC_STAT(tx_tso_nfrags),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)

/* HW MAC Management counters (if supported) */
#define STMMAC_MMC_STAT(m) \
        { #m, FIELD_SIZEOF(struct stmmac_counters, m), \
        offsetof(struct stmmac_priv, mmc.m)}

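/*
 * These counters are refreshed from the hardware MMC block by
 * dwmac_mmc_read() in stmmac_get_ethtool_stats() and are only exposed
 * when priv->dma_cap.rmon is set.
 */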
static const struct stmmac_stats stmmac_mmc[] = {
        STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
        STMMAC_MMC_STAT(mmc_tx_framecount_gb),
        STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
        STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
        STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
        STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
        STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
        STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
        STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
        STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
        STMMAC_MMC_STAT(mmc_tx_unicast_gb),
        STMMAC_MMC_STAT(mmc_tx_multicast_gb),
        STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
        STMMAC_MMC_STAT(mmc_tx_underflow_error),
        STMMAC_MMC_STAT(mmc_tx_singlecol_g),
        STMMAC_MMC_STAT(mmc_tx_multicol_g),
        STMMAC_MMC_STAT(mmc_tx_deferred),
        STMMAC_MMC_STAT(mmc_tx_latecol),
        STMMAC_MMC_STAT(mmc_tx_exesscol),
        STMMAC_MMC_STAT(mmc_tx_carrier_error),
        STMMAC_MMC_STAT(mmc_tx_octetcount_g),
        STMMAC_MMC_STAT(mmc_tx_framecount_g),
        STMMAC_MMC_STAT(mmc_tx_excessdef),
        STMMAC_MMC_STAT(mmc_tx_pause_frame),
        STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
        STMMAC_MMC_STAT(mmc_rx_framecount_gb),
        STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
        STMMAC_MMC_STAT(mmc_rx_octetcount_g),
        STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
        STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
        STMMAC_MMC_STAT(mmc_rx_crc_error),
        STMMAC_MMC_STAT(mmc_rx_align_error),
        STMMAC_MMC_STAT(mmc_rx_run_error),
        STMMAC_MMC_STAT(mmc_rx_jabber_error),
        STMMAC_MMC_STAT(mmc_rx_undersize_g),
        STMMAC_MMC_STAT(mmc_rx_oversize_g),
        STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
        STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
        STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
        STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
        STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
        STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
        STMMAC_MMC_STAT(mmc_rx_unicast_g),
        STMMAC_MMC_STAT(mmc_rx_length_error),
        STMMAC_MMC_STAT(mmc_rx_autofrangetype),
        STMMAC_MMC_STAT(mmc_rx_pause_frames),
        STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
        STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
        STMMAC_MMC_STAT(mmc_rx_watchdog_error),
        STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
        STMMAC_MMC_STAT(mmc_rx_ipc_intr),
        STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
        STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
        STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
        STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
        STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
        STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
        STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
        STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
        STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
        STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
        STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
        STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
        STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
        STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
        STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
        STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
        STMMAC_MMC_STAT(mmc_rx_udp_gd),
        STMMAC_MMC_STAT(mmc_rx_udp_err),
        STMMAC_MMC_STAT(mmc_rx_tcp_gd),
        STMMAC_MMC_STAT(mmc_rx_tcp_err),
        STMMAC_MMC_STAT(mmc_rx_icmp_gd),
        STMMAC_MMC_STAT(mmc_rx_icmp_err),
        STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
        STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
        STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
        STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
        STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
        STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)

static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
                                      struct ethtool_drvinfo *info)
{
        struct stmmac_priv *priv = netdev_priv(dev);

        if (priv->plat->has_gmac || priv->plat->has_gmac4)
                strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
        else
                strlcpy(info->driver, MAC100_ETHTOOL_NAME,
                        sizeof(info->driver));

        strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

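/*
 * When the MAC uses its internal RGMII/SGMII PCS, link state, speed and
 * duplex come from the PCS auto-negotiation results cached in priv->xstats;
 * otherwise the request is forwarded to the attached PHY.
 */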
static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
                                             struct ethtool_link_ksettings *cmd)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phy = dev->phydev;

        if (priv->hw->pcs & STMMAC_PCS_RGMII ||
            priv->hw->pcs & STMMAC_PCS_SGMII) {
                struct rgmii_adv adv;
                u32 supported, advertising, lp_advertising;

                if (!priv->xstats.pcs_link) {
                        cmd->base.speed = SPEED_UNKNOWN;
                        cmd->base.duplex = DUPLEX_UNKNOWN;
                        return 0;
                }
                cmd->base.duplex = priv->xstats.pcs_duplex;

                cmd->base.speed = priv->xstats.pcs_speed;

                /* Get and convert ADV/LP_ADV from the HW AN registers */
                if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
                        return -EOPNOTSUPP; /* should never happen */

                /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */

                ethtool_convert_link_mode_to_legacy_u32(
                        &supported, cmd->link_modes.supported);
                ethtool_convert_link_mode_to_legacy_u32(
                        &advertising, cmd->link_modes.advertising);
                ethtool_convert_link_mode_to_legacy_u32(
                        &lp_advertising, cmd->link_modes.lp_advertising);

                if (adv.pause & STMMAC_PCS_PAUSE)
                        advertising |= ADVERTISED_Pause;
                if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
                        advertising |= ADVERTISED_Asym_Pause;
                if (adv.lp_pause & STMMAC_PCS_PAUSE)
                        lp_advertising |= ADVERTISED_Pause;
                if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
                        lp_advertising |= ADVERTISED_Asym_Pause;

                /* Reg49[3] always set because ANE is always supported */
                cmd->base.autoneg = ADVERTISED_Autoneg;
                supported |= SUPPORTED_Autoneg;
                advertising |= ADVERTISED_Autoneg;
                lp_advertising |= ADVERTISED_Autoneg;

                if (adv.duplex) {
                        supported |= (SUPPORTED_1000baseT_Full |
                                      SUPPORTED_100baseT_Full |
                                      SUPPORTED_10baseT_Full);
                        advertising |= (ADVERTISED_1000baseT_Full |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_10baseT_Full);
                } else {
                        supported |= (SUPPORTED_1000baseT_Half |
                                      SUPPORTED_100baseT_Half |
                                      SUPPORTED_10baseT_Half);
                        advertising |= (ADVERTISED_1000baseT_Half |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_10baseT_Half);
                }
                if (adv.lp_duplex)
                        lp_advertising |= (ADVERTISED_1000baseT_Full |
                                           ADVERTISED_100baseT_Full |
                                           ADVERTISED_10baseT_Full);
                else
                        lp_advertising |= (ADVERTISED_1000baseT_Half |
                                           ADVERTISED_100baseT_Half |
                                           ADVERTISED_10baseT_Half);
                cmd->base.port = PORT_OTHER;

                ethtool_convert_legacy_u32_to_link_mode(
                        cmd->link_modes.supported, supported);
                ethtool_convert_legacy_u32_to_link_mode(
                        cmd->link_modes.advertising, advertising);
                ethtool_convert_legacy_u32_to_link_mode(
                        cmd->link_modes.lp_advertising, lp_advertising);

                return 0;
        }

        if (phy == NULL) {
                pr_err("%s: %s: PHY is not registered\n",
                       __func__, dev->name);
                return -ENODEV;
        }
        if (!netif_running(dev)) {
                pr_err("%s: interface is disabled: we cannot track "
                       "link speed / duplex setting\n", dev->name);
                return -EBUSY;
        }
        phy_ethtool_ksettings_get(phy, cmd);
        return 0;
}

static int
stmmac_ethtool_set_link_ksettings(struct net_device *dev,
                                  const struct ethtool_link_ksettings *cmd)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        struct phy_device *phy = dev->phydev;
        int rc;

        if (priv->hw->pcs & STMMAC_PCS_RGMII ||
            priv->hw->pcs & STMMAC_PCS_SGMII) {
                u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;

                /* Only support ANE */
                if (cmd->base.autoneg != AUTONEG_ENABLE)
                        return -EINVAL;

                mask &= (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_100baseT_Half |
                         ADVERTISED_100baseT_Full |
                         ADVERTISED_10baseT_Half |
                         ADVERTISED_10baseT_Full);

                mutex_lock(&priv->lock);
                stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
                mutex_unlock(&priv->lock);

                return 0;
        }

        rc = phy_ethtool_ksettings_set(phy, cmd);

        return rc;
}

static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        return priv->msg_enable;
}

static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        priv->msg_enable = level;
}

static int stmmac_check_if_running(struct net_device *dev)
{
        if (!netif_running(dev))
                return -EBUSY;
        return 0;
}

static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
        return REG_SPACE_SIZE;
}

static void stmmac_ethtool_gregs(struct net_device *dev,
                                 struct ethtool_regs *regs, void *space)
{
        u32 *reg_space = (u32 *) space;

        struct stmmac_priv *priv = netdev_priv(dev);

        memset(reg_space, 0x0, REG_SPACE_SIZE);

        stmmac_dump_mac_regs(priv, priv->hw, reg_space);
        stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
        /* Copy DMA registers to where ethtool expects them */
        memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
               NUM_DWMAC1000_DMA_REGS * 4);
}

static void
stmmac_get_pauseparam(struct net_device *netdev,
                      struct ethtool_pauseparam *pause)
{
        struct stmmac_priv *priv = netdev_priv(netdev);
        struct rgmii_adv adv_lp;

        pause->rx_pause = 0;
        pause->tx_pause = 0;

        if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
                pause->autoneg = 1;
                if (!adv_lp.pause)
                        return;
        } else {
                if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
                    !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
                        return;
        }

        pause->autoneg = netdev->phydev->autoneg;

        if (priv->flow_ctrl & FLOW_RX)
                pause->rx_pause = 1;
        if (priv->flow_ctrl & FLOW_TX)
                pause->tx_pause = 1;
}

static int
stmmac_set_pauseparam(struct net_device *netdev,
                      struct ethtool_pauseparam *pause)
{
        struct stmmac_priv *priv = netdev_priv(netdev);
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        struct phy_device *phy = netdev->phydev;
        int new_pause = FLOW_OFF;
        struct rgmii_adv adv_lp;

        if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
                pause->autoneg = 1;
                if (!adv_lp.pause)
                        return -EOPNOTSUPP;
        } else {
                if (!(phy->supported & SUPPORTED_Pause) ||
                    !(phy->supported & SUPPORTED_Asym_Pause))
                        return -EOPNOTSUPP;
        }

        if (pause->rx_pause)
                new_pause |= FLOW_RX;
        if (pause->tx_pause)
                new_pause |= FLOW_TX;

        priv->flow_ctrl = new_pause;
        phy->autoneg = pause->autoneg;

        if (phy->autoneg) {
                if (netif_running(netdev))
                        return phy_start_aneg(phy);
        }

        stmmac_flow_ctrl(priv, priv->hw, phy->duplex, priv->flow_ctrl,
                         priv->pause, tx_cnt);
        return 0;
}

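/* Keep the order in which data[] is filled here in sync with stmmac_get_strings() */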
static void stmmac_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *dummy, u64 *data)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_queues_count = priv->plat->rx_queues_to_use;
        u32 tx_queues_count = priv->plat->tx_queues_to_use;
        unsigned long count;
        int i, j = 0, ret;

        if (priv->dma_cap.asp) {
                for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
                        if (!stmmac_safety_feat_dump(priv, &priv->sstats, i,
                                                     &count, NULL))
                                data[j++] = count;
                }
        }

        /* Update the DMA HW counters for dwmac10/100 */
        ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats,
                                       priv->ioaddr);
        if (ret) {
                /* If supported, for new GMAC chips expose the MMC counters */
                if (priv->dma_cap.rmon) {
                        dwmac_mmc_read(priv->mmcaddr, &priv->mmc);

                        for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
                                char *p;
                                p = (char *)priv + stmmac_mmc[i].stat_offset;

                                data[j++] = (stmmac_mmc[i].sizeof_stat ==
                                             sizeof(u64)) ? (*(u64 *)p) :
                                             (*(u32 *)p);
                        }
                }
                if (priv->eee_enabled) {
                        int val = phy_get_eee_err(dev->phydev);
                        if (val)
                                priv->xstats.phy_eee_wakeup_error_n = val;
                }

                if (priv->synopsys_id >= DWMAC_CORE_3_50)
                        stmmac_mac_debug(priv, priv->ioaddr,
                                         (void *)&priv->xstats,
                                         rx_queues_count, tx_queues_count);
        }
        for (i = 0; i < STMMAC_STATS_LEN; i++) {
                char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
                data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
                             sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
        }
}

static int stmmac_get_sset_count(struct net_device *netdev, int sset)
{
        struct stmmac_priv *priv = netdev_priv(netdev);
        int i, len, safety_len = 0;

        switch (sset) {
        case ETH_SS_STATS:
                len = STMMAC_STATS_LEN;

                if (priv->dma_cap.rmon)
                        len += STMMAC_MMC_STATS_LEN;
                if (priv->dma_cap.asp) {
                        for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
                                if (!stmmac_safety_feat_dump(priv,
                                                             &priv->sstats, i,
                                                             NULL, NULL))
                                        safety_len++;
                        }

                        len += safety_len;
                }

                return len;
        default:
                return -EOPNOTSUPP;
        }
}

static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;
        u8 *p = data;
        struct stmmac_priv *priv = netdev_priv(dev);

        switch (stringset) {
        case ETH_SS_STATS:
                if (priv->dma_cap.asp) {
                        for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
                                const char *desc;
                                if (!stmmac_safety_feat_dump(priv,
                                                             &priv->sstats, i,
                                                             NULL, &desc)) {
                                        memcpy(p, desc, ETH_GSTRING_LEN);
                                        p += ETH_GSTRING_LEN;
                                }
                        }
                }
                if (priv->dma_cap.rmon)
                        for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
                                memcpy(p, stmmac_mmc[i].stat_string,
                                       ETH_GSTRING_LEN);
                                p += ETH_GSTRING_LEN;
                        }
                for (i = 0; i < STMMAC_STATS_LEN; i++) {
                        memcpy(p, stmmac_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                break;
        default:
                WARN_ON(1);
                break;
        }
}

/* Currently only support WOL through Magic packet. */
static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct stmmac_priv *priv = netdev_priv(dev);

        mutex_lock(&priv->lock);
        if (device_can_wakeup(priv->device)) {
                wol->supported = WAKE_MAGIC | WAKE_UCAST;
                wol->wolopts = priv->wolopts;
        }
        mutex_unlock(&priv->lock);
}

static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 support = WAKE_MAGIC | WAKE_UCAST;

        /* By default almost all GMAC devices support the WoL via
         * magic frame but we can disable it if the HW capability
         * register shows no support for pmt_magic_frame. */
        if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
                wol->wolopts &= ~WAKE_MAGIC;

        if (!device_can_wakeup(priv->device))
                return -EINVAL;

        if (wol->wolopts & ~support)
                return -EINVAL;

        if (wol->wolopts) {
                pr_info("stmmac: wakeup enable\n");
                device_set_wakeup_enable(priv->device, 1);
                enable_irq_wake(priv->wol_irq);
        } else {
                device_set_wakeup_enable(priv->device, 0);
                disable_irq_wake(priv->wol_irq);
        }

        mutex_lock(&priv->lock);
        priv->wolopts = wol->wolopts;
        mutex_unlock(&priv->lock);

        return 0;
}

static int stmmac_ethtool_op_get_eee(struct net_device *dev,
                                     struct ethtool_eee *edata)
{
        struct stmmac_priv *priv = netdev_priv(dev);

        if (!priv->dma_cap.eee)
                return -EOPNOTSUPP;

        edata->eee_enabled = priv->eee_enabled;
        edata->eee_active = priv->eee_active;
        edata->tx_lpi_timer = priv->tx_lpi_timer;

        return phy_ethtool_get_eee(dev->phydev, edata);
}

static int stmmac_ethtool_op_set_eee(struct net_device *dev,
                                     struct ethtool_eee *edata)
{
        struct stmmac_priv *priv = netdev_priv(dev);

        priv->eee_enabled = edata->eee_enabled;

        if (!priv->eee_enabled)
                stmmac_disable_eee_mode(priv);
        else {
                /* We are asking to enable EEE, but it is safe to verify
                 * everything by invoking eee_init, which reports whether
                 * EEE could actually be enabled.
                 */
                priv->eee_enabled = stmmac_eee_init(priv);
                if (!priv->eee_enabled)
                        return -EOPNOTSUPP;

                /* Do not change tx_lpi_timer in case of failure */
                priv->tx_lpi_timer = edata->tx_lpi_timer;
        }

        return phy_ethtool_set_eee(dev->phydev, edata);
}

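/*
 * The DMA Rx interrupt watchdog (RIWT) counts in units of 256 cycles of the
 * stmmac clock (priv->plat->stmmac_clk), so riwt = usecs * (clk / 1000000) / 256.
 * For example, assuming a 250 MHz clock, 1000 usecs maps to a RIWT value of
 * about 976; stmmac_riwt2usec() performs the inverse conversion.
 */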
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
        unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);

        if (!clk)
                return 0;

        return (usec * (clk / 1000000)) / 256;
}

static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
        unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);

        if (!clk)
                return 0;

        return (riwt * 256) / (clk / 1000000);
}

static int stmmac_get_coalesce(struct net_device *dev,
                               struct ethtool_coalesce *ec)
{
        struct stmmac_priv *priv = netdev_priv(dev);

        ec->tx_coalesce_usecs = priv->tx_coal_timer;
        ec->tx_max_coalesced_frames = priv->tx_coal_frames;

        if (priv->use_riwt)
                ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);

        return 0;
}

static int stmmac_set_coalesce(struct net_device *dev,
                               struct ethtool_coalesce *ec)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        unsigned int rx_riwt;

        /* Check not supported parameters */
        if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
            (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
            (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
            (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
            (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
            (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
            (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
            (ec->rx_max_coalesced_frames_high) ||
            (ec->tx_max_coalesced_frames_irq) ||
            (ec->stats_block_coalesce_usecs) ||
            (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
                return -EOPNOTSUPP;

        if (ec->rx_coalesce_usecs == 0)
                return -EINVAL;

        if ((ec->tx_coalesce_usecs == 0) &&
            (ec->tx_max_coalesced_frames == 0))
                return -EINVAL;

        if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
            (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
                return -EINVAL;

        rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);

        if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
                return -EINVAL;
        else if (!priv->use_riwt)
                return -EOPNOTSUPP;

        /* Only copy relevant parameters, ignore all others. */
        priv->tx_coal_frames = ec->tx_max_coalesced_frames;
        priv->tx_coal_timer = ec->tx_coalesce_usecs;
        priv->rx_riwt = rx_riwt;
        stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt);

        return 0;
}

static int stmmac_get_ts_info(struct net_device *dev,
                              struct ethtool_ts_info *info)
{
        struct stmmac_priv *priv = netdev_priv(dev);

        if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {

                info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                        SOF_TIMESTAMPING_TX_HARDWARE |
                                        SOF_TIMESTAMPING_RX_SOFTWARE |
                                        SOF_TIMESTAMPING_RX_HARDWARE |
                                        SOF_TIMESTAMPING_SOFTWARE |
                                        SOF_TIMESTAMPING_RAW_HARDWARE;

                if (priv->ptp_clock)
                        info->phc_index = ptp_clock_index(priv->ptp_clock);

                info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

                info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
                                    (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
                                    (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                                    (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                                    (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
                                    (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
                                    (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                                    (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
                                    (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
                                    (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
                                    (1 << HWTSTAMP_FILTER_ALL));
                return 0;
        } else
                return ethtool_op_get_ts_info(dev, info);
}

static int stmmac_get_tunable(struct net_device *dev,
                              const struct ethtool_tunable *tuna, void *data)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret = 0;

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                *(u32 *)data = priv->rx_copybreak;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int stmmac_set_tunable(struct net_device *dev,
                              const struct ethtool_tunable *tuna,
                              const void *data)
{
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret = 0;

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                priv->rx_copybreak = *(u32 *)data;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static const struct ethtool_ops stmmac_ethtool_ops = {
        .begin = stmmac_check_if_running,
        .get_drvinfo = stmmac_ethtool_getdrvinfo,
        .get_msglevel = stmmac_ethtool_getmsglevel,
        .set_msglevel = stmmac_ethtool_setmsglevel,
        .get_regs = stmmac_ethtool_gregs,
        .get_regs_len = stmmac_ethtool_get_regs_len,
        .get_link = ethtool_op_get_link,
        .nway_reset = phy_ethtool_nway_reset,
        .get_pauseparam = stmmac_get_pauseparam,
        .set_pauseparam = stmmac_set_pauseparam,
        .get_ethtool_stats = stmmac_get_ethtool_stats,
        .get_strings = stmmac_get_strings,
        .get_wol = stmmac_get_wol,
        .set_wol = stmmac_set_wol,
        .get_eee = stmmac_ethtool_op_get_eee,
        .set_eee = stmmac_ethtool_op_set_eee,
        .get_sset_count = stmmac_get_sset_count,
        .get_ts_info = stmmac_get_ts_info,
        .get_coalesce = stmmac_get_coalesce,
        .set_coalesce = stmmac_set_coalesce,
        .get_tunable = stmmac_get_tunable,
        .set_tunable = stmmac_set_tunable,
        .get_link_ksettings = stmmac_ethtool_get_link_ksettings,
        .set_link_ksettings = stmmac_ethtool_set_link_ksettings,
};

void stmmac_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &stmmac_ethtool_ops;
}