// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <linux/clk.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
{ \
	u32 reg = readl_relaxed(priv->base + offset + off); \
	return reg; \
} \
static inline void name##_writel(struct bcm_sysport_priv *priv, \
				  u32 val, u32 off) \
{ \
	writel_relaxed(val, priv->base + offset + off); \
} \

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved up by 4 bytes, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}

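/* Worked example (assuming the Lite bit layout implied by the TSB_SWAP1
 * comment further down): on full SYSTEMPORT, tdma_control_bit(priv, TSB_EN)
 * is simply BIT(TSB_EN); on SYSTEMPORT Lite an extra control bit was
 * inserted at the original position of ACB_ALGO, so every TDMA_CONTROL bit
 * at or above ACB_ALGO lands one position higher, hence BIT(bit + 1).
 */
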
/* L2-interrupt masking/unmasking helpers that automatically save the
 * applied mask in a software copy, to avoid CPU_MASK_STATUS reads in
 * hot paths.
 */
#define BCM_SYSPORT_INTR_L2(which) \
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask) \
{ \
	priv->irq##which##_mask &= ~(mask); \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask) \
{ \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
	priv->irq##which##_mask |= (mask); \
} \

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)
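
/* For reference, BCM_SYSPORT_INTR_L2(0) above expands to a pair of helpers
 * of the form:
 *
 *	intrl2_0_mask_clear(priv, mask);
 *	intrl2_0_mask_set(priv, mask);
 *
 * which unmask/mask the given interrupt sources while keeping
 * priv->irq0_mask in sync with the hardware state.
 */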

/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bit explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g.: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				    NETIF_F_HW_VLAN_CTAG_TX));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	/* Indicating that software inserts Broadcom tags is needed for the TX
	 * checksum to be computed correctly when using VLAN HW acceleration;
	 * otherwise it has no effect, so it can always be turned on.
	 */
	if (netdev_uses_dsa(dev))
		reg |= tdma_control_bit(priv, SW_BRCM_TAG);
	else
		reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
	if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
		tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	clk_disable_unprepare(priv->clk);

	return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
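	/* Convert microseconds into 8.192 us DMA timeout ticks, rounding up;
	 * e.g. usecs = 100 gives DIV_ROUND_UP(100 * 1000, 8192) = 13 ticks,
	 * i.e. an effective timeout of roughly 106.5 us.
	 */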
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
	       RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125 MHz; the DMA timeout is this reference
	 * clock divided by 1024, which yields roughly 8.192 us. Our maximum
	 * value has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
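	/* Worked out: 1024 / 125 MHz = 8.192 us per timeout tick, so a full
	 * 16-bit field covers about 65535 * 8.192 us ~= 536 ms; the bounds
	 * below approximate that limit in whole microseconds.
	 */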
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT
	 * Lite groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX.
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
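	/* The subtraction above relies on masked wraparound arithmetic:
	 * e.g. assuming a 16-bit 0xffff mask, p_index = 3 and
	 * rx_c_index = 0xfffe yield (3 - 0xfffe) & 0xffff = 5 packets
	 * to process.
	 */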

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and
		 * mapping or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			 DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware prepends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
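		/* The extra 2 bytes presumably keep the IP header 4-byte
		 * aligned once the Ethernet header has been pulled.
		 */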
		processed_bytes += len;

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index; the producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active.
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
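	/* The shift above places the consumer index in the upper 16 bits of
	 * the combined producer/consumer register used by SYSTEMPORT Lite,
	 * leaving the HW-maintained producer half untouched.
	 */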

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
				  priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Make sure we restore correct CID index in case HW lost
	 * its context during deep idle state
	 */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
	}

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
								    dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	__be16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		if (!nskb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = nskb;
		priv->mib.tx_realloc_tsb++;
	}

	tsb = skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb_vlan_tag_present(skb)) {
		tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
		tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		/* Account for the HW inserted VLAN tag */
		if (skb_vlan_tag_present(skb))
			csum_start += VLAN_HLEN;
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);
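		/* For TCP, for instance, skb->csum_offset is
		 * offsetof(struct tcphdr, check), so the pointer computed
		 * above ends up addressing the TCP checksum field relative
		 * to the TSB-adjusted start of the packet.
		 */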

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP &&
			    ip_ver == htons(ETH_P_IP))
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 len_status, addr_lo;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		      DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
	if (skb_vlan_tag_present(skb))
		len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ports are latched, so write upper address first */
	tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
	tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}

static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
				 void (*cb)(struct work_struct *work))
{
	struct bcm_sysport_net_dim *dim = &priv->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}

static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_net_dim *dim = &priv->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	size_t size;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->clean_index = 0;
	ring->alloc_size = ring->size;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));

	/* Configure QID and port mapping */
	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
	if (ring->inspect) {
		reg |= ring->switch_queue & RING_QID_MASK;
		reg |= ring->switch_port << RING_PORT_ID_SHIFT;
	} else {
		reg |= RING_IGNORE_STATUS;
	}
	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
	reg = 0;
	/* Adjust the packet size calculations if SYSTEMPORT is responsible
	 * for HW insertion of VLAN tags
	 */
	if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
		reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Enable ACB algorithm 2 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	reg |= tdma_control_bit(priv, ACB_ALGO);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
	 * with the original definition of ACB_ALGO
	 */
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->is_lite)
		reg &= ~BIT(TSB_SWAP1);
	/* Set a correct TSB format based on host endian */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= tdma_control_bit(priv, TSB_SWAP0);
	else
		reg &= ~tdma_control_bit(priv, TSB_SWAP0);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, switch q=%d,port=%d\n",
		  ring->size, ring->switch_queue,
		  ring->switch_port);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
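	/* With up to 1000 iterations of usleep_range(1000, 2000), this
	 * allows roughly 1-2 seconds for the block to settle before we
	 * report a timeout.
	 */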
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= tdma_control_bit(priv, TDMA_EN);
	else
		reg &= ~tdma_control_bit(priv, TDMA_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	u32 reg;
	int ret;
	int i;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
			  RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

bcm_sysport_fini_rx_ring(struct bcm_sysport_priv * priv)1725 static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1726 {
1727 struct bcm_sysport_cb *cb;
1728 unsigned int i;
1729 u32 reg;
1730
1731 /* Caller should ensure RDMA is disabled */
1732 reg = rdma_readl(priv, RDMA_STATUS);
1733 if (!(reg & RDMA_DISABLED))
1734 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1735
1736 for (i = 0; i < priv->num_rx_bds; i++) {
1737 cb = &priv->rx_cbs[i];
1738 if (dma_unmap_addr(cb, dma_addr))
1739 dma_unmap_single(&priv->pdev->dev,
1740 dma_unmap_addr(cb, dma_addr),
1741 RX_BUF_LENGTH, DMA_FROM_DEVICE);
1742 bcm_sysport_free_cb(cb);
1743 }
1744
1745 kfree(priv->rx_cbs);
1746 priv->rx_cbs = NULL;
1747
1748 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1749 }

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	if (!priv->is_lite) {
		reg = umac_readl(priv, UMAC_CMD);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		umac_writel(priv, reg, UMAC_CMD);
	} else {
		reg = gib_readl(priv, GIB_CONTROL);
		if (enable)
			reg |= mask;
		else
			reg &= ~mask;
		gib_writel(priv, reg, GIB_CONTROL);
	}

	/* UniMAC stops on a packet boundary; wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	if (priv->is_lite)
		return;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		   addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	if (!priv->is_lite) {
		umac_writel(priv, mac0, UMAC_MAC0);
		umac_writel(priv, mac1, UMAC_MAC1);
	} else {
		gib_writel(priv, mac0, GIB_MAC0);
		gib_writel(priv, mac1, GIB_MAC1);
	}
}
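
/* Packing example: for the (made-up) address 00:10:18:aa:bb:cc the two
 * registers receive:
 *
 *	mac0 = 0x001018aa	(addr[0..3], network byte order)
 *	mac1 = 0x0000bbcc	(addr[4..5] in the low 16 bits)
 */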

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* Interface is down; the MAC change will be applied on the next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
				    &stats->tx_packets);

	do {
		start = u64_stats_fetch_begin_irq(&priv->syncp);
		stats->rx_packets = stats64->rx_packets;
		stats->rx_bytes = stats64->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}
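
/* The fetch_begin/fetch_retry pair above is the reader half of the
 * u64_stats seqcount protocol; the writer half (the RX path elsewhere in
 * this driver) brackets its counter updates like so (sketch):
 *
 *	u64_stats_update_begin(&priv->syncp);
 *	stats64->rx_packets++;
 *	stats64->rx_bytes += len;
 *	u64_stats_update_end(&priv->syncp);
 */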

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
	bcm_sysport_init_rx_coalesce(priv);
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(dev->phydev);

	/* Enable TX interrupts for the TXQs */
	if (!priv->is_lite)
		intrl2_1_mask_clear(priv, 0xffffffff);
	else
		intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	/* Set a correct RSB format on SYSTEMPORT Lite */
	if (priv->is_lite)
		reg &= ~RBUF_RSB_SWAP1;

	/* Set a correct RSB format based on host endianness */
	if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= RBUF_RSB_SWAP0;
	else
		reg &= ~RBUF_RSB_SWAP0;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	if (!priv->is_lite) {
		intrl2_1_mask_set(priv, 0xffffffff);
		intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	}
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = gib_readl(priv, GIB_CONTROL);
	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
	if (netdev_uses_dsa(priv->netdev)) {
		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
	}
	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
	reg |= 12 << GIB_IPG_LEN_SHIFT;
	gib_writel(priv, reg, GIB_CONTROL);
}
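
/* Field-update example for the IPG write above, a read-modify-write of a
 * multi-bit field. Assuming, for illustration only, GIB_IPG_LEN_SHIFT == 8
 * and GIB_IPG_LEN_MASK == 0x3f, a register value of 0x00001f00 becomes
 * 0x00000c00: the IPG field is replaced with 12 (the standard Ethernet
 * inter-packet gap in bytes) and all other bits are preserved.
 */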

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	unsigned int i;
	int ret;

	clk_prepare_enable(priv->clk);

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 4-byte alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Apply features again in case we changed them while the interface
	 * was down
	 */
	bcm_sysport_set_features(dev, dev->features);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				0, priv->phy_interface);
	if (!phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		ret = -ENODEV;
		goto out_clk_disable;
	}

	/* Reset housekeeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* Mask all interrupts and then request them */
	bcm_sysport_mask_all_intrs(priv);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	if (!priv->is_lite) {
		ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
				  dev->name, dev);
		if (ret) {
			netdev_err(dev, "failed to request TX interrupt\n");
			goto out_free_irq0;
		}
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(phydev);
out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_disable(dev);
	napi_disable(&priv->napi);
	cancel_work_sync(&priv->dim.dim.work);
	phy_stop(dev->phydev);

	/* mask all interrupts */
	bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	if (!priv->is_lite)
		free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(dev->phydev);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
				 u64 location)
{
	unsigned int index;
	u32 reg;

	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
		reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
		reg &= RXCHK_BRCM_TAG_CID_MASK;
		if (reg == location)
			return index;
	}

	return -EINVAL;
}

static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	int index;

	/* Bail out if this is not a rule we know about */
	index = bcm_sysport_rule_find(priv, nfc->fs.location);
	if (index < 0)
		return -EOPNOTSUPP;

	nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

	return 0;
}

static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
				struct ethtool_rxnfc *nfc)
{
	unsigned int index;
	u32 reg;

	/* We cannot match locations greater than what the classification ID
	 * permits (256 entries)
	 */
	if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
		return -E2BIG;

	/* We cannot support flows that are not destined for a wake-up */
	if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
		return -EOPNOTSUPP;

	/* All filters are already in use, we cannot match more rules */
	if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
	    RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
	if (index >= RXCHK_BRCM_TAG_MAX)
		return -ENOSPC;

	/* Location is the classification ID, and index is the position
	 * within one of our 8 possible filters to be programmed
	 */
	reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
	reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
	reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
	rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
	rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));

	priv->filters_loc[index] = nfc->fs.location;
	set_bit(index, priv->filters);

	return 0;
}
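
/* Userspace view (sketch, illustrative values only): a wake-up filter for
 * classification ID 42 reaches this function via the ETHTOOL_SRXCLSRLINS
 * ioctl with roughly:
 *
 *	struct ethtool_rxnfc nfc = {
 *		.cmd		= ETHTOOL_SRXCLSRLINS,
 *		.fs.location	= 42,
 *		.fs.ring_cookie	= RX_CLS_FLOW_WAKE,
 *	};
 *
 * and 42 is then programmed into the CID field of the first free
 * RXCHK_BRCM_TAG filter slot.
 */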

static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
				u64 location)
{
	int index;

	/* Bail out if this is not a rule we know about */
	index = bcm_sysport_rule_find(priv, location);
	if (index < 0)
		return -EOPNOTSUPP;

	/* No need to disable the filter here; if it was enabled, that is
	 * taken care of at suspend time by bcm_sysport_suspend_to_wol
	 */
	clear_bit(index, priv->filters);
	priv->filters_loc[index] = 0;

	return 0;
}

static int bcm_sysport_get_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sysport_rule_get(priv, nfc);
		break;
	default:
		break;
	}

	return ret;
}

static int bcm_sysport_set_rxnfc(struct net_device *dev,
				 struct ethtool_rxnfc *nfc)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sysport_rule_set(priv, nfc);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sysport_rule_del(priv, nfc->fs.location);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo		= bcm_sysport_get_drvinfo,
	.get_msglevel		= bcm_sysport_get_msglvl,
	.set_msglevel		= bcm_sysport_set_msglvl,
	.get_link		= ethtool_op_get_link,
	.get_strings		= bcm_sysport_get_strings,
	.get_ethtool_stats	= bcm_sysport_get_stats,
	.get_sset_count		= bcm_sysport_get_sset_count,
	.get_wol		= bcm_sysport_get_wol,
	.set_wol		= bcm_sysport_set_wol,
	.get_coalesce		= bcm_sysport_get_coalesce,
	.set_coalesce		= bcm_sysport_set_coalesce,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_rxnfc		= bcm_sysport_get_rxnfc,
	.set_rxnfc		= bcm_sysport_set_rxnfc,
};

static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
				    struct net_device *sb_dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct bcm_sysport_tx_ring *tx_ring;
	unsigned int q, port;

	if (!netdev_uses_dsa(dev))
		return netdev_pick_tx(dev, skb, NULL);

	/* DSA tagging layer will have configured the correct queue */
	q = BRCM_TAG_GET_QUEUE(queue);
	port = BRCM_TAG_GET_PORT(queue);
	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

	if (unlikely(!tx_ring))
		return netdev_pick_tx(dev, skb, NULL);

	return tx_ring->index;
}
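
/* Index arithmetic example (illustrative): with 4 TX queues per switch
 * port, a packet mapped to switch port 1, queue 2 resolves to
 * ring_map[2 + 1 * 4] == ring_map[6]; the returned tx_ring->index is the
 * SYSTEMPORT TX ring that entry was bound to in bcm_sysport_map_queues().
 */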

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit		= bcm_sysport_xmit,
	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
	.ndo_open		= bcm_sysport_open,
	.ndo_stop		= bcm_sysport_stop,
	.ndo_set_features	= bcm_sysport_set_features,
	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
	.ndo_set_mac_address	= bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcm_sysport_poll_controller,
#endif
	.ndo_get_stats64	= bcm_sysport_get_stats64,
	.ndo_select_queue	= bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, qp, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We can't set up queue inspection for switches that are not
	 * directly attached
	 */
	if (info->switch_number)
		return 0;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	/* On SYSTEMPORT Lite we have half as many queues, so a 1:1 mapping
	 * is not possible; only a 2:1 mapping is. We achieve that by halving
	 * the number of queues on each per-port (slave_dev) network device.
	 * This needs to happen now, before any slave network device is used,
	 * so that it accurately reflects the number of real TX queues.
	 */
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

	for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
	     q++) {
		ring = &priv->tx_rings[q];

		if (ring->inspect)
			continue;

		/* Just remember the mapping here; the actual programming is
		 * done during bcm_sysport_init_tx_ring
		 */
		ring->switch_queue = qp;
		ring->switch_port = port;
		ring->inspect = true;
		priv->ring_map[qp + port * num_tx_queues] = ring;
		qp++;
	}

	return 0;
}
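
/* Layout example (illustrative): with priv->per_port_num_tx_queues == 4,
 * the flat ring_map[] array is indexed as qp + port * 4, so:
 *
 *	port 0 -> ring_map[0..3]
 *	port 1 -> ring_map[4..7]
 *	port 2 -> ring_map[8..11]
 *
 * with each entry pointing at the master TX ring that inspects that
 * (port, queue) pair.
 */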

static int bcm_sysport_unmap_queues(struct notifier_block *nb,
				    struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	struct net_device *dev;
	unsigned int q, qp, port;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	if (dev->netdev_ops != &bcm_sysport_netdev_ops)
		return 0;

	port = info->port_number;
	slave_dev = info->info.dev;

	num_tx_queues = slave_dev->real_num_tx_queues;

	for (q = 0; q < dev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];

		if (ring->switch_port != port)
			continue;

		if (!ring->inspect)
			continue;

		ring->inspect = false;
		qp = ring->switch_queue;
		priv->ring_map[qp + port * num_tx_queues] = NULL;
	}

	return 0;
}

static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	int ret = NOTIFY_DONE;

	switch (event) {
	case DSA_PORT_REGISTER:
		ret = bcm_sysport_map_queues(nb, ptr);
		break;
	case DSA_PORT_UNREGISTER:
		ret = bcm_sysport_unmap_queues(nb, ptr);
		break;
	}

	return notifier_from_errno(ret);
}

#define REV_FMT	"v%2x.%02x"
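
/* Decoding example: a REV_CNTL value of 0x0102 prints as "v 1.02", the
 * major revision in the high byte ("%2x", space-padded) and the minor in
 * the low byte ("%02x", zero-padded).
 */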

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
	[SYSTEMPORT] = {
		.is_lite = false,
		.num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
	},
	[SYSTEMPORT_LITE] = {
		.is_lite = true,
		.num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
	},
};

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemportlite-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
	{ .compatible = "brcm,systemport-v1.00",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ .compatible = "brcm,systemport",
	  .data = &bcm_sysport_params[SYSTEMPORT] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

static int bcm_sysport_probe(struct platform_device *pdev)
{
	const struct bcm_sysport_hw_params *params;
	const struct of_device_id *of_id = NULL;
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	of_id = of_match_node(bcm_sysport_of_match, dn);
	if (!of_id || !of_id->data)
		return -EINVAL;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
		return ret;
	}

	/* Fairly quickly we need to know the type of adapter we have */
	params = of_id->data;

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	/* Sanity check the number of transmit queues */
	if (!txq || txq > TDMA_NUM_RINGS)
		return -EINVAL;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
	if (IS_ERR(priv->clk)) {
		/* Returning directly here would leak the freshly allocated
		 * net_device; unwind through the error path instead
		 */
		ret = PTR_ERR(priv->clk);
		goto err_free_netdev;
	}

	/* Allocate number of TX rings */
	priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
				      sizeof(struct bcm_sysport_tx_ring),
				      GFP_KERNEL);
	if (!priv->tx_rings) {
		ret = -ENOMEM;
		goto err_free_netdev;
	}

	priv->is_lite = params->is_lite;
	priv->num_rx_desc_words = params->num_rx_desc_words;

	priv->irq0 = platform_get_irq(pdev, 0);
	if (!priv->is_lite) {
		priv->irq1 = platform_get_irq(pdev, 1);
		priv->wol_irq = platform_get_irq(pdev, 2);
	} else {
		priv->wol_irq = platform_get_irq(pdev, 1);
	}
	if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
		ret = -EINVAL;
		goto err_free_netdev;
	}

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err_free_netdev;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	ret = of_get_phy_mode(dn, &priv->phy_interface);
	/* Default to GMII interface mode */
	if (ret)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err_free_netdev;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (IS_ERR(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			 NETIF_F_HW_VLAN_CTAG_TX;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
	if (IS_ERR(priv->wol_clk)) {
		/* Same here: unwind instead of leaking the net_device and a
		 * possibly registered fixed link
		 */
		ret = PTR_ERR(priv->wol_clk);
		goto err_deregister_fixed_link;
	}

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	priv->rx_max_coalesced_frames = 1;
	u64_stats_init(&priv->syncp);

	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;

	ret = register_dsa_notifier(&priv->dsa_notifier);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DSA notifier\n");
		goto err_deregister_fixed_link;
	}

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err_deregister_notifier;
	}

	clk_prepare_enable(priv->clk);

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT%s " REV_FMT
		 " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 priv->is_lite ? " Lite" : "",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->irq0, priv->irq1, txq, rxq);

	clk_disable_unprepare(priv->clk);

	return 0;

err_deregister_notifier:
	unregister_dsa_notifier(&priv->dsa_notifier);
err_deregister_fixed_link:
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_netdev:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device_node *dn = pdev->dev.of_node;

	/* Not much to do; ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_dsa_notifier(&priv->dsa_notifier);
	unregister_netdev(dev);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	unsigned int index, i = 0;
	u32 reg;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
			    UMAC_PSW_LS);
		reg |= PSW_EN;
	}
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->wolopts & WAKE_FILTER) {
		/* Turn on ACPI matching to steal packets from RBUF */
		reg = rbuf_readl(priv, RBUF_CONTROL);
		if (priv->is_lite)
			reg |= RBUF_ACPI_EN_LITE;
		else
			reg |= RBUF_ACPI_EN;
		rbuf_writel(priv, reg, RBUF_CONTROL);

		/* Enable RXCHK, active filters and Broadcom tag matching */
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
			 RXCHK_BRCM_TAG_MATCH_SHIFT);
		for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
			reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
			i++;
		}
		reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		mpd_enable_set(priv, false);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}
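
/* SecureOn packing example: the 6-byte password in priv->sopass is split
 * across two registers. For the (made-up) password 01:02:03:04:05:06:
 *
 *	UMAC_PSW_MS = get_unaligned_be16(&sopass[0]) = 0x0102
 *	UMAC_PSW_LS = get_unaligned_be32(&sopass[2]) = 0x03040506
 */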

static int __maybe_unused bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcm_sysport_netif_stop(dev);

	phy_suspend(dev->phydev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts) {
		clk_prepare_enable(priv->wol_clk);
		ret = bcm_sysport_suspend_to_wol(priv);
	}

	clk_disable_unprepare(priv->clk);

	return ret;
}

static int __maybe_unused bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	if (!netif_running(dev))
		return 0;

	clk_prepare_enable(priv->clk);
	if (priv->wolopts)
		clk_disable_unprepare(priv->wol_clk);

	umac_reset(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection; take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Restore enabled features */
	bcm_sysport_set_features(dev, dev->features);

	rbuf_init(priv);

	/* Set maximum frame length */
	if (!priv->is_lite)
		umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
	else
		gib_set_pad_extension(priv);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(dev->phydev);

	bcm_sysport_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static struct platform_driver bcm_sysport_driver = {
	.probe	= bcm_sysport_probe,
	.remove	= bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");