1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * sni_ave.c - Socionext UniPhier AVE ethernet driver
4   * Copyright 2014 Panasonic Corporation
5   * Copyright 2015-2017 Socionext Inc.
6   */
7  
8  #include <linux/bitops.h>
9  #include <linux/clk.h>
10  #include <linux/etherdevice.h>
11  #include <linux/interrupt.h>
12  #include <linux/io.h>
13  #include <linux/iopoll.h>
14  #include <linux/mfd/syscon.h>
15  #include <linux/mii.h>
16  #include <linux/module.h>
17  #include <linux/netdevice.h>
18  #include <linux/of.h>
19  #include <linux/of_net.h>
20  #include <linux/of_mdio.h>
21  #include <linux/phy.h>
22  #include <linux/platform_device.h>
23  #include <linux/regmap.h>
24  #include <linux/reset.h>
25  #include <linux/types.h>
26  #include <linux/u64_stats_sync.h>
27  
28  /* General Register Group */
29  #define AVE_IDR			0x000	/* ID */
30  #define AVE_VR			0x004	/* Version */
31  #define AVE_GRR			0x008	/* Global Reset */
32  #define AVE_CFGR		0x00c	/* Configuration */
33  
34  /* Interrupt Register Group */
35  #define AVE_GIMR		0x100	/* Global Interrupt Mask */
36  #define AVE_GISR		0x104	/* Global Interrupt Status */
37  
38  /* MAC Register Group */
39  #define AVE_TXCR		0x200	/* TX Setup */
40  #define AVE_RXCR		0x204	/* RX Setup */
41  #define AVE_RXMAC1R		0x208	/* MAC address (lower) */
42  #define AVE_RXMAC2R		0x20c	/* MAC address (upper) */
43  #define AVE_MDIOCTR		0x214	/* MDIO Control */
44  #define AVE_MDIOAR		0x218	/* MDIO Address */
45  #define AVE_MDIOWDR		0x21c	/* MDIO Data */
46  #define AVE_MDIOSR		0x220	/* MDIO Status */
47  #define AVE_MDIORDR		0x224	/* MDIO Rd Data */
48  
49  /* Descriptor Control Register Group */
50  #define AVE_DESCC		0x300	/* Descriptor Control */
51  #define AVE_TXDC		0x304	/* TX Descriptor Configuration */
52  #define AVE_RXDC0		0x308	/* RX Descriptor Ring0 Configuration */
53  #define AVE_IIRQC		0x34c	/* Interval IRQ Control */
54  
55  /* Packet Filter Register Group */
56  #define AVE_PKTF_BASE		0x800	/* PF Base Address */
57  #define AVE_PFMBYTE_BASE	0xd00	/* PF Mask Byte Base Address */
58  #define AVE_PFMBIT_BASE		0xe00	/* PF Mask Bit Base Address */
59  #define AVE_PFSEL_BASE		0xf00	/* PF Selector Base Address */
60  #define AVE_PFEN		0xffc	/* Packet Filter Enable */
61  #define AVE_PKTF(ent)		(AVE_PKTF_BASE + (ent) * 0x40)
62  #define AVE_PFMBYTE(ent)	(AVE_PFMBYTE_BASE + (ent) * 8)
63  #define AVE_PFMBIT(ent)		(AVE_PFMBIT_BASE + (ent) * 4)
64  #define AVE_PFSEL(ent)		(AVE_PFSEL_BASE + (ent) * 4)
65  
66  /* 64bit descriptor memory */
67  #define AVE_DESC_SIZE_64	12	/* Descriptor Size */
68  
69  #define AVE_TXDM_64		0x1000	/* Tx Descriptor Memory */
70  #define AVE_RXDM_64		0x1c00	/* Rx Descriptor Memory */
71  
72  #define AVE_TXDM_SIZE_64	0x0ba0	/* Tx Descriptor Memory Size 3KB */
73  #define AVE_RXDM_SIZE_64	0x6000	/* Rx Descriptor Memory Size 24KB */
74  
75  /* 32bit descriptor memory */
76  #define AVE_DESC_SIZE_32	8	/* Descriptor Size */
77  
78  #define AVE_TXDM_32		0x1000	/* Tx Descriptor Memory */
79  #define AVE_RXDM_32		0x1800	/* Rx Descriptor Memory */
80  
81  #define AVE_TXDM_SIZE_32	0x07c0	/* Tx Descriptor Memory Size 2KB */
82  #define AVE_RXDM_SIZE_32	0x4000	/* Rx Descriptor Memory Size 16KB */
83  
84  /* RMII Bridge Register Group */
85  #define AVE_RSTCTRL		0x8028	/* Reset control */
86  #define AVE_RSTCTRL_RMIIRST	BIT(16)
87  #define AVE_LINKSEL		0x8034	/* Link speed setting */
88  #define AVE_LINKSEL_100M	BIT(0)
89  
90  /* AVE_GRR */
91  #define AVE_GRR_RXFFR		BIT(5)	/* Reset RxFIFO */
92  #define AVE_GRR_PHYRST		BIT(4)	/* Reset external PHY */
93  #define AVE_GRR_GRST		BIT(0)	/* Reset all MAC */
94  
95  /* AVE_CFGR */
96  #define AVE_CFGR_FLE		BIT(31)	/* Filter Function */
97  #define AVE_CFGR_CHE		BIT(30)	/* Checksum Function */
98  #define AVE_CFGR_MII		BIT(27)	/* Func mode (1:MII/RMII, 0:RGMII) */
99  #define AVE_CFGR_IPFCEN		BIT(24)	/* IP fragment sum Enable */
100  
101  /* AVE_GISR (common with GIMR) */
102  #define AVE_GI_PHY		BIT(24)	/* PHY interrupt */
103  #define AVE_GI_TX		BIT(16)	/* Tx complete */
104  #define AVE_GI_RXERR		BIT(8)	/* Received frame exceeds max size */
105  #define AVE_GI_RXOVF		BIT(7)	/* Overflow at the RxFIFO */
106  #define AVE_GI_RXDROP		BIT(6)	/* Drop packet */
107  #define AVE_GI_RXIINT		BIT(5)	/* Interval interrupt */
108  
109  /* AVE_TXCR */
110  #define AVE_TXCR_FLOCTR		BIT(18)	/* Flow control */
111  #define AVE_TXCR_TXSPD_1G	BIT(17)
112  #define AVE_TXCR_TXSPD_100	BIT(16)
113  
114  /* AVE_RXCR */
115  #define AVE_RXCR_RXEN		BIT(30)	/* Rx enable */
116  #define AVE_RXCR_FDUPEN		BIT(22)	/* Interface mode */
117  #define AVE_RXCR_FLOCTR		BIT(21)	/* Flow control */
118  #define AVE_RXCR_AFEN		BIT(19)	/* MAC address filter */
119  #define AVE_RXCR_DRPEN		BIT(18)	/* Drop pause frame */
120  #define AVE_RXCR_MPSIZ_MASK	GENMASK(10, 0)
121  
122  /* AVE_MDIOCTR */
123  #define AVE_MDIOCTR_RREQ	BIT(3)	/* Read request */
124  #define AVE_MDIOCTR_WREQ	BIT(2)	/* Write request */
125  
126  /* AVE_MDIOSR */
127  #define AVE_MDIOSR_STS		BIT(0)	/* access status */
128  
129  /* AVE_DESCC */
130  #define AVE_DESCC_STATUS_MASK	GENMASK(31, 16)
131  #define AVE_DESCC_RD0		BIT(8)	/* Enable Rx descriptor Ring0 */
132  #define AVE_DESCC_RDSTP		BIT(4)	/* Pause Rx descriptor */
133  #define AVE_DESCC_TD		BIT(0)	/* Enable Tx descriptor */
134  
135  /* AVE_TXDC */
136  #define AVE_TXDC_SIZE		GENMASK(27, 16)	/* Size of Tx descriptor */
137  #define AVE_TXDC_ADDR		GENMASK(11, 0)	/* Start address */
138  #define AVE_TXDC_ADDR_START	0
139  
140  /* AVE_RXDC0 */
141  #define AVE_RXDC0_SIZE		GENMASK(30, 16)	/* Size of Rx descriptor */
142  #define AVE_RXDC0_ADDR		GENMASK(14, 0)	/* Start address */
143  #define AVE_RXDC0_ADDR_START	0
144  
145  /* AVE_IIRQC */
146  #define AVE_IIRQC_EN0		BIT(27)	/* Enable interval interrupt Ring0 */
147  #define AVE_IIRQC_BSCK		GENMASK(15, 0)	/* Interval count unit */
148  
149  /* Command status for descriptor */
150  #define AVE_STS_OWN		BIT(31)	/* Descriptor ownership */
151  #define AVE_STS_INTR		BIT(29)	/* Request for interrupt */
152  #define AVE_STS_OK		BIT(27)	/* Normal transmit */
153  /* TX */
154  #define AVE_STS_NOCSUM		BIT(28)	/* No use HW checksum */
155  #define AVE_STS_1ST		BIT(26)	/* Head of buffer chain */
156  #define AVE_STS_LAST		BIT(25)	/* Tail of buffer chain */
157  #define AVE_STS_OWC		BIT(21)	/* Out of window, late collision */
158  #define AVE_STS_EC		BIT(20)	/* Excess collision occurred */
159  #define AVE_STS_PKTLEN_TX_MASK	GENMASK(15, 0)
160  /* RX */
161  #define AVE_STS_CSSV		BIT(21)	/* Checksum check performed */
162  #define AVE_STS_CSER		BIT(20)	/* Checksum error detected */
163  #define AVE_STS_PKTLEN_RX_MASK	GENMASK(10, 0)
164  
165  /* Packet filter */
166  #define AVE_PFMBYTE_MASK0	(GENMASK(31, 8) | GENMASK(5, 0))
167  #define AVE_PFMBYTE_MASK1	GENMASK(25, 0)
168  #define AVE_PFMBIT_MASK		GENMASK(15, 0)
169  
170  #define AVE_PF_SIZE		17	/* Total number of packet filters */
171  #define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filters */
172  
173  #define AVE_PFNUM_FILTER	0	/* No.0 */
174  #define AVE_PFNUM_UNICAST	1	/* No.1 */
175  #define AVE_PFNUM_BROADCAST	2	/* No.2 */
176  #define AVE_PFNUM_MULTICAST	11	/* No.11-17 */
177  
178  /* NETIF Message control */
179  #define AVE_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV    |	\
180  				 NETIF_MSG_PROBE  |	\
181  				 NETIF_MSG_LINK   |	\
182  				 NETIF_MSG_TIMER  |	\
183  				 NETIF_MSG_IFDOWN |	\
184  				 NETIF_MSG_IFUP   |	\
185  				 NETIF_MSG_RX_ERR |	\
186  				 NETIF_MSG_TX_ERR)
187  
188  /* Parameter for descriptor */
189  #define AVE_NR_TXDESC		64	/* Tx descriptor */
190  #define AVE_NR_RXDESC		256	/* Rx descriptor */
191  
192  #define AVE_DESC_OFS_CMDSTS	0
193  #define AVE_DESC_OFS_ADDRL	4
194  #define AVE_DESC_OFS_ADDRU	8
195  
196  /* Parameter for ethernet frame */
197  #define AVE_MAX_ETHFRAME	1518
198  #define AVE_FRAME_HEADROOM	2
199  
200  /* Parameter for interrupt */
201  #define AVE_INTM_COUNT		20
202  #define AVE_FORCE_TXINTCNT	1
203  
204  /* SG */
205  #define SG_ETPINMODE		0x540
206  #define SG_ETPINMODE_EXTPHY	BIT(1)	/* for LD11 */
207  #define SG_ETPINMODE_RMII(ins)	BIT(ins)
208  
209  #define IS_DESC_64BIT(p)	((p)->data->is_desc_64bit)
210  
211  #define AVE_MAX_CLKS		4
212  #define AVE_MAX_RSTS		2
213  
214  enum desc_id {
215  	AVE_DESCID_RX,
216  	AVE_DESCID_TX,
217  };
218  
219  enum desc_state {
220  	AVE_DESC_RX_PERMIT,
221  	AVE_DESC_RX_SUSPEND,
222  	AVE_DESC_START,
223  	AVE_DESC_STOP,
224  };
225  
226  struct ave_desc {
227  	struct sk_buff	*skbs;
228  	dma_addr_t	skbs_dma;
229  	size_t		skbs_dmalen;
230  };
231  
232  struct ave_desc_info {
233  	u32	ndesc;		/* number of descriptors */
234  	u32	daddr;		/* start address of descriptor memory */
235  	u32	proc_idx;	/* index of processing packet */
236  	u32	done_idx;	/* index of processed packet */
237  	struct ave_desc *desc;	/* skb info related to the descriptors */
238  };
239  
240  struct ave_stats {
241  	struct	u64_stats_sync	syncp;
242  	u64	packets;
243  	u64	bytes;
244  	u64	errors;
245  	u64	dropped;
246  	u64	collisions;
247  	u64	fifo_errors;
248  };
249  
250  struct ave_private {
251  	void __iomem            *base;
252  	int                     irq;
253  	int			phy_id;
254  	unsigned int		desc_size;
255  	u32			msg_enable;
256  	int			nclks;
257  	struct clk		*clk[AVE_MAX_CLKS];
258  	int			nrsts;
259  	struct reset_control	*rst[AVE_MAX_RSTS];
260  	phy_interface_t		phy_mode;
261  	struct phy_device	*phydev;
262  	struct mii_bus		*mdio;
263  	struct regmap		*regmap;
264  	unsigned int		pinmode_mask;
265  	unsigned int		pinmode_val;
266  	u32			wolopts;
267  
268  	/* stats */
269  	struct ave_stats	stats_rx;
270  	struct ave_stats	stats_tx;
271  
272  	/* NAPI support */
273  	struct net_device	*ndev;
274  	struct napi_struct	napi_rx;
275  	struct napi_struct	napi_tx;
276  
277  	/* descriptor */
278  	struct ave_desc_info	rx;
279  	struct ave_desc_info	tx;
280  
281  	/* flow control */
282  	int pause_auto;
283  	int pause_rx;
284  	int pause_tx;
285  
286  	const struct ave_soc_data *data;
287  };
288  
289  struct ave_soc_data {
290  	bool	is_desc_64bit;
291  	const char	*clock_names[AVE_MAX_CLKS];
292  	const char	*reset_names[AVE_MAX_RSTS];
293  	int	(*get_pinmode)(struct ave_private *priv,
294  			       phy_interface_t phy_mode, u32 arg);
295  };
296  
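/* Descriptor access helpers
 * Descriptors live in the controller's internal memory and are accessed
 * 32 bits at a time via MMIO at (Tx/Rx base) + entry * desc_size + offset.
 */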
297  static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
298  			 int offset)
299  {
300  	struct ave_private *priv = netdev_priv(ndev);
301  	u32 addr;
302  
303  	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
304  		+ entry * priv->desc_size + offset;
305  
306  	return readl(priv->base + addr);
307  }
308  
309  static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
310  				int entry)
311  {
312  	return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
313  }
314  
315  static void ave_desc_write(struct net_device *ndev, enum desc_id id,
316  			   int entry, int offset, u32 val)
317  {
318  	struct ave_private *priv = netdev_priv(ndev);
319  	u32 addr;
320  
321  	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
322  		+ entry * priv->desc_size + offset;
323  
324  	writel(val, priv->base + addr);
325  }
326  
327  static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
328  				  int entry, u32 val)
329  {
330  	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
331  }
332  
333  static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
334  				int entry, dma_addr_t paddr)
335  {
336  	struct ave_private *priv = netdev_priv(ndev);
337  
338  	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
339  		       lower_32_bits(paddr));
340  	if (IS_DESC_64BIT(priv))
341  		ave_desc_write(ndev, id,
342  			       entry, AVE_DESC_OFS_ADDRU,
343  			       upper_32_bits(paddr));
344  }
345  
346  static u32 ave_irq_disable_all(struct net_device *ndev)
347  {
348  	struct ave_private *priv = netdev_priv(ndev);
349  	u32 ret;
350  
351  	ret = readl(priv->base + AVE_GIMR);
352  	writel(0, priv->base + AVE_GIMR);
353  
354  	return ret;
355  }
356  
357  static void ave_irq_restore(struct net_device *ndev, u32 val)
358  {
359  	struct ave_private *priv = netdev_priv(ndev);
360  
361  	writel(val, priv->base + AVE_GIMR);
362  }
363  
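/* Unmask the given interrupt sources and clear any status already pending
 * for them.
 */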
364  static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
365  {
366  	struct ave_private *priv = netdev_priv(ndev);
367  
368  	writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
369  	writel(bitflag, priv->base + AVE_GISR);
370  }
371  
372  static void ave_hw_write_macaddr(struct net_device *ndev,
373  				 const unsigned char *mac_addr,
374  				 int reg1, int reg2)
375  {
376  	struct ave_private *priv = netdev_priv(ndev);
377  
378  	writel(mac_addr[0] | mac_addr[1] << 8 |
379  	       mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
380  	writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
381  }
382  
383  static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
384  {
385  	struct ave_private *priv = netdev_priv(ndev);
386  	u32 major, minor, vr;
387  
388  	vr = readl(priv->base + AVE_VR);
389  	major = (vr & GENMASK(15, 8)) >> 8;
390  	minor = (vr & GENMASK(7, 0));
391  	snprintf(buf, len, "v%u.%u", major, minor);
392  }
393  
394  static void ave_ethtool_get_drvinfo(struct net_device *ndev,
395  				    struct ethtool_drvinfo *info)
396  {
397  	struct device *dev = ndev->dev.parent;
398  
399  	strscpy(info->driver, dev->driver->name, sizeof(info->driver));
400  	strscpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
401  	ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
402  }
403  
404  static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
405  {
406  	struct ave_private *priv = netdev_priv(ndev);
407  
408  	return priv->msg_enable;
409  }
410  
411  static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
412  {
413  	struct ave_private *priv = netdev_priv(ndev);
414  
415  	priv->msg_enable = val;
416  }
417  
418  static void ave_ethtool_get_wol(struct net_device *ndev,
419  				struct ethtool_wolinfo *wol)
420  {
421  	wol->supported = 0;
422  	wol->wolopts   = 0;
423  
424  	if (ndev->phydev)
425  		phy_ethtool_get_wol(ndev->phydev, wol);
426  }
427  
428  static int __ave_ethtool_set_wol(struct net_device *ndev,
429  				 struct ethtool_wolinfo *wol)
430  {
431  	if (!ndev->phydev ||
432  	    (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
433  		return -EOPNOTSUPP;
434  
435  	return phy_ethtool_set_wol(ndev->phydev, wol);
436  }
437  
438  static int ave_ethtool_set_wol(struct net_device *ndev,
439  			       struct ethtool_wolinfo *wol)
440  {
441  	int ret;
442  
443  	ret = __ave_ethtool_set_wol(ndev, wol);
444  	if (!ret)
445  		device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
446  
447  	return ret;
448  }
449  
450  static void ave_ethtool_get_pauseparam(struct net_device *ndev,
451  				       struct ethtool_pauseparam *pause)
452  {
453  	struct ave_private *priv = netdev_priv(ndev);
454  
455  	pause->autoneg  = priv->pause_auto;
456  	pause->rx_pause = priv->pause_rx;
457  	pause->tx_pause = priv->pause_tx;
458  }
459  
460  static int ave_ethtool_set_pauseparam(struct net_device *ndev,
461  				      struct ethtool_pauseparam *pause)
462  {
463  	struct ave_private *priv = netdev_priv(ndev);
464  	struct phy_device *phydev = ndev->phydev;
465  
466  	if (!phydev)
467  		return -EINVAL;
468  
469  	priv->pause_auto = pause->autoneg;
470  	priv->pause_rx   = pause->rx_pause;
471  	priv->pause_tx   = pause->tx_pause;
472  
473  	phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
474  
475  	return 0;
476  }
477  
478  static const struct ethtool_ops ave_ethtool_ops = {
479  	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
480  	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
481  	.get_drvinfo		= ave_ethtool_get_drvinfo,
482  	.nway_reset		= phy_ethtool_nway_reset,
483  	.get_link		= ethtool_op_get_link,
484  	.get_msglevel		= ave_ethtool_get_msglevel,
485  	.set_msglevel		= ave_ethtool_set_msglevel,
486  	.get_wol		= ave_ethtool_get_wol,
487  	.set_wol		= ave_ethtool_set_wol,
488  	.get_pauseparam         = ave_ethtool_get_pauseparam,
489  	.set_pauseparam         = ave_ethtool_set_pauseparam,
490  };
491  
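/* MDIO bus accessors: set the PHY/register address in AVE_MDIOAR, issue a
 * read or write request via AVE_MDIOCTR, then poll AVE_MDIOSR until the
 * access completes.
 */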
492  static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
493  {
494  	struct net_device *ndev = bus->priv;
495  	struct ave_private *priv;
496  	u32 mdioctl, mdiosr;
497  	int ret;
498  
499  	priv = netdev_priv(ndev);
500  
501  	/* write address */
502  	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
503  
504  	/* read request */
505  	mdioctl = readl(priv->base + AVE_MDIOCTR);
506  	writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
507  	       priv->base + AVE_MDIOCTR);
508  
509  	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
510  				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
511  	if (ret) {
512  		netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
513  			   phyid, regnum);
514  		return ret;
515  	}
516  
517  	return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
518  }
519  
520  static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
521  			     u16 val)
522  {
523  	struct net_device *ndev = bus->priv;
524  	struct ave_private *priv;
525  	u32 mdioctl, mdiosr;
526  	int ret;
527  
528  	priv = netdev_priv(ndev);
529  
530  	/* write address */
531  	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
532  
533  	/* write data */
534  	writel(val, priv->base + AVE_MDIOWDR);
535  
536  	/* write request */
537  	mdioctl = readl(priv->base + AVE_MDIOCTR);
538  	writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
539  	       priv->base + AVE_MDIOCTR);
540  
541  	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
542  				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
543  	if (ret)
544  		netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
545  			   phyid, regnum);
546  
547  	return ret;
548  }
549  
550  static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
551  		       void *ptr, size_t len, enum dma_data_direction dir,
552  		       dma_addr_t *paddr)
553  {
554  	dma_addr_t map_addr;
555  
556  	map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
557  	if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
558  		return -ENOMEM;
559  
560  	desc->skbs_dma = map_addr;
561  	desc->skbs_dmalen = len;
562  	*paddr = map_addr;
563  
564  	return 0;
565  }
566  
567  static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
568  			  enum dma_data_direction dir)
569  {
570  	if (!desc->skbs_dma)
571  		return;
572  
573  	dma_unmap_single(ndev->dev.parent,
574  			 desc->skbs_dma, desc->skbs_dmalen, dir);
575  	desc->skbs_dma = 0;
576  }
577  
578  /* Prepare Rx descriptor and memory */
579  static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
580  {
581  	struct ave_private *priv = netdev_priv(ndev);
582  	struct sk_buff *skb;
583  	dma_addr_t paddr;
584  	int ret;
585  
586  	skb = priv->rx.desc[entry].skbs;
587  	if (!skb) {
588  		skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
589  		if (!skb) {
590  			netdev_err(ndev, "can't allocate skb for Rx\n");
591  			return -ENOMEM;
592  		}
593  		skb->data += AVE_FRAME_HEADROOM;
594  		skb->tail += AVE_FRAME_HEADROOM;
595  	}
596  
597  	/* disable the descriptor via cmdsts while the buffer is prepared */
598  	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
599  			      AVE_STS_INTR | AVE_STS_OWN);
600  
601  	/* map Rx buffer
602  	 * Rx buffer set to the Rx descriptor has two restrictions:
603  	 * - Rx buffer address is 4 byte aligned.
604  	 * - Rx buffer begins with 2 byte headroom, and data will be put from
605  	 *   (buffer + 2).
606  	 * To satisfy this, specify the address to put back the buffer
607  	 * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
608  	 * by AVE_FRAME_HEADROOM.
609  	 */
610  	ret = ave_dma_map(ndev, &priv->rx.desc[entry],
611  			  skb->data - AVE_FRAME_HEADROOM,
612  			  AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
613  			  DMA_FROM_DEVICE, &paddr);
614  	if (ret) {
615  		netdev_err(ndev, "can't map skb for Rx\n");
616  		dev_kfree_skb_any(skb);
617  		return ret;
618  	}
619  	priv->rx.desc[entry].skbs = skb;
620  
621  	/* set buffer pointer */
622  	ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
623  
624  	/* enable the descriptor via cmdsts */
625  	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
626  			      AVE_STS_INTR | AVE_MAX_ETHFRAME);
627  
628  	return ret;
629  }
630  
631  /* Switch state of descriptor */
632  static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
633  {
634  	struct ave_private *priv = netdev_priv(ndev);
635  	int ret = 0;
636  	u32 val;
637  
638  	switch (state) {
639  	case AVE_DESC_START:
640  		writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
641  		break;
642  
643  	case AVE_DESC_STOP:
644  		writel(0, priv->base + AVE_DESCC);
645  		if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
646  				       150, 15000)) {
647  			netdev_err(ndev, "can't stop descriptor\n");
648  			ret = -EBUSY;
649  		}
650  		break;
651  
652  	case AVE_DESC_RX_SUSPEND:
653  		val = readl(priv->base + AVE_DESCC);
654  		val |= AVE_DESCC_RDSTP;
655  		val &= ~AVE_DESCC_STATUS_MASK;
656  		writel(val, priv->base + AVE_DESCC);
657  		if (readl_poll_timeout(priv->base + AVE_DESCC, val,
658  				       val & (AVE_DESCC_RDSTP << 16),
659  				       150, 150000)) {
660  			netdev_err(ndev, "can't suspend descriptor\n");
661  			ret = -EBUSY;
662  		}
663  		break;
664  
665  	case AVE_DESC_RX_PERMIT:
666  		val = readl(priv->base + AVE_DESCC);
667  		val &= ~AVE_DESCC_RDSTP;
668  		val &= ~AVE_DESCC_STATUS_MASK;
669  		writel(val, priv->base + AVE_DESCC);
670  		break;
671  
672  	default:
673  		ret = -EINVAL;
674  		break;
675  	}
676  
677  	return ret;
678  }
679  
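/* Reclaim Tx descriptors from done_idx up to proc_idx once the hardware has
 * released ownership, unmap and free the associated skbs, and update the Tx
 * statistics. Returns the number of freed buffers.
 */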
680  static int ave_tx_complete(struct net_device *ndev)
681  {
682  	struct ave_private *priv = netdev_priv(ndev);
683  	u32 proc_idx, done_idx, ndesc, cmdsts;
684  	unsigned int nr_freebuf = 0;
685  	unsigned int tx_packets = 0;
686  	unsigned int tx_bytes = 0;
687  
688  	proc_idx = priv->tx.proc_idx;
689  	done_idx = priv->tx.done_idx;
690  	ndesc    = priv->tx.ndesc;
691  
692  	/* free pre-stored skb from done_idx to proc_idx */
693  	while (proc_idx != done_idx) {
694  		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);
695  
696  		/* do nothing if owner is HW (==1 for Tx) */
697  		if (cmdsts & AVE_STS_OWN)
698  			break;
699  
700  		/* check Tx status and update statistics */
701  		if (cmdsts & AVE_STS_OK) {
702  			tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
703  			/* success */
704  			if (cmdsts & AVE_STS_LAST)
705  				tx_packets++;
706  		} else {
707  			/* error */
708  			if (cmdsts & AVE_STS_LAST) {
709  				priv->stats_tx.errors++;
710  				if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
711  					priv->stats_tx.collisions++;
712  			}
713  		}
714  
715  		/* release skb */
716  		if (priv->tx.desc[done_idx].skbs) {
717  			ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
718  				      DMA_TO_DEVICE);
719  			dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
720  			priv->tx.desc[done_idx].skbs = NULL;
721  			nr_freebuf++;
722  		}
723  		done_idx = (done_idx + 1) % ndesc;
724  	}
725  
726  	priv->tx.done_idx = done_idx;
727  
728  	/* update stats */
729  	u64_stats_update_begin(&priv->stats_tx.syncp);
730  	priv->stats_tx.packets += tx_packets;
731  	priv->stats_tx.bytes   += tx_bytes;
732  	u64_stats_update_end(&priv->stats_tx.syncp);
733  
734  	/* wake queue for freeing buffer */
735  	if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
736  		netif_wake_queue(ndev);
737  
738  	return nr_freebuf;
739  }
740  
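/* Receive up to @num packets from the Rx ring starting at proc_idx, hand
 * them to the network stack, then refill the consumed descriptors from
 * done_idx. Returns the number of packets received.
 */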
741  static int ave_rx_receive(struct net_device *ndev, int num)
742  {
743  	struct ave_private *priv = netdev_priv(ndev);
744  	unsigned int rx_packets = 0;
745  	unsigned int rx_bytes = 0;
746  	u32 proc_idx, done_idx;
747  	struct sk_buff *skb;
748  	unsigned int pktlen;
749  	int restpkt, npkts;
750  	u32 ndesc, cmdsts;
751  
752  	proc_idx = priv->rx.proc_idx;
753  	done_idx = priv->rx.done_idx;
754  	ndesc    = priv->rx.ndesc;
755  	restpkt  = ((proc_idx + ndesc - 1) - done_idx) % ndesc;
756  
757  	for (npkts = 0; npkts < num; npkts++) {
758  		/* we can't receive more packets, so refill descriptors quickly */
759  		if (--restpkt < 0)
760  			break;
761  
762  		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);
763  
764  		/* do nothing if owner is HW (==0 for Rx) */
765  		if (!(cmdsts & AVE_STS_OWN))
766  			break;
767  
768  		if (!(cmdsts & AVE_STS_OK)) {
769  			priv->stats_rx.errors++;
770  			proc_idx = (proc_idx + 1) % ndesc;
771  			continue;
772  		}
773  
774  		pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;
775  
776  		/* get skbuff for rx */
777  		skb = priv->rx.desc[proc_idx].skbs;
778  		priv->rx.desc[proc_idx].skbs = NULL;
779  
780  		ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);
781  
782  		skb->dev = ndev;
783  		skb_put(skb, pktlen);
784  		skb->protocol = eth_type_trans(skb, ndev);
785  
786  		if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
787  			skb->ip_summed = CHECKSUM_UNNECESSARY;
788  
789  		rx_packets++;
790  		rx_bytes += pktlen;
791  
792  		netif_receive_skb(skb);
793  
794  		proc_idx = (proc_idx + 1) % ndesc;
795  	}
796  
797  	priv->rx.proc_idx = proc_idx;
798  
799  	/* update stats */
800  	u64_stats_update_begin(&priv->stats_rx.syncp);
801  	priv->stats_rx.packets += rx_packets;
802  	priv->stats_rx.bytes   += rx_bytes;
803  	u64_stats_update_end(&priv->stats_rx.syncp);
804  
805  	/* refill the Rx buffers */
806  	while (proc_idx != done_idx) {
807  		if (ave_rxdesc_prepare(ndev, done_idx))
808  			break;
809  		done_idx = (done_idx + 1) % ndesc;
810  	}
811  
812  	priv->rx.done_idx = done_idx;
813  
814  	return npkts;
815  }
816  
817  static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
818  {
819  	struct ave_private *priv;
820  	struct net_device *ndev;
821  	int num;
822  
823  	priv = container_of(napi, struct ave_private, napi_rx);
824  	ndev = priv->ndev;
825  
826  	num = ave_rx_receive(ndev, budget);
827  	if (num < budget) {
828  		napi_complete_done(napi, num);
829  
830  		/* enable Rx interrupt when NAPI finishes */
831  		ave_irq_enable(ndev, AVE_GI_RXIINT);
832  	}
833  
834  	return num;
835  }
836  
837  static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
838  {
839  	struct ave_private *priv;
840  	struct net_device *ndev;
841  	int num;
842  
843  	priv = container_of(napi, struct ave_private, napi_tx);
844  	ndev = priv->ndev;
845  
846  	num = ave_tx_complete(ndev);
847  	napi_complete(napi);
848  
849  	/* enable Tx interrupt when NAPI finishes */
850  	ave_irq_enable(ndev, AVE_GI_TX);
851  
852  	return num;
853  }
854  
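/* Full controller reset: set the configuration register, assert the RMII
 * bridge, PHY and global resets, release them in order (PHY first, then the
 * MAC core), and finally mask all interrupts.
 */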
855  static void ave_global_reset(struct net_device *ndev)
856  {
857  	struct ave_private *priv = netdev_priv(ndev);
858  	u32 val;
859  
860  	/* set config register */
861  	val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
862  	if (!phy_interface_mode_is_rgmii(priv->phy_mode))
863  		val |= AVE_CFGR_MII;
864  	writel(val, priv->base + AVE_CFGR);
865  
866  	/* reset RMII register */
867  	val = readl(priv->base + AVE_RSTCTRL);
868  	val &= ~AVE_RSTCTRL_RMIIRST;
869  	writel(val, priv->base + AVE_RSTCTRL);
870  
871  	/* assert reset */
872  	writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
873  	msleep(20);
874  
875  	/* 1st, negate PHY reset only */
876  	writel(AVE_GRR_GRST, priv->base + AVE_GRR);
877  	msleep(40);
878  
879  	/* negate reset */
880  	writel(0, priv->base + AVE_GRR);
881  	msleep(40);
882  
883  	/* negate RMII register */
884  	val = readl(priv->base + AVE_RSTCTRL);
885  	val |= AVE_RSTCTRL_RMIIRST;
886  	writel(val, priv->base + AVE_RSTCTRL);
887  
888  	ave_irq_disable_all(ndev);
889  }
890  
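/* Recover from an RxFIFO overflow: disable MAC reception, suspend the Rx
 * descriptor ring, drain pending packets, pulse the RxFIFO reset, then
 * restore normal Rx operation.
 */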
891  static void ave_rxfifo_reset(struct net_device *ndev)
892  {
893  	struct ave_private *priv = netdev_priv(ndev);
894  	u32 rxcr_org;
895  
896  	/* save and disable MAC receive op */
897  	rxcr_org = readl(priv->base + AVE_RXCR);
898  	writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);
899  
900  	/* suspend Rx descriptor */
901  	ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);
902  
903  	/* drain all pending packets before the descriptor ring restarts */
904  	ave_rx_receive(ndev, priv->rx.ndesc);
905  
906  	/* assert reset */
907  	writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
908  	udelay(50);
909  
910  	/* negate reset */
911  	writel(0, priv->base + AVE_GRR);
912  	udelay(20);
913  
914  	/* negate interrupt status */
915  	writel(AVE_GI_RXOVF, priv->base + AVE_GISR);
916  
917  	/* permit descriptor */
918  	ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);
919  
920  	/* restore MAC receive op */
921  	writel(rxcr_org, priv->base + AVE_RXCR);
922  }
923  
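/* Top-level interrupt handler: mask all sources, acknowledge PHY and error
 * events directly, and schedule NAPI for Rx and Tx work, leaving those
 * sources masked until the NAPI handlers re-enable them.
 */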
924  static irqreturn_t ave_irq_handler(int irq, void *netdev)
925  {
926  	struct net_device *ndev = (struct net_device *)netdev;
927  	struct ave_private *priv = netdev_priv(ndev);
928  	u32 gimr_val, gisr_val;
929  
930  	gimr_val = ave_irq_disable_all(ndev);
931  
932  	/* get interrupt status */
933  	gisr_val = readl(priv->base + AVE_GISR);
934  
935  	/* PHY */
936  	if (gisr_val & AVE_GI_PHY)
937  		writel(AVE_GI_PHY, priv->base + AVE_GISR);
938  
939  	/* check for packets exceeding the frame buffer */
940  	if (gisr_val & AVE_GI_RXERR) {
941  		writel(AVE_GI_RXERR, priv->base + AVE_GISR);
942  		netdev_err(ndev, "received a packet exceeding the frame buffer\n");
943  	}
944  
945  	gisr_val &= gimr_val;
946  	if (!gisr_val)
947  		goto exit_isr;
948  
949  	/* RxFIFO overflow */
950  	if (gisr_val & AVE_GI_RXOVF) {
951  		priv->stats_rx.fifo_errors++;
952  		ave_rxfifo_reset(ndev);
953  		goto exit_isr;
954  	}
955  
956  	/* Rx drop */
957  	if (gisr_val & AVE_GI_RXDROP) {
958  		priv->stats_rx.dropped++;
959  		writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
960  	}
961  
962  	/* Rx interval */
963  	if (gisr_val & AVE_GI_RXIINT) {
964  		napi_schedule(&priv->napi_rx);
965  		/* keep the Rx interrupt masked until NAPI finishes */
966  		gimr_val &= ~AVE_GI_RXIINT;
967  	}
968  
969  	/* Tx completed */
970  	if (gisr_val & AVE_GI_TX) {
971  		napi_schedule(&priv->napi_tx);
972  		/* keep the Tx interrupt masked until NAPI finishes */
973  		gimr_val &= ~AVE_GI_TX;
974  	}
975  
976  exit_isr:
977  	ave_irq_restore(ndev, gimr_val);
978  
979  	return IRQ_HANDLED;
980  }
981  
982  static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
983  {
984  	struct ave_private *priv = netdev_priv(ndev);
985  	u32 val;
986  
987  	if (WARN_ON(entry > AVE_PF_SIZE))
988  		return -EINVAL;
989  
990  	val = readl(priv->base + AVE_PFEN);
991  	writel(val | BIT(entry), priv->base + AVE_PFEN);
992  
993  	return 0;
994  }
995  
996  static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
997  {
998  	struct ave_private *priv = netdev_priv(ndev);
999  	u32 val;
1000  
1001  	if (WARN_ON(entry > AVE_PF_SIZE))
1002  		return -EINVAL;
1003  
1004  	val = readl(priv->base + AVE_PFEN);
1005  	writel(val & ~BIT(entry), priv->base + AVE_PFEN);
1006  
1007  	return 0;
1008  }
1009  
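/* Program packet filter @entry to match the first @set_size bytes of
 * @mac_addr and steer matching frames to Rx ring 0.
 */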
1010  static int ave_pfsel_set_macaddr(struct net_device *ndev,
1011  				 unsigned int entry,
1012  				 const unsigned char *mac_addr,
1013  				 unsigned int set_size)
1014  {
1015  	struct ave_private *priv = netdev_priv(ndev);
1016  
1017  	if (WARN_ON(entry > AVE_PF_SIZE))
1018  		return -EINVAL;
1019  	if (WARN_ON(set_size > 6))
1020  		return -EINVAL;
1021  
1022  	ave_pfsel_stop(ndev, entry);
1023  
1024  	/* set MAC address for the filter */
1025  	ave_hw_write_macaddr(ndev, mac_addr,
1026  			     AVE_PKTF(entry), AVE_PKTF(entry) + 4);
1027  
1028  	/* set byte mask */
1029  	writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
1030  	       priv->base + AVE_PFMBYTE(entry));
1031  	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
1032  
1033  	/* set bit mask filter */
1034  	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
1035  
1036  	/* set selector to ring 0 */
1037  	writel(0, priv->base + AVE_PFSEL(entry));
1038  
1039  	/* restart filter */
1040  	ave_pfsel_start(ndev, entry);
1041  
1042  	return 0;
1043  }
1044  
1045  static void ave_pfsel_set_promisc(struct net_device *ndev,
1046  				  unsigned int entry, u32 rxring)
1047  {
1048  	struct ave_private *priv = netdev_priv(ndev);
1049  
1050  	if (WARN_ON(entry > AVE_PF_SIZE))
1051  		return;
1052  
1053  	ave_pfsel_stop(ndev, entry);
1054  
1055  	/* set byte mask */
1056  	writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
1057  	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
1058  
1059  	/* set bit mask filter */
1060  	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
1061  
1062  	/* set selector to rxring */
1063  	writel(rxring, priv->base + AVE_PFSEL(entry));
1064  
1065  	ave_pfsel_start(ndev, entry);
1066  }
1067  
1068  static void ave_pfsel_init(struct net_device *ndev)
1069  {
1070  	unsigned char bcast_mac[ETH_ALEN];
1071  	int i;
1072  
1073  	eth_broadcast_addr(bcast_mac);
1074  
1075  	for (i = 0; i < AVE_PF_SIZE; i++)
1076  		ave_pfsel_stop(ndev, i);
1077  
1078  	/* promiscuous entry, select ring 0 */
1079  	ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
1080  
1081  	/* unicast entry */
1082  	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1083  
1084  	/* broadcast entry */
1085  	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
1086  }
1087  
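/* adjust_link callback: program the MAC speed, duplex and flow control
 * settings to match what the PHY negotiated.
 */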
1088  static void ave_phy_adjust_link(struct net_device *ndev)
1089  {
1090  	struct ave_private *priv = netdev_priv(ndev);
1091  	struct phy_device *phydev = ndev->phydev;
1092  	u32 val, txcr, rxcr, rxcr_org;
1093  	u16 rmt_adv = 0, lcl_adv = 0;
1094  	u8 cap;
1095  
1096  	/* set RGMII speed */
1097  	val = readl(priv->base + AVE_TXCR);
1098  	val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);
1099  
1100  	if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
1101  		val |= AVE_TXCR_TXSPD_1G;
1102  	else if (phydev->speed == SPEED_100)
1103  		val |= AVE_TXCR_TXSPD_100;
1104  
1105  	writel(val, priv->base + AVE_TXCR);
1106  
1107  	/* set RMII speed (100M/10M only) */
1108  	if (!phy_interface_is_rgmii(phydev)) {
1109  		val = readl(priv->base + AVE_LINKSEL);
1110  		if (phydev->speed == SPEED_10)
1111  			val &= ~AVE_LINKSEL_100M;
1112  		else
1113  			val |= AVE_LINKSEL_100M;
1114  		writel(val, priv->base + AVE_LINKSEL);
1115  	}
1116  
1117  	/* check current RXCR/TXCR */
1118  	rxcr = readl(priv->base + AVE_RXCR);
1119  	txcr = readl(priv->base + AVE_TXCR);
1120  	rxcr_org = rxcr;
1121  
1122  	if (phydev->duplex) {
1123  		rxcr |= AVE_RXCR_FDUPEN;
1124  
1125  		if (phydev->pause)
1126  			rmt_adv |= LPA_PAUSE_CAP;
1127  		if (phydev->asym_pause)
1128  			rmt_adv |= LPA_PAUSE_ASYM;
1129  
1130  		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
1131  		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
1132  		if (cap & FLOW_CTRL_TX)
1133  			txcr |= AVE_TXCR_FLOCTR;
1134  		else
1135  			txcr &= ~AVE_TXCR_FLOCTR;
1136  		if (cap & FLOW_CTRL_RX)
1137  			rxcr |= AVE_RXCR_FLOCTR;
1138  		else
1139  			rxcr &= ~AVE_RXCR_FLOCTR;
1140  	} else {
1141  		rxcr &= ~AVE_RXCR_FDUPEN;
1142  		rxcr &= ~AVE_RXCR_FLOCTR;
1143  		txcr &= ~AVE_TXCR_FLOCTR;
1144  	}
1145  
1146  	if (rxcr_org != rxcr) {
1147  		/* disable Rx mac */
1148  		writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
1149  		/* change and enable TX/Rx mac */
1150  		writel(txcr, priv->base + AVE_TXCR);
1151  		writel(rxcr, priv->base + AVE_RXCR);
1152  	}
1153  
1154  	phy_print_status(phydev);
1155  }
1156  
1157  static void ave_macaddr_init(struct net_device *ndev)
1158  {
1159  	ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
1160  
1161  	/* pfsel unicast entry */
1162  	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
1163  }
1164  
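/* ndo_init: enable clocks and resets, apply the pin mode, reset the
 * controller, register the MDIO bus and connect to the PHY.
 */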
1165  static int ave_init(struct net_device *ndev)
1166  {
1167  	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1168  	struct ave_private *priv = netdev_priv(ndev);
1169  	struct device *dev = ndev->dev.parent;
1170  	struct device_node *np = dev->of_node;
1171  	struct device_node *mdio_np;
1172  	struct phy_device *phydev;
1173  	int nc, nr, ret;
1174  
1175  	/* enable clocks here because the hardware is accessed before ndo_open */
1176  	for (nc = 0; nc < priv->nclks; nc++) {
1177  		ret = clk_prepare_enable(priv->clk[nc]);
1178  		if (ret) {
1179  			dev_err(dev, "can't enable clock\n");
1180  			goto out_clk_disable;
1181  		}
1182  	}
1183  
1184  	for (nr = 0; nr < priv->nrsts; nr++) {
1185  		ret = reset_control_deassert(priv->rst[nr]);
1186  		if (ret) {
1187  			dev_err(dev, "can't deassert reset\n");
1188  			goto out_reset_assert;
1189  		}
1190  	}
1191  
1192  	ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
1193  				 priv->pinmode_mask, priv->pinmode_val);
1194  	if (ret)
1195  		goto out_reset_assert;
1196  
1197  	ave_global_reset(ndev);
1198  
1199  	mdio_np = of_get_child_by_name(np, "mdio");
1200  	if (!mdio_np) {
1201  		dev_err(dev, "mdio node not found\n");
1202  		ret = -EINVAL;
1203  		goto out_reset_assert;
1204  	}
1205  	ret = of_mdiobus_register(priv->mdio, mdio_np);
1206  	of_node_put(mdio_np);
1207  	if (ret) {
1208  		dev_err(dev, "failed to register mdiobus\n");
1209  		goto out_reset_assert;
1210  	}
1211  
1212  	phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
1213  	if (!phydev) {
1214  		dev_err(dev, "could not attach to PHY\n");
1215  		ret = -ENODEV;
1216  		goto out_mdio_unregister;
1217  	}
1218  
1219  	priv->phydev = phydev;
1220  
1221  	ave_ethtool_get_wol(ndev, &wol);
1222  	device_set_wakeup_capable(&ndev->dev, !!wol.supported);
1223  
1224  	/* set wol initial state disabled */
1225  	wol.wolopts = 0;
1226  	__ave_ethtool_set_wol(ndev, &wol);
1227  
1228  	if (!phy_interface_is_rgmii(phydev))
1229  		phy_set_max_speed(phydev, SPEED_100);
1230  
1231  	phy_support_asym_pause(phydev);
1232  
1233  	phydev->mac_managed_pm = true;
1234  
1235  	phy_attached_info(phydev);
1236  
1237  	return 0;
1238  
1239  out_mdio_unregister:
1240  	mdiobus_unregister(priv->mdio);
1241  out_reset_assert:
1242  	while (--nr >= 0)
1243  		reset_control_assert(priv->rst[nr]);
1244  out_clk_disable:
1245  	while (--nc >= 0)
1246  		clk_disable_unprepare(priv->clk[nc]);
1247  
1248  	return ret;
1249  }
1250  
1251  static void ave_uninit(struct net_device *ndev)
1252  {
1253  	struct ave_private *priv = netdev_priv(ndev);
1254  	int i;
1255  
1256  	phy_disconnect(priv->phydev);
1257  	mdiobus_unregister(priv->mdio);
1258  
1259  	/* disable clocks; the hardware is not accessed after ndo_stop */
1260  	for (i = 0; i < priv->nrsts; i++)
1261  		reset_control_assert(priv->rst[i]);
1262  	for (i = 0; i < priv->nclks; i++)
1263  		clk_disable_unprepare(priv->clk[i]);
1264  }
1265  
1266  static int ave_open(struct net_device *ndev)
1267  {
1268  	struct ave_private *priv = netdev_priv(ndev);
1269  	int entry;
1270  	int ret;
1271  	u32 val;
1272  
1273  	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
1274  			  ndev);
1275  	if (ret)
1276  		return ret;
1277  
1278  	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
1279  				GFP_KERNEL);
1280  	if (!priv->tx.desc) {
1281  		ret = -ENOMEM;
1282  		goto out_free_irq;
1283  	}
1284  
1285  	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
1286  				GFP_KERNEL);
1287  	if (!priv->rx.desc) {
1288  		kfree(priv->tx.desc);
1289  		ret = -ENOMEM;
1290  		goto out_free_irq;
1291  	}
1292  
1293  	/* initialize Tx work and descriptor */
1294  	priv->tx.proc_idx = 0;
1295  	priv->tx.done_idx = 0;
1296  	for (entry = 0; entry < priv->tx.ndesc; entry++) {
1297  		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
1298  		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
1299  	}
1300  	writel(AVE_TXDC_ADDR_START |
1301  	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
1302  	       priv->base + AVE_TXDC);
1303  
1304  	/* initialize Rx work and descriptor */
1305  	priv->rx.proc_idx = 0;
1306  	priv->rx.done_idx = 0;
1307  	for (entry = 0; entry < priv->rx.ndesc; entry++) {
1308  		if (ave_rxdesc_prepare(ndev, entry))
1309  			break;
1310  	}
1311  	writel(AVE_RXDC0_ADDR_START |
1312  	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
1313  	       priv->base + AVE_RXDC0);
1314  
1315  	ave_desc_switch(ndev, AVE_DESC_START);
1316  
1317  	ave_pfsel_init(ndev);
1318  	ave_macaddr_init(ndev);
1319  
1320  	/* set Rx configuration */
1321  	/* full duplex, enable pause drop, enable flow control */
1322  	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
1323  		AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
1324  	writel(val, priv->base + AVE_RXCR);
1325  
1326  	/* set Tx configuration */
1327  	/* enable flow control, disable loopback */
1328  	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);
1329  
1330  	/* keep the interval unit (BSCK), then enable the timer and set the interval count */
1331  	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
1332  	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
1333  	writel(val, priv->base + AVE_IIRQC);
1334  
1335  	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
1336  	ave_irq_restore(ndev, val);
1337  
1338  	napi_enable(&priv->napi_rx);
1339  	napi_enable(&priv->napi_tx);
1340  
1341  	phy_start(ndev->phydev);
1342  	phy_start_aneg(ndev->phydev);
1343  	netif_start_queue(ndev);
1344  
1345  	return 0;
1346  
1347  out_free_irq:
1348  	disable_irq(priv->irq);
1349  	free_irq(priv->irq, ndev);
1350  
1351  	return ret;
1352  }
1353  
1354  static int ave_stop(struct net_device *ndev)
1355  {
1356  	struct ave_private *priv = netdev_priv(ndev);
1357  	int entry;
1358  
1359  	ave_irq_disable_all(ndev);
1360  	disable_irq(priv->irq);
1361  	free_irq(priv->irq, ndev);
1362  
1363  	netif_tx_disable(ndev);
1364  	phy_stop(ndev->phydev);
1365  	napi_disable(&priv->napi_tx);
1366  	napi_disable(&priv->napi_rx);
1367  
1368  	ave_desc_switch(ndev, AVE_DESC_STOP);
1369  
1370  	/* free Tx buffer */
1371  	for (entry = 0; entry < priv->tx.ndesc; entry++) {
1372  		if (!priv->tx.desc[entry].skbs)
1373  			continue;
1374  
1375  		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
1376  		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
1377  		priv->tx.desc[entry].skbs = NULL;
1378  	}
1379  	priv->tx.proc_idx = 0;
1380  	priv->tx.done_idx = 0;
1381  
1382  	/* free Rx buffer */
1383  	for (entry = 0; entry < priv->rx.ndesc; entry++) {
1384  		if (!priv->rx.desc[entry].skbs)
1385  			continue;
1386  
1387  		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
1388  		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
1389  		priv->rx.desc[entry].skbs = NULL;
1390  	}
1391  	priv->rx.proc_idx = 0;
1392  	priv->rx.done_idx = 0;
1393  
1394  	kfree(priv->tx.desc);
1395  	kfree(priv->rx.desc);
1396  
1397  	return 0;
1398  }
1399  
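/* Queue a single frame: map the skb, fill the next Tx descriptor and hand
 * ownership to the hardware. The queue is stopped when no free descriptor
 * remains.
 */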
1400  static netdev_tx_t ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1401  {
1402  	struct ave_private *priv = netdev_priv(ndev);
1403  	u32 proc_idx, done_idx, ndesc, cmdsts;
1404  	int ret, freepkt;
1405  	dma_addr_t paddr;
1406  
1407  	proc_idx = priv->tx.proc_idx;
1408  	done_idx = priv->tx.done_idx;
1409  	ndesc = priv->tx.ndesc;
1410  	freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;
1411  
1412  	/* stop the queue when there are not enough free entries */
1413  	if (unlikely(freepkt < 1)) {
1414  		netif_stop_queue(ndev);
1415  		return NETDEV_TX_BUSY;
1416  	}
1417  
1418  	/* add padding for short packet */
1419  	if (skb_put_padto(skb, ETH_ZLEN)) {
1420  		priv->stats_tx.dropped++;
1421  		return NETDEV_TX_OK;
1422  	}
1423  
1424  	/* map Tx buffer
1425  	 * Tx buffer set to the Tx descriptor doesn't have any restriction.
1426  	 */
1427  	ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
1428  			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
1429  	if (ret) {
1430  		dev_kfree_skb_any(skb);
1431  		priv->stats_tx.dropped++;
1432  		return NETDEV_TX_OK;
1433  	}
1434  
1435  	priv->tx.desc[proc_idx].skbs = skb;
1436  
1437  	ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);
1438  
1439  	cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
1440  		(skb->len & AVE_STS_PKTLEN_TX_MASK);
1441  
1442  	/* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
1443  	if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
1444  		cmdsts |= AVE_STS_INTR;
1445  
1446  	/* disable checksum calculation when the skb needs no checksum offload */
1447  	if (skb->ip_summed == CHECKSUM_NONE ||
1448  	    skb->ip_summed == CHECKSUM_UNNECESSARY)
1449  		cmdsts |= AVE_STS_NOCSUM;
1450  
1451  	ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);
1452  
1453  	priv->tx.proc_idx = (proc_idx + 1) % ndesc;
1454  
1455  	return NETDEV_TX_OK;
1456  }
1457  
1458  static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1459  {
1460  	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
1461  }
1462  
1463  static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
1464  static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
1465  
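/* Update Rx filtering: disable the MAC address filter for promiscuous mode
 * or an empty multicast list, program one filter entry per multicast
 * address, and fall back to catch-all IPv4/IPv6 multicast entries when the
 * list exceeds AVE_PF_MULTICAST_SIZE or IFF_ALLMULTI is set.
 */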
1466  static void ave_set_rx_mode(struct net_device *ndev)
1467  {
1468  	struct ave_private *priv = netdev_priv(ndev);
1469  	struct netdev_hw_addr *hw_adr;
1470  	int count, mc_cnt;
1471  	u32 val;
1472  
1473  	/* MAC addr filter enable for promiscuous mode */
1474  	mc_cnt = netdev_mc_count(ndev);
1475  	val = readl(priv->base + AVE_RXCR);
1476  	if (ndev->flags & IFF_PROMISC || !mc_cnt)
1477  		val &= ~AVE_RXCR_AFEN;
1478  	else
1479  		val |= AVE_RXCR_AFEN;
1480  	writel(val, priv->base + AVE_RXCR);
1481  
1482  	/* set all multicast address */
1483  	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
1484  		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
1485  				      v4multi_macadr, 1);
1486  		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
1487  				      v6multi_macadr, 1);
1488  	} else {
1489  		/* stop all multicast filter */
1490  		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
1491  			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);
1492  
1493  		/* set multicast addresses */
1494  		count = 0;
1495  		netdev_for_each_mc_addr(hw_adr, ndev) {
1496  			if (count == mc_cnt)
1497  				break;
1498  			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
1499  					      hw_adr->addr, 6);
1500  			count++;
1501  		}
1502  	}
1503  }
1504  
1505  static void ave_get_stats64(struct net_device *ndev,
1506  			    struct rtnl_link_stats64 *stats)
1507  {
1508  	struct ave_private *priv = netdev_priv(ndev);
1509  	unsigned int start;
1510  
1511  	do {
1512  		start = u64_stats_fetch_begin(&priv->stats_rx.syncp);
1513  		stats->rx_packets = priv->stats_rx.packets;
1514  		stats->rx_bytes	  = priv->stats_rx.bytes;
1515  	} while (u64_stats_fetch_retry(&priv->stats_rx.syncp, start));
1516  
1517  	do {
1518  		start = u64_stats_fetch_begin(&priv->stats_tx.syncp);
1519  		stats->tx_packets = priv->stats_tx.packets;
1520  		stats->tx_bytes	  = priv->stats_tx.bytes;
1521  	} while (u64_stats_fetch_retry(&priv->stats_tx.syncp, start));
1522  
1523  	stats->rx_errors      = priv->stats_rx.errors;
1524  	stats->tx_errors      = priv->stats_tx.errors;
1525  	stats->rx_dropped     = priv->stats_rx.dropped;
1526  	stats->tx_dropped     = priv->stats_tx.dropped;
1527  	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
1528  	stats->collisions     = priv->stats_tx.collisions;
1529  }
1530  
1531  static int ave_set_mac_address(struct net_device *ndev, void *p)
1532  {
1533  	int ret = eth_mac_addr(ndev, p);
1534  
1535  	if (ret)
1536  		return ret;
1537  
1538  	ave_macaddr_init(ndev);
1539  
1540  	return 0;
1541  }
1542  
1543  static const struct net_device_ops ave_netdev_ops = {
1544  	.ndo_init		= ave_init,
1545  	.ndo_uninit		= ave_uninit,
1546  	.ndo_open		= ave_open,
1547  	.ndo_stop		= ave_stop,
1548  	.ndo_start_xmit		= ave_start_xmit,
1549  	.ndo_eth_ioctl		= ave_ioctl,
1550  	.ndo_set_rx_mode	= ave_set_rx_mode,
1551  	.ndo_get_stats64	= ave_get_stats64,
1552  	.ndo_set_mac_address	= ave_set_mac_address,
1553  };
1554  
1555  static int ave_probe(struct platform_device *pdev)
1556  {
1557  	const struct ave_soc_data *data;
1558  	struct device *dev = &pdev->dev;
1559  	char buf[ETHTOOL_FWVERS_LEN];
1560  	struct of_phandle_args args;
1561  	phy_interface_t phy_mode;
1562  	struct ave_private *priv;
1563  	struct net_device *ndev;
1564  	struct device_node *np;
1565  	void __iomem *base;
1566  	const char *name;
1567  	int i, irq, ret;
1568  	u64 dma_mask;
1569  	u32 ave_id;
1570  
1571  	data = of_device_get_match_data(dev);
1572  	if (WARN_ON(!data))
1573  		return -EINVAL;
1574  
1575  	np = dev->of_node;
1576  	ret = of_get_phy_mode(np, &phy_mode);
1577  	if (ret) {
1578  		dev_err(dev, "phy-mode not found\n");
1579  		return ret;
1580  	}
1581  
1582  	irq = platform_get_irq(pdev, 0);
1583  	if (irq < 0)
1584  		return irq;
1585  
1586  	base = devm_platform_ioremap_resource(pdev, 0);
1587  	if (IS_ERR(base))
1588  		return PTR_ERR(base);
1589  
1590  	ndev = devm_alloc_etherdev(dev, sizeof(struct ave_private));
1591  	if (!ndev) {
1592  		dev_err(dev, "can't allocate ethernet device\n");
1593  		return -ENOMEM;
1594  	}
1595  
1596  	ndev->netdev_ops = &ave_netdev_ops;
1597  	ndev->ethtool_ops = &ave_ethtool_ops;
1598  	SET_NETDEV_DEV(ndev, dev);
1599  
1600  	ndev->features    |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
1601  	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
1602  
1603  	ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
1604  
1605  	ret = of_get_ethdev_address(np, ndev);
1606  	if (ret) {
1607  		/* if the mac address is invalid, use random mac address */
1608  		eth_hw_addr_random(ndev);
1609  		dev_warn(dev, "Using random MAC address: %pM\n",
1610  			 ndev->dev_addr);
1611  	}
1612  
1613  	priv = netdev_priv(ndev);
1614  	priv->base = base;
1615  	priv->irq = irq;
1616  	priv->ndev = ndev;
1617  	priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
1618  	priv->phy_mode = phy_mode;
1619  	priv->data = data;
1620  
1621  	if (IS_DESC_64BIT(priv)) {
1622  		priv->desc_size = AVE_DESC_SIZE_64;
1623  		priv->tx.daddr  = AVE_TXDM_64;
1624  		priv->rx.daddr  = AVE_RXDM_64;
1625  		dma_mask = DMA_BIT_MASK(64);
1626  	} else {
1627  		priv->desc_size = AVE_DESC_SIZE_32;
1628  		priv->tx.daddr  = AVE_TXDM_32;
1629  		priv->rx.daddr  = AVE_RXDM_32;
1630  		dma_mask = DMA_BIT_MASK(32);
1631  	}
1632  	ret = dma_set_mask(dev, dma_mask);
1633  	if (ret)
1634  		return ret;
1635  
1636  	priv->tx.ndesc = AVE_NR_TXDESC;
1637  	priv->rx.ndesc = AVE_NR_RXDESC;
1638  
1639  	u64_stats_init(&priv->stats_tx.syncp);
1640  	u64_stats_init(&priv->stats_rx.syncp);
1641  
1642  	for (i = 0; i < AVE_MAX_CLKS; i++) {
1643  		name = priv->data->clock_names[i];
1644  		if (!name)
1645  			break;
1646  		priv->clk[i] = devm_clk_get(dev, name);
1647  		if (IS_ERR(priv->clk[i]))
1648  			return PTR_ERR(priv->clk[i]);
1649  		priv->nclks++;
1650  	}
1651  
1652  	for (i = 0; i < AVE_MAX_RSTS; i++) {
1653  		name = priv->data->reset_names[i];
1654  		if (!name)
1655  			break;
1656  		priv->rst[i] = devm_reset_control_get_shared(dev, name);
1657  		if (IS_ERR(priv->rst[i]))
1658  			return PTR_ERR(priv->rst[i]);
1659  		priv->nrsts++;
1660  	}
1661  
1662  	ret = of_parse_phandle_with_fixed_args(np,
1663  					       "socionext,syscon-phy-mode",
1664  					       1, 0, &args);
1665  	if (ret) {
1666  		dev_err(dev, "can't get syscon-phy-mode property\n");
1667  		return ret;
1668  	}
1669  	priv->regmap = syscon_node_to_regmap(args.np);
1670  	of_node_put(args.np);
1671  	if (IS_ERR(priv->regmap)) {
1672  		dev_err(dev, "can't map syscon-phy-mode\n");
1673  		return PTR_ERR(priv->regmap);
1674  	}
1675  	ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
1676  	if (ret) {
1677  		dev_err(dev, "invalid phy-mode setting\n");
1678  		return ret;
1679  	}
1680  
1681  	priv->mdio = devm_mdiobus_alloc(dev);
1682  	if (!priv->mdio)
1683  		return -ENOMEM;
1684  	priv->mdio->priv = ndev;
1685  	priv->mdio->parent = dev;
1686  	priv->mdio->read = ave_mdiobus_read;
1687  	priv->mdio->write = ave_mdiobus_write;
1688  	priv->mdio->name = "uniphier-mdio";
1689  	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
1690  		 pdev->name, pdev->id);
1691  
1692  	/* Register as a NAPI supported driver */
1693  	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx);
1694  	netif_napi_add_tx(ndev, &priv->napi_tx, ave_napi_poll_tx);
1695  
1696  	platform_set_drvdata(pdev, ndev);
1697  
1698  	ret = register_netdev(ndev);
1699  	if (ret) {
1700  		dev_err(dev, "failed to register netdevice\n");
1701  		goto out_del_napi;
1702  	}
1703  
1704  	/* get ID and version */
1705  	ave_id = readl(priv->base + AVE_IDR);
1706  	ave_hw_read_version(ndev, buf, sizeof(buf));
1707  
1708  	dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
1709  		 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
1710  		 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
1711  		 buf, priv->irq, phy_modes(phy_mode));
1712  
1713  	return 0;
1714  
1715  out_del_napi:
1716  	netif_napi_del(&priv->napi_rx);
1717  	netif_napi_del(&priv->napi_tx);
1718  
1719  	return ret;
1720  }
1721  
1722  static int ave_remove(struct platform_device *pdev)
1723  {
1724  	struct net_device *ndev = platform_get_drvdata(pdev);
1725  	struct ave_private *priv = netdev_priv(ndev);
1726  
1727  	unregister_netdev(ndev);
1728  	netif_napi_del(&priv->napi_rx);
1729  	netif_napi_del(&priv->napi_tx);
1730  
1731  	return 0;
1732  }
1733  
1734  #ifdef CONFIG_PM_SLEEP
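/* System sleep: stop the interface on suspend and preserve the
 * Wake-on-LAN settings so they can be restored on resume.
 */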
static int ave_suspend(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	if (netif_running(ndev)) {
		ret = ave_stop(ndev);
		netif_device_detach(ndev);
	}

	ave_ethtool_get_wol(ndev, &wol);
	priv->wolopts = wol.wolopts;

	return ret;
}

static int ave_resume(struct device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct net_device *ndev = dev_get_drvdata(dev);
	struct ave_private *priv = netdev_priv(ndev);
	int ret = 0;

	ave_global_reset(ndev);

	ret = phy_init_hw(ndev->phydev);
	if (ret)
		return ret;

	ave_ethtool_get_wol(ndev, &wol);
	wol.wolopts = priv->wolopts;
	__ave_ethtool_set_wol(ndev, &wol);

	if (netif_running(ndev)) {
		ret = ave_open(ndev);
		netif_device_attach(ndev);
	}

	return ret;
}

static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume);
#define AVE_PM_OPS	(&ave_pm_ops)
#else
#define AVE_PM_OPS	NULL
#endif

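/* Per-SoC helpers that validate the phy-mode and compute the SG_ETPINMODE
 * mask/value pair for the given port argument.
 */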
static int ave_pro4_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	if (arg > 0)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_RMII(0);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		priv->pinmode_val = SG_ETPINMODE_RMII(0);
		break;
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		priv->pinmode_val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ave_ld11_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	if (arg > 0)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_INTERNAL:
		priv->pinmode_val = 0;
		break;
	case PHY_INTERFACE_MODE_RMII:
		priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ave_ld20_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	if (arg > 0)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_RMII(0);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		priv->pinmode_val = SG_ETPINMODE_RMII(0);
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		priv->pinmode_val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ave_pxs3_get_pinmode(struct ave_private *priv,
				phy_interface_t phy_mode, u32 arg)
{
	if (arg > 1)
		return -EINVAL;

	priv->pinmode_mask = SG_ETPINMODE_RMII(arg);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		priv->pinmode_val = SG_ETPINMODE_RMII(arg);
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		priv->pinmode_val = 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

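/* Per-SoC configuration: descriptor width, required clock and reset line
 * names, and the pin-mode helper.
 */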
static const struct ave_soc_data ave_pro4_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"gio", "ether", "ether-gb", "ether-phy",
	},
	.reset_names = {
		"gio", "ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_pxs2_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pro4_get_pinmode,
};

static const struct ave_soc_data ave_ld11_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld11_get_pinmode,
};

static const struct ave_soc_data ave_ld20_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_ld20_get_pinmode,
};

static const struct ave_soc_data ave_pxs3_data = {
	.is_desc_64bit = false,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};

static const struct ave_soc_data ave_nx1_data = {
	.is_desc_64bit = true,
	.clock_names = {
		"ether",
	},
	.reset_names = {
		"ether",
	},
	.get_pinmode = ave_pxs3_get_pinmode,
};

static const struct of_device_id of_ave_match[] = {
	{
		.compatible = "socionext,uniphier-pro4-ave4",
		.data = &ave_pro4_data,
	},
	{
		.compatible = "socionext,uniphier-pxs2-ave4",
		.data = &ave_pxs2_data,
	},
	{
		.compatible = "socionext,uniphier-ld11-ave4",
		.data = &ave_ld11_data,
	},
	{
		.compatible = "socionext,uniphier-ld20-ave4",
		.data = &ave_ld20_data,
	},
	{
		.compatible = "socionext,uniphier-pxs3-ave4",
		.data = &ave_pxs3_data,
	},
	{
		.compatible = "socionext,uniphier-nx1-ave4",
		.data = &ave_nx1_data,
	},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);

static struct platform_driver ave_driver = {
	.probe  = ave_probe,
	.remove = ave_remove,
	.driver	= {
		.name = "ave",
		.pm   = AVE_PM_OPS,
		.of_match_table	= of_ave_match,
	},
};
module_platform_driver(ave_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
MODULE_LICENSE("GPL v2");