1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * drivers/net/ethernet/nxp/lpc_eth.c
4 *
5 * Author: Kevin Wells <kevin.wells@nxp.com>
6 *
7 * Copyright (C) 2010 NXP Semiconductors
8 * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
9 */
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/clk.h>
14 #include <linux/crc32.h>
15 #include <linux/etherdevice.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/of_net.h>
19 #include <linux/phy.h>
20 #include <linux/platform_device.h>
21 #include <linux/spinlock.h>
22 #include <linux/soc/nxp/lpc32xx-misc.h>
23
24 #define MODNAME "lpc-eth"
25 #define DRV_VERSION "1.00"
26
27 #define ENET_MAXF_SIZE 1536
28 #define ENET_RX_DESC 48
29 #define ENET_TX_DESC 16
30
31 #define NAPI_WEIGHT 16
32
33 /*
34 * Ethernet MAC controller Register offsets
35 */
36 #define LPC_ENET_MAC1(x) (x + 0x000)
37 #define LPC_ENET_MAC2(x) (x + 0x004)
38 #define LPC_ENET_IPGT(x) (x + 0x008)
39 #define LPC_ENET_IPGR(x) (x + 0x00C)
40 #define LPC_ENET_CLRT(x) (x + 0x010)
41 #define LPC_ENET_MAXF(x) (x + 0x014)
42 #define LPC_ENET_SUPP(x) (x + 0x018)
43 #define LPC_ENET_TEST(x) (x + 0x01C)
44 #define LPC_ENET_MCFG(x) (x + 0x020)
45 #define LPC_ENET_MCMD(x) (x + 0x024)
46 #define LPC_ENET_MADR(x) (x + 0x028)
47 #define LPC_ENET_MWTD(x) (x + 0x02C)
48 #define LPC_ENET_MRDD(x) (x + 0x030)
49 #define LPC_ENET_MIND(x) (x + 0x034)
50 #define LPC_ENET_SA0(x) (x + 0x040)
51 #define LPC_ENET_SA1(x) (x + 0x044)
52 #define LPC_ENET_SA2(x) (x + 0x048)
53 #define LPC_ENET_COMMAND(x) (x + 0x100)
54 #define LPC_ENET_STATUS(x) (x + 0x104)
55 #define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
56 #define LPC_ENET_RXSTATUS(x) (x + 0x10C)
57 #define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
58 #define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
59 #define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
60 #define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
61 #define LPC_ENET_TXSTATUS(x) (x + 0x120)
62 #define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
63 #define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
64 #define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
65 #define LPC_ENET_TSV0(x) (x + 0x158)
66 #define LPC_ENET_TSV1(x) (x + 0x15C)
67 #define LPC_ENET_RSV(x) (x + 0x160)
68 #define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
69 #define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
70 #define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
71 #define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
72 #define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
73 #define LPC_ENET_HASHFILTERL(x) (x + 0x210)
74 #define LPC_ENET_HASHFILTERH(x) (x + 0x214)
75 #define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
76 #define LPC_ENET_INTENABLE(x) (x + 0xFE4)
77 #define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
78 #define LPC_ENET_INTSET(x) (x + 0xFEC)
79 #define LPC_ENET_POWERDOWN(x) (x + 0xFF4)
80
81 /*
82 * mac1 register definitions
83 */
84 #define LPC_MAC1_RECV_ENABLE (1 << 0)
85 #define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
86 #define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
87 #define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
88 #define LPC_MAC1_LOOPBACK (1 << 4)
89 #define LPC_MAC1_RESET_TX (1 << 8)
90 #define LPC_MAC1_RESET_MCS_TX (1 << 9)
91 #define LPC_MAC1_RESET_RX (1 << 10)
92 #define LPC_MAC1_RESET_MCS_RX (1 << 11)
93 #define LPC_MAC1_SIMULATION_RESET (1 << 14)
94 #define LPC_MAC1_SOFT_RESET (1 << 15)
95
96 /*
97 * mac2 register definitions
98 */
99 #define LPC_MAC2_FULL_DUPLEX (1 << 0)
100 #define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
101 #define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
102 #define LPC_MAC2_DELAYED_CRC (1 << 3)
103 #define LPC_MAC2_CRC_ENABLE (1 << 4)
104 #define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
105 #define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
106 #define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
107 #define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
108 #define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
109 #define LPC_MAC2_NO_BACKOFF (1 << 12)
110 #define LPC_MAC2_BACK_PRESSURE (1 << 13)
111 #define LPC_MAC2_EXCESS_DEFER (1 << 14)
112
113 /*
114 * ipgt register definitions
115 */
116 #define LPC_IPGT_LOAD(n) ((n) & 0x7F)
117
118 /*
119 * ipgr register definitions
120 */
121 #define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
122 #define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)
123
124 /*
125 * clrt register definitions
126 */
127 #define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
128 #define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)
129
130 /*
131 * maxf register definitions
132 */
133 #define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)
134
135 /*
136 * supp register definitions
137 */
138 #define LPC_SUPP_SPEED (1 << 8)
139 #define LPC_SUPP_RESET_RMII (1 << 11)
140
141 /*
142 * test register definitions
143 */
144 #define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
145 #define LPC_TEST_PAUSE (1 << 1)
146 #define LPC_TEST_BACKPRESSURE (1 << 2)
147
148 /*
149 * mcfg register definitions
150 */
151 #define LPC_MCFG_SCAN_INCREMENT (1 << 0)
152 #define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
153 #define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
154 #define LPC_MCFG_CLOCK_HOST_DIV_4 0
155 #define LPC_MCFG_CLOCK_HOST_DIV_6 2
156 #define LPC_MCFG_CLOCK_HOST_DIV_8 3
157 #define LPC_MCFG_CLOCK_HOST_DIV_10 4
158 #define LPC_MCFG_CLOCK_HOST_DIV_14 5
159 #define LPC_MCFG_CLOCK_HOST_DIV_20 6
160 #define LPC_MCFG_CLOCK_HOST_DIV_28 7
161 #define LPC_MCFG_RESET_MII_MGMT (1 << 15)
162
163 /*
164 * mcmd register definitions
165 */
166 #define LPC_MCMD_READ (1 << 0)
167 #define LPC_MCMD_SCAN (1 << 1)
168
169 /*
170 * madr register definitions
171 */
172 #define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
173 #define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)
174
175 /*
176 * mwtd register definitions
177 */
178 #define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)
179
180 /*
181 * mrdd register definitions
182 */
183 #define LPC_MRDD_READ_MASK 0xFFFF
184
185 /*
186 * mind register definitions
187 */
188 #define LPC_MIND_BUSY (1 << 0)
189 #define LPC_MIND_SCANNING (1 << 1)
190 #define LPC_MIND_NOT_VALID (1 << 2)
191 #define LPC_MIND_MII_LINK_FAIL (1 << 3)
192
193 /*
194 * command register definitions
195 */
196 #define LPC_COMMAND_RXENABLE (1 << 0)
197 #define LPC_COMMAND_TXENABLE (1 << 1)
198 #define LPC_COMMAND_REG_RESET (1 << 3)
199 #define LPC_COMMAND_TXRESET (1 << 4)
200 #define LPC_COMMAND_RXRESET (1 << 5)
201 #define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
202 #define LPC_COMMAND_PASSRXFILTER (1 << 7)
203 #define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
204 #define LPC_COMMAND_RMII (1 << 9)
205 #define LPC_COMMAND_FULLDUPLEX (1 << 10)
206
207 /*
208 * status register definitions
209 */
210 #define LPC_STATUS_RXACTIVE (1 << 0)
211 #define LPC_STATUS_TXACTIVE (1 << 1)
212
213 /*
214 * tsv0 register definitions
215 */
216 #define LPC_TSV0_CRC_ERROR (1 << 0)
217 #define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
218 #define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
219 #define LPC_TSV0_DONE (1 << 3)
220 #define LPC_TSV0_MULTICAST (1 << 4)
221 #define LPC_TSV0_BROADCAST (1 << 5)
222 #define LPC_TSV0_PACKET_DEFER (1 << 6)
223 #define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
224 #define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
225 #define LPC_TSV0_LATE_COLLISION (1 << 9)
226 #define LPC_TSV0_GIANT (1 << 10)
227 #define LPC_TSV0_UNDERRUN (1 << 11)
228 #define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
229 #define LPC_TSV0_CONTROL_FRAME (1 << 28)
230 #define LPC_TSV0_PAUSE (1 << 29)
231 #define LPC_TSV0_BACKPRESSURE (1 << 30)
232 #define LPC_TSV0_VLAN (1 << 31)
233
234 /*
235 * tsv1 register definitions
236 */
237 #define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
238 #define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)
239
240 /*
241 * rsv register definitions
242 */
243 #define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
244 #define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
245 #define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
246 #define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
247 #define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
248 #define LPC_RSV_CRC_ERROR (1 << 20)
249 #define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
250 #define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
251 #define LPC_RSV_RECEIVE_OK (1 << 23)
252 #define LPC_RSV_MULTICAST (1 << 24)
253 #define LPC_RSV_BROADCAST (1 << 25)
254 #define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
255 #define LPC_RSV_CONTROL_FRAME (1 << 27)
256 #define LPC_RSV_PAUSE (1 << 28)
257 #define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
258 #define LPC_RSV_VLAN (1 << 30)
259
260 /*
261 * flowcontrolcounter register definitions
262 */
263 #define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
264 #define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)
265
266 /*
267 * flowcontrolstatus register definitions
268 */
269 #define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
270
271 /*
272 * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
273 * register definitions
274 */
275 #define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
276 #define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
277 #define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
278 #define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
279 #define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
280 #define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
281
282 /*
283 * rxfilterctrl register definitions
284 */
285 #define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
286 #define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
287
288 /*
289 * rxfilterwolstatus/rxfilterwolclear register definitions
290 */
291 #define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
292 #define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)
293
294 /*
295 * intstatus, intenable, intclear, and Intset shared register
296 * definitions
297 */
298 #define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
299 #define LPC_MACINT_RXERRORONINT (1 << 1)
300 #define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
301 #define LPC_MACINT_RXDONEINTEN (1 << 3)
302 #define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
303 #define LPC_MACINT_TXERRORINTEN (1 << 5)
304 #define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
305 #define LPC_MACINT_TXDONEINTEN (1 << 7)
306 #define LPC_MACINT_SOFTINTEN (1 << 12)
307 #define LPC_MACINT_WAKEUPINTEN (1 << 13)
308
309 /*
310 * powerdown register definitions
311 */
312 #define LPC_POWERDOWN_MACAHB (1 << 31)
313
lpc_phy_interface_mode(struct device * dev)314 static phy_interface_t lpc_phy_interface_mode(struct device *dev)
315 {
316 if (dev && dev->of_node) {
317 const char *mode = of_get_property(dev->of_node,
318 "phy-mode", NULL);
319 if (mode && !strcmp(mode, "mii"))
320 return PHY_INTERFACE_MODE_MII;
321 }
322 return PHY_INTERFACE_MODE_RMII;
323 }
324
use_iram_for_net(struct device * dev)325 static bool use_iram_for_net(struct device *dev)
326 {
327 if (dev && dev->of_node)
328 return of_property_read_bool(dev->of_node, "use-iram");
329 return false;
330 }
331
332 /* Receive Status information word */
333 #define RXSTATUS_SIZE 0x000007FF
334 #define RXSTATUS_CONTROL (1 << 18)
335 #define RXSTATUS_VLAN (1 << 19)
336 #define RXSTATUS_FILTER (1 << 20)
337 #define RXSTATUS_MULTICAST (1 << 21)
338 #define RXSTATUS_BROADCAST (1 << 22)
339 #define RXSTATUS_CRC (1 << 23)
340 #define RXSTATUS_SYMBOL (1 << 24)
341 #define RXSTATUS_LENGTH (1 << 25)
342 #define RXSTATUS_RANGE (1 << 26)
343 #define RXSTATUS_ALIGN (1 << 27)
344 #define RXSTATUS_OVERRUN (1 << 28)
345 #define RXSTATUS_NODESC (1 << 29)
346 #define RXSTATUS_LAST (1 << 30)
347 #define RXSTATUS_ERROR (1 << 31)
348
349 #define RXSTATUS_STATUS_ERROR \
350 (RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
351 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)
352
353 /* Receive Descriptor control word */
354 #define RXDESC_CONTROL_SIZE 0x000007FF
355 #define RXDESC_CONTROL_INT (1 << 31)
356
357 /* Transmit Status information word */
358 #define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
359 #define TXSTATUS_DEFER (1 << 25)
360 #define TXSTATUS_EXCESSDEFER (1 << 26)
361 #define TXSTATUS_EXCESSCOLL (1 << 27)
362 #define TXSTATUS_LATECOLL (1 << 28)
363 #define TXSTATUS_UNDERRUN (1 << 29)
364 #define TXSTATUS_NODESC (1 << 30)
365 #define TXSTATUS_ERROR (1 << 31)
366
367 /* Transmit Descriptor control word */
368 #define TXDESC_CONTROL_SIZE 0x000007FF
369 #define TXDESC_CONTROL_OVERRIDE (1 << 26)
370 #define TXDESC_CONTROL_HUGE (1 << 27)
371 #define TXDESC_CONTROL_PAD (1 << 28)
372 #define TXDESC_CONTROL_CRC (1 << 29)
373 #define TXDESC_CONTROL_LAST (1 << 30)
374 #define TXDESC_CONTROL_INT (1 << 31)
375
/*
 * Structure of a TX/RX descriptor and of an RX status entry, as laid
 * out in the shared DMA region and consumed by the MAC's DMA engine.
 */
struct txrx_desc_t {
	__le32 packet;		/* physical address of the packet buffer */
	__le32 control;		/* size (length - 1) and control flags */
};
struct rx_status_t {
	__le32 statusinfo;	/* RXSTATUS_* flags and received size */
	__le32 statushashcrc;	/* hash CRCs for filtering (unused here) */
};
387
/*
 * Device driver data structure
 */
struct netdata_local {
	struct platform_device	*pdev;		/* owning platform device */
	struct net_device	*ndev;		/* associated net device */
	spinlock_t		lock;		/* protects register/ring state */
	void __iomem		*net_base;	/* mapped MAC register base */
	u32			msg_enable;	/* netif message level bitmask */
	unsigned int		skblen[ENET_TX_DESC]; /* queued TX frame lengths */
	unsigned int		last_tx_idx;	/* next TX status index to reap */
	unsigned int		num_used_tx_buffs; /* in-flight TX descriptors */
	struct mii_bus		*mii_bus;	/* MDIO bus for the PHY */
	struct clk		*clk;		/* MAC peripheral clock */
	dma_addr_t		dma_buff_base_p; /* DMA region, bus address */
	void			*dma_buff_base_v; /* DMA region, virtual address */
	size_t			dma_buff_size;	/* DMA region size in bytes */
	struct txrx_desc_t	*tx_desc_v;	/* TX descriptor array */
	u32			*tx_stat_v;	/* TX status word array */
	void			*tx_buff_v;	/* TX packet buffers */
	struct txrx_desc_t	*rx_desc_v;	/* RX descriptor array */
	struct rx_status_t	*rx_stat_v;	/* RX status pair array */
	void			*rx_buff_v;	/* RX packet buffers */
	int			link;		/* last seen PHY link state */
	int			speed;		/* current link speed */
	int			duplex;		/* current duplex (-1 = unknown) */
	struct napi_struct	napi;		/* NAPI polling context */
};
416
417 /*
418 * MAC support functions
419 */
/* Program the station (MAC) address into the SA0..SA2 registers.
 * SA2 holds octets 0/1, SA1 octets 2/3 and SA0 octets 4/5, each
 * register taking two octets in little-endian order.
 */
static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
{
	writel(mac[0] | ((u32)mac[1] << 8), LPC_ENET_SA2(pldat->net_base));
	writel(mac[2] | ((u32)mac[3] << 8), LPC_ENET_SA1(pldat->net_base));
	writel(mac[4] | ((u32)mac[5] << 8), LPC_ENET_SA0(pldat->net_base));

	netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
}
434
/* Read the station (MAC) address back from the SA0..SA2 registers,
 * reversing the packing done by __lpc_set_mac().
 */
static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
{
	u32 val;

	val = readl(LPC_ENET_SA2(pldat->net_base));
	mac[0] = val & 0xFF;
	mac[1] = (val >> 8) & 0xFF;

	val = readl(LPC_ENET_SA1(pldat->net_base));
	mac[2] = val & 0xFF;
	mac[3] = (val >> 8) & 0xFF;

	val = readl(LPC_ENET_SA0(pldat->net_base));
	mac[4] = val & 0xFF;
	mac[5] = (val >> 8) & 0xFF;
}
450
/* Apply the cached link parameters (duplex and speed) to the MAC.
 * Both MAC2 and COMMAND carry a duplex bit and must stay in agreement;
 * the inter-packet gap is adjusted to match the duplex mode.
 */
static void __lpc_params_setup(struct netdata_local *pldat)
{
	u32 tmp;

	if (pldat->duplex == DUPLEX_FULL) {
		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
		tmp |= LPC_MAC2_FULL_DUPLEX;
		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
		tmp |= LPC_COMMAND_FULLDUPLEX;
		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
		/* 0x15 gap for full duplex — presumably the manual's
		 * recommended value; TODO confirm against LPC32xx UM */
		writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
	} else {
		tmp = readl(LPC_ENET_MAC2(pldat->net_base));
		tmp &= ~LPC_MAC2_FULL_DUPLEX;
		writel(tmp, LPC_ENET_MAC2(pldat->net_base));
		tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
		tmp &= ~LPC_COMMAND_FULLDUPLEX;
		writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
		/* Shorter 0x12 gap for half duplex */
		writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
	}

	/* SUPP speed bit selects 100Mbps operation; cleared for 10Mbps */
	if (pldat->speed == SPEED_100)
		writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
	else
		writel(0, LPC_ENET_SUPP(pldat->net_base));
}
478
/* Assert every MAC-level and datapath reset bit at once.
 * NOTE(review): no completion poll follows — the reset bits are
 * assumed to be self-clearing in hardware; confirm against the manual.
 */
static void __lpc_eth_reset(struct netdata_local *pldat)
{
	/* Reset all MAC logic */
	writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
		LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
		LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
	writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
		LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
}
488
/* Reset the MII management block and select the slowest MDIO clock.
 * Always succeeds; the int return matches the mii_bus reset callback.
 */
static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
{
	/* Reset MII management hardware */
	writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));

	/* Setup MII clock to slowest rate with a /28 divider */
	writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
	       LPC_ENET_MCFG(pldat->net_base));

	return 0;
}
500
__va_to_pa(void * addr,struct netdata_local * pldat)501 static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
502 {
503 phys_addr_t phaddr;
504
505 phaddr = addr - pldat->dma_buff_base_v;
506 phaddr += pldat->dma_buff_base_p;
507
508 return phaddr;
509 }
510
/* Enable the RX-done and TX-done interrupts; every other MAC interrupt
 * source remains masked.
 */
static void lpc_eth_enable_int(void __iomem *regbase)
{
	writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
	       LPC_ENET_INTENABLE(regbase));
}
516
/* Mask all MAC interrupt sources (used while NAPI polling is active). */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
521
/* Setup TX/RX descriptors, status arrays and packet buffers.
 * The single pre-allocated DMA region is carved into consecutive,
 * 16-byte-aligned sections in this order:
 *   TX descriptors | TX status words | TX buffers |
 *   RX descriptors | RX status pairs | RX buffers
 * Each descriptor is then pointed at its buffer and the ring base
 * addresses and sizes are programmed into the controller.
 */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers in hardware */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers in hardware */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		/* Size field holds the buffer length minus one; interrupt
		 * requested on reception into this descriptor */
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
595
__lpc_eth_init(struct netdata_local * pldat)596 static void __lpc_eth_init(struct netdata_local *pldat)
597 {
598 u32 tmp;
599
600 /* Disable controller and reset */
601 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
602 tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
603 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
604 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
605 tmp &= ~LPC_MAC1_RECV_ENABLE;
606 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
607
608 /* Initial MAC setup */
609 writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
610 writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
611 LPC_ENET_MAC2(pldat->net_base));
612 writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
613
614 /* Collision window, gap */
615 writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
616 LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
617 LPC_ENET_CLRT(pldat->net_base));
618 writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
619
620 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
621 writel(LPC_COMMAND_PASSRUNTFRAME,
622 LPC_ENET_COMMAND(pldat->net_base));
623 else {
624 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
625 LPC_ENET_COMMAND(pldat->net_base));
626 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
627 }
628
629 __lpc_params_setup(pldat);
630
631 /* Setup TX and RX descriptors */
632 __lpc_txrx_desc_setup(pldat);
633
634 /* Setup packet filtering */
635 writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
636 LPC_ENET_RXFILTER_CTRL(pldat->net_base));
637
638 /* Get the next TX buffer output index */
639 pldat->num_used_tx_buffs = 0;
640 pldat->last_tx_idx =
641 readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
642
643 /* Clear and enable interrupts */
644 writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
645 smp_wmb();
646 lpc_eth_enable_int(pldat->net_base);
647
648 /* Enable controller */
649 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
650 tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
651 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
652 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
653 tmp |= LPC_MAC1_RECV_ENABLE;
654 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
655 }
656
/* Stop the controller: full MAC reset, then clear MAC1/MAC2 so both
 * receive and transmit stay disabled.  (Despite the old comment, the
 * PHY itself is not powered down here — that is handled by phylib.)
 */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet controller */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
664
665 /*
666 * MAC<--->PHY support functions
667 */
/* MII bus read callback: read register @phyreg of PHY @phy_id via the
 * MAC's MII management interface.  Returns the 16-bit register value,
 * or -EIO if the management block stays busy for more than 100ms.
 */
static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	int lps;

	/* Select PHY address/register and trigger a read cycle */
	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));

	/* Wait for unbusy status */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	lps = readl(LPC_ENET_MRDD(pldat->net_base));
	/* Clear the read command to end the cycle */
	writel(0, LPC_ENET_MCMD(pldat->net_base));

	return lps;
}
689
/* MII bus write callback: write @phydata to register @phyreg of PHY
 * @phy_id.  Returns 0 on success or -EIO if the management block stays
 * busy for more than 100ms.
 */
static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
			  u16 phydata)
{
	struct netdata_local *pldat = bus->priv;
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	/* Select PHY address/register; writing MWTD starts the cycle */
	writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
	writel(phydata, LPC_ENET_MWTD(pldat->net_base));

	/* Wait for completion */
	while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}
708
lpc_mdio_reset(struct mii_bus * bus)709 static int lpc_mdio_reset(struct mii_bus *bus)
710 {
711 return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
712 }
713
/* phylib link-change callback: cache the PHY's new link/speed/duplex
 * state under the driver lock and, if anything changed, reprogram the
 * MAC's duplex/speed registers.
 * NOTE(review): __lpc_params_setup() runs after the lock is dropped.
 */
static void lpc_handle_link_change(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	unsigned long flags;

	bool status_change = false;

	spin_lock_irqsave(&pldat->lock, flags);

	if (phydev->link) {
		if ((pldat->speed != phydev->speed) ||
		    (pldat->duplex != phydev->duplex)) {
			pldat->speed = phydev->speed;
			pldat->duplex = phydev->duplex;
			status_change = true;
		}
	}

	if (phydev->link != pldat->link) {
		if (!phydev->link) {
			/* Link lost: invalidate the cached parameters */
			pldat->speed = 0;
			pldat->duplex = -1;
		}
		pldat->link = phydev->link;

		status_change = true;
	}

	spin_unlock_irqrestore(&pldat->lock, flags);

	if (status_change)
		__lpc_params_setup(pldat);
}
748
/* Find the first PHY on the MDIO bus and attach it to @ndev using the
 * DT-selected MII/RMII mode, limiting advertised speeds to 100Mbit.
 * Returns 0 on success or a negative errno.
 */
static int lpc_mii_probe(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct phy_device *phydev = phy_find_first(pldat->mii_bus);

	if (!phydev) {
		netdev_err(ndev, "no PHY found\n");
		return -ENODEV;
	}

	/* Attach to the PHY */
	if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
		netdev_info(ndev, "using MII interface\n");
	else
		netdev_info(ndev, "using RMII interface\n");
	phydev = phy_connect(ndev, phydev_name(phydev),
			     &lpc_handle_link_change,
			     lpc_phy_interface_mode(&pldat->pdev->dev));

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* The MAC supports 10/100 only */
	phy_set_max_speed(phydev, SPEED_100);

	/* Start with unknown link state; lpc_handle_link_change() fills in */
	pldat->link = 0;
	pldat->speed = 0;
	pldat->duplex = -1;

	phy_attached_info(phydev);

	return 0;
}
783
lpc_mii_init(struct netdata_local * pldat)784 static int lpc_mii_init(struct netdata_local *pldat)
785 {
786 int err = -ENXIO;
787
788 pldat->mii_bus = mdiobus_alloc();
789 if (!pldat->mii_bus) {
790 err = -ENOMEM;
791 goto err_out;
792 }
793
794 /* Setup MII mode */
795 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
796 writel(LPC_COMMAND_PASSRUNTFRAME,
797 LPC_ENET_COMMAND(pldat->net_base));
798 else {
799 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
800 LPC_ENET_COMMAND(pldat->net_base));
801 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
802 }
803
804 pldat->mii_bus->name = "lpc_mii_bus";
805 pldat->mii_bus->read = &lpc_mdio_read;
806 pldat->mii_bus->write = &lpc_mdio_write;
807 pldat->mii_bus->reset = &lpc_mdio_reset;
808 snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
809 pldat->pdev->name, pldat->pdev->id);
810 pldat->mii_bus->priv = pldat;
811 pldat->mii_bus->parent = &pldat->pdev->dev;
812
813 platform_set_drvdata(pldat->pdev, pldat->mii_bus);
814
815 if (mdiobus_register(pldat->mii_bus))
816 goto err_out_unregister_bus;
817
818 if (lpc_mii_probe(pldat->ndev) != 0)
819 goto err_out_unregister_bus;
820
821 return 0;
822
823 err_out_unregister_bus:
824 mdiobus_unregister(pldat->mii_bus);
825 mdiobus_free(pldat->mii_bus);
826 err_out:
827 return err;
828 }
829
/* Reap completed TX descriptors: walk from last_tx_idx up to the
 * hardware's consume index, accumulate statistics and error counters,
 * and wake the queue once at least half the ring is free.
 * Called from NAPI poll with the TX queue lock held.
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		unsigned int skblen = pldat->skblen[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skblen;
		}

		/* Re-read: hardware may have retired more descriptors
		 * while we were processing this one */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
885
/* Receive up to @budget frames: walk the RX ring from the consume
 * index toward the hardware's produce index, copy each good frame into
 * a fresh skb and pass it up the stack.  Returns the number of
 * descriptors processed (frames plus errored entries).
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
	       readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		/* Status size field holds received length minus one */
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error?  A pure length-range flag is tolerated
		 * and not treated as an error. */
		ethst = prxstat->statusinfo;
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;
			/* Check statuses */
			if (si & RXSTATUS_OVERRUN) {
				/* Overrun error */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				ndev->stats.rx_dropped++;
			} else {
				/* Copy packet from buffer */
				skb_put_data(skb,
					     pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
					     len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index; writing it back returns the
		 * descriptor to the hardware */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
955
/* NAPI poll: reap TX completions under the TX queue lock, then receive
 * up to @budget frames.  MAC interrupts are re-enabled only when the
 * budget was not exhausted (i.e. the ring is drained).
 */
static int lpc_eth_poll(struct napi_struct *napi, int budget)
{
	struct netdata_local *pldat = container_of(napi,
			struct netdata_local, napi);
	struct net_device *ndev = pldat->ndev;
	int rx_done = 0;
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);

	__netif_tx_lock(txq, smp_processor_id());
	__lpc_handle_xmit(ndev);
	__netif_tx_unlock(txq);
	rx_done = __lpc_handle_recv(ndev, budget);

	if (rx_done < budget) {
		napi_complete_done(napi, rx_done);
		lpc_eth_enable_int(pldat->net_base);
	}

	return rx_done;
}
976
/* MAC interrupt handler: acknowledge all pending sources, mask further
 * interrupts and hand processing off to NAPI.
 */
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	/* Mask interrupts until the NAPI poll re-enables them */
	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
997
/* ndo_stop: quiesce NAPI and the TX queue, stop the PHY, reset the
 * MAC under the driver lock, and gate the peripheral clock.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	/* Clear MAC1/MAC2 so RX and TX stay disabled */
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	clk_disable_unprepare(pldat->clk);

	return 0;
}
1023
/* ndo_start_xmit: copy the frame into the next free DMA buffer and
 * advance the TX produce index so the hardware transmits it.  The skb
 * is freed immediately since the data has been copied.  Stops the
 * queue when the ring is (almost) full.
 */
static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		   buffers */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	/* Descriptor size field holds the frame length minus one */
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skblen[txidx] = len;
	pldat->num_used_tx_buffs++;

	/* Start transmit */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	/* Data was copied above, so the skb is no longer needed */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1076
lpc_set_mac_address(struct net_device * ndev,void * p)1077 static int lpc_set_mac_address(struct net_device *ndev, void *p)
1078 {
1079 struct sockaddr *addr = p;
1080 struct netdata_local *pldat = netdev_priv(ndev);
1081 unsigned long flags;
1082
1083 if (!is_valid_ether_addr(addr->sa_data))
1084 return -EADDRNOTAVAIL;
1085 memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
1086
1087 spin_lock_irqsave(&pldat->lock, flags);
1088
1089 /* Set station address */
1090 __lpc_set_mac(pldat, ndev->dev_addr);
1091
1092 spin_unlock_irqrestore(&pldat->lock, flags);
1093
1094 return 0;
1095 }
1096
/*
 * ndo_set_rx_mode: program the RX filter control register
 * (broadcast/perfect/promisc/allmulti acceptance) and the 64-bit
 * multicast hash table from ndev->mc.
 */
static void lpc_eth_set_multicast_list(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct netdev_hw_addr_list *mcptr = &ndev->mc;
	struct netdev_hw_addr *ha;
	u32 tmp32, hash_val, hashlo, hashhi;
	unsigned long flags;

	spin_lock_irqsave(&pldat->lock, flags);

	/* Set station address */
	__lpc_set_mac(pldat, ndev->dev_addr);

	tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;

	if (ndev->flags & IFF_PROMISC)
		tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
			LPC_RXFLTRW_ACCEPTUMULTICAST;
	if (ndev->flags & IFF_ALLMULTI)
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;

	/* Enable hash filtering only when there are multicast entries */
	if (netdev_hw_addr_list_count(mcptr))
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;

	writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));


	/* Set initial hash table */
	hashlo = 0x0;
	hashhi = 0x0;

	/* 64 bits : multicast address in hash table */
	netdev_hw_addr_list_for_each(ha, mcptr) {
		/* Six CRC bits (28..23) select one of the 64 hash bits */
		hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;

		if (hash_val >= 32)
			hashhi |= 1 << (hash_val - 32);
		else
			hashlo |= 1 << hash_val;
	}

	writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
	writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));

	spin_unlock_irqrestore(&pldat->lock, flags);
}
1143
lpc_eth_ioctl(struct net_device * ndev,struct ifreq * req,int cmd)1144 static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1145 {
1146 struct phy_device *phydev = ndev->phydev;
1147
1148 if (!netif_running(ndev))
1149 return -EINVAL;
1150
1151 if (!phydev)
1152 return -ENODEV;
1153
1154 return phy_mii_ioctl(phydev, req, cmd);
1155 }
1156
/*
 * ndo_open: ungate the MAC clock, resume the PHY, reset and
 * reinitialize the controller, then start the PHY, the TX queue
 * and NAPI.
 */
static int lpc_eth_open(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	int ret;

	if (netif_msg_ifup(pldat))
		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);

	ret = clk_prepare_enable(pldat->clk);
	if (ret)
		return ret;

	/* Suspended PHY makes LPC ethernet core block, so resume now */
	phy_resume(ndev->phydev);

	/* Reset and initialize */
	__lpc_eth_reset(pldat);
	__lpc_eth_init(pldat);

	/* schedule a link state check */
	phy_start(ndev->phydev);
	netif_start_queue(ndev);
	napi_enable(&pldat->napi);

	return 0;
}
1183
1184 /*
1185 * Ethtool ops
1186 */
/* ethtool .get_drvinfo: report driver name, version and bus info */
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
	struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
1195
lpc_eth_ethtool_getmsglevel(struct net_device * ndev)1196 static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
1197 {
1198 struct netdata_local *pldat = netdev_priv(ndev);
1199
1200 return pldat->msg_enable;
1201 }
1202
/* ethtool .set_msglevel: store the new message-enable bitmap */
static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
{
	struct netdata_local *priv = netdev_priv(ndev);

	priv->msg_enable = level;
}
1209
/* Ethtool operations; link state and settings are delegated to phylib */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo	= lpc_eth_ethtool_getdrvinfo,
	.get_msglevel	= lpc_eth_ethtool_getmsglevel,
	.set_msglevel	= lpc_eth_ethtool_setmsglevel,
	.get_link	= ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
1218
/* Net device operations for the LPC Ethernet MAC */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open		= lpc_eth_open,
	.ndo_stop		= lpc_eth_close,
	.ndo_start_xmit		= lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
	.ndo_do_ioctl		= lpc_eth_ioctl,
	.ndo_set_mac_address	= lpc_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
};
1228
/*
 * Platform probe: map registers, claim the IRQ and clock, carve out a
 * DMA buffer/descriptor region (on-chip IRAM when large enough,
 * otherwise coherent SDRAM), establish a MAC address and register the
 * net device and its MDIO bus.
 *
 * Error paths unwind in strict reverse order of acquisition through
 * the goto chain at the bottom.
 */
static int lpc_eth_drv_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct netdata_local *pldat;
	struct net_device *ndev;
	dma_addr_t dma_handle;
	struct resource *res;
	int irq, ret;

	/* Setup network interface for RMII or MII mode */
	lpc32xx_set_phy_interface_mode(lpc_phy_interface_mode(dev));

	/* Get platform resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(dev, "error getting resources.\n");
		ret = -ENXIO;
		goto err_exit;
	}

	/* Allocate net driver data structure */
	ndev = alloc_etherdev(sizeof(struct netdata_local));
	if (!ndev) {
		dev_err(dev, "could not allocate device.\n");
		ret = -ENOMEM;
		goto err_exit;
	}

	SET_NETDEV_DEV(ndev, dev);

	pldat = netdev_priv(ndev);
	pldat->pdev = pdev;
	pldat->ndev = ndev;

	spin_lock_init(&pldat->lock);

	/* Save resources */
	ndev->irq = irq;

	/* Get clock for the device */
	pldat->clk = clk_get(dev, NULL);
	if (IS_ERR(pldat->clk)) {
		dev_err(dev, "error getting clock.\n");
		ret = PTR_ERR(pldat->clk);
		goto err_out_free_dev;
	}

	/* Enable network clock */
	ret = clk_prepare_enable(pldat->clk);
	if (ret)
		goto err_out_clk_put;

	/* Map IO space */
	pldat->net_base = ioremap(res->start, resource_size(res));
	if (!pldat->net_base) {
		dev_err(dev, "failed to map registers\n");
		ret = -ENOMEM;
		goto err_out_disable_clocks;
	}
	ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
			  ndev->name, ndev);
	if (ret) {
		dev_err(dev, "error requesting interrupt.\n");
		goto err_out_iounmap;
	}

	/* Setup driver functions */
	ndev->netdev_ops = &lpc_netdev_ops;
	ndev->ethtool_ops = &lpc_eth_ethtool_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(2500);

	/* Get size of DMA buffers/descriptors region */
	pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
		sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));

	/* Try the faster on-chip IRAM first; fall back to SDRAM when the
	 * region does not fit.  lpc32xx_return_iram() reports the IRAM
	 * size and fills in the virtual/DMA addresses.
	 */
	if (use_iram_for_net(dev)) {
		if (pldat->dma_buff_size >
		    lpc32xx_return_iram(&pldat->dma_buff_base_v, &dma_handle)) {
			pldat->dma_buff_base_v = NULL;
			pldat->dma_buff_size = 0;
			netdev_err(ndev,
				"IRAM not big enough for net buffers, using SDRAM instead.\n");
		}
	}

	if (pldat->dma_buff_base_v == NULL) {
		ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			goto err_out_free_irq;

		pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);

		/* Allocate a chunk of memory for the DMA ethernet buffers
		   and descriptors */
		pldat->dma_buff_base_v =
			dma_alloc_coherent(dev,
					   pldat->dma_buff_size, &dma_handle,
					   GFP_KERNEL);
		if (pldat->dma_buff_base_v == NULL) {
			ret = -ENOMEM;
			goto err_out_free_irq;
		}
	}
	pldat->dma_buff_base_p = dma_handle;

	netdev_dbg(ndev, "IO address space     :%pR\n", res);
	netdev_dbg(ndev, "IO address size      :%zd\n",
			(size_t)resource_size(res));
	netdev_dbg(ndev, "IO address (mapped)  :0x%p\n",
			pldat->net_base);
	netdev_dbg(ndev, "IRQ number           :%d\n", ndev->irq);
	netdev_dbg(ndev, "DMA buffer size      :%zd\n", pldat->dma_buff_size);
	netdev_dbg(ndev, "DMA buffer P address :%pad\n",
			&pldat->dma_buff_base_p);
	netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
			pldat->dma_buff_base_v);

	/* Get MAC address from current HW setting (POR state is all zeros) */
	__lpc_get_mac(pldat, ndev->dev_addr);

	/* Fallback chain: hardware registers -> device tree -> random */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		const char *macaddr = of_get_mac_address(np);
		if (!IS_ERR(macaddr))
			ether_addr_copy(ndev->dev_addr, macaddr);
	}
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* then shut everything down to save power */
	__lpc_eth_shutdown(pldat);

	/* Set default parameters */
	pldat->msg_enable = NETIF_MSG_LINK;

	/* Force an MII interface reset and clock setup */
	__lpc_mii_mngt_reset(pldat);

	/* Force default PHY interface setup in chip, this will probably be
	   changed by the PHY driver */
	pldat->link = 0;
	pldat->speed = 100;
	pldat->duplex = DUPLEX_FULL;
	__lpc_params_setup(pldat);

	netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "Cannot register net device, aborting.\n");
		goto err_out_dma_unmap;
	}
	platform_set_drvdata(pdev, ndev);

	ret = lpc_mii_init(pldat);
	if (ret)
		goto err_out_unregister_netdev;

	netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
	       (unsigned long)res->start, ndev->irq);

	device_init_wakeup(dev, 1);
	device_set_wakeup_enable(dev, 0);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(ndev);
err_out_dma_unmap:
	/* Only free coherent memory if the region did not come from IRAM */
	if (!use_iram_for_net(dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
		dma_free_coherent(dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
err_out_free_irq:
	free_irq(ndev->irq, ndev);
err_out_iounmap:
	iounmap(pldat->net_base);
err_out_disable_clocks:
	clk_disable_unprepare(pldat->clk);
err_out_clk_put:
	clk_put(pldat->clk);
err_out_free_dev:
	free_netdev(ndev);
err_exit:
	pr_err("%s: not found (%d).\n", MODNAME, ret);
	return ret;
}
1418
/*
 * Platform remove: unregister the net device, then release resources
 * in reverse order of probe (DMA region, IRQ, MMIO mapping, MDIO bus,
 * clock, net device).
 */
static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);

	/* Coherent memory was only allocated when IRAM was unused or too
	 * small for the buffer/descriptor region (see probe).
	 */
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram(NULL, NULL))
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable_unprepare(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}
1441
1442 #ifdef CONFIG_PM
/*
 * Legacy platform suspend callback: optionally arm the wake IRQ,
 * detach a running interface and shut the MAC down with its clock
 * gated.
 *
 * Fix: the original dereferenced ndev (ndev->irq) before its
 * "if (ndev)" NULL check, making the guard useless; the guard now
 * runs first.
 */
static int lpc_eth_drv_suspend(struct platform_device *pdev,
	pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat;

	if (!ndev)
		return 0;

	if (device_may_wakeup(&pdev->dev))
		enable_irq_wake(ndev->irq);

	if (netif_running(ndev)) {
		pldat = netdev_priv(ndev);

		netif_device_detach(ndev);
		__lpc_eth_shutdown(pldat);
		clk_disable_unprepare(pldat->clk);

		/*
		 * Reset again now clock is disable to be sure
		 * EMC_MDC is down
		 */
		__lpc_eth_reset(pldat);
	}

	return 0;
}
1468
lpc_eth_drv_resume(struct platform_device * pdev)1469 static int lpc_eth_drv_resume(struct platform_device *pdev)
1470 {
1471 struct net_device *ndev = platform_get_drvdata(pdev);
1472 struct netdata_local *pldat;
1473
1474 if (device_may_wakeup(&pdev->dev))
1475 disable_irq_wake(ndev->irq);
1476
1477 if (ndev) {
1478 if (netif_running(ndev)) {
1479 pldat = netdev_priv(ndev);
1480
1481 /* Enable interface clock */
1482 clk_enable(pldat->clk);
1483
1484 /* Reset and initialize */
1485 __lpc_eth_reset(pldat);
1486 __lpc_eth_init(pldat);
1487
1488 netif_device_attach(ndev);
1489 }
1490 }
1491
1492 return 0;
1493 }
1494 #endif
1495
/* OF match table: binds this driver to "nxp,lpc-eth" device-tree nodes */
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ }
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
1501
/*
 * Platform driver definition.  Uses the legacy platform_driver
 * suspend/resume callbacks rather than dev_pm_ops.
 */
static struct platform_driver lpc_eth_driver = {
	.probe		= lpc_eth_drv_probe,
	.remove		= lpc_eth_drv_remove,
#ifdef CONFIG_PM
	.suspend	= lpc_eth_drv_suspend,
	.resume		= lpc_eth_drv_resume,
#endif
	.driver		= {
		.name	= MODNAME,
		.of_match_table = lpc_eth_match,
	},
};

module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");
1521