/*
 * Freescale Ethernet controllers
 *
 * Copyright (c) 2005 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/crc32.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <linux/uaccess.h>

#include "fs_enet.h"
#include "fec.h"

/*************************************************/

#if defined(CONFIG_CPM1)
/* for a CPM1 __raw_xxx's are sufficient */
#define __fs_out32(addr, x)	__raw_writel(x, addr)
#define __fs_out16(addr, x)	__raw_writew(x, addr)
#define __fs_in32(addr)		__raw_readl(addr)
#define __fs_in16(addr)		__raw_readw(addr)
#else
/* for others play it safe */
#define __fs_out32(addr, x)	out_be32(addr, x)
#define __fs_out16(addr, x)	out_be16(addr, x)
#define __fs_in32(addr)		in_be32(addr)
#define __fs_in16(addr)		in_be16(addr)
#endif

/* write */
#define FW(_fecp, _reg, _v)	__fs_out32(&(_fecp)->fec_ ## _reg, (_v))

/* read */
#define FR(_fecp, _reg)		__fs_in32(&(_fecp)->fec_ ## _reg)

/* set bits */
#define FS(_fecp, _reg, _v)	FW(_fecp, _reg, FR(_fecp, _reg) | (_v))

/* clear bits */
#define FC(_fecp, _reg, _v)	FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))

/*
 * Delay to wait for FEC reset command to complete (in us)
 */
#define FEC_RESET_DELAY		50

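/*
 * Issue a controller reset and busy-wait, for up to FEC_RESET_DELAY
 * microseconds, for the RESET bit in ECNTRL to self-clear.
 * Returns 0 on success and -1 if the reset did not complete in time.
 */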
static int whack_reset(struct fec __iomem *fecp)
{
	int i;

	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
	for (i = 0; i < FEC_RESET_DELAY; i++) {
		if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
			return 0;	/* OK */
		udelay(1);
	}

	return -1;
}

static int do_pd_setup(struct fs_enet_private *fep)
{
	struct platform_device *ofdev = to_platform_device(fep->dev);

	fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	if (!fep->interrupt)
		return -EINVAL;

	fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
	if (!fep->fec.fecp)
		return -EINVAL;

	return 0;
}

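/*
 * Interrupt event groups: the events serviced by NAPI polling, the
 * default event set, and the events reported as errors.  They are
 * plugged into fep->ev_napi, fep->ev and fep->ev_err in setup_data().
 */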
#define FEC_NAPI_EVENT_MSK	(FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
#define FEC_EVENT		(FEC_ENET_RXF | FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK	(FEC_ENET_HBERR | FEC_ENET_BABR | \
				 FEC_ENET_BABT | FEC_ENET_EBERR)

static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (do_pd_setup(fep) != 0)
		return -EINVAL;

	fep->fec.hthi = 0;
	fep->fec.htlo = 0;

	fep->ev_napi = FEC_NAPI_EVENT_MSK;
	fep->ev = FEC_EVENT;
	fep->ev_err = FEC_ERR_EVENT_MSK;

	return 0;
}

static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

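	/*
	 * Allocate a single DMA-coherent region holding the TX and RX
	 * buffer descriptor rings back to back.
	 */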
	fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev,
					(fpi->tx_ring + fpi->rx_ring) *
					sizeof(cbd_t), &fep->ring_mem_addr,
					GFP_KERNEL);
	if (fep->ring_base == NULL)
		return -ENOMEM;

	return 0;
}

static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	if (fep->ring_base)
		dma_free_coherent(fep->dev,
				  (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
				  (void __force *)fep->ring_base,
				  fep->ring_mem_addr);
}

static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}

static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
}

static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fep->fec.hthi = 0;
	fep->fec.htlo = 0;
}

static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int temp, hash_index;
	u32 crc, csrVal;

	crc = ether_crc(6, mac);

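	/*
	 * The FEC hashes on the CRC32 of the address: bit 0 of the CRC
	 * selects the high or low hash register, and the next five bits,
	 * bit-reversed, select the bit within that register.
	 */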
	temp = (crc & 0x3f) >> 1;
	hash_index = ((temp & 0x01) << 4) |
		     ((temp & 0x02) << 2) |
		     ((temp & 0x04)) |
		     ((temp & 0x08) >> 2) |
		     ((temp & 0x10) >> 4);
	csrVal = 1 << hash_index;
	if (crc & 1)
		fep->fec.hthi |= csrVal;
	else
		fep->fec.htlo |= csrVal;
}

static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	/* if all-multi is set or there are too many multicast addresses,
	 * just enable everything
	 */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
		fep->fec.hthi = 0xffffffffU;
		fep->fec.htlo = 0xffffffffU;
	}

	FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
	FW(fecp, grp_hash_table_high, fep->fec.hthi);
	FW(fecp, grp_hash_table_low, fep->fec.htlo);
}

static void set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if ((dev->flags & IFF_PROMISC) == 0) {
		set_multicast_start(dev);
		netdev_for_each_mc_addr(ha, dev)
			set_multicast_one(dev, ha->addr);
		set_multicast_finish(dev);
	} else {
		set_promiscuous_mode(dev);
	}
}

static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;
	const struct fs_platform_info *fpi = fep->fpi;
	dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
	int r;
	u32 addrhi, addrlo;

	struct mii_bus *mii = dev->phydev->mdio.bus;
	struct fec_info *fec_inf = mii->priv;

	r = whack_reset(fep->fec.fecp);
	if (r != 0)
		dev_err(fep->dev, "FEC Reset FAILED!\n");

	/*
	 * Set station address.
	 */
	addrhi = ((u32) dev->dev_addr[0] << 24) |
		 ((u32) dev->dev_addr[1] << 16) |
		 ((u32) dev->dev_addr[2] <<  8) |
		  (u32) dev->dev_addr[3];
	addrlo = ((u32) dev->dev_addr[4] << 24) |
		 ((u32) dev->dev_addr[5] << 16);
	FW(fecp, addr_low, addrhi);
	FW(fecp, addr_high, addrlo);

	/*
	 * Reset all multicast.
	 */
	FW(fecp, grp_hash_table_high, fep->fec.hthi);
	FW(fecp, grp_hash_table_low, fep->fec.htlo);

	/*
	 * Set maximum receive buffer size.
	 */
	FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
#else
	FW(fecp, r_hash, PKT_MAXBUF_SIZE);
#endif

	/* get physical address */
	rx_bd_base_phys = fep->ring_mem_addr;
	tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;

	/*
	 * Set receive and transmit descriptor base.
	 */
	FW(fecp, r_des_start, rx_bd_base_phys);
	FW(fecp, x_des_start, tx_bd_base_phys);

	fs_init_bds(dev);

	/*
	 * Enable big endian and don't care about SDMA FC.
	 */
#ifdef CONFIG_FS_ENET_MPC5121_FEC
	FS(fecp, dma_control, 0xC0000000);
#else
	FW(fecp, fun_code, 0x78000000);
#endif

	/*
	 * Set MII speed.
	 */
	FW(fecp, mii_speed, fec_inf->mii_speed);

	/*
	 * Clear any outstanding interrupt.
	 */
	FW(fecp, ievent, 0xffc0);
#ifndef CONFIG_FS_ENET_MPC5121_FEC
	FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);

	FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
#else
	/*
	 * Only set MII/RMII mode - do not touch maximum frame length
	 * configured before.
	 */
	FS(fecp, r_cntrl, fpi->use_rmii ?
			FEC_RCNTRL_RMII_MODE : FEC_RCNTRL_MII_MODE);
#endif
	/*
	 * adjust to duplex mode
	 */
	if (dev->phydev->duplex) {
		FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD enable */
	} else {
		FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD disable */
	}

	/* Restore multicast and promiscuous settings */
	set_multicast_list(dev);

	/*
	 * Enable interrupts we wish to service.
	 */
	FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
	   FEC_ENET_RXF | FEC_ENET_RXB);

	/*
	 * And last, enable the transmit and receive processing.
	 */
	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
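	/* Tell the controller that the receive descriptors are ready. */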
	FW(fecp, r_des_active, 0x01000000);
}

static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	struct fec __iomem *fecp = fep->fec.fecp;

	struct fec_info *feci = dev->phydev->mdio.bus->priv;

	int i;

	if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
		return;	/* already down */

	FW(fecp, x_cntrl, 0x01);	/* Graceful transmit stop */
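	/* Wait for the graceful-stop-complete (GRA) event to be raised. */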
	for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
	     i < FEC_RESET_DELAY; i++)
		udelay(1);

	if (i == FEC_RESET_DELAY)
		dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");

	/*
	 * Disable the FEC and mask all interrupts.
	 */
	FW(fecp, imask, 0);
	FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);

	fs_cleanup_bds(dev);

	/*
	 * If this FEC also hosts the MII bus, keep the controller enabled
	 * so that MDIO access to the PHY continues to work.
	 */
	if (fpi->has_phy) {
		FS(fecp, r_cntrl, fpi->use_rmii ?
			FEC_RCNTRL_RMII_MODE :
			FEC_RCNTRL_MII_MODE);	/* MII/RMII enable */
		FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
		FW(fecp, ievent, FEC_ENET_MII);
		FW(fecp, mii_speed, feci->mii_speed);
	}
}

static void napi_clear_event_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
}

static void napi_enable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FS(fecp, imask, FEC_NAPI_EVENT_MSK);
}

static void napi_disable_fs(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FC(fecp, imask, FEC_NAPI_EVENT_MSK);
}

static void rx_bd_done(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

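	/* Notify the controller that receive descriptors have been replenished. */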
	FW(fecp, r_des_active, 0x01000000);
}

static void tx_kickstart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

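	/* Notify the controller that new transmit descriptors are ready. */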
	FW(fecp, x_des_active, 0x01000000);
}

static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	return FR(fecp, ievent) & FR(fecp, imask);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fec __iomem *fecp = fep->fec.fecp;

	FW(fecp, ievent, int_events);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
}

static int get_regs(struct net_device *dev, void *p, int *sizep)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (*sizep < sizeof(struct fec))
		return -EINVAL;

	memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));

	return 0;
}

static int get_regs_len(struct net_device *dev)
{
	return sizeof(struct fec);
}

static void tx_restart(struct net_device *dev)
{
	/* nothing */
}

/*************************************************************************/

const struct fs_ops fs_fec_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.napi_clear_event	= napi_clear_event_fs,
	.napi_enable		= napi_enable_fs,
	.napi_disable		= napi_disable_fs,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};