1 /*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2 
3 	Copyright 2000,2001  The Linux Kernel Team
4 	Written/copyright 1994-2001 by Donald Becker.
5 
6 	This software may be used and distributed according to the terms
7 	of the GNU General Public License, incorporated herein by reference.
8 
9 	Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11 
12 #define pr_fmt(fmt) "tulip: " fmt
13 
14 #define DRV_NAME	"tulip"
15 
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/slab.h>
19 #include "tulip.h"
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/etherdevice.h>
23 #include <linux/delay.h>
24 #include <linux/mii.h>
25 #include <linux/crc32.h>
26 #include <asm/unaligned.h>
27 #include <linux/uaccess.h>
28 
29 #ifdef CONFIG_SPARC
30 #include <asm/prom.h>
31 #endif
32 
33 /* A few user-configurable values. */
34 
35 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static unsigned int max_interrupt_work = 25;

#define MAX_UNITS 8
/* Per-board option arrays (one slot per probed board, in probe order),
   filled from the options=/full_duplex= module parameters below.
   Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
43 
/*  The possible media types that can be set in options[] are: */
/*  The array index is the media type code (same indexing as
    tulip_media_cap[] further below). */
const char * const medianame[32] = {
	"10baseT", "10base2", "AUI", "100baseTx",
	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
	"","","","", "","","","",  "","","","Transceiver reset",
};
53 
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
/* 1518 (a maximal Ethernet frame) effectively means "always copy" on
   architectures where that is the cheaper/safer choice; 100 means copy
   only small packets and hand larger ones up in the original buffer. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
62 
63 /*
64   Set the bus performance register.
65 	Typical: Set 16 longword cache alignment, no burst limit.
66 	Cache alignment bits 15:14	     Burst length 13:8
67 		0000	No alignment  0x00000000 unlimited		0800 8 longwords
68 		4000	8  longwords		0100 1 longword		1000 16 longwords
69 		8000	16 longwords		0200 2 longwords	2000 32 longwords
70 		C000	32  longwords		0400 4 longwords
71 	Warning: many older 486 systems are broken and require setting 0x00A04800
72 	   8 longword cache alignment, 8 longword burst.
73 	ToDo: Non-Intel setting could be better.
74 */
75 
/* Default CSR0 (bus mode) value, per architecture; bit layout is
   described in the comment block above.  Overridable via the csr0=
   module parameter. */
#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(CONFIG_SPARC) || defined(__hppa__)
/* The UltraSparc PCI controllers will disconnect at every 64-byte
 * crossing anyways so it makes no sense to tell Tulip to burst
 * any more than that.
 */
static int csr0 = 0x01A00000 | 0x9000;
#elif defined(__arm__) || defined(__sh__)
static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
static int csr0;
#endif
93 
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)


MODULE_AUTHOR("The Linux Kernel Team");
MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
MODULE_LICENSE("GPL");
/* NOTE(review): tulip_debug is defined below this use; module_param()
   presumably relies on an extern declaration in tulip.h -- confirm. */
module_param(tulip_debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
module_param(csr0, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);

/* Message verbosity level; 1 by default, raised at compile time when
   TULIP_DEBUG is defined. */
#ifdef TULIP_DEBUG
int tulip_debug = TULIP_DEBUG;
#else
int tulip_debug = 1;
#endif
114 
tulip_timer(struct timer_list * t)115 static void tulip_timer(struct timer_list *t)
116 {
117 	struct tulip_private *tp = from_timer(tp, t, timer);
118 	struct net_device *dev = tp->dev;
119 
120 	if (netif_running(dev))
121 		schedule_work(&tp->media_work);
122 }
123 
124 /*
 * This table is used during operation for capabilities and media timers.
126  *
127  * It is indexed via the values in 'enum chips'
128  */
129 
/* Per-chip capability table; entry fields are, in order: chip name,
   I/O region size, valid-interrupt mask (written to CSR5/CSR7 in
   tulip_up()), feature flags, media timer, and optional media task --
   field names per struct tulip_chip_table in tulip.h (TODO confirm
   exact field order against the header). */
const struct tulip_chip_table tulip_tbl[] = {
  { }, /* placeholder for array, slot unused currently */
  { }, /* placeholder for array, slot unused currently */

  /* DC21140 */
  { "Digital DS21140 Tulip", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
	tulip_media_task },

  /* DC21142, DC21143 */
  { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },

  /* LC82C168 */
  { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
	HAS_MII | HAS_PNICNWAY, pnic_timer, },

  /* MX98713 */
  { "Macronix 98713 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

  /* MX98715 */
  { "Macronix 98715 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer, },

  /* MX98725 */
  { "Macronix 98725 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer, },

  /* AX88140 */
  { "ASIX AX88140", 128, 0x0001fbff,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
	| IS_ASIX, tulip_timer, tulip_media_task },

  /* PNIC2 */
  { "Lite-On PNIC-II", 256, 0x0801fbff,
	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },

  /* COMET */
  { "ADMtek Comet", 256, 0x0001abef,
	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },

  /* COMPEX9881 */
  { "Compex 9881 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

  /* I21145 */
  { "Intel DS21145 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },

  /* DM910X */
#ifdef CONFIG_TULIP_DM910X
  { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
	tulip_timer, tulip_media_task },
#else
  { NULL },
#endif

  /* RS7112 */
  { "Conexant LANfinity", 256, 0x0001ebef,
	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },

};
196 
197 
/* PCI match table: { vendor, device, subvendor, subdevice, class,
   class_mask, driver_data }.  driver_data is the chip index into
   tulip_tbl[] (the 'enum chips' value). */
static const struct pci_device_id tulip_pci_tbl[] = {
	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
#ifdef CONFIG_TULIP_DM910X
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
#endif
	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
241 
242 
/* A full-duplex map for media types. */
/* Indexed by media type code (same indexing as medianame[]); the values
   are bit flags tested via the MediaIs* masks (MediaIsMII, MediaIsFD,
   MediaIs100, MediaAlwaysFD) -- presumably defined in tulip.h. */
const char tulip_media_cap[32] =
{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
246 
/* Forward declarations for the net_device operations and internal
   helpers defined later in this file. */
static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
					  struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif
263 
tulip_set_power_state(struct tulip_private * tp,int sleep,int snooze)264 static void tulip_set_power_state (struct tulip_private *tp,
265 				   int sleep, int snooze)
266 {
267 	if (tp->flags & HAS_ACPI) {
268 		u32 tmp, newtmp;
269 		pci_read_config_dword (tp->pdev, CFDD, &tmp);
270 		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
271 		if (sleep)
272 			newtmp |= CFDD_Sleep;
273 		else if (snooze)
274 			newtmp |= CFDD_Snooze;
275 		if (tmp != newtmp)
276 			pci_write_config_dword (tp->pdev, CFDD, newtmp);
277 	}
278 
279 }
280 
281 
/* Bring the interface fully up: wake the chip from ACPI sleep, clear any
 * Wake-on-LAN state, hard-reset the chip, program the descriptor ring
 * addresses and station address, choose and select an initial media type
 * (chip-specific), then unmask interrupts and start the Tx/Rx engines.
 * Called from tulip_open() with the device quiescent.  The register write
 * ordering below is hardware-mandated; do not reorder. */
static void tulip_up(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int next_tick = 3*HZ;
	u32 reg;
	int i;

#ifdef CONFIG_TULIP_NAPI
	napi_enable(&tp->napi);
#endif

	/* Wake the chip from sleep/snooze mode. */
	tulip_set_power_state (tp, 0, 0);

	/* Disable all WOL events */
	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
	tulip_set_wolopts(tp->pdev, 0);

	/* On some chip revs we must set the MII/SYM port before the reset!? */
	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
		iowrite32(0x00040000, ioaddr + CSR6);

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	iowrite32(0x00000001, ioaddr + CSR0);
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	/* Deassert reset.
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list. */
	iowrite32(tp->csr0, ioaddr + CSR0);
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	if (tulip_debug > 1)
		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);

	/* Tell the chip where the Rx/Tx descriptor rings live, and reset
	   the ring indices. */
	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	if (tp->flags & MC_HASH_ONLY) {
		/* Hash-filter-only chips take the station address through
		   CSRs rather than a setup frame. */
		u32 addr_low = get_unaligned_le32(dev->dev_addr);
		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
		if (tp->chip_id == AX88140) {
			iowrite32(0, ioaddr + CSR13);
			iowrite32(addr_low,  ioaddr + CSR14);
			iowrite32(1, ioaddr + CSR13);
			iowrite32(addr_high, ioaddr + CSR14);
		} else if (tp->flags & COMET_MAC_ADDR) {
			iowrite32(addr_low,  ioaddr + 0xA4);
			iowrite32(addr_high, ioaddr + 0xA8);
			iowrite32(0, ioaddr + CSR27);
			iowrite32(0, ioaddr + CSR28);
		}
	} else {
		/* This is set_rx_mode(), but without starting the transmitter. */
		u16 *eaddrs = (u16 *)dev->dev_addr;
		u16 *setup_frm = &tp->setup_frame[15*6];
		dma_addr_t mapping;

		/* 21140 bug: you must add the broadcast address. */
		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
		/* Fill the final entry of the table with our physical address. */
		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];

		mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame,
					 sizeof(tp->setup_frame),
					 DMA_TO_DEVICE);
		tp->tx_buffers[tp->cur_tx].skb = NULL;
		tp->tx_buffers[tp->cur_tx].mapping = mapping;

		/* Put the setup frame on the Tx list. */
		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);

		tp->cur_tx++;
	}

	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;

	/* Allow selecting a default media.  On exit, i indexes the chosen
	   leaf in the EEPROM media table (0 when there is no table). */
	i = 0;
	if (tp->mtable == NULL)
		goto media_picked;
	if (dev->if_port) {
		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
			(dev->if_port == 12 ? 0 : dev->if_port);
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using user-specified media %s\n",
					 medianame[dev->if_port]);
				goto media_picked;
			}
	}
	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using EEPROM-set media %s\n",
					 medianame[looking_for]);
				goto media_picked;
			}
	}
	/* Start sensing first non-full-duplex media. */
	for (i = tp->mtable->leafcount - 1;
		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
		;
media_picked:

	tp->csr6 = 0;
	tp->cur_index = i;
	tp->nwayset = 0;

	/* Chip-specific media initialization follows; each branch sets
	   tp->csr6 (operating mode) and dev->if_port as appropriate. */
	if (dev->if_port) {
		if (tp->chip_id == DC21143  &&
		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
			/* We must reset the media CSRs when we force-select MII mode. */
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(0x0008, ioaddr + CSR15);
		}
		tulip_select_media(dev, 1);
	} else if (tp->chip_id == DC21142) {
		if (tp->mii_cnt) {
			tulip_select_media(dev, 1);
			if (tulip_debug > 1)
				dev_info(&dev->dev,
					 "Using MII transceiver %d, status %04x\n",
					 tp->phys[0],
					 tulip_mdio_read(dev, tp->phys[0], 1));
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			tp->csr6 = csr6_mask_hdcap;
			dev->if_port = 11;
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
		} else
			t21142_start_nway(dev);
	} else if (tp->chip_id == PNIC2) {
		/* for initial startup advertise 10/100 Full and Half */
		tp->sym_advertise = 0x01E0;
		/* enable autonegotiate end interrupt */
		iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
		iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
		pnic2_start_nway(dev);
	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
		if (tp->mii_cnt) {
			dev->if_port = 11;
			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
			iowrite32(0x0001, ioaddr + CSR15);
		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
			pnic_do_nway(dev);
		else {
			/* Start with 10mbps to do autonegotiation. */
			iowrite32(0x32, ioaddr + CSR12);
			tp->csr6 = 0x00420000;
			iowrite32(0x0001B078, ioaddr + 0xB8);
			iowrite32(0x0201B078, ioaddr + 0xB8);
			next_tick = 1*HZ;
		}
	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
		   ! tp->medialock) {
		dev->if_port = 0;
		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
		/* Provided by BOLO, Macronix - 12/10/1998. */
		dev->if_port = 0;
		tp->csr6 = 0x01a80200;
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
		/* Enable automatic Tx underrun recovery. */
		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
		dev->if_port = tp->mii_cnt ? 11 : 0;
		tp->csr6 = 0x00040000;
	} else if (tp->chip_id == AX88140) {
		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
	} else
		tulip_select_media(dev, 1);

	/* Start the chip's Tx to process setup frame. */
	tulip_stop_rxtx(tp);
	barrier();
	udelay(5);
	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	tulip_start_rxtx(tp);
	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */

	if (tulip_debug > 2) {
		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
			   ioread32(ioaddr + CSR0),
			   ioread32(ioaddr + CSR5),
			   ioread32(ioaddr + CSR6));
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	tp->timer.expires = RUN_AT(next_tick);
	add_timer(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	timer_setup(&tp->oom_timer, oom_timer, 0);
#endif
}
500 
501 static int
tulip_open(struct net_device * dev)502 tulip_open(struct net_device *dev)
503 {
504 	struct tulip_private *tp = netdev_priv(dev);
505 	int retval;
506 
507 	tulip_init_ring (dev);
508 
509 	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
510 			     dev->name, dev);
511 	if (retval)
512 		goto free_ring;
513 
514 	tulip_up (dev);
515 
516 	netif_start_queue (dev);
517 
518 	return 0;
519 
520 free_ring:
521 	tulip_free_ring (dev);
522 	return retval;
523 }
524 
525 
/* Transmit watchdog (net_device ndo_tx_timeout): log chip state in a
 * chip-family-specific way, then restart the transmitter.  For MII media
 * the media monitor is trusted to recover; for the 21140-class chips the
 * media worker is asked to perform the recovery instead of doing it here. */
static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	spin_lock_irqsave (&tp->lock, flags);

	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should handle this. */
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "Transmit timeout using MII device\n");
	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
		   tp->chip_id == DM910X) {
		dev_warn(&dev->dev,
			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
			 ioread32(ioaddr + CSR15));
		/* Let the media worker do the reset; skip the common path. */
		tp->timeout_recovery = 1;
		schedule_work(&tp->media_work);
		goto out_unlock;
	} else if (tp->chip_id == PNIC2) {
		dev_warn(&dev->dev,
			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
			 (int)ioread32(ioaddr + CSR5),
			 (int)ioread32(ioaddr + CSR6),
			 (int)ioread32(ioaddr + CSR7),
			 (int)ioread32(ioaddr + CSR12));
	} else {
		dev_warn(&dev->dev,
			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
		dev->if_port = 0;
	}

/* Optional very verbose ring dump, normally compiled out. */
#if defined(way_too_many_messages)
	if (tulip_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG
			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
			       i,
			       (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
				if (j < 100)
					pr_cont(" %02x", buf[j]);
			pr_cont(" j=%d\n", j);
		}
		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
		pr_cont("\n");
	}
#endif

	tulip_tx_timeout_complete(tp, ioaddr);

out_unlock:
	spin_unlock_irqrestore (&tp->lock, flags);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue (dev);
}
600 
601 
602 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
tulip_init_ring(struct net_device * dev)603 static void tulip_init_ring(struct net_device *dev)
604 {
605 	struct tulip_private *tp = netdev_priv(dev);
606 	int i;
607 
608 	tp->susp_rx = 0;
609 	tp->ttimer = 0;
610 	tp->nir = 0;
611 
612 	for (i = 0; i < RX_RING_SIZE; i++) {
613 		tp->rx_ring[i].status = 0x00000000;
614 		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
615 		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
616 		tp->rx_buffers[i].skb = NULL;
617 		tp->rx_buffers[i].mapping = 0;
618 	}
619 	/* Mark the last entry as wrapping the ring. */
620 	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
621 	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
622 
623 	for (i = 0; i < RX_RING_SIZE; i++) {
624 		dma_addr_t mapping;
625 
626 		/* Note the receive buffer must be longword aligned.
627 		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
628 		   use skb_reserve() to align the IP header! */
629 		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
630 		tp->rx_buffers[i].skb = skb;
631 		if (skb == NULL)
632 			break;
633 		mapping = dma_map_single(&tp->pdev->dev, skb->data,
634 					 PKT_BUF_SZ, DMA_FROM_DEVICE);
635 		tp->rx_buffers[i].mapping = mapping;
636 		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
637 		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
638 	}
639 	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
640 
641 	/* The Tx buffer descriptor is filled in as needed, but we
642 	   do need to clear the ownership bit. */
643 	for (i = 0; i < TX_RING_SIZE; i++) {
644 		tp->tx_buffers[i].skb = NULL;
645 		tp->tx_buffers[i].mapping = 0;
646 		tp->tx_ring[i].status = 0x00000000;
647 		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
648 	}
649 	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
650 }
651 
652 static netdev_tx_t
tulip_start_xmit(struct sk_buff * skb,struct net_device * dev)653 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
654 {
655 	struct tulip_private *tp = netdev_priv(dev);
656 	int entry;
657 	u32 flag;
658 	dma_addr_t mapping;
659 	unsigned long flags;
660 
661 	spin_lock_irqsave(&tp->lock, flags);
662 
663 	/* Calculate the next Tx descriptor entry. */
664 	entry = tp->cur_tx % TX_RING_SIZE;
665 
666 	tp->tx_buffers[entry].skb = skb;
667 	mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
668 				 DMA_TO_DEVICE);
669 	tp->tx_buffers[entry].mapping = mapping;
670 	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
671 
672 	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
673 		flag = 0x60000000; /* No interrupt */
674 	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
675 		flag = 0xe0000000; /* Tx-done intr. */
676 	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
677 		flag = 0x60000000; /* No Tx-done intr. */
678 	} else {		/* Leave room for set_rx_mode() to fill entries. */
679 		flag = 0xe0000000; /* Tx-done intr. */
680 		netif_stop_queue(dev);
681 	}
682 	if (entry == TX_RING_SIZE-1)
683 		flag = 0xe0000000 | DESC_RING_WRAP;
684 
685 	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
686 	/* if we were using Transmit Automatic Polling, we would need a
687 	 * wmb() here. */
688 	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
689 	wmb();
690 
691 	tp->cur_tx++;
692 
693 	/* Trigger an immediate transmit demand. */
694 	iowrite32(0, tp->base_addr + CSR1);
695 
696 	spin_unlock_irqrestore(&tp->lock, flags);
697 
698 	return NETDEV_TX_OK;
699 }
700 
tulip_clean_tx_ring(struct tulip_private * tp)701 static void tulip_clean_tx_ring(struct tulip_private *tp)
702 {
703 	unsigned int dirty_tx;
704 
705 	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
706 		dirty_tx++) {
707 		int entry = dirty_tx % TX_RING_SIZE;
708 		int status = le32_to_cpu(tp->tx_ring[entry].status);
709 
710 		if (status < 0) {
711 			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
712 			tp->tx_ring[entry].status = 0;
713 		}
714 
715 		/* Check for Tx filter setup frames. */
716 		if (tp->tx_buffers[entry].skb == NULL) {
717 			/* test because dummy frames not mapped */
718 			if (tp->tx_buffers[entry].mapping)
719 				dma_unmap_single(&tp->pdev->dev,
720 						 tp->tx_buffers[entry].mapping,
721 						 sizeof(tp->setup_frame),
722 						 DMA_TO_DEVICE);
723 			continue;
724 		}
725 
726 		dma_unmap_single(&tp->pdev->dev,
727 				 tp->tx_buffers[entry].mapping,
728 				 tp->tx_buffers[entry].skb->len,
729 				 DMA_TO_DEVICE);
730 
731 		/* Free the original skb. */
732 		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
733 		tp->tx_buffers[entry].skb = NULL;
734 		tp->tx_buffers[entry].mapping = 0;
735 	}
736 }
737 
/* Quiesce the interface: stop the media worker and timers, mask and stop
 * the chip, reclaim all in-flight buffers, and drop the chip into snooze.
 * The ordering matters: the media worker and timers must be fully stopped
 * before the chip is halted under tp->lock. */
static void tulip_down (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	/* Wait out any media work already in flight before touching CSRs. */
	cancel_work_sync(&tp->media_work);

#ifdef CONFIG_TULIP_NAPI
	napi_disable(&tp->napi);
#endif

	del_timer_sync (&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	del_timer_sync (&tp->oom_timer);
#endif
	spin_lock_irqsave (&tp->lock, flags);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32 (0x00000000, ioaddr + CSR7);

	/* Stop the Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	/* prepare receive buffers */
	tulip_refill_rx(dev);

	/* release any unconsumed transmit buffers */
	tulip_clean_tx_ring(tp);

	/* Fold the hardware missed-frame counter into the stats, unless the
	   chip has vanished (reads as all-ones). */
	if (ioread32(ioaddr + CSR6) != 0xffffffff)
		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

	spin_unlock_irqrestore (&tp->lock, flags);

	/* Re-arm the media timer callback for the next tulip_up(). */
	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);

	dev->if_port = tp->saved_if_port;

	/* Leave the driver in snooze, not sleep, mode. */
	tulip_set_power_state (tp, 0, 1);
}
780 
tulip_free_ring(struct net_device * dev)781 static void tulip_free_ring (struct net_device *dev)
782 {
783 	struct tulip_private *tp = netdev_priv(dev);
784 	int i;
785 
786 	/* Free all the skbuffs in the Rx queue. */
787 	for (i = 0; i < RX_RING_SIZE; i++) {
788 		struct sk_buff *skb = tp->rx_buffers[i].skb;
789 		dma_addr_t mapping = tp->rx_buffers[i].mapping;
790 
791 		tp->rx_buffers[i].skb = NULL;
792 		tp->rx_buffers[i].mapping = 0;
793 
794 		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
795 		tp->rx_ring[i].length = 0;
796 		/* An invalid address. */
797 		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
798 		if (skb) {
799 			dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
800 					 DMA_FROM_DEVICE);
801 			dev_kfree_skb (skb);
802 		}
803 	}
804 
805 	for (i = 0; i < TX_RING_SIZE; i++) {
806 		struct sk_buff *skb = tp->tx_buffers[i].skb;
807 
808 		if (skb != NULL) {
809 			dma_unmap_single(&tp->pdev->dev,
810 					 tp->tx_buffers[i].mapping, skb->len,
811 					 DMA_TO_DEVICE);
812 			dev_kfree_skb (skb);
813 		}
814 		tp->tx_buffers[i].skb = NULL;
815 		tp->tx_buffers[i].mapping = 0;
816 	}
817 }
818 
tulip_close(struct net_device * dev)819 static int tulip_close (struct net_device *dev)
820 {
821 	struct tulip_private *tp = netdev_priv(dev);
822 	void __iomem *ioaddr = tp->base_addr;
823 
824 	netif_stop_queue (dev);
825 
826 	tulip_down (dev);
827 
828 	if (tulip_debug > 1)
829 		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
830 			   ioread32 (ioaddr + CSR5));
831 
832 	free_irq (tp->pdev->irq, dev);
833 
834 	tulip_free_ring (dev);
835 
836 	return 0;
837 }
838 
tulip_get_stats(struct net_device * dev)839 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
840 {
841 	struct tulip_private *tp = netdev_priv(dev);
842 	void __iomem *ioaddr = tp->base_addr;
843 
844 	if (netif_running(dev)) {
845 		unsigned long flags;
846 
847 		spin_lock_irqsave (&tp->lock, flags);
848 
849 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
850 
851 		spin_unlock_irqrestore(&tp->lock, flags);
852 	}
853 
854 	return &dev->stats;
855 }
856 
857 
tulip_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)858 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
859 {
860 	struct tulip_private *np = netdev_priv(dev);
861 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
862 	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
863 }
864 
865 
tulip_ethtool_set_wol(struct net_device * dev,struct ethtool_wolinfo * wolinfo)866 static int tulip_ethtool_set_wol(struct net_device *dev,
867 				 struct ethtool_wolinfo *wolinfo)
868 {
869 	struct tulip_private *tp = netdev_priv(dev);
870 
871 	if (wolinfo->wolopts & (~tp->wolinfo.supported))
872 		   return -EOPNOTSUPP;
873 
874 	tp->wolinfo.wolopts = wolinfo->wolopts;
875 	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
876 	return 0;
877 }
878 
tulip_ethtool_get_wol(struct net_device * dev,struct ethtool_wolinfo * wolinfo)879 static void tulip_ethtool_get_wol(struct net_device *dev,
880 				  struct ethtool_wolinfo *wolinfo)
881 {
882 	struct tulip_private *tp = netdev_priv(dev);
883 
884 	wolinfo->supported = tp->wolinfo.supported;
885 	wolinfo->wolopts = tp->wolinfo.wolopts;
886 	return;
887 }
888 
889 
/* ethtool operations: only driver info and Wake-on-LAN are exposed. */
static const struct ethtool_ops ops = {
	.get_drvinfo = tulip_get_drvinfo,
	.set_wol     = tulip_ethtool_set_wol,
	.get_wol     = tulip_ethtool_get_wol,
};
895 
896 /* Provide ioctl() calls to examine the MII xcvr state. */
/*
 * Legacy MII ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 *
 * Chips with internal NWay (HAS_NWAY) expose a pseudo-PHY at address
 * 32 whose "registers" are synthesized here from CSR6/CSR12/CSR14;
 * real MII transceivers are accessed through tulip_mdio_read/write.
 * COMET parts without a detected MII report a fixed PHY address of 1.
 */
static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	const unsigned int phy_idx = 0;
	int phy = tp->phys[phy_idx] & 0x1f;
	unsigned int regnum = data->reg_num;

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		if (tp->mii_cnt)
			data->phy_id = phy;
		else if (tp->flags & HAS_NWAY)
			data->phy_id = 32;	/* internal NWay pseudo-PHY */
		else if (tp->chip_id == COMET)
			data->phy_id = 1;
		else
			return -ENODEV;
		fallthrough;

	case SIOCGMIIREG:		/* Read MII PHY register. */
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			/* Synthesize standard MII register values from the
			   SIA status/config CSRs of the internal NWay block. */
			int csr12 = ioread32 (ioaddr + CSR12);
			int csr14 = ioread32 (ioaddr + CSR14);
			switch (regnum) {
			case 0:
                                if (((csr14<<5) & 0x1000) ||
                                        (dev->if_port == 5 && tp->nwayset))
                                        data->val_out = 0x1000;
                                else
                                        data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
                                                | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
				break;
			case 1:
                                data->val_out =
					0x1848 +
					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
					((csr12&0x06) == 6 ? 0 : 4);
                                data->val_out |= 0x6048;
				break;
			case 4:
                                /* Advertised value, bogus 10baseTx-FD value from CSR6. */
                                data->val_out =
					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
					((csr14 >> 1) & 0x20) + 1;
                                data->val_out |= ((csr14 >> 9) & 0x03C0);
				break;
			case 5: data->val_out = tp->lpar; break;	/* link partner ability */
			default: data->val_out = 0; break;
			}
		} else {
			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		/* MII register numbers are 5 bits wide. */
		if (regnum & ~0x1f)
			return -EINVAL;
		if (data->phy_id == phy) {
			u16 value = data->val_in;
			switch (regnum) {
			case 0:	/* Check for autonegotiation on or reset. */
				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
				if (tp->full_duplex_lock)
					tp->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4:
				tp->advertising[phy_idx] =
				tp->mii_advertise = data->val_in;
				break;
			}
		}
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			u16 value = data->val_in;
			if (regnum == 0) {
			  /* Writing BMCR with ANEG enable + restart kicks
			     off chip-specific autonegotiation. */
			  if ((value & 0x1200) == 0x1200) {
			    if (tp->chip_id == PNIC2) {
                                   pnic2_start_nway (dev);
                            } else {
				   t21142_start_nway (dev);
                            }
			  }
			} else if (regnum == 4)
				tp->sym_advertise = value;
		} else {
			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	/* Not reached: every case above returns. */
	return -EOPNOTSUPP;
}
992 
993 
994 /* Set or clear the multicast filter for this adaptor.
995    Note that we only use exclusion around actually queueing the
996    new frame, not around filling tp->setup_frame.  This is non-deterministic
997    when re-entered but still correct. */
998 
/*
 * Fill the setup frame in hash-filter layout: a 512-bit multicast hash
 * table followed by one perfect-match entry holding our own address.
 * Caller queues the frame on the Tx ring (see set_rx_mode).
 */
static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	__set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		__set_bit_le(index, hash_table);
	}
	/* Each shortword is stored twice: only the low-address shortword
	   of each setup-frame entry is valid (see note in set_rx_mode). */
	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &tp->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
1027 
/*
 * Fill the setup frame in perfect-filter layout: up to 14 multicast
 * addresses, broadcast padding, and our own address as the final entry.
 * Only valid when netdev_mc_count(dev) <= 14 (checked in set_rx_mode).
 */
static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		/* Each 16-bit chunk of the address is written twice; only
		   the low-address shortword of each entry is valid. */
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &tp->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
1052 
1053 
/*
 * ndo_set_rx_mode: program the receive address filter.
 *
 * Depending on interface flags, list size and chip type this either
 * sets promiscuous/all-multicast bits in CSR6, programs a 64-bit hash
 * into chip-specific registers (ASIX / Comet work-alikes), or queues
 * a 192-byte setup frame on the Tx ring for true tulips.  As noted in
 * the comment above, only the Tx-ring queueing is done under tp->lock.
 */
static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr6;

	/* Start from the live CSR6 with the filter-mode bits cleared;
	   mirror the same change in the cached tp->csr6. */
	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;

	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
	} else if ((netdev_mc_count(dev) > 1000) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else	if (tp->flags & MC_HASH_ONLY) {
		/* Some work-alikes have only a 64-entry hash filter table. */
		/* Should verify correctness on big-endian/__powerpc__ */
		struct netdev_hw_addr *ha;
		if (netdev_mc_count(dev) > 64) {
			/* Arbitrary non-effective limit. */
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
			int filterbit;
			netdev_for_each_mc_addr(ha, dev) {
				/* Comet uses the little-endian CRC; others
				   take the top 6 bits of the big-endian CRC. */
				if (tp->flags & COMET_MAC_ADDR)
					filterbit = ether_crc_le(ETH_ALEN,
								 ha->addr);
				else
					filterbit = ether_crc(ETH_ALEN,
							      ha->addr) >> 26;
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
				if (tulip_debug > 2)
					dev_info(&dev->dev,
						 "Added filter for %pM  %08x bit %d\n",
						 ha->addr,
						 ether_crc(ETH_ALEN, ha->addr),
						 filterbit);
			}
			/* Only touch the hardware when the hash changed. */
			if (mc_filter[0] == tp->mc_filter[0]  &&
				mc_filter[1] == tp->mc_filter[1])
				;				/* No change. */
			else if (tp->flags & IS_ASIX) {
				/* ASIX: hash halves are written indirectly,
				   selected via CSR13, data via CSR14. */
				iowrite32(2, ioaddr + CSR13);
				iowrite32(mc_filter[0], ioaddr + CSR14);
				iowrite32(3, ioaddr + CSR13);
				iowrite32(mc_filter[1], ioaddr + CSR14);
			} else if (tp->flags & COMET_MAC_ADDR) {
				iowrite32(mc_filter[0], ioaddr + CSR27);
				iowrite32(mc_filter[1], ioaddr + CSR28);
			}
			tp->mc_filter[0] = mc_filter[0];
			tp->mc_filter[1] = mc_filter[1];
		}
	} else {
		unsigned long flags;
		/* Tx descriptor control: setup-frame command plus the
		   192-byte setup-frame length. */
		u32 tx_flags = 0x08000000 | 192;

		/* Note that only the low-address shortword of setup_frame is valid!
		   The values are doubled for big-endian architectures. */
		if (netdev_mc_count(dev) > 14) {
			/* Must use a multicast hash table. */
			build_setup_frame_hash(tp->setup_frame, dev);
			/* Extra control bit selects hash-filtering layout. */
			tx_flags = 0x08400000 | 192;
		} else {
			build_setup_frame_perfect(tp->setup_frame, dev);
		}

		spin_lock_irqsave(&tp->lock, flags);

		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
			/* Same setup recently queued, we need not add it. */
		} else {
			unsigned int entry;
			int dummy = -1;

			/* Now add this frame to the Tx list. */

			entry = tp->cur_tx++ % TX_RING_SIZE;

			if (entry != 0) {
				/* Avoid a chip errata by prefixing a dummy entry. */
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tp->tx_ring[entry].length =
					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
				tp->tx_ring[entry].buffer1 = 0;
				/* Must set DescOwned later to avoid race with chip */
				dummy = entry;
				entry = tp->cur_tx++ % TX_RING_SIZE;

			}

			tp->tx_buffers[entry].skb = NULL;
			tp->tx_buffers[entry].mapping =
				dma_map_single(&tp->pdev->dev,
					       tp->setup_frame,
					       sizeof(tp->setup_frame),
					       DMA_TO_DEVICE);
			/* Put the setup frame on the Tx list. */
			if (entry == TX_RING_SIZE-1)
				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
			tp->tx_ring[entry].buffer1 =
				cpu_to_le32(tp->tx_buffers[entry].mapping);
			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
			if (dummy >= 0)
				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
				netif_stop_queue(dev);

			/* Trigger an immediate transmit demand. */
			iowrite32(0, ioaddr + CSR1);
		}

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	iowrite32(csr6, ioaddr + CSR6);
}
1179 
1180 #ifdef CONFIG_TULIP_MWI
tulip_mwi_config(struct pci_dev * pdev,struct net_device * dev)1181 static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1182 {
1183 	struct tulip_private *tp = netdev_priv(dev);
1184 	u8 cache;
1185 	u16 pci_command;
1186 	u32 csr0;
1187 
1188 	if (tulip_debug > 3)
1189 		netdev_dbg(dev, "tulip_mwi_config()\n");
1190 
1191 	tp->csr0 = csr0 = 0;
1192 
1193 	/* if we have any cache line size at all, we can do MRM and MWI */
1194 	csr0 |= MRM | MWI;
1195 
1196 	/* Enable MWI in the standard PCI command bit.
1197 	 * Check for the case where MWI is desired but not available
1198 	 */
1199 	pci_try_set_mwi(pdev);
1200 
1201 	/* read result from hardware (in case bit refused to enable) */
1202 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1203 	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1204 		csr0 &= ~MWI;
1205 
1206 	/* if cache line size hardwired to zero, no MWI */
1207 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1208 	if ((csr0 & MWI) && (cache == 0)) {
1209 		csr0 &= ~MWI;
1210 		pci_clear_mwi(pdev);
1211 	}
1212 
1213 	/* assign per-cacheline-size cache alignment and
1214 	 * burst length values
1215 	 */
1216 	switch (cache) {
1217 	case 8:
1218 		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1219 		break;
1220 	case 16:
1221 		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1222 		break;
1223 	case 32:
1224 		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1225 		break;
1226 	default:
1227 		cache = 0;
1228 		break;
1229 	}
1230 
1231 	/* if we have a good cache line size, we by now have a good
1232 	 * csr0, so save it and exit
1233 	 */
1234 	if (cache)
1235 		goto out;
1236 
1237 	/* we don't have a good csr0 or cache line size, disable MWI */
1238 	if (csr0 & MWI) {
1239 		pci_clear_mwi(pdev);
1240 		csr0 &= ~MWI;
1241 	}
1242 
1243 	/* sane defaults for burst length and cache alignment
1244 	 * originally from de4x5 driver
1245 	 */
1246 	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1247 
1248 out:
1249 	tp->csr0 = csr0;
1250 	if (tulip_debug > 2)
1251 		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1252 			   cache, csr0);
1253 }
1254 #endif
1255 
1256 /*
1257  *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
1258  *	is the DM910X and the on chip ULi devices
1259  */
1260 
tulip_uli_dm_quirk(struct pci_dev * pdev)1261 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1262 {
1263 	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1264 		return 1;
1265 	return 0;
1266 }
1267 
/* net_device entry points; open/xmit/close etc. are defined elsewhere
 * in the driver, MAC-address helpers come from the generic etherdevice
 * code. */
static const struct net_device_ops tulip_netdev_ops = {
	.ndo_open		= tulip_open,
	.ndo_start_xmit		= tulip_start_xmit,
	.ndo_tx_timeout		= tulip_tx_timeout,
	.ndo_stop		= tulip_close,
	.ndo_get_stats		= tulip_get_stats,
	.ndo_do_ioctl 		= private_ioctl,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = poll_tulip,
#endif
};
1282 
/* Early 486-era PCI bridges (Intel Saturn, SiS 496) that reportedly
 * hang unless conservative CSR0 burst/cache settings are forced. */
static const struct pci_device_id early_486_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
	{ },
};
1288 
/*
 * tulip_init_one - PCI probe routine: bring up one tulip board.
 *
 * Applies per-chipset CSR0 bus-timing quirks, maps the register BAR,
 * allocates the Rx/Tx descriptor rings, reads the station address from
 * the EEPROM (or chip-specific equivalent), applies per-board module
 * options, probes for MII transceivers and registers the net_device.
 * Returns 0 on success or a negative errno.
 */
static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct tulip_private *tp;
	/* See note below on the multiport cards. */
	static unsigned char last_phys_addr[ETH_ALEN] = {
		0x00, 'L', 'i', 'n', 'u', 'x'
	};
	static int last_irq;
	int i, irq;
	unsigned short sum;
	unsigned char *ee_data;
	struct net_device *dev;
	void __iomem *ioaddr;
	static int board_idx = -1;
	int chip_idx = ent->driver_data;
	const char *chip_name = tulip_tbl[chip_idx].chip_name;
	unsigned int eeprom_missing = 0;
	unsigned int force_csr0 = 0;

	board_idx++;

	/*
	 *	Lan media wire a tulip chip to a wan interface. Needs a very
	 *	different driver (lmc driver)
	 */

        if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
		pr_err("skipping LMC card\n");
		return -ENODEV;
	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
		pr_err("skipping SBE T3E3 port\n");
		return -ENODEV;
	}

	/*
	 *	DM910x chips should be handled by the dmfe driver, except
	 *	on-board chips on SPARC systems.  Also, early DM9100s need
	 *	software CRC which only the dmfe driver supports.
	 */

#ifdef CONFIG_TULIP_DM910X
	if (chip_idx == DM910X) {
		struct device_node *dp;

		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
		    pdev->revision < 0x30) {
			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
			return -ENODEV;
		}

		dp = pci_device_to_OF_node(pdev);
		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
			pr_info("skipping DM910x expansion card (use dmfe)\n");
			return -ENODEV;
		}
	}
#endif

	/*
	 *	Looks for early PCI chipsets where people report hangs
	 *	without the workarounds being on.
	 */

	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
	      aligned.  Aries might need this too. The Saturn errata are not
	      pretty reading but thankfully it's an old 486 chipset.

	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
	      Saturn.
	*/

	/* NOTE: csr0 below is the file-scope bus-configuration word
	 * (arch-dependent default defined near the top of this file). */
	if (pci_dev_present(early_486_chipsets)) {
		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
		force_csr0 = 1;
	}

	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
	if (chip_idx == AX88140) {
		if ((csr0 & 0x3f00) == 0)
			csr0 |= 0x2000;
	}

	/* PNIC doesn't have MWI/MRL/MRM... */
	if (chip_idx == LC82C168)
		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */

	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
	if (tulip_uli_dm_quirk(pdev)) {
		csr0 &= ~0x01f100ff;
#if defined(CONFIG_SPARC)
                csr0 = (csr0 & ~0xff00) | 0xe000;
#endif
	}
	/*
	 *	And back to business
	 */

	i = pci_enable_device(pdev);
	if (i) {
		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
		return i;
	}

	irq = pdev->irq;

	/* alloc_etherdev ensures aligned and zeroed private structures */
	dev = alloc_etherdev (sizeof (*tp));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
		       pci_name(pdev),
		       (unsigned long long)pci_resource_len (pdev, 0),
		       (unsigned long long)pci_resource_start (pdev, 0));
		goto err_out_free_netdev;
	}

	/* grab all resources from both PIO and MMIO regions, as we
	 * don't want anyone else messing around with our hardware */
	if (pci_request_regions (pdev, DRV_NAME))
		goto err_out_free_netdev;

	ioaddr =  pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);

	if (!ioaddr)
		goto err_out_free_res;

	/*
	 * initialize private data structure 'tp'
	 * it is zeroed and aligned in alloc_etherdev
	 */
	tp = netdev_priv(dev);
	tp->dev = dev;

	/* Rx and Tx descriptor rings live in one coherent DMA block;
	 * the Tx ring starts right after the Rx ring. */
	tp->rx_ring = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
					 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
					 &tp->rx_ring_dma, GFP_KERNEL);
	if (!tp->rx_ring)
		goto err_out_mtable;
	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;

	tp->chip_id = chip_idx;
	tp->flags = tulip_tbl[chip_idx].flags;

	tp->wolinfo.supported = 0;
	tp->wolinfo.wolopts = 0;
	/* COMET: Enable power management only for AN983B */
	if (chip_idx == COMET ) {
		u32 sig;
		pci_read_config_dword (pdev, 0x80, &sig);
		if (sig == 0x09811317) {
			tp->flags |= COMET_PM;
			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
			pr_info("%s: Enabled WOL support for AN983B\n",
				__func__);
		}
	}
	tp->pdev = pdev;
	tp->base_addr = ioaddr;
	tp->revision = pdev->revision;
	tp->csr0 = csr0;
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->mii_lock);
	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);

	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);

#ifdef CONFIG_TULIP_MWI
	/* MWI probing would override the forced 486-chipset workaround. */
	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
		tulip_mwi_config (pdev, dev);
#endif

	/* Stop the chip's Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	pci_set_master(pdev);

#ifdef CONFIG_GSC
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
		switch (pdev->subsystem_device) {
		default:
			break;
		case 0x1061:
		case 0x1062:
		case 0x1063:
		case 0x1098:
		case 0x1099:
		case 0x10EE:
			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
			chip_name = "GSC DS21140 Tulip";
		}
	}
#endif

	/* Clear the missed-packet counter. */
	ioread32(ioaddr + CSR8);

	/* The station address ROM is read byte serially.  The register must
	   be polled, waiting for the value to be read bit serially from the
	   EEPROM.
	   */
	ee_data = tp->eeprom;
	memset(ee_data, 0, sizeof(tp->eeprom));
	/* 'sum' accumulates the station-address bytes; 0 or 6*0xff later
	   means the EEPROM is blank/absent. */
	sum = 0;
	if (chip_idx == LC82C168) {
		for (i = 0; i < 3; i++) {
			int value, boguscnt = 100000;
			iowrite32(0x600 | i, ioaddr + 0x98);
			do {
				value = ioread32(ioaddr + CSR9);
			} while (value < 0  && --boguscnt > 0);
			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
			sum += value & 0xffff;
		}
	} else if (chip_idx == COMET) {
		/* No need to read the EEPROM. */
		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
		for (i = 0; i < 6; i ++)
			sum += dev->dev_addr[i];
	} else {
		/* A serial EEPROM interface, we read now and sort it out later. */
		int sa_offset = 0;
		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);

		if (ee_max_addr > sizeof(tp->eeprom))
			ee_max_addr = sizeof(tp->eeprom);

		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
			ee_data[i] = data & 0xff;
			ee_data[i + 1] = data >> 8;
		}

		/* DEC now has a specification (see Notes) but early board makers
		   just put the address in the first EEPROM locations. */
		/* This does  memcmp(ee_data, ee_data+16, 8) */
		for (i = 0; i < 8; i ++)
			if (ee_data[i] != ee_data[16+i])
				sa_offset = 20;
		if (chip_idx == CONEXANT) {
			/* Check that the tuple type and length is correct. */
			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
				sa_offset = 0x19A;
		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
				   ee_data[2] == 0) {
			sa_offset = 2;		/* Grrr, damn Matrox boards. */
		}
#ifdef CONFIG_MIPS_COBALT
               if ((pdev->bus->number == 0) &&
                   ((PCI_SLOT(pdev->devfn) == 7) ||
                    (PCI_SLOT(pdev->devfn) == 12))) {
                       /* Cobalt MAC address in first EEPROM locations. */
                       sa_offset = 0;
		       /* Ensure our media table fixup gets applied */
		       memcpy(ee_data + 16, ee_data, 8);
               }
#endif
#ifdef CONFIG_GSC
		/* Check to see if we have a broken srom */
		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
			/* pci_vendor_id and subsystem_id are swapped */
			ee_data[0] = ee_data[2];
			ee_data[1] = ee_data[3];
			ee_data[2] = 0x61;
			ee_data[3] = 0x10;

			/* HSC-PCI boards need to be byte-swapped and shifted
			 * up 1 word.  This shift needs to happen at the end
			 * of the MAC first because of the 2 byte overlap.
			 */
			for (i = 4; i >= 0; i -= 2) {
				ee_data[17 + i + 3] = ee_data[17 + i];
				ee_data[16 + i + 5] = ee_data[16 + i];
			}
		}
#endif

		for (i = 0; i < 6; i ++) {
			dev->dev_addr[i] = ee_data[i + sa_offset];
			sum += ee_data[i + sa_offset];
		}
	}
	/* Lite-On boards have the address byte-swapped. */
	if ((dev->dev_addr[0] == 0xA0 ||
	     dev->dev_addr[0] == 0xC0 ||
	     dev->dev_addr[0] == 0x02) &&
	    dev->dev_addr[1] == 0x00)
		for (i = 0; i < 6; i+=2) {
			char tmp = dev->dev_addr[i];
			dev->dev_addr[i] = dev->dev_addr[i+1];
			dev->dev_addr[i+1] = tmp;
		}
	/* On the Zynx 315 Etherarray and other multiport boards only the
	   first Tulip has an EEPROM.
	   On Sparc systems the mac address is held in the OBP property
	   "local-mac-address".
	   The addresses of the subsequent ports are derived from the first.
	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
	   that here as well. */
	if (sum == 0  || sum == 6*0xff) {
#if defined(CONFIG_SPARC)
		struct device_node *dp = pci_device_to_OF_node(pdev);
		const unsigned char *addr;
		int len;
#endif
		eeprom_missing = 1;
		for (i = 0; i < 5; i++)
			dev->dev_addr[i] = last_phys_addr[i];
		dev->dev_addr[i] = last_phys_addr[i] + 1;
#if defined(CONFIG_SPARC)
		addr = of_get_property(dp, "local-mac-address", &len);
		if (addr && len == ETH_ALEN)
			memcpy(dev->dev_addr, addr, ETH_ALEN);
#endif
#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
		if (last_irq)
			irq = last_irq;
#endif
	}

	/* Remember this board's address and IRQ for the next port. */
	for (i = 0; i < 6; i++)
		last_phys_addr[i] = dev->dev_addr[i];
	last_irq = irq;

	/* The lower four bits are the media type. */
	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
		if (options[board_idx] & MEDIA_MASK)
			tp->default_port = options[board_idx] & MEDIA_MASK;
		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
			tp->full_duplex = 1;
		if (mtu[board_idx] > 0)
			dev->mtu = mtu[board_idx];
	}
	if (dev->mem_start & MEDIA_MASK)
		tp->default_port = dev->mem_start & MEDIA_MASK;
	if (tp->default_port) {
		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
			board_idx, medianame[tp->default_port & MEDIA_MASK]);
		tp->medialock = 1;
		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
			tp->full_duplex = 1;
	}
	if (tp->full_duplex)
		tp->full_duplex_lock = 1;

	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
		static const u16 media2advert[] = {
			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
		};
		tp->mii_advertise = media2advert[tp->default_port - 9];
		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
	}

	if (tp->flags & HAS_MEDIA_TABLE) {
		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
		tulip_parse_eeprom(dev);
		strcpy(dev->name, "eth%d");			/* un-hack */
	}

	if ((tp->flags & ALWAYS_CHECK_MII) ||
		(tp->mtable  &&  tp->mtable->has_mii) ||
		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
		if (tp->mtable  &&  tp->mtable->has_mii) {
			for (i = 0; i < tp->mtable->leafcount; i++)
				if (tp->mtable->mleaf[i].media == 11) {
					tp->cur_index = i;
					tp->saved_if_port = dev->if_port;
					tulip_select_media(dev, 2);
					dev->if_port = tp->saved_if_port;
					break;
				}
		}

		/* Find the connected MII xcvrs.
		   Doing this in open() would allow detecting external xcvrs
		   later, but takes much time. */
		tulip_find_mii (dev, board_idx);
	}

	/* The Tulip-specific entries in the device structure. */
	dev->netdev_ops = &tulip_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
#endif
	dev->ethtool_ops = &ops;

	if (register_netdev(dev))
		goto err_out_free_ring;

	pci_set_drvdata(pdev, dev);

	dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
		 chip_name, pdev->revision,
		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
		 eeprom_missing ? " EEPROM not present," : "",
		 dev->dev_addr, irq);

	/* Pick the link-change handler matching the chip's NWay flavour. */
        if (tp->chip_id == PNIC2)
		tp->link_change = pnic2_lnk_change;
	else if (tp->flags & HAS_NWAY)
		tp->link_change = t21142_lnk_change;
	else if (tp->flags & HAS_PNICNWAY)
		tp->link_change = pnic_lnk_change;

	/* Reset the xcvr interface and turn on heartbeat. */
	switch (chip_idx) {
	case DC21140:
	case DM910X:
	default:
		if (tp->mtable)
			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
		break;
	case DC21142:
		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
		} else
			t21142_start_nway(dev);
		break;
	case PNIC2:
	        /* just do a reset for sanity sake */
		iowrite32(0x0000, ioaddr + CSR13);
		iowrite32(0x0000, ioaddr + CSR14);
		break;
	case LC82C168:
		if ( ! tp->mii_cnt) {
			tp->nway = 1;
			tp->nwayset = 0;
			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
			iowrite32(0x30, ioaddr + CSR12);
			iowrite32(0x0001F078, ioaddr + CSR6);
			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
		}
		break;
	case MX98713:
	case COMPEX9881:
		iowrite32(0x00000000, ioaddr + CSR6);
		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
		iowrite32(0x00000001, ioaddr + CSR13);
		break;
	case MX98715:
	case MX98725:
		iowrite32(0x01a80000, ioaddr + CSR6);
		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
		iowrite32(0x00001000, ioaddr + CSR12);
		break;
	case COMET:
		/* No initialization necessary. */
		break;
	}

	/* put the chip in snooze mode until opened */
	tulip_set_power_state (tp, 0, 1);

	return 0;

err_out_free_ring:
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
			  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
			  tp->rx_ring, tp->rx_ring_dma);

err_out_mtable:
	kfree (tp->mtable);
	pci_iounmap(pdev, ioaddr);

err_out_free_res:
	pci_release_regions (pdev);

err_out_free_netdev:
	free_netdev (dev);
	return -ENODEV;
}
1779 
1780 
1781 /* set the registers according to the given wolopts */
/* Program the chip's wake-up registers to match @wolopts.  Only
 * COMET_PM parts (AN983B, flagged at probe time) have WOL hardware;
 * on anything else this is a no-op. */
static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	if (tp->flags & COMET_PM) {
		unsigned int tmp;

		/* Select PM (not APM) mode in CSR18. */
		tmp = ioread32(ioaddr + CSR18);
		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
		tmp |= comet_csr18_pm_mode;
		iowrite32(tmp, ioaddr + CSR18);

		/* Set the Wake-up Control/Status Register to the given WOL options*/
		tmp = ioread32(ioaddr + CSR13);
		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
		if (wolopts & WAKE_MAGIC)
			tmp |= comet_csr13_mpre;
		if (wolopts & WAKE_PHY)
			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
		/* Clear the event flags */
		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
		iowrite32(tmp, ioaddr + CSR13);
	}
}
1808 
/* PM suspend: quiesce the interface (if running), free its IRQ, then
 * program and arm the configured Wake-on-LAN options. */
static int __maybe_unused tulip_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct tulip_private *tp = netdev_priv(dev);

	/* NOTE(review): dev is used via netdev_priv() above before this
	 * NULL check; if dev can really be NULL here the check comes too
	 * late -- confirm against the PM core's drvdata guarantees. */
	if (!dev)
		return -EINVAL;

	if (!netif_running(dev))
		goto save_state;

	tulip_down(dev);

	netif_device_detach(dev);
	/* FIXME: it needlessly adds an error path. */
	free_irq(tp->pdev->irq, dev);

save_state:
	tulip_set_wolopts(to_pci_dev(dev_d), tp->wolinfo.wolopts);
	device_set_wakeup_enable(dev_d, !!tp->wolinfo.wolopts);

	return 0;
}
1832 
tulip_resume(struct device * dev_d)1833 static int __maybe_unused tulip_resume(struct device *dev_d)
1834 {
1835 	struct pci_dev *pdev = to_pci_dev(dev_d);
1836 	struct net_device *dev = dev_get_drvdata(dev_d);
1837 	struct tulip_private *tp = netdev_priv(dev);
1838 	void __iomem *ioaddr = tp->base_addr;
1839 	unsigned int tmp;
1840 	int retval = 0;
1841 
1842 	if (!dev)
1843 		return -EINVAL;
1844 
1845 	if (!netif_running(dev))
1846 		return 0;
1847 
1848 	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1849 			     dev->name, dev);
1850 	if (retval) {
1851 		pr_err("request_irq failed in resume\n");
1852 		return retval;
1853 	}
1854 
1855 	if (tp->flags & COMET_PM) {
1856 		device_set_wakeup_enable(dev_d, 0);
1857 
1858 		/* Clear the PMES flag */
1859 		tmp = ioread32(ioaddr + CSR20);
1860 		tmp |= comet_csr20_pmes;
1861 		iowrite32(tmp, ioaddr + CSR20);
1862 
1863 		/* Disable all wake-up events */
1864 		tulip_set_wolopts(pdev, 0);
1865 	}
1866 	netif_device_attach(dev);
1867 
1868 	if (netif_running(dev))
1869 		tulip_up(dev);
1870 
1871 	return 0;
1872 }
1873 
/*
 * PCI remove callback: tear down one tulip device.
 *
 * The teardown order mirrors the probe error path (err_out_* labels in
 * tulip_init_one): unregister the netdev first so no new I/O can start,
 * then free the shared rx/tx descriptor ring, the media table, the
 * ioremapped registers, the netdev itself, and finally the PCI
 * resources. Do not reorder these steps.
 */
static void tulip_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct tulip_private *tp;

	if (!dev)
		return;

	tp = netdev_priv(dev);
	unregister_netdev(dev);
	/* rx and tx rings were allocated as one coherent region at probe. */
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
			  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
			  tp->rx_ring, tp->rx_ring_dma);
	kfree (tp->mtable);
	pci_iounmap(pdev, tp->base_addr);
	free_netdev (dev);
	pci_release_regions (pdev);
	pci_disable_device(pdev);

	/* pci_power_off (pdev, -1); */
}
1896 
1897 #ifdef CONFIG_NET_POLL_CONTROLLER
1898 /*
1899  * Polling 'interrupt' - used by things like netconsole to send skbs
1900  * without having to re-enable interrupts. It's not called while
1901  * the interrupt routine is executing.
1902  */
1903 
poll_tulip(struct net_device * dev)1904 static void poll_tulip (struct net_device *dev)
1905 {
1906 	struct tulip_private *tp = netdev_priv(dev);
1907 	const int irq = tp->pdev->irq;
1908 
1909 	/* disable_irq here is not very nice, but with the lockless
1910 	   interrupt handler we have no other choice. */
1911 	disable_irq(irq);
1912 	tulip_interrupt (irq, dev);
1913 	enable_irq(irq);
1914 }
1915 #endif
1916 
/* System sleep hooks; compiled out when CONFIG_PM_SLEEP is unset. */
static SIMPLE_DEV_PM_OPS(tulip_pm_ops, tulip_suspend, tulip_resume);

/* PCI driver glue: binds the IDs in tulip_pci_tbl to probe/remove/PM. */
static struct pci_driver tulip_driver = {
	.name		= DRV_NAME,
	.id_table	= tulip_pci_tbl,
	.probe		= tulip_init_one,
	.remove		= tulip_remove_one,
	.driver.pm	= &tulip_pm_ops,
};
1926 
1927 
tulip_init(void)1928 static int __init tulip_init (void)
1929 {
1930 	if (!csr0) {
1931 		pr_warn("tulip: unknown CPU architecture, using default csr0\n");
1932 		/* default to 8 longword cache line alignment */
1933 		csr0 = 0x00A00000 | 0x4800;
1934 	}
1935 
1936 	/* copy module parms into globals */
1937 	tulip_rx_copybreak = rx_copybreak;
1938 	tulip_max_interrupt_work = max_interrupt_work;
1939 
1940 	/* probe for and init boards */
1941 	return pci_register_driver(&tulip_driver);
1942 }
1943 
1944 
/* Module exit point: unregister the PCI driver; per-device teardown
 * happens through tulip_remove_one for each bound device. */
static void __exit tulip_cleanup (void)
{
	pci_unregister_driver (&tulip_driver);
}


module_init(tulip_init);
module_exit(tulip_cleanup);
1953