1 /*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2 
3 	Copyright 2000,2001  The Linux Kernel Team
4 	Written/copyright 1994-2001 by Donald Becker.
5 
6 	This software may be used and distributed according to the terms
7 	of the GNU General Public License, incorporated herein by reference.
8 
9 	Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11 
12 #define pr_fmt(fmt) "tulip: " fmt
13 
14 #define DRV_NAME	"tulip"
15 
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/slab.h>
19 #include "tulip.h"
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/etherdevice.h>
23 #include <linux/delay.h>
24 #include <linux/mii.h>
25 #include <linux/crc32.h>
26 #include <asm/unaligned.h>
27 #include <linux/uaccess.h>
28 
29 #ifdef CONFIG_SPARC
30 #include <asm/prom.h>
31 #endif
32 
33 /* A few user-configurable values. */
34 
35 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
36 static unsigned int max_interrupt_work = 25;
37 
38 #define MAX_UNITS 8
39 /* Used to pass the full-duplex flag, etc. */
40 static int full_duplex[MAX_UNITS];
41 static int options[MAX_UNITS];
42 static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
43 
44 /*  The possible media types that can be set in options[] are: */
45 const char * const medianame[32] = {
46 	"10baseT", "10base2", "AUI", "100baseTx",
47 	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
48 	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
49 	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
50 	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
51 	"","","","", "","","","",  "","","","Transceiver reset",
52 };
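/*
 * Illustrative sketch, not part of the original driver: the low bits of an
 * options[] value (masked with MEDIA_MASK from tulip.h) index medianame[],
 * which is how tulip_init_one() derives tp->default_port further below.
 */
static inline const char *tulip_medianame_of_option(int option)
{
	/* option is a user-supplied module parameter value */
	return medianame[option & MEDIA_MASK];
}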
53 
54 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
55 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
56 	defined(CONFIG_SPARC) || defined(__ia64__) || \
57 	defined(__sh__) || defined(__mips__)
58 static int rx_copybreak = 1518;
59 #else
60 static int rx_copybreak = 100;
61 #endif
62 
63 /*
64   Set the bus performance register.
65 	Typical: Set 16 longword cache alignment, no burst limit.
66 	Cache alignment bits 15:14	     Burst length 13:8
67 		0000	No alignment  0x00000000 unlimited		0800 8 longwords
68 		4000	8  longwords		0100 1 longword		1000 16 longwords
69 		8000	16 longwords		0200 2 longwords	2000 32 longwords
70 		C000	32  longwords		0400 4 longwords
71 	Warning: many older 486 systems are broken and require setting 0x00A04800
72 	   8 longword cache alignment, 8 longword burst.
73 	ToDo: Non-Intel setting could be better.
74 */
75 
76 #if defined(__alpha__) || defined(__ia64__)
77 static int csr0 = 0x01A00000 | 0xE000;
78 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
79 static int csr0 = 0x01A00000 | 0x8000;
80 #elif defined(CONFIG_SPARC) || defined(__hppa__)
81 /* The UltraSparc PCI controllers will disconnect at every 64-byte
82  * crossing anyways so it makes no sense to tell Tulip to burst
83  * any more than that.
84  */
85 static int csr0 = 0x01A00000 | 0x9000;
86 #elif defined(__arm__) || defined(__sh__)
87 static int csr0 = 0x01A00000 | 0x4800;
88 #elif defined(__mips__)
89 static int csr0 = 0x00200000 | 0x4000;
90 #else
91 static int csr0;
92 #endif
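/*
 * Illustrative sketch, not part of the original driver: how the cache
 * alignment (bits 15:14) and burst length (bits 13:8) fields documented in
 * the table above combine into the CSR0 values chosen here, e.g.
 * tulip_csr0_bus_mode(2, 16) == 0x9000 and tulip_csr0_bus_mode(1, 8) == 0x4800.
 */
static inline int tulip_csr0_bus_mode(unsigned int cal, unsigned int burst_lw)
{
	/* cal: 0=none, 1=8, 2=16, 3=32 longwords; burst_lw: longwords, 0=unlimited */
	return ((cal & 0x3) << 14) | ((burst_lw & 0x3f) << 8);
}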
93 
94 /* Operational parameters that usually are not changed. */
95 /* Time in jiffies before concluding the transmitter is hung. */
96 #define TX_TIMEOUT  (4*HZ)
97 
98 
99 MODULE_AUTHOR("The Linux Kernel Team");
100 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
101 MODULE_LICENSE("GPL");
102 module_param(tulip_debug, int, 0);
103 module_param(max_interrupt_work, int, 0);
104 module_param(rx_copybreak, int, 0);
105 module_param(csr0, int, 0);
106 module_param_array(options, int, NULL, 0);
107 module_param_array(full_duplex, int, NULL, 0);
108 
109 #ifdef TULIP_DEBUG
110 int tulip_debug = TULIP_DEBUG;
111 #else
112 int tulip_debug = 1;
113 #endif
114 
115 static void tulip_timer(struct timer_list *t)
116 {
117 	struct tulip_private *tp = from_timer(tp, t, timer);
118 	struct net_device *dev = tp->dev;
119 
120 	if (netif_running(dev))
121 		schedule_work(&tp->media_work);
122 }
123 
124 /*
125  * This table is used during operation for capabilities and the media timer.
126  *
127  * It is indexed via the values in 'enum chips'
128  */
129 
130 const struct tulip_chip_table tulip_tbl[] = {
131   { }, /* placeholder for array, slot unused currently */
132   { }, /* placeholder for array, slot unused currently */
133 
134   /* DC21140 */
135   { "Digital DS21140 Tulip", 128, 0x0001ebef,
136 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
137 	tulip_media_task },
138 
139   /* DC21142, DC21143 */
140   { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
141 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
142 	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
143 
144   /* LC82C168 */
145   { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
146 	HAS_MII | HAS_PNICNWAY, pnic_timer, },
147 
148   /* MX98713 */
149   { "Macronix 98713 PMAC", 128, 0x0001ebef,
150 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
151 
152   /* MX98715 */
153   { "Macronix 98715 PMAC", 256, 0x0001ebef,
154 	HAS_MEDIA_TABLE, mxic_timer, },
155 
156   /* MX98725 */
157   { "Macronix 98725 PMAC", 256, 0x0001ebef,
158 	HAS_MEDIA_TABLE, mxic_timer, },
159 
160   /* AX88140 */
161   { "ASIX AX88140", 128, 0x0001fbff,
162 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
163 	| IS_ASIX, tulip_timer, tulip_media_task },
164 
165   /* PNIC2 */
166   { "Lite-On PNIC-II", 256, 0x0801fbff,
167 	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
168 
169   /* COMET */
170   { "ADMtek Comet", 256, 0x0001abef,
171 	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
172 
173   /* COMPEX9881 */
174   { "Compex 9881 PMAC", 128, 0x0001ebef,
175 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
176 
177   /* I21145 */
178   { "Intel DS21145 Tulip", 128, 0x0801fbff,
179 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
180 	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
181 
182   /* DM910X */
183 #ifdef CONFIG_TULIP_DM910X
184   { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
185 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
186 	tulip_timer, tulip_media_task },
187 #else
188   { NULL },
189 #endif
190 
191   /* RS7112 */
192   { "Conexant LANfinity", 256, 0x0001ebef,
193 	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
194 
195 };
196 
197 
198 static const struct pci_device_id tulip_pci_tbl[] = {
199 	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
200 	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
201 	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
202 	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
203 	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
204 /*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
205 	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
206 	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
207 	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
208 	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
209 	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
210 	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
211 	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
212 	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
213 	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
214 	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
215 	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
216 	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
217 	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
218 	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
219 #ifdef CONFIG_TULIP_DM910X
220 	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
221 	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
222 #endif
223 	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
225 	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
230 	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
231 	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
232 	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
233 	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
234 	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
235 	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
236 	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
237 	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 	{ } /* terminate list */
239 };
240 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
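/*
 * Illustrative sketch, not part of the original driver: driver_data in each
 * tulip_pci_tbl entry above is an 'enum chips' value, so the matching row of
 * tulip_tbl[] (chip name, io_size, valid_intrs, timers) is reached simply as:
 */
static inline const struct tulip_chip_table *
tulip_chip_info(const struct pci_device_id *ent)
{
	return &tulip_tbl[ent->driver_data];
}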
241 
242 
243 /* A full-duplex map for media types. */
244 const char tulip_media_cap[32] =
245 {0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
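/*
 * Illustrative sketch, not part of the original driver: entries in
 * tulip_media_cap[] are tested against the MediaIsMII, MediaIsFD and
 * MediaAlwaysFD capability flags from tulip.h, for example:
 */
static inline int tulip_port_is_mii(int if_port)
{
	return tulip_media_cap[if_port & 31] & MediaIsMII;
}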
246 
247 static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue);
248 static void tulip_init_ring(struct net_device *dev);
249 static void tulip_free_ring(struct net_device *dev);
250 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
251 					  struct net_device *dev);
252 static int tulip_open(struct net_device *dev);
253 static int tulip_close(struct net_device *dev);
254 static void tulip_up(struct net_device *dev);
255 static void tulip_down(struct net_device *dev);
256 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
257 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
258 static void set_rx_mode(struct net_device *dev);
259 static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
260 #ifdef CONFIG_NET_POLL_CONTROLLER
261 static void poll_tulip(struct net_device *dev);
262 #endif
263 
264 static void tulip_set_power_state (struct tulip_private *tp,
265 				   int sleep, int snooze)
266 {
267 	if (tp->flags & HAS_ACPI) {
268 		u32 tmp, newtmp;
269 		pci_read_config_dword (tp->pdev, CFDD, &tmp);
270 		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
271 		if (sleep)
272 			newtmp |= CFDD_Sleep;
273 		else if (snooze)
274 			newtmp |= CFDD_Snooze;
275 		if (tmp != newtmp)
276 			pci_write_config_dword (tp->pdev, CFDD, newtmp);
277 	}
278 
279 }
280 
281 
282 static void tulip_up(struct net_device *dev)
283 {
284 	struct tulip_private *tp = netdev_priv(dev);
285 	void __iomem *ioaddr = tp->base_addr;
286 	int next_tick = 3*HZ;
287 	u32 reg;
288 	int i;
289 
290 #ifdef CONFIG_TULIP_NAPI
291 	napi_enable(&tp->napi);
292 #endif
293 
294 	/* Wake the chip from sleep/snooze mode. */
295 	tulip_set_power_state (tp, 0, 0);
296 
297 	/* Disable all WOL events */
298 	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
299 	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
300 	tulip_set_wolopts(tp->pdev, 0);
301 
302 	/* On some chip revs we must set the MII/SYM port before the reset!? */
303 	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
304 		iowrite32(0x00040000, ioaddr + CSR6);
305 
306 	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
307 	iowrite32(0x00000001, ioaddr + CSR0);
308 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
309 	udelay(100);
310 
311 	/* Deassert reset.
312 	   Wait the specified 50 PCI cycles after a reset by initializing
313 	   Tx and Rx queues and the address filter list. */
314 	iowrite32(tp->csr0, ioaddr + CSR0);
315 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
316 	udelay(100);
317 
318 	if (tulip_debug > 1)
319 		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
320 
321 	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
322 	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
323 	tp->cur_rx = tp->cur_tx = 0;
324 	tp->dirty_rx = tp->dirty_tx = 0;
325 
326 	if (tp->flags & MC_HASH_ONLY) {
327 		u32 addr_low = get_unaligned_le32(dev->dev_addr);
328 		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
329 		if (tp->chip_id == AX88140) {
330 			iowrite32(0, ioaddr + CSR13);
331 			iowrite32(addr_low,  ioaddr + CSR14);
332 			iowrite32(1, ioaddr + CSR13);
333 			iowrite32(addr_high, ioaddr + CSR14);
334 		} else if (tp->flags & COMET_MAC_ADDR) {
335 			iowrite32(addr_low,  ioaddr + 0xA4);
336 			iowrite32(addr_high, ioaddr + 0xA8);
337 			iowrite32(0, ioaddr + CSR27);
338 			iowrite32(0, ioaddr + CSR28);
339 		}
340 	} else {
341 		/* This is set_rx_mode(), but without starting the transmitter. */
342 		u16 *eaddrs = (u16 *)dev->dev_addr;
343 		u16 *setup_frm = &tp->setup_frame[15*6];
344 		dma_addr_t mapping;
345 
346 		/* 21140 bug: you must add the broadcast address. */
347 		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
348 		/* Fill the final entry of the table with our physical address. */
349 		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
350 		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
351 		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
352 
353 		mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame,
354 					 sizeof(tp->setup_frame),
355 					 DMA_TO_DEVICE);
356 		tp->tx_buffers[tp->cur_tx].skb = NULL;
357 		tp->tx_buffers[tp->cur_tx].mapping = mapping;
358 
359 		/* Put the setup frame on the Tx list. */
360 		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
361 		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
362 		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
363 
364 		tp->cur_tx++;
365 	}
366 
367 	tp->saved_if_port = dev->if_port;
368 	if (dev->if_port == 0)
369 		dev->if_port = tp->default_port;
370 
371 	/* Allow selecting a default media. */
372 	i = 0;
373 	if (tp->mtable == NULL)
374 		goto media_picked;
375 	if (dev->if_port) {
376 		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
377 			(dev->if_port == 12 ? 0 : dev->if_port);
378 		for (i = 0; i < tp->mtable->leafcount; i++)
379 			if (tp->mtable->mleaf[i].media == looking_for) {
380 				dev_info(&dev->dev,
381 					 "Using user-specified media %s\n",
382 					 medianame[dev->if_port]);
383 				goto media_picked;
384 			}
385 	}
386 	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
387 		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
388 		for (i = 0; i < tp->mtable->leafcount; i++)
389 			if (tp->mtable->mleaf[i].media == looking_for) {
390 				dev_info(&dev->dev,
391 					 "Using EEPROM-set media %s\n",
392 					 medianame[looking_for]);
393 				goto media_picked;
394 			}
395 	}
396 	/* Start sensing first non-full-duplex media. */
397 	for (i = tp->mtable->leafcount - 1;
398 		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
399 		;
400 media_picked:
401 
402 	tp->csr6 = 0;
403 	tp->cur_index = i;
404 	tp->nwayset = 0;
405 
406 	if (dev->if_port) {
407 		if (tp->chip_id == DC21143  &&
408 		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
409 			/* We must reset the media CSRs when we force-select MII mode. */
410 			iowrite32(0x0000, ioaddr + CSR13);
411 			iowrite32(0x0000, ioaddr + CSR14);
412 			iowrite32(0x0008, ioaddr + CSR15);
413 		}
414 		tulip_select_media(dev, 1);
415 	} else if (tp->chip_id == DC21142) {
416 		if (tp->mii_cnt) {
417 			tulip_select_media(dev, 1);
418 			if (tulip_debug > 1)
419 				dev_info(&dev->dev,
420 					 "Using MII transceiver %d, status %04x\n",
421 					 tp->phys[0],
422 					 tulip_mdio_read(dev, tp->phys[0], 1));
423 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
424 			tp->csr6 = csr6_mask_hdcap;
425 			dev->if_port = 11;
426 			iowrite32(0x0000, ioaddr + CSR13);
427 			iowrite32(0x0000, ioaddr + CSR14);
428 		} else
429 			t21142_start_nway(dev);
430 	} else if (tp->chip_id == PNIC2) {
431 	        /* for initial startup advertise 10/100 Full and Half */
432 	        tp->sym_advertise = 0x01E0;
433                 /* enable autonegotiate end interrupt */
434 	        iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
435 	        iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
436 		pnic2_start_nway(dev);
437 	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
438 		if (tp->mii_cnt) {
439 			dev->if_port = 11;
440 			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
441 			iowrite32(0x0001, ioaddr + CSR15);
442 		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
443 			pnic_do_nway(dev);
444 		else {
445 			/* Start with 10mbps to do autonegotiation. */
446 			iowrite32(0x32, ioaddr + CSR12);
447 			tp->csr6 = 0x00420000;
448 			iowrite32(0x0001B078, ioaddr + 0xB8);
449 			iowrite32(0x0201B078, ioaddr + 0xB8);
450 			next_tick = 1*HZ;
451 		}
452 	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
453 		   ! tp->medialock) {
454 		dev->if_port = 0;
455 		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
456 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
457 	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
458 		/* Provided by BOLO, Macronix - 12/10/1998. */
459 		dev->if_port = 0;
460 		tp->csr6 = 0x01a80200;
461 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
462 		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
463 	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
464 		/* Enable automatic Tx underrun recovery. */
465 		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
466 		dev->if_port = tp->mii_cnt ? 11 : 0;
467 		tp->csr6 = 0x00040000;
468 	} else if (tp->chip_id == AX88140) {
469 		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
470 	} else
471 		tulip_select_media(dev, 1);
472 
473 	/* Start the chip's Tx to process setup frame. */
474 	tulip_stop_rxtx(tp);
475 	barrier();
476 	udelay(5);
477 	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
478 
479 	/* Enable interrupts by setting the interrupt mask. */
480 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
481 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
482 	tulip_start_rxtx(tp);
483 	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */
484 
485 	if (tulip_debug > 2) {
486 		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
487 			   ioread32(ioaddr + CSR0),
488 			   ioread32(ioaddr + CSR5),
489 			   ioread32(ioaddr + CSR6));
490 	}
491 
492 	/* Set the timer to check for link beat and perhaps switch to an
493 	   alternate media type. */
494 	tp->timer.expires = RUN_AT(next_tick);
495 	add_timer(&tp->timer);
496 #ifdef CONFIG_TULIP_NAPI
497 	timer_setup(&tp->oom_timer, oom_timer, 0);
498 #endif
499 }
500 
501 static int
502 tulip_open(struct net_device *dev)
503 {
504 	struct tulip_private *tp = netdev_priv(dev);
505 	int retval;
506 
507 	tulip_init_ring (dev);
508 
509 	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
510 			     dev->name, dev);
511 	if (retval)
512 		goto free_ring;
513 
514 	tulip_up (dev);
515 
516 	netif_start_queue (dev);
517 
518 	return 0;
519 
520 free_ring:
521 	tulip_free_ring (dev);
522 	return retval;
523 }
524 
525 
526 static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue)
527 {
528 	struct tulip_private *tp = netdev_priv(dev);
529 	void __iomem *ioaddr = tp->base_addr;
530 	unsigned long flags;
531 
532 	spin_lock_irqsave (&tp->lock, flags);
533 
534 	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
535 		/* Do nothing -- the media monitor should handle this. */
536 		if (tulip_debug > 1)
537 			dev_warn(&dev->dev,
538 				 "Transmit timeout using MII device\n");
539 	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
540 		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
541 		   tp->chip_id == DM910X) {
542 		dev_warn(&dev->dev,
543 			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
544 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
545 			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
546 			 ioread32(ioaddr + CSR15));
547 		tp->timeout_recovery = 1;
548 		schedule_work(&tp->media_work);
549 		goto out_unlock;
550 	} else if (tp->chip_id == PNIC2) {
551 		dev_warn(&dev->dev,
552 			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
553 			 (int)ioread32(ioaddr + CSR5),
554 			 (int)ioread32(ioaddr + CSR6),
555 			 (int)ioread32(ioaddr + CSR7),
556 			 (int)ioread32(ioaddr + CSR12));
557 	} else {
558 		dev_warn(&dev->dev,
559 			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
560 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
561 		dev->if_port = 0;
562 	}
563 
564 #if defined(way_too_many_messages)
565 	if (tulip_debug > 3) {
566 		int i;
567 		for (i = 0; i < RX_RING_SIZE; i++) {
568 			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
569 			int j;
570 			printk(KERN_DEBUG
571 			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
572 			       i,
573 			       (unsigned int)tp->rx_ring[i].status,
574 			       (unsigned int)tp->rx_ring[i].length,
575 			       (unsigned int)tp->rx_ring[i].buffer1,
576 			       (unsigned int)tp->rx_ring[i].buffer2,
577 			       buf[0], buf[1], buf[2]);
578 			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
579 				if (j < 100)
580 					pr_cont(" %02x", buf[j]);
581 			pr_cont(" j=%d\n", j);
582 		}
583 		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
584 		for (i = 0; i < RX_RING_SIZE; i++)
585 			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
586 		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
587 		for (i = 0; i < TX_RING_SIZE; i++)
588 			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
589 		pr_cont("\n");
590 	}
591 #endif
592 
593 	tulip_tx_timeout_complete(tp, ioaddr);
594 
595 out_unlock:
596 	spin_unlock_irqrestore (&tp->lock, flags);
597 	netif_trans_update(dev); /* prevent tx timeout */
598 	netif_wake_queue (dev);
599 }
600 
601 
602 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
603 static void tulip_init_ring(struct net_device *dev)
604 {
605 	struct tulip_private *tp = netdev_priv(dev);
606 	int i;
607 
608 	tp->susp_rx = 0;
609 	tp->ttimer = 0;
610 	tp->nir = 0;
611 
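	/*
	 * Descriptive note: buffer2 of every descriptor below is chained to
	 * the bus address of the next descriptor, and the last entry both
	 * points back to the start and carries DESC_RING_WRAP, so the chip
	 * sees the ring as circular.
	 */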
612 	for (i = 0; i < RX_RING_SIZE; i++) {
613 		tp->rx_ring[i].status = 0x00000000;
614 		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
615 		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
616 		tp->rx_buffers[i].skb = NULL;
617 		tp->rx_buffers[i].mapping = 0;
618 	}
619 	/* Mark the last entry as wrapping the ring. */
620 	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
621 	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
622 
623 	for (i = 0; i < RX_RING_SIZE; i++) {
624 		dma_addr_t mapping;
625 
626 		/* Note the receive buffer must be longword aligned.
627 		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
628 		   use skb_reserve() to align the IP header! */
629 		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
630 		tp->rx_buffers[i].skb = skb;
631 		if (skb == NULL)
632 			break;
633 		mapping = dma_map_single(&tp->pdev->dev, skb->data,
634 					 PKT_BUF_SZ, DMA_FROM_DEVICE);
635 		tp->rx_buffers[i].mapping = mapping;
636 		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
637 		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
638 	}
639 	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
640 
641 	/* The Tx buffer descriptor is filled in as needed, but we
642 	   do need to clear the ownership bit. */
643 	for (i = 0; i < TX_RING_SIZE; i++) {
644 		tp->tx_buffers[i].skb = NULL;
645 		tp->tx_buffers[i].mapping = 0;
646 		tp->tx_ring[i].status = 0x00000000;
647 		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
648 	}
649 	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
650 }
651 
652 static netdev_tx_t
653 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
654 {
655 	struct tulip_private *tp = netdev_priv(dev);
656 	int entry;
657 	u32 flag;
658 	dma_addr_t mapping;
659 	unsigned long flags;
660 
661 	spin_lock_irqsave(&tp->lock, flags);
662 
663 	/* Calculate the next Tx descriptor entry. */
664 	entry = tp->cur_tx % TX_RING_SIZE;
665 
666 	tp->tx_buffers[entry].skb = skb;
667 	mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
668 				 DMA_TO_DEVICE);
669 	tp->tx_buffers[entry].mapping = mapping;
670 	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
671 
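	/*
	 * Descriptive note: a Tx-done interrupt is requested only when the
	 * ring reaches the half-full mark and again when it is nearly
	 * exhausted (flag 0xe0000000); the common case queues the packet
	 * with interrupts suppressed (0x60000000) to limit interrupt load.
	 */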
672 	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
673 		flag = 0x60000000; /* No interrupt */
674 	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
675 		flag = 0xe0000000; /* Tx-done intr. */
676 	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
677 		flag = 0x60000000; /* No Tx-done intr. */
678 	} else {		/* Leave room for set_rx_mode() to fill entries. */
679 		flag = 0xe0000000; /* Tx-done intr. */
680 		netif_stop_queue(dev);
681 	}
682 	if (entry == TX_RING_SIZE-1)
683 		flag = 0xe0000000 | DESC_RING_WRAP;
684 
685 	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
686 	/* if we were using Transmit Automatic Polling, we would need a
687 	 * wmb() here. */
688 	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
689 	wmb();
690 
691 	tp->cur_tx++;
692 
693 	/* Trigger an immediate transmit demand. */
694 	iowrite32(0, tp->base_addr + CSR1);
695 
696 	spin_unlock_irqrestore(&tp->lock, flags);
697 
698 	return NETDEV_TX_OK;
699 }
700 
701 static void tulip_clean_tx_ring(struct tulip_private *tp)
702 {
703 	unsigned int dirty_tx;
704 
705 	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
706 		dirty_tx++) {
707 		int entry = dirty_tx % TX_RING_SIZE;
708 		int status = le32_to_cpu(tp->tx_ring[entry].status);
709 
710 		if (status < 0) {
711 			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
712 			tp->tx_ring[entry].status = 0;
713 		}
714 
715 		/* Check for Tx filter setup frames. */
716 		if (tp->tx_buffers[entry].skb == NULL) {
717 			/* test because dummy frames are not mapped */
718 			if (tp->tx_buffers[entry].mapping)
719 				dma_unmap_single(&tp->pdev->dev,
720 						 tp->tx_buffers[entry].mapping,
721 						 sizeof(tp->setup_frame),
722 						 DMA_TO_DEVICE);
723 			continue;
724 		}
725 
726 		dma_unmap_single(&tp->pdev->dev,
727 				 tp->tx_buffers[entry].mapping,
728 				 tp->tx_buffers[entry].skb->len,
729 				 DMA_TO_DEVICE);
730 
731 		/* Free the original skb. */
732 		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
733 		tp->tx_buffers[entry].skb = NULL;
734 		tp->tx_buffers[entry].mapping = 0;
735 	}
736 }
737 
738 static void tulip_down (struct net_device *dev)
739 {
740 	struct tulip_private *tp = netdev_priv(dev);
741 	void __iomem *ioaddr = tp->base_addr;
742 	unsigned long flags;
743 
744 	cancel_work_sync(&tp->media_work);
745 
746 #ifdef CONFIG_TULIP_NAPI
747 	napi_disable(&tp->napi);
748 #endif
749 
750 	del_timer_sync (&tp->timer);
751 #ifdef CONFIG_TULIP_NAPI
752 	del_timer_sync (&tp->oom_timer);
753 #endif
754 	spin_lock_irqsave (&tp->lock, flags);
755 
756 	/* Disable interrupts by clearing the interrupt mask. */
757 	iowrite32 (0x00000000, ioaddr + CSR7);
758 
759 	/* Stop the Tx and Rx processes. */
760 	tulip_stop_rxtx(tp);
761 
762 	/* prepare receive buffers */
763 	tulip_refill_rx(dev);
764 
765 	/* release any unconsumed transmit buffers */
766 	tulip_clean_tx_ring(tp);
767 
768 	if (ioread32(ioaddr + CSR6) != 0xffffffff)
769 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
770 
771 	spin_unlock_irqrestore (&tp->lock, flags);
772 
773 	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
774 
775 	dev->if_port = tp->saved_if_port;
776 
777 	/* Leave the driver in snooze, not sleep, mode. */
778 	tulip_set_power_state (tp, 0, 1);
779 }
780 
781 static void tulip_free_ring (struct net_device *dev)
782 {
783 	struct tulip_private *tp = netdev_priv(dev);
784 	int i;
785 
786 	/* Free all the skbuffs in the Rx queue. */
787 	for (i = 0; i < RX_RING_SIZE; i++) {
788 		struct sk_buff *skb = tp->rx_buffers[i].skb;
789 		dma_addr_t mapping = tp->rx_buffers[i].mapping;
790 
791 		tp->rx_buffers[i].skb = NULL;
792 		tp->rx_buffers[i].mapping = 0;
793 
794 		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
795 		tp->rx_ring[i].length = 0;
796 		/* An invalid address. */
797 		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
798 		if (skb) {
799 			dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
800 					 DMA_FROM_DEVICE);
801 			dev_kfree_skb (skb);
802 		}
803 	}
804 
805 	for (i = 0; i < TX_RING_SIZE; i++) {
806 		struct sk_buff *skb = tp->tx_buffers[i].skb;
807 
808 		if (skb != NULL) {
809 			dma_unmap_single(&tp->pdev->dev,
810 					 tp->tx_buffers[i].mapping, skb->len,
811 					 DMA_TO_DEVICE);
812 			dev_kfree_skb (skb);
813 		}
814 		tp->tx_buffers[i].skb = NULL;
815 		tp->tx_buffers[i].mapping = 0;
816 	}
817 }
818 
819 static int tulip_close (struct net_device *dev)
820 {
821 	struct tulip_private *tp = netdev_priv(dev);
822 	void __iomem *ioaddr = tp->base_addr;
823 
824 	netif_stop_queue (dev);
825 
826 	tulip_down (dev);
827 
828 	if (tulip_debug > 1)
829 		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
830 			   ioread32 (ioaddr + CSR5));
831 
832 	free_irq (tp->pdev->irq, dev);
833 
834 	tulip_free_ring (dev);
835 
836 	return 0;
837 }
838 
839 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
840 {
841 	struct tulip_private *tp = netdev_priv(dev);
842 	void __iomem *ioaddr = tp->base_addr;
843 
844 	if (netif_running(dev)) {
845 		unsigned long flags;
846 
847 		spin_lock_irqsave (&tp->lock, flags);
848 
849 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
850 
851 		spin_unlock_irqrestore(&tp->lock, flags);
852 	}
853 
854 	return &dev->stats;
855 }
856 
857 
858 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
859 {
860 	struct tulip_private *np = netdev_priv(dev);
861 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
862 	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
863 }
864 
865 
866 static int tulip_ethtool_set_wol(struct net_device *dev,
867 				 struct ethtool_wolinfo *wolinfo)
868 {
869 	struct tulip_private *tp = netdev_priv(dev);
870 
871 	if (wolinfo->wolopts & (~tp->wolinfo.supported))
872 		   return -EOPNOTSUPP;
873 
874 	tp->wolinfo.wolopts = wolinfo->wolopts;
875 	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
876 	return 0;
877 }
878 
879 static void tulip_ethtool_get_wol(struct net_device *dev,
880 				  struct ethtool_wolinfo *wolinfo)
881 {
882 	struct tulip_private *tp = netdev_priv(dev);
883 
884 	wolinfo->supported = tp->wolinfo.supported;
885 	wolinfo->wolopts = tp->wolinfo.wolopts;
886 	return;
887 }
888 
889 
890 static const struct ethtool_ops ops = {
891 	.get_drvinfo = tulip_get_drvinfo,
892 	.set_wol     = tulip_ethtool_set_wol,
893 	.get_wol     = tulip_ethtool_get_wol,
894 };
895 
896 /* Provide ioctl() calls to examine the MII xcvr state. */
897 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
898 {
899 	struct tulip_private *tp = netdev_priv(dev);
900 	void __iomem *ioaddr = tp->base_addr;
901 	struct mii_ioctl_data *data = if_mii(rq);
902 	const unsigned int phy_idx = 0;
903 	int phy = tp->phys[phy_idx] & 0x1f;
904 	unsigned int regnum = data->reg_num;
905 
906 	switch (cmd) {
907 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
908 		if (tp->mii_cnt)
909 			data->phy_id = phy;
910 		else if (tp->flags & HAS_NWAY)
911 			data->phy_id = 32;
912 		else if (tp->chip_id == COMET)
913 			data->phy_id = 1;
914 		else
915 			return -ENODEV;
916 		fallthrough;
917 
918 	case SIOCGMIIREG:		/* Read MII PHY register. */
919 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
920 			int csr12 = ioread32 (ioaddr + CSR12);
921 			int csr14 = ioread32 (ioaddr + CSR14);
922 			switch (regnum) {
923 			case 0:
924                                 if (((csr14<<5) & 0x1000) ||
925                                         (dev->if_port == 5 && tp->nwayset))
926                                         data->val_out = 0x1000;
927                                 else
928                                         data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
929                                                 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
930 				break;
931 			case 1:
932                                 data->val_out =
933 					0x1848 +
934 					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
935 					((csr12&0x06) == 6 ? 0 : 4);
936                                 data->val_out |= 0x6048;
937 				break;
938 			case 4:
939                                 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
940                                 data->val_out =
941 					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
942 					((csr14 >> 1) & 0x20) + 1;
943                                 data->val_out |= ((csr14 >> 9) & 0x03C0);
944 				break;
945 			case 5: data->val_out = tp->lpar; break;
946 			default: data->val_out = 0; break;
947 			}
948 		} else {
949 			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
950 		}
951 		return 0;
952 
953 	case SIOCSMIIREG:		/* Write MII PHY register. */
954 		if (regnum & ~0x1f)
955 			return -EINVAL;
956 		if (data->phy_id == phy) {
957 			u16 value = data->val_in;
958 			switch (regnum) {
959 			case 0:	/* Check for autonegotiation on or reset. */
960 				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
961 				if (tp->full_duplex_lock)
962 					tp->full_duplex = (value & 0x0100) ? 1 : 0;
963 				break;
964 			case 4:
965 				tp->advertising[phy_idx] =
966 				tp->mii_advertise = data->val_in;
967 				break;
968 			}
969 		}
970 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
971 			u16 value = data->val_in;
972 			if (regnum == 0) {
973 			  if ((value & 0x1200) == 0x1200) {
974 			    if (tp->chip_id == PNIC2) {
975                                    pnic2_start_nway (dev);
976                             } else {
977 				   t21142_start_nway (dev);
978                             }
979 			  }
980 			} else if (regnum == 4)
981 				tp->sym_advertise = value;
982 		} else {
983 			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
984 		}
985 		return 0;
986 	default:
987 		return -EOPNOTSUPP;
988 	}
989 
990 	return -EOPNOTSUPP;
991 }
992 
993 
994 /* Set or clear the multicast filter for this adaptor.
995    Note that we only use exclusion around actually queueing the
996    new frame, not around filling tp->setup_frame.  This is non-deterministic
997    when re-entered but still correct. */
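/*
 * Illustrative sketch, not part of the original driver: the imperfect filter
 * built below maps each multicast address to one of 512 bits using the low
 * nine bits of the little-endian CRC-32, mirroring build_setup_frame_hash().
 */
static inline int tulip_setup_frame_hash_bit(const u8 *addr)
{
	return ether_crc_le(ETH_ALEN, addr) & 0x1ff;
}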
998 
999 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1000 {
1001 	struct tulip_private *tp = netdev_priv(dev);
1002 	u16 hash_table[32];
1003 	struct netdev_hw_addr *ha;
1004 	int i;
1005 	u16 *eaddrs;
1006 
1007 	memset(hash_table, 0, sizeof(hash_table));
1008 	__set_bit_le(255, hash_table);			/* Broadcast entry */
1009 	/* This should work on big-endian machines as well. */
1010 	netdev_for_each_mc_addr(ha, dev) {
1011 		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1012 
1013 		__set_bit_le(index, hash_table);
1014 	}
1015 	for (i = 0; i < 32; i++) {
1016 		*setup_frm++ = hash_table[i];
1017 		*setup_frm++ = hash_table[i];
1018 	}
1019 	setup_frm = &tp->setup_frame[13*6];
1020 
1021 	/* Fill the final entry with our physical address. */
1022 	eaddrs = (u16 *)dev->dev_addr;
1023 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1024 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1025 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1026 }
1027 
1028 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1029 {
1030 	struct tulip_private *tp = netdev_priv(dev);
1031 	struct netdev_hw_addr *ha;
1032 	u16 *eaddrs;
1033 
1034 	/* We have <= 14 addresses so we can use the wonderful
1035 	   16 address perfect filtering of the Tulip. */
1036 	netdev_for_each_mc_addr(ha, dev) {
1037 		eaddrs = (u16 *) ha->addr;
1038 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1039 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1040 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1041 	}
1042 	/* Fill the unused entries with the broadcast address. */
1043 	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1044 	setup_frm = &tp->setup_frame[15*6];
1045 
1046 	/* Fill the final entry with our physical address. */
1047 	eaddrs = (u16 *)dev->dev_addr;
1048 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1049 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1050 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1051 }
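/*
 * Descriptive note: a setup frame is sixteen 12-byte filter entries (192
 * bytes, the length or'ed into the Tx descriptor), and each 16-bit word of
 * an address is stored twice; &tp->setup_frame[15*6] above and in tulip_up()
 * therefore addresses the sixteenth and final entry, reserved for our own
 * station address.
 */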
1052 
1053 
1054 static void set_rx_mode(struct net_device *dev)
1055 {
1056 	struct tulip_private *tp = netdev_priv(dev);
1057 	void __iomem *ioaddr = tp->base_addr;
1058 	int csr6;
1059 
1060 	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1061 
1062 	tp->csr6 &= ~0x00D5;
1063 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1064 		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1065 		csr6 |= AcceptAllMulticast | AcceptAllPhys;
1066 	} else if ((netdev_mc_count(dev) > 1000) ||
1067 		   (dev->flags & IFF_ALLMULTI)) {
1068 		/* Too many to filter well -- accept all multicasts. */
1069 		tp->csr6 |= AcceptAllMulticast;
1070 		csr6 |= AcceptAllMulticast;
1071 	} else	if (tp->flags & MC_HASH_ONLY) {
1072 		/* Some work-alikes have only a 64-entry hash filter table. */
1073 		/* Should verify correctness on big-endian/__powerpc__ */
1074 		struct netdev_hw_addr *ha;
1075 		if (netdev_mc_count(dev) > 64) {
1076 			/* Arbitrary non-effective limit. */
1077 			tp->csr6 |= AcceptAllMulticast;
1078 			csr6 |= AcceptAllMulticast;
1079 		} else {
1080 			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
1081 			int filterbit;
1082 			netdev_for_each_mc_addr(ha, dev) {
1083 				if (tp->flags & COMET_MAC_ADDR)
1084 					filterbit = ether_crc_le(ETH_ALEN,
1085 								 ha->addr);
1086 				else
1087 					filterbit = ether_crc(ETH_ALEN,
1088 							      ha->addr) >> 26;
1089 				filterbit &= 0x3f;
1090 				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1091 				if (tulip_debug > 2)
1092 					dev_info(&dev->dev,
1093 						 "Added filter for %pM  %08x bit %d\n",
1094 						 ha->addr,
1095 						 ether_crc(ETH_ALEN, ha->addr),
1096 						 filterbit);
1097 			}
1098 			if (mc_filter[0] == tp->mc_filter[0]  &&
1099 				mc_filter[1] == tp->mc_filter[1])
1100 				;				/* No change. */
1101 			else if (tp->flags & IS_ASIX) {
1102 				iowrite32(2, ioaddr + CSR13);
1103 				iowrite32(mc_filter[0], ioaddr + CSR14);
1104 				iowrite32(3, ioaddr + CSR13);
1105 				iowrite32(mc_filter[1], ioaddr + CSR14);
1106 			} else if (tp->flags & COMET_MAC_ADDR) {
1107 				iowrite32(mc_filter[0], ioaddr + CSR27);
1108 				iowrite32(mc_filter[1], ioaddr + CSR28);
1109 			}
1110 			tp->mc_filter[0] = mc_filter[0];
1111 			tp->mc_filter[1] = mc_filter[1];
1112 		}
1113 	} else {
1114 		unsigned long flags;
1115 		u32 tx_flags = 0x08000000 | 192;
1116 
1117 		/* Note that only the low-address shortword of setup_frame is valid!
1118 		   The values are doubled for big-endian architectures. */
1119 		if (netdev_mc_count(dev) > 14) {
1120 			/* Must use a multicast hash table. */
1121 			build_setup_frame_hash(tp->setup_frame, dev);
1122 			tx_flags = 0x08400000 | 192;
1123 		} else {
1124 			build_setup_frame_perfect(tp->setup_frame, dev);
1125 		}
1126 
1127 		spin_lock_irqsave(&tp->lock, flags);
1128 
1129 		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1130 			/* Same setup recently queued, we need not add it. */
1131 		} else {
1132 			unsigned int entry;
1133 			int dummy = -1;
1134 
1135 			/* Now add this frame to the Tx list. */
1136 
1137 			entry = tp->cur_tx++ % TX_RING_SIZE;
1138 
1139 			if (entry != 0) {
1140 				/* Avoid a chip erratum by prefixing a dummy entry. */
1141 				tp->tx_buffers[entry].skb = NULL;
1142 				tp->tx_buffers[entry].mapping = 0;
1143 				tp->tx_ring[entry].length =
1144 					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1145 				tp->tx_ring[entry].buffer1 = 0;
1146 				/* Must set DescOwned later to avoid race with chip */
1147 				dummy = entry;
1148 				entry = tp->cur_tx++ % TX_RING_SIZE;
1149 
1150 			}
1151 
1152 			tp->tx_buffers[entry].skb = NULL;
1153 			tp->tx_buffers[entry].mapping =
1154 				dma_map_single(&tp->pdev->dev,
1155 					       tp->setup_frame,
1156 					       sizeof(tp->setup_frame),
1157 					       DMA_TO_DEVICE);
1158 			/* Put the setup frame on the Tx list. */
1159 			if (entry == TX_RING_SIZE-1)
1160 				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
1161 			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1162 			tp->tx_ring[entry].buffer1 =
1163 				cpu_to_le32(tp->tx_buffers[entry].mapping);
1164 			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1165 			if (dummy >= 0)
1166 				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1167 			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1168 				netif_stop_queue(dev);
1169 
1170 			/* Trigger an immediate transmit demand. */
1171 			iowrite32(0, ioaddr + CSR1);
1172 		}
1173 
1174 		spin_unlock_irqrestore(&tp->lock, flags);
1175 	}
1176 
1177 	iowrite32(csr6, ioaddr + CSR6);
1178 }
1179 
1180 #ifdef CONFIG_TULIP_MWI
1181 static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1182 {
1183 	struct tulip_private *tp = netdev_priv(dev);
1184 	u8 cache;
1185 	u16 pci_command;
1186 	u32 csr0;
1187 
1188 	if (tulip_debug > 3)
1189 		netdev_dbg(dev, "tulip_mwi_config()\n");
1190 
1191 	tp->csr0 = csr0 = 0;
1192 
1193 	/* if we have any cache line size at all, we can do MRM and MWI */
1194 	csr0 |= MRM | MWI;
1195 
1196 	/* Enable MWI in the standard PCI command bit.
1197 	 * Check for the case where MWI is desired but not available
1198 	 */
1199 	pci_try_set_mwi(pdev);
1200 
1201 	/* read result from hardware (in case bit refused to enable) */
1202 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1203 	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1204 		csr0 &= ~MWI;
1205 
1206 	/* if cache line size hardwired to zero, no MWI */
1207 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1208 	if ((csr0 & MWI) && (cache == 0)) {
1209 		csr0 &= ~MWI;
1210 		pci_clear_mwi(pdev);
1211 	}
1212 
1213 	/* assign per-cacheline-size cache alignment and
1214 	 * burst length values
1215 	 */
1216 	switch (cache) {
1217 	case 8:
1218 		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1219 		break;
1220 	case 16:
1221 		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1222 		break;
1223 	case 32:
1224 		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1225 		break;
1226 	default:
1227 		cache = 0;
1228 		break;
1229 	}
1230 
1231 	/* if we have a good cache line size, we by now have a good
1232 	 * csr0, so save it and exit
1233 	 */
1234 	if (cache)
1235 		goto out;
1236 
1237 	/* we don't have a good csr0 or cache line size, disable MWI */
1238 	if (csr0 & MWI) {
1239 		pci_clear_mwi(pdev);
1240 		csr0 &= ~MWI;
1241 	}
1242 
1243 	/* sane defaults for burst length and cache alignment
1244 	 * originally from de4x5 driver
1245 	 */
1246 	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1247 
1248 out:
1249 	tp->csr0 = csr0;
1250 	if (tulip_debug > 2)
1251 		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1252 			   cache, csr0);
1253 }
1254 #endif
1255 
1256 /*
1257  *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
1258  *	is the DM910X and the on-chip ULi devices.
1259  */
1260 
1261 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1262 {
1263 	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1264 		return 1;
1265 	return 0;
1266 }
1267 
1268 static const struct net_device_ops tulip_netdev_ops = {
1269 	.ndo_open		= tulip_open,
1270 	.ndo_start_xmit		= tulip_start_xmit,
1271 	.ndo_tx_timeout		= tulip_tx_timeout,
1272 	.ndo_stop		= tulip_close,
1273 	.ndo_get_stats		= tulip_get_stats,
1274 	.ndo_eth_ioctl		= private_ioctl,
1275 	.ndo_set_rx_mode	= set_rx_mode,
1276 	.ndo_set_mac_address	= eth_mac_addr,
1277 	.ndo_validate_addr	= eth_validate_addr,
1278 #ifdef CONFIG_NET_POLL_CONTROLLER
1279 	.ndo_poll_controller	 = poll_tulip,
1280 #endif
1281 };
1282 
1283 static const struct pci_device_id early_486_chipsets[] = {
1284 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1285 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1286 	{ },
1287 };
1288 
1289 static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1290 {
1291 	struct tulip_private *tp;
1292 	/* See note below on the multiport cards. */
1293 	static unsigned char last_phys_addr[ETH_ALEN] = {
1294 		0x00, 'L', 'i', 'n', 'u', 'x'
1295 	};
1296 #if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1297 	static int last_irq;
1298 #endif
1299 	int i, irq;
1300 	unsigned short sum;
1301 	unsigned char *ee_data;
1302 	struct net_device *dev;
1303 	void __iomem *ioaddr;
1304 	static int board_idx = -1;
1305 	int chip_idx = ent->driver_data;
1306 	const char *chip_name = tulip_tbl[chip_idx].chip_name;
1307 	unsigned int eeprom_missing = 0;
1308 	unsigned int force_csr0 = 0;
1309 
1310 	board_idx++;
1311 
1312 	/*
1313 	 *	LAN Media cards wire a Tulip chip to a WAN interface, which needs
1314 	 *	a very different driver (the lmc driver).
1315 	 */
1316 
1317         if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1318 		pr_err("skipping LMC card\n");
1319 		return -ENODEV;
1320 	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1321 		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1322 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1323 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1324 		pr_err("skipping SBE T3E3 port\n");
1325 		return -ENODEV;
1326 	}
1327 
1328 	/*
1329 	 *	DM910x chips should be handled by the dmfe driver, except
1330 	 *	on-board chips on SPARC systems.  Also, early DM9100s need
1331 	 *	software CRC which only the dmfe driver supports.
1332 	 */
1333 
1334 #ifdef CONFIG_TULIP_DM910X
1335 	if (chip_idx == DM910X) {
1336 		struct device_node *dp;
1337 
1338 		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1339 		    pdev->revision < 0x30) {
1340 			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1341 			return -ENODEV;
1342 		}
1343 
1344 		dp = pci_device_to_OF_node(pdev);
1345 		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1346 			pr_info("skipping DM910x expansion card (use dmfe)\n");
1347 			return -ENODEV;
1348 		}
1349 	}
1350 #endif
1351 
1352 	/*
1353 	 *	Look for early PCI chipsets where people report hangs unless
1354 	 *	the workarounds are enabled.
1355 	 */
1356 
1357 	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1358 	      aligned.  Aries might need this too. The Saturn errata are not
1359 	      pretty reading but thankfully it's an old 486 chipset.
1360 
1361 	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
1362 	      Saturn.
1363 	*/
1364 
1365 	if (pci_dev_present(early_486_chipsets)) {
1366 		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1367 		force_csr0 = 1;
1368 	}
1369 
1370 	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
1371 	if (chip_idx == AX88140) {
1372 		if ((csr0 & 0x3f00) == 0)
1373 			csr0 |= 0x2000;
1374 	}
1375 
1376 	/* PNIC doesn't have MWI/MRL/MRM... */
1377 	if (chip_idx == LC82C168)
1378 		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1379 
1380 	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1381 	if (tulip_uli_dm_quirk(pdev)) {
1382 		csr0 &= ~0x01f100ff;
1383 #if defined(CONFIG_SPARC)
1384                 csr0 = (csr0 & ~0xff00) | 0xe000;
1385 #endif
1386 	}
1387 	/*
1388 	 *	And back to business
1389 	 */
1390 
1391 	i = pci_enable_device(pdev);
1392 	if (i) {
1393 		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1394 		return i;
1395 	}
1396 
1397 	irq = pdev->irq;
1398 
1399 	/* alloc_etherdev ensures aligned and zeroed private structures */
1400 	dev = alloc_etherdev (sizeof (*tp));
1401 	if (!dev)
1402 		return -ENOMEM;
1403 
1404 	SET_NETDEV_DEV(dev, &pdev->dev);
1405 	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1406 		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1407 		       pci_name(pdev),
1408 		       (unsigned long long)pci_resource_len (pdev, 0),
1409 		       (unsigned long long)pci_resource_start (pdev, 0));
1410 		goto err_out_free_netdev;
1411 	}
1412 
1413 	/* grab all resources from both PIO and MMIO regions, as we
1414 	 * don't want anyone else messing around with our hardware */
1415 	if (pci_request_regions (pdev, DRV_NAME))
1416 		goto err_out_free_netdev;
1417 
1418 	ioaddr =  pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1419 
1420 	if (!ioaddr)
1421 		goto err_out_free_res;
1422 
1423 	/*
1424 	 * initialize private data structure 'tp'
1425 	 * it is zeroed and aligned in alloc_etherdev
1426 	 */
1427 	tp = netdev_priv(dev);
1428 	tp->dev = dev;
1429 
1430 	tp->rx_ring = dma_alloc_coherent(&pdev->dev,
1431 					 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1432 					 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1433 					 &tp->rx_ring_dma, GFP_KERNEL);
1434 	if (!tp->rx_ring)
1435 		goto err_out_mtable;
1436 	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1437 	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1438 
1439 	tp->chip_id = chip_idx;
1440 	tp->flags = tulip_tbl[chip_idx].flags;
1441 
1442 	tp->wolinfo.supported = 0;
1443 	tp->wolinfo.wolopts = 0;
1444 	/* COMET: Enable power management only for AN983B */
1445 	if (chip_idx == COMET ) {
1446 		u32 sig;
1447 		pci_read_config_dword (pdev, 0x80, &sig);
1448 		if (sig == 0x09811317) {
1449 			tp->flags |= COMET_PM;
1450 			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1451 			pr_info("%s: Enabled WOL support for AN983B\n",
1452 				__func__);
1453 		}
1454 	}
1455 	tp->pdev = pdev;
1456 	tp->base_addr = ioaddr;
1457 	tp->revision = pdev->revision;
1458 	tp->csr0 = csr0;
1459 	spin_lock_init(&tp->lock);
1460 	spin_lock_init(&tp->mii_lock);
1461 	timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
1462 
1463 	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1464 
1465 #ifdef CONFIG_TULIP_MWI
1466 	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1467 		tulip_mwi_config (pdev, dev);
1468 #endif
1469 
1470 	/* Stop the chip's Tx and Rx processes. */
1471 	tulip_stop_rxtx(tp);
1472 
1473 	pci_set_master(pdev);
1474 
1475 #ifdef CONFIG_GSC
1476 	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1477 		switch (pdev->subsystem_device) {
1478 		default:
1479 			break;
1480 		case 0x1061:
1481 		case 0x1062:
1482 		case 0x1063:
1483 		case 0x1098:
1484 		case 0x1099:
1485 		case 0x10EE:
1486 			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1487 			chip_name = "GSC DS21140 Tulip";
1488 		}
1489 	}
1490 #endif
1491 
1492 	/* Clear the missed-packet counter. */
1493 	ioread32(ioaddr + CSR8);
1494 
1495 	/* The station address ROM is read byte serially.  The register must
1496 	   be polled, waiting for the value to be read bit serially from the
1497 	   EEPROM.
1498 	   */
1499 	ee_data = tp->eeprom;
1500 	memset(ee_data, 0, sizeof(tp->eeprom));
1501 	sum = 0;
1502 	if (chip_idx == LC82C168) {
1503 		for (i = 0; i < 3; i++) {
1504 			int value, boguscnt = 100000;
1505 			iowrite32(0x600 | i, ioaddr + 0x98);
1506 			do {
1507 				value = ioread32(ioaddr + CSR9);
1508 			} while (value < 0  && --boguscnt > 0);
1509 			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1510 			sum += value & 0xffff;
1511 		}
1512 	} else if (chip_idx == COMET) {
1513 		/* No need to read the EEPROM. */
1514 		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1515 		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1516 		for (i = 0; i < 6; i ++)
1517 			sum += dev->dev_addr[i];
1518 	} else {
1519 		/* A serial EEPROM interface; we read it now and sort it out later. */
1520 		int sa_offset = 0;
1521 		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1522 		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1523 
1524 		if (ee_max_addr > sizeof(tp->eeprom))
1525 			ee_max_addr = sizeof(tp->eeprom);
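		/*
		 * Worked example (illustrative): a 6-bit-address EEPROM gives
		 * ee_max_addr = (2^6 - 1) * 2 = 126 bytes and an 8-bit part
		 * (2^8 - 1) * 2 = 510 bytes, both clamped to sizeof(tp->eeprom).
		 */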
1526 
1527 		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1528 			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1529 			ee_data[i] = data & 0xff;
1530 			ee_data[i + 1] = data >> 8;
1531 		}
1532 
1533 		/* DEC now has a specification (see Notes) but early board makers
1534 		   just put the address in the first EEPROM locations. */
1535 		/* This does  memcmp(ee_data, ee_data+16, 8) */
1536 		for (i = 0; i < 8; i ++)
1537 			if (ee_data[i] != ee_data[16+i])
1538 				sa_offset = 20;
1539 		if (chip_idx == CONEXANT) {
1540 			/* Check that the tuple type and length is correct. */
1541 			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
1542 				sa_offset = 0x19A;
1543 		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
1544 				   ee_data[2] == 0) {
1545 			sa_offset = 2;		/* Grrr, damn Matrox boards. */
1546 		}
1547 #ifdef CONFIG_MIPS_COBALT
1548                if ((pdev->bus->number == 0) &&
1549                    ((PCI_SLOT(pdev->devfn) == 7) ||
1550                     (PCI_SLOT(pdev->devfn) == 12))) {
1551                        /* Cobalt MAC address in first EEPROM locations. */
1552                        sa_offset = 0;
1553 		       /* Ensure our media table fixup gets applied */
1554 		       memcpy(ee_data + 16, ee_data, 8);
1555                }
1556 #endif
1557 #ifdef CONFIG_GSC
1558 		/* Check to see if we have a broken srom */
1559 		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1560 			/* pci_vendor_id and subsystem_id are swapped */
1561 			ee_data[0] = ee_data[2];
1562 			ee_data[1] = ee_data[3];
1563 			ee_data[2] = 0x61;
1564 			ee_data[3] = 0x10;
1565 
1566 			/* HSC-PCI boards need to be byte-swapped and shifted
1567 			 * up 1 word.  This shift needs to happen at the end
1568 			 * of the MAC first because of the 2 byte overlap.
1569 			 */
1570 			for (i = 4; i >= 0; i -= 2) {
1571 				ee_data[17 + i + 3] = ee_data[17 + i];
1572 				ee_data[16 + i + 5] = ee_data[16 + i];
1573 			}
1574 		}
1575 #endif

		for (i = 0; i < 6; i ++) {
			dev->dev_addr[i] = ee_data[i + sa_offset];
			sum += ee_data[i + sa_offset];
		}
	}
	/* Lite-On boards have the address byte-swapped. */
	if ((dev->dev_addr[0] == 0xA0 ||
	     dev->dev_addr[0] == 0xC0 ||
	     dev->dev_addr[0] == 0x02) &&
	    dev->dev_addr[1] == 0x00)
		for (i = 0; i < 6; i+=2) {
			char tmp = dev->dev_addr[i];
			dev->dev_addr[i] = dev->dev_addr[i+1];
			dev->dev_addr[i+1] = tmp;
		}
	/* On the Zynx 315 Etherarray and other multiport boards only the
	   first Tulip has an EEPROM.
	   On Sparc systems the mac address is held in the OBP property
	   "local-mac-address".
	   The addresses of the subsequent ports are derived from the first.
	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
	   that here as well. */
	if (sum == 0  || sum == 6*0xff) {
#if defined(CONFIG_SPARC)
		struct device_node *dp = pci_device_to_OF_node(pdev);
		const unsigned char *addr;
		int len;
#endif
		eeprom_missing = 1;
		for (i = 0; i < 5; i++)
			dev->dev_addr[i] = last_phys_addr[i];
		dev->dev_addr[i] = last_phys_addr[i] + 1;
#if defined(CONFIG_SPARC)
		addr = of_get_property(dp, "local-mac-address", &len);
		if (addr && len == ETH_ALEN)
			memcpy(dev->dev_addr, addr, ETH_ALEN);
#endif
#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
		if (last_irq)
			irq = last_irq;
#endif
	}

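	/* Remember this port's address (and, on x86, its IRQ) so that the
	 * next EEPROM-less port on a multiport board can be derived from it
	 * by the fallback above. */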
	for (i = 0; i < 6; i++)
		last_phys_addr[i] = dev->dev_addr[i];
#if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
	last_irq = irq;
#endif

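	/* Apply the per-board "options", "full_duplex" and "mtu" module
	 * parameters.  As a rough usage sketch (assuming the usual
	 * module_param_array() declarations elsewhere in this file),
	 * "modprobe tulip options=14" would force the first board to
	 * medianame[14], i.e. MII 100baseTx-FDX. */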
	/* The lower four bits are the media type. */
	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
		if (options[board_idx] & MEDIA_MASK)
			tp->default_port = options[board_idx] & MEDIA_MASK;
		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
			tp->full_duplex = 1;
		if (mtu[board_idx] > 0)
			dev->mtu = mtu[board_idx];
	}
	if (dev->mem_start & MEDIA_MASK)
		tp->default_port = dev->mem_start & MEDIA_MASK;
	if (tp->default_port) {
		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
			board_idx, medianame[tp->default_port & MEDIA_MASK]);
		tp->medialock = 1;
		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
			tp->full_duplex = 1;
	}
	if (tp->full_duplex)
		tp->full_duplex_lock = 1;

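	/* A forced default_port in the 9..15 range is one of the MII modes
	 * in medianame[]; translate it into the corresponding MII
	 * advertisement register bits for the autonegotiation code. */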
	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
		static const u16 media2advert[] = {
			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
		};
		tp->mii_advertise = media2advert[tp->default_port - 9];
		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
	}

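	/* Temporarily give the device its board-indexed "tulip<N>" name so
	 * that messages printed while parsing the EEPROM media table can be
	 * attributed to this board, then restore the "eth%d" template for
	 * register_netdev() below. */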
	if (tp->flags & HAS_MEDIA_TABLE) {
		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
		tulip_parse_eeprom(dev);
		strcpy(dev->name, "eth%d");			/* un-hack */
	}

	if ((tp->flags & ALWAYS_CHECK_MII) ||
	    (tp->mtable  &&  tp->mtable->has_mii) ||
	    ( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
		if (tp->mtable  &&  tp->mtable->has_mii) {
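			/* An mleaf media value of 11 marks the MII leaf that
			 * tulip_parse_eeprom() produced; select it here so
			 * the MII port is active before tulip_find_mii()
			 * probes for transceivers below. */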
			for (i = 0; i < tp->mtable->leafcount; i++)
				if (tp->mtable->mleaf[i].media == 11) {
					tp->cur_index = i;
					tp->saved_if_port = dev->if_port;
					tulip_select_media(dev, 2);
					dev->if_port = tp->saved_if_port;
					break;
				}
		}

		/* Find the connected MII xcvrs.
		   Doing this in open() would allow detecting external xcvrs
		   later, but takes much time. */
		tulip_find_mii (dev, board_idx);
	}

	/* The Tulip-specific entries in the device structure. */
	dev->netdev_ops = &tulip_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
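	/* Register the NAPI poll handler with a budget of 16 packets per
	 * poll round. */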
	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
#endif
	dev->ethtool_ops = &ops;

	if (register_netdev(dev))
		goto err_out_free_ring;

	pci_set_drvdata(pdev, dev);

	dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
		 chip_name, pdev->revision,
		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
		 eeprom_missing ? " EEPROM not present," : "",
		 dev->dev_addr, irq);

	if (tp->chip_id == PNIC2)
		tp->link_change = pnic2_lnk_change;
	else if (tp->flags & HAS_NWAY)
		tp->link_change = t21142_lnk_change;
	else if (tp->flags & HAS_PNICNWAY)
		tp->link_change = pnic_lnk_change;

	/* Reset the xcvr interface and turn on heartbeat. */
	switch (chip_idx) {
	case DC21140:
	case DM910X:
	default:
		if (tp->mtable)
			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
		break;
	case DC21142:
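		/* With an MII transceiver present, program CSR6 for the MII
		 * port and quiesce the SIA (CSR13/CSR14); otherwise fall
		 * back to 21143-style NWay autonegotiation. */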
		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
		} else
			t21142_start_nway(dev);
		break;
	case PNIC2:
		/* just do a reset for sanity's sake */
		iowrite32(0x0000, ioaddr + CSR13);
		iowrite32(0x0000, ioaddr + CSR14);
		break;
	case LC82C168:
		if ( ! tp->mii_cnt) {
			tp->nway = 1;
			tp->nwayset = 0;
			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
			iowrite32(0x30, ioaddr + CSR12);
			iowrite32(0x0001F078, ioaddr + CSR6);
			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
		}
		break;
	case MX98713:
	case COMPEX9881:
		iowrite32(0x00000000, ioaddr + CSR6);
		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
		iowrite32(0x00000001, ioaddr + CSR13);
		break;
	case MX98715:
	case MX98725:
		iowrite32(0x01a80000, ioaddr + CSR6);
		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
		iowrite32(0x00001000, ioaddr + CSR12);
		break;
	case COMET:
		/* No initialization necessary. */
		break;
	}

	/* put the chip in snooze mode until opened */
	tulip_set_power_state (tp, 0, 1);

	return 0;

err_out_free_ring:
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
			  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
			  tp->rx_ring, tp->rx_ring_dma);

err_out_mtable:
	kfree (tp->mtable);
	pci_iounmap(pdev, ioaddr);

err_out_free_res:
	pci_release_regions (pdev);

err_out_free_netdev:
	free_netdev (dev);
	return -ENODEV;
}


/* set the registers according to the given wolopts */
static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	if (tp->flags & COMET_PM) {
		unsigned int tmp;

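		/* Going by the CSR18 bit names, switch the Comet
		 * power-management block from APM mode to PM mode before
		 * programming the wake-up events below. */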
		tmp = ioread32(ioaddr + CSR18);
		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
		tmp |= comet_csr18_pm_mode;
		iowrite32(tmp, ioaddr + CSR18);

		/* Set the Wake-up Control/Status Register to the given WOL options */
		tmp = ioread32(ioaddr + CSR13);
		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
		if (wolopts & WAKE_MAGIC)
			tmp |= comet_csr13_mpre;
		if (wolopts & WAKE_PHY)
			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
		/* Clear the event flags */
		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
		iowrite32(tmp, ioaddr + CSR13);
	}
}

static int __maybe_unused tulip_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct tulip_private *tp = netdev_priv(dev);

	if (!dev)
		return -EINVAL;

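	/* If the interface is down there is no DMA or IRQ activity to stop;
	 * just program the wake-up state below. */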
	if (!netif_running(dev))
		goto save_state;

	tulip_down(dev);

	netif_device_detach(dev);
	/* FIXME: it needlessly adds an error path. */
	free_irq(tp->pdev->irq, dev);

save_state:
	tulip_set_wolopts(to_pci_dev(dev_d), tp->wolinfo.wolopts);
	device_set_wakeup_enable(dev_d, !!tp->wolinfo.wolopts);

	return 0;
}

static int __maybe_unused tulip_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned int tmp;
	int retval = 0;

	if (!dev)
		return -EINVAL;

	if (!netif_running(dev))
		return 0;

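	/* Re-acquire the interrupt line that tulip_suspend() released. */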
	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
			     dev->name, dev);
	if (retval) {
		pr_err("request_irq failed in resume\n");
		return retval;
	}

	if (tp->flags & COMET_PM) {
		device_set_wakeup_enable(dev_d, 0);

		/* Clear the PMES flag */
		tmp = ioread32(ioaddr + CSR20);
		tmp |= comet_csr20_pmes;
		iowrite32(tmp, ioaddr + CSR20);

		/* Disable all wake-up events */
		tulip_set_wolopts(pdev, 0);
	}
	netif_device_attach(dev);

	if (netif_running(dev))
		tulip_up(dev);

	return 0;
}

static void tulip_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct tulip_private *tp;

	if (!dev)
		return;

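	/* Undo tulip_init_one() in reverse order: unregister the netdev,
	 * free the descriptor rings and the media table, unmap the chip
	 * registers and release the PCI resources. */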
	tp = netdev_priv(dev);
	unregister_netdev(dev);
	dma_free_coherent(&pdev->dev,
			  sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
			  sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
			  tp->rx_ring, tp->rx_ring_dma);
	kfree (tp->mtable);
	pci_iounmap(pdev, tp->base_addr);
	free_netdev (dev);
	pci_release_regions (pdev);
	pci_disable_device(pdev);

	/* pci_power_off (pdev, -1); */
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void poll_tulip (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	const int irq = tp->pdev->irq;

	/* disable_irq here is not very nice, but with the lockless
	   interrupt handler we have no other choice. */
	disable_irq(irq);
	tulip_interrupt (irq, dev);
	enable_irq(irq);
}
#endif

static SIMPLE_DEV_PM_OPS(tulip_pm_ops, tulip_suspend, tulip_resume);

static struct pci_driver tulip_driver = {
	.name		= DRV_NAME,
	.id_table	= tulip_pci_tbl,
	.probe		= tulip_init_one,
	.remove		= tulip_remove_one,
	.driver.pm	= &tulip_pm_ops,
};


static int __init tulip_init (void)
{
	if (!csr0) {
		pr_warn("unknown CPU architecture, using default csr0\n");
		/* default to 8 longword cache line alignment */
		csr0 = 0x00A00000 | 0x4800;
	}

	/* copy module parms into globals */
	tulip_rx_copybreak = rx_copybreak;
	tulip_max_interrupt_work = max_interrupt_work;

	/* probe for and init boards */
	return pci_register_driver(&tulip_driver);
}


static void __exit tulip_cleanup (void)
{
	pci_unregister_driver (&tulip_driver);
}


module_init(tulip_init);
module_exit(tulip_cleanup);