1 /*
2    sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3 
4    Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5    Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6    Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7 
8    Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9    genuine driver.
10 
11    This software may be used and distributed according to the terms of
12    the GNU General Public License (GPL), incorporated herein by reference.
13    Drivers based on or derived from this code fall under the GPL and must
14    retain the authorship, copyright and license notice.  This file is not
15    a complete program and may only be used when the entire operating
16    system is licensed under the GPL.
17 
18    See the file COPYING in this distribution for more information.
19 
20 */
21 
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 
24 #include <linux/interrupt.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/netdevice.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/pci.h>
32 #include <linux/mii.h>
33 #include <linux/delay.h>
34 #include <linux/crc32.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/slab.h>
37 #include <asm/irq.h>
38 
39 #define PHY_MAX_ADDR		32
40 #define PHY_ID_ANY		0x1f
41 #define MII_REG_ANY		0x1f
42 
43 #define DRV_VERSION		"1.4"
44 #define DRV_NAME		"sis190"
45 #define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
46 
47 #define sis190_rx_skb			netif_rx
48 #define sis190_rx_quota(count, quota)	count
49 
50 #define NUM_TX_DESC		64	/* [8..1024] */
51 #define NUM_RX_DESC		64	/* [8..8192] */
52 #define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
53 #define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
54 #define RX_BUF_SIZE		1536
55 #define RX_BUF_MASK		0xfff8
56 
57 #define SIS190_REGS_SIZE	0x80
58 #define SIS190_TX_TIMEOUT	(6*HZ)
59 #define SIS190_PHY_TIMEOUT	(10*HZ)
60 #define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
61 				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
62 				 NETIF_MSG_IFDOWN)
63 
64 /* Enhanced PHY access register bit definitions */
65 #define EhnMIIread		0x0000
66 #define EhnMIIwrite		0x0020
67 #define EhnMIIdataShift		16
68 #define EhnMIIpmdShift		6	/* 7016 only */
69 #define EhnMIIregShift		11
70 #define EhnMIIreq		0x0010
71 #define EhnMIInotDone		0x0010
72 
73 /* Write/read MMIO register */
74 #define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
75 #define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
76 #define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
77 #define SIS_R8(reg)		readb (ioaddr + (reg))
78 #define SIS_R16(reg)		readw (ioaddr + (reg))
79 #define SIS_R32(reg)		readl (ioaddr + (reg))
80 
81 #define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
82 
83 enum sis190_registers {
84 	TxControl		= 0x00,
85 	TxDescStartAddr		= 0x04,
86 	rsv0			= 0x08,	// reserved
87 	TxSts			= 0x0c,	// unused (Control/Status)
88 	RxControl		= 0x10,
89 	RxDescStartAddr		= 0x14,
90 	rsv1			= 0x18,	// reserved
91 	RxSts			= 0x1c,	// unused
92 	IntrStatus		= 0x20,
93 	IntrMask		= 0x24,
94 	IntrControl		= 0x28,
95 	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
96 	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
97 	rsv2			= 0x34,	// reserved
98 	ROMControl		= 0x38,
99 	ROMInterface		= 0x3c,
100 	StationControl		= 0x40,
101 	GMIIControl		= 0x44,
102 	GIoCR			= 0x48, // unused (GMAC IO Compensation)
103 	GIoCtrl			= 0x4c, // unused (GMAC IO Control)
104 	TxMacControl		= 0x50,
105 	TxLimit			= 0x54, // unused (Tx MAC Timer/TryLimit)
106 	RGDelay			= 0x58, // unused (RGMII Tx Internal Delay)
107 	rsv3			= 0x5c, // reserved
108 	RxMacControl		= 0x60,
109 	RxMacAddr		= 0x62,
110 	RxHashTable		= 0x68,
111 	// Undocumented		= 0x6c,
112 	RxWolCtrl		= 0x70,
113 	RxWolData		= 0x74, // unused (Rx WOL Data Access)
114 	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
115 	rsv4			= 0x7c, // reserved
116 };
117 
118 enum sis190_register_content {
119 	/* IntrStatus */
120 	SoftInt			= 0x40000000,	// unused
121 	Timeup			= 0x20000000,	// unused
122 	PauseFrame		= 0x00080000,	// unused
123 	MagicPacket		= 0x00040000,	// unused
124 	WakeupFrame		= 0x00020000,	// unused
125 	LinkChange		= 0x00010000,
126 	RxQEmpty		= 0x00000080,
127 	RxQInt			= 0x00000040,
128 	TxQ1Empty		= 0x00000020,	// unused
129 	TxQ1Int			= 0x00000010,
130 	TxQ0Empty		= 0x00000008,	// unused
131 	TxQ0Int			= 0x00000004,
132 	RxHalt			= 0x00000002,
133 	TxHalt			= 0x00000001,
134 
135 	/* {Rx/Tx}CmdBits */
136 	CmdReset		= 0x10,
137 	CmdRxEnb		= 0x08,		// unused
138 	CmdTxEnb		= 0x01,
139 	RxBufEmpty		= 0x01,		// unused
140 
141 	/* Cfg9346Bits */
142 	Cfg9346_Lock		= 0x00,		// unused
143 	Cfg9346_Unlock		= 0xc0,		// unused
144 
145 	/* RxMacControl */
146 	AcceptErr		= 0x20,		// unused
147 	AcceptRunt		= 0x10,		// unused
148 	AcceptBroadcast		= 0x0800,
149 	AcceptMulticast		= 0x0400,
150 	AcceptMyPhys		= 0x0200,
151 	AcceptAllPhys		= 0x0100,
152 
153 	/* RxConfigBits */
154 	RxCfgFIFOShift		= 13,
155 	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?
156 
157 	/* TxConfigBits */
158 	TxInterFrameGapShift	= 24,
159 	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */
160 
161 	LinkStatus		= 0x02,		// unused
162 	FullDup			= 0x01,		// unused
163 
164 	/* TBICSRBit */
165 	TBILinkOK		= 0x02000000,	// unused
166 };
167 
168 struct TxDesc {
169 	__le32 PSize;
170 	__le32 status;
171 	__le32 addr;
172 	__le32 size;
173 };
174 
175 struct RxDesc {
176 	__le32 PSize;
177 	__le32 status;
178 	__le32 addr;
179 	__le32 size;
180 };
181 
182 enum _DescStatusBit {
183 	/* _Desc.status */
184 	OWNbit		= 0x80000000, // RXOWN/TXOWN
185 	INTbit		= 0x40000000, // RXINT/TXINT
186 	CRCbit		= 0x00020000, // CRCOFF/CRCEN
187 	PADbit		= 0x00010000, // PREADD/PADEN
188 	/* _Desc.size */
189 	RingEnd		= 0x80000000,
190 	/* TxDesc.status */
191 	LSEN		= 0x08000000, // TSO ? -- FR
192 	IPCS		= 0x04000000,
193 	TCPCS		= 0x02000000,
194 	UDPCS		= 0x01000000,
195 	BSTEN		= 0x00800000,
196 	EXTEN		= 0x00400000,
197 	DEFEN		= 0x00200000,
198 	BKFEN		= 0x00100000,
199 	CRSEN		= 0x00080000,
200 	COLEN		= 0x00040000,
201 	THOL3		= 0x30000000,
202 	THOL2		= 0x20000000,
203 	THOL1		= 0x10000000,
204 	THOL0		= 0x00000000,
205 
206 	WND		= 0x00080000,
207 	TABRT		= 0x00040000,
208 	FIFO		= 0x00020000,
209 	LINK		= 0x00010000,
210 	ColCountMask	= 0x0000ffff,
211 	/* RxDesc.status */
212 	IPON		= 0x20000000,
213 	TCPON		= 0x10000000,
214 	UDPON		= 0x08000000,
215 	Wakup		= 0x00400000,
216 	Magic		= 0x00200000,
217 	Pause		= 0x00100000,
218 	DEFbit		= 0x00200000,
219 	BCAST		= 0x000c0000,
220 	MCAST		= 0x00080000,
221 	UCAST		= 0x00040000,
222 	/* RxDesc.PSize */
223 	TAGON		= 0x80000000,
224 	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
225 	ABORT		= 0x00800000,
226 	SHORT		= 0x00400000,
227 	LIMIT		= 0x00200000,
228 	MIIER		= 0x00100000,
229 	OVRUN		= 0x00080000,
230 	NIBON		= 0x00040000,
231 	COLON		= 0x00020000,
232 	CRCOK		= 0x00010000,
233 	RxSizeMask	= 0x0000ffff
234 	/*
235 	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
236 	 * provide two (unused with Linux) Tx queues. No publicly
237 	 * available documentation alas.
238 	 */
239 };
240 
241 enum sis190_eeprom_access_register_bits {
242 	EECS	= 0x00000001,	// unused
243 	EECLK	= 0x00000002,	// unused
244 	EEDO	= 0x00000008,	// unused
245 	EEDI	= 0x00000004,	// unused
246 	EEREQ	= 0x00000080,
247 	EEROP	= 0x00000200,
248 	EEWOP	= 0x00000100	// unused
249 };
250 
251 /* EEPROM Addresses */
252 enum sis190_eeprom_address {
253 	EEPROMSignature	= 0x00,
254 	EEPROMCLK	= 0x01,	// unused
255 	EEPROMInfo	= 0x02,
256 	EEPROMMACAddr	= 0x03
257 };
258 
259 enum sis190_feature {
260 	F_HAS_RGMII	= 1,
261 	F_PHY_88E1111	= 2,
262 	F_PHY_BCM5461	= 4
263 };
264 
265 struct sis190_private {
266 	void __iomem *mmio_addr;
267 	struct pci_dev *pci_dev;
268 	struct net_device *dev;
269 	spinlock_t lock;
270 	u32 rx_buf_sz;
271 	u32 cur_rx;
272 	u32 cur_tx;
273 	u32 dirty_rx;
274 	u32 dirty_tx;
275 	dma_addr_t rx_dma;
276 	dma_addr_t tx_dma;
277 	struct RxDesc *RxDescRing;
278 	struct TxDesc *TxDescRing;
279 	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
280 	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
281 	struct work_struct phy_task;
282 	struct timer_list timer;
283 	u32 msg_enable;
284 	struct mii_if_info mii_if;
285 	struct list_head first_phy;
286 	u32 features;
287 	u32 negotiated_lpa;
288 	enum {
289 		LNK_OFF,
290 		LNK_ON,
291 		LNK_AUTONEG,
292 	} link_status;
293 };
294 
295 struct sis190_phy {
296 	struct list_head list;
297 	int phy_id;
298 	u16 id[2];
299 	u16 status;
300 	u8  type;
301 };
302 
303 enum sis190_phy_type {
304 	UNKNOWN	= 0x00,
305 	HOME	= 0x01,
306 	LAN	= 0x02,
307 	MIX	= 0x03
308 };
309 
310 static struct mii_chip_info {
311 	const char *name;
312 	u16 id[2];
313 	unsigned int type;
314 	u32 feature;
315 } mii_chip_table[] = {
316 	{ "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
317 	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
318 	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
319 	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
320 	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
321 	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
322 	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
323 	{ NULL, }
324 };
325 
326 static const struct {
327 	const char *name;
328 } sis_chip_info[] = {
329 	{ "SiS 190 PCI Fast Ethernet adapter" },
330 	{ "SiS 191 PCI Gigabit Ethernet adapter" },
331 };
332 
333 static const struct pci_device_id sis190_pci_tbl[] = {
334 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
335 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
336 	{ 0, },
337 };
338 
339 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
340 
341 static int rx_copybreak = 200;
342 
343 static struct {
344 	u32 msg_enable;
345 } debug = { -1 };
346 
347 MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
348 module_param(rx_copybreak, int, 0);
349 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
350 module_param_named(debug, debug.msg_enable, int, 0);
351 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
352 MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
353 MODULE_VERSION(DRV_VERSION);
354 MODULE_LICENSE("GPL");
355 
356 static const u32 sis190_intr_mask =
357 	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
358 
359 /*
360  * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
361  * The chips use a 64 element hash table based on the Ethernet CRC.
362  */
363 static const int multicast_filter_limit = 32;
364 
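/*
 * MDIO access goes through the GMIIControl register: the caller writes a
 * command word (EhnMIIreq, read/write opcode, PHY address and register
 * number, plus the data in the upper 16 bits for writes) and __mdio_cmd()
 * polls EhnMIInotDone until the MAC reports completion.
 */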
365 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
366 {
367 	unsigned int i;
368 
369 	SIS_W32(GMIIControl, ctl);
370 
371 	msleep(1);
372 
373 	for (i = 0; i < 100; i++) {
374 		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
375 			break;
376 		msleep(1);
377 	}
378 
379 	if (i > 99)
380 		pr_err("PHY command failed !\n");
381 }
382 
383 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
384 {
385 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
386 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
387 		(((u32) val) << EhnMIIdataShift));
388 }
389 
390 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
391 {
392 	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
393 		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
394 
395 	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
396 }
397 
398 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
399 {
400 	struct sis190_private *tp = netdev_priv(dev);
401 
402 	mdio_write(tp->mmio_addr, phy_id, reg, val);
403 }
404 
405 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
406 {
407 	struct sis190_private *tp = netdev_priv(dev);
408 
409 	return mdio_read(tp->mmio_addr, phy_id, reg);
410 }
411 
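/*
 * Some MII status bits (e.g. the BMSR link status) are latched, so a
 * single read can return stale state. Read the register twice and return
 * the second, current value.
 */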
412 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
413 {
414 	mdio_read(ioaddr, phy_id, reg);
415 	return mdio_read(ioaddr, phy_id, reg);
416 }
417 
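/*
 * Read one 16 bit word from the EEPROM through ROMInterface: post a read
 * request for @reg, poll until the chip clears EEREQ, then fetch the data
 * from the upper half of the register. Returns 0 when no EEPROM is
 * present (ROMControl bit 1 clear) and 0xffff on timeout.
 */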
418 static u16 sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
419 {
420 	u16 data = 0xffff;
421 	unsigned int i;
422 
423 	if (!(SIS_R32(ROMControl) & 0x0002))
424 		return 0;
425 
426 	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
427 
428 	for (i = 0; i < 200; i++) {
429 		if (!(SIS_R32(ROMInterface) & EEREQ)) {
430 			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
431 			break;
432 		}
433 		msleep(1);
434 	}
435 
436 	return data;
437 }
438 
439 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
440 {
441 	SIS_W32(IntrMask, 0x00);
442 	SIS_W32(IntrStatus, 0xffffffff);
443 	SIS_PCI_COMMIT();
444 }
445 
446 static void sis190_asic_down(void __iomem *ioaddr)
447 {
448 	/* Stop the chip's Tx and Rx DMA processes. */
449 
450 	SIS_W32(TxControl, 0x1a00);
451 	SIS_W32(RxControl, 0x1a00);
452 
453 	sis190_irq_mask_and_ack(ioaddr);
454 }
455 
456 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
457 {
458 	desc->size |= cpu_to_le32(RingEnd);
459 }
460 
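/*
 * Hand a Rx descriptor (back) to the NIC: clear PSize, program the buffer
 * size while preserving the RingEnd marker, and only then set OWNbit.
 * The wmb() keeps the hardware from seeing a half-initialized descriptor.
 */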
461 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
462 {
463 	u32 eor = le32_to_cpu(desc->size) & RingEnd;
464 
465 	desc->PSize = 0x0;
466 	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
467 	wmb();
468 	desc->status = cpu_to_le32(OWNbit | INTbit);
469 }
470 
471 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
472 				      u32 rx_buf_sz)
473 {
474 	desc->addr = cpu_to_le32(mapping);
475 	sis190_give_to_asic(desc, rx_buf_sz);
476 }
477 
478 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
479 {
480 	desc->PSize = 0x0;
481 	desc->addr = cpu_to_le32(0xdeadbeef);
482 	desc->size &= cpu_to_le32(RingEnd);
483 	wmb();
484 	desc->status = 0x0;
485 }
486 
487 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
488 					   struct RxDesc *desc)
489 {
490 	u32 rx_buf_sz = tp->rx_buf_sz;
491 	struct sk_buff *skb;
492 	dma_addr_t mapping;
493 
494 	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
495 	if (unlikely(!skb))
496 		goto skb_alloc_failed;
497 	mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
498 			PCI_DMA_FROMDEVICE);
499 	if (pci_dma_mapping_error(tp->pci_dev, mapping))
500 		goto out;
501 	sis190_map_to_asic(desc, mapping, rx_buf_sz);
502 
503 	return skb;
504 
505 out:
506 	dev_kfree_skb_any(skb);
507 skb_alloc_failed:
508 	sis190_make_unusable_by_asic(desc);
509 	return NULL;
510 }
511 
512 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
513 			  u32 start, u32 end)
514 {
515 	u32 cur;
516 
517 	for (cur = start; cur < end; cur++) {
518 		unsigned int i = cur % NUM_RX_DESC;
519 
520 		if (tp->Rx_skbuff[i])
521 			continue;
522 
523 		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
524 
525 		if (!tp->Rx_skbuff[i])
526 			break;
527 	}
528 	return cur - start;
529 }
530 
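/*
 * For frames shorter than rx_copybreak, copy the data into a small fresh
 * skb so the original receive buffer stays mapped and can be recycled in
 * place; larger frames hand their buffer to the stack and sis190_rx_fill()
 * allocates a replacement later.
 */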
531 static bool sis190_try_rx_copy(struct sis190_private *tp,
532 			       struct sk_buff **sk_buff, int pkt_size,
533 			       dma_addr_t addr)
534 {
535 	struct sk_buff *skb;
536 	bool done = false;
537 
538 	if (pkt_size >= rx_copybreak)
539 		goto out;
540 
541 	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
542 	if (!skb)
543 		goto out;
544 
545 	pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
546 				PCI_DMA_FROMDEVICE);
547 	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
548 	*sk_buff = skb;
549 	done = true;
550 out:
551 	return done;
552 }
553 
554 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
555 {
556 #define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
557 
558 	if ((status & CRCOK) && !(status & ErrMask))
559 		return 0;
560 
561 	if (!(status & CRCOK))
562 		stats->rx_crc_errors++;
563 	else if (status & OVRUN)
564 		stats->rx_over_errors++;
565 	else if (status & (SHORT | LIMIT))
566 		stats->rx_length_errors++;
567 	else if (status & (MIIER | NIBON | COLON))
568 		stats->rx_frame_errors++;
569 
570 	stats->rx_errors++;
571 	return -1;
572 }
573 
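/*
 * Rx completion handling. cur_rx counts descriptors taken back from the
 * hardware, dirty_rx counts descriptors refilled with a fresh buffer, so
 * cur_rx - dirty_rx is the number of slots still waiting for a buffer.
 */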
574 static int sis190_rx_interrupt(struct net_device *dev,
575 			       struct sis190_private *tp, void __iomem *ioaddr)
576 {
577 	struct net_device_stats *stats = &dev->stats;
578 	u32 rx_left, cur_rx = tp->cur_rx;
579 	u32 delta, count;
580 
581 	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
582 	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
583 
584 	for (; rx_left > 0; rx_left--, cur_rx++) {
585 		unsigned int entry = cur_rx % NUM_RX_DESC;
586 		struct RxDesc *desc = tp->RxDescRing + entry;
587 		u32 status;
588 
589 		if (le32_to_cpu(desc->status) & OWNbit)
590 			break;
591 
592 		status = le32_to_cpu(desc->PSize);
593 
594 		//netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
595 
596 		if (sis190_rx_pkt_err(status, stats) < 0)
597 			sis190_give_to_asic(desc, tp->rx_buf_sz);
598 		else {
599 			struct sk_buff *skb = tp->Rx_skbuff[entry];
600 			dma_addr_t addr = le32_to_cpu(desc->addr);
601 			int pkt_size = (status & RxSizeMask) - 4;
602 			struct pci_dev *pdev = tp->pci_dev;
603 
604 			if (unlikely(pkt_size > tp->rx_buf_sz)) {
605 				netif_info(tp, intr, dev,
606 					   "(frag) status = %08x\n", status);
607 				stats->rx_dropped++;
608 				stats->rx_length_errors++;
609 				sis190_give_to_asic(desc, tp->rx_buf_sz);
610 				continue;
611 			}
612 
614 			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
615 				pci_dma_sync_single_for_device(pdev, addr,
616 					tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
617 				sis190_give_to_asic(desc, tp->rx_buf_sz);
618 			} else {
619 				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
620 						 PCI_DMA_FROMDEVICE);
621 				tp->Rx_skbuff[entry] = NULL;
622 				sis190_make_unusable_by_asic(desc);
623 			}
624 
625 			skb_put(skb, pkt_size);
626 			skb->protocol = eth_type_trans(skb, dev);
627 
628 			sis190_rx_skb(skb);
629 
630 			stats->rx_packets++;
631 			stats->rx_bytes += pkt_size;
632 			if ((status & BCAST) == MCAST)
633 				stats->multicast++;
634 		}
635 	}
636 	count = cur_rx - tp->cur_rx;
637 	tp->cur_rx = cur_rx;
638 
639 	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
640 	if (!delta && count)
641 		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
642 	tp->dirty_rx += delta;
643 
644 	if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
645 		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
646 
647 	return count;
648 }
649 
650 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
651 				struct TxDesc *desc)
652 {
653 	unsigned int len;
654 
655 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
656 
657 	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
658 
659 	memset(desc, 0x00, sizeof(*desc));
660 }
661 
662 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
663 {
664 #define TxErrMask	(WND | TABRT | FIFO | LINK)
665 
666 	if (likely(!(status & TxErrMask)))
667 		return 0;
668 
669 	if (status & WND)
670 		stats->tx_window_errors++;
671 	if (status & TABRT)
672 		stats->tx_aborted_errors++;
673 	if (status & FIFO)
674 		stats->tx_fifo_errors++;
675 	if (status & LINK)
676 		stats->tx_carrier_errors++;
677 
678 	stats->tx_errors++;
679 
680 	return -1;
681 }
682 
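/*
 * Reclaim completed Tx descriptors: walk from dirty_tx towards cur_tx,
 * stop at the first descriptor still owned by the hardware, account the
 * statistics, unmap and free the skbs, and wake the queue if it had been
 * stopped on a full ring.
 */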
683 static void sis190_tx_interrupt(struct net_device *dev,
684 				struct sis190_private *tp, void __iomem *ioaddr)
685 {
686 	struct net_device_stats *stats = &dev->stats;
687 	u32 pending, dirty_tx = tp->dirty_tx;
688 	/*
689 	 * Tracking this would not be needed if it were harmless to re-enable
690 	 * queueing too early (hint: think preempt and unclocked SMP systems).
691 	 */
692 	unsigned int queue_stopped;
693 
694 	smp_rmb();
695 	pending = tp->cur_tx - dirty_tx;
696 	queue_stopped = (pending == NUM_TX_DESC);
697 
698 	for (; pending; pending--, dirty_tx++) {
699 		unsigned int entry = dirty_tx % NUM_TX_DESC;
700 		struct TxDesc *txd = tp->TxDescRing + entry;
701 		u32 status = le32_to_cpu(txd->status);
702 		struct sk_buff *skb;
703 
704 		if (status & OWNbit)
705 			break;
706 
707 		skb = tp->Tx_skbuff[entry];
708 
709 		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
710 			stats->tx_packets++;
711 			stats->tx_bytes += skb->len;
712 			stats->collisions += ((status & ColCountMask) - 1);
713 		}
714 
715 		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
716 		tp->Tx_skbuff[entry] = NULL;
717 		dev_kfree_skb_irq(skb);
718 	}
719 
720 	if (tp->dirty_tx != dirty_tx) {
721 		tp->dirty_tx = dirty_tx;
722 		smp_wmb();
723 		if (queue_stopped)
724 			netif_wake_queue(dev);
725 	}
726 }
727 
728 /*
729  * The interrupt handler does all of the Rx thread work and cleans up after
730  * the Tx thread.
731  */
732 static irqreturn_t sis190_irq(int irq, void *__dev)
733 {
734 	struct net_device *dev = __dev;
735 	struct sis190_private *tp = netdev_priv(dev);
736 	void __iomem *ioaddr = tp->mmio_addr;
737 	unsigned int handled = 0;
738 	u32 status;
739 
740 	status = SIS_R32(IntrStatus);
741 
742 	if ((status == 0xffffffff) || !status)
743 		goto out;
744 
745 	handled = 1;
746 
747 	if (unlikely(!netif_running(dev))) {
748 		sis190_asic_down(ioaddr);
749 		goto out;
750 	}
751 
752 	SIS_W32(IntrStatus, status);
753 
754 //	netif_info(tp, intr, dev, "status = %08x\n", status);
755 
756 	if (status & LinkChange) {
757 		netif_info(tp, intr, dev, "link change\n");
758 		del_timer(&tp->timer);
759 		schedule_work(&tp->phy_task);
760 	}
761 
762 	if (status & RxQInt)
763 		sis190_rx_interrupt(dev, tp, ioaddr);
764 
765 	if (status & TxQ0Int)
766 		sis190_tx_interrupt(dev, tp, ioaddr);
767 out:
768 	return IRQ_RETVAL(handled);
769 }
770 
771 #ifdef CONFIG_NET_POLL_CONTROLLER
772 static void sis190_netpoll(struct net_device *dev)
773 {
774 	struct sis190_private *tp = netdev_priv(dev);
775 	const int irq = tp->pci_dev->irq;
776 
777 	disable_irq(irq);
778 	sis190_irq(irq, dev);
779 	enable_irq(irq);
780 }
781 #endif
782 
783 static void sis190_free_rx_skb(struct sis190_private *tp,
784 			       struct sk_buff **sk_buff, struct RxDesc *desc)
785 {
786 	struct pci_dev *pdev = tp->pci_dev;
787 
788 	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
789 			 PCI_DMA_FROMDEVICE);
790 	dev_kfree_skb(*sk_buff);
791 	*sk_buff = NULL;
792 	sis190_make_unusable_by_asic(desc);
793 }
794 
795 static void sis190_rx_clear(struct sis190_private *tp)
796 {
797 	unsigned int i;
798 
799 	for (i = 0; i < NUM_RX_DESC; i++) {
800 		if (!tp->Rx_skbuff[i])
801 			continue;
802 		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
803 	}
804 }
805 
806 static void sis190_init_ring_indexes(struct sis190_private *tp)
807 {
808 	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
809 }
810 
811 static int sis190_init_ring(struct net_device *dev)
812 {
813 	struct sis190_private *tp = netdev_priv(dev);
814 
815 	sis190_init_ring_indexes(tp);
816 
817 	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
818 	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
819 
820 	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
821 		goto err_rx_clear;
822 
823 	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
824 
825 	return 0;
826 
827 err_rx_clear:
828 	sis190_rx_clear(tp);
829 	return -ENOMEM;
830 }
831 
832 static void sis190_set_rx_mode(struct net_device *dev)
833 {
834 	struct sis190_private *tp = netdev_priv(dev);
835 	void __iomem *ioaddr = tp->mmio_addr;
836 	unsigned long flags;
837 	u32 mc_filter[2];	/* Multicast hash filter */
838 	u16 rx_mode;
839 
840 	if (dev->flags & IFF_PROMISC) {
841 		rx_mode =
842 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
843 			AcceptAllPhys;
844 		mc_filter[1] = mc_filter[0] = 0xffffffff;
845 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
846 		   (dev->flags & IFF_ALLMULTI)) {
847 		/* Too many to filter perfectly -- accept all multicasts. */
848 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
849 		mc_filter[1] = mc_filter[0] = 0xffffffff;
850 	} else {
851 		struct netdev_hw_addr *ha;
852 
853 		rx_mode = AcceptBroadcast | AcceptMyPhys;
854 		mc_filter[1] = mc_filter[0] = 0;
855 		netdev_for_each_mc_addr(ha, dev) {
856 			int bit_nr =
857 				ether_crc(ETH_ALEN, ha->addr) & 0x3f;
858 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
859 			rx_mode |= AcceptMulticast;
860 		}
861 	}
862 
863 	spin_lock_irqsave(&tp->lock, flags);
864 
865 	SIS_W16(RxMacControl, rx_mode | 0x2);
866 	SIS_W32(RxHashTable, mc_filter[0]);
867 	SIS_W32(RxHashTable + 4, mc_filter[1]);
868 
869 	spin_unlock_irqrestore(&tp->lock, flags);
870 }
871 
872 static void sis190_soft_reset(void __iomem *ioaddr)
873 {
874 	SIS_W32(IntrControl, 0x8000);
875 	SIS_PCI_COMMIT();
876 	SIS_W32(IntrControl, 0x0);
877 	sis190_asic_down(ioaddr);
878 }
879 
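/*
 * Bring the MAC up: soft reset, program the descriptor ring base
 * addresses, reset the filter/WoL state, unmask the interrupts this
 * driver handles and finally enable the Tx and Rx engines.
 */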
880 static void sis190_hw_start(struct net_device *dev)
881 {
882 	struct sis190_private *tp = netdev_priv(dev);
883 	void __iomem *ioaddr = tp->mmio_addr;
884 
885 	sis190_soft_reset(ioaddr);
886 
887 	SIS_W32(TxDescStartAddr, tp->tx_dma);
888 	SIS_W32(RxDescStartAddr, tp->rx_dma);
889 
890 	SIS_W32(IntrStatus, 0xffffffff);
891 	SIS_W32(IntrMask, 0x0);
892 	SIS_W32(GMIIControl, 0x0);
893 	SIS_W32(TxMacControl, 0x60);
894 	SIS_W16(RxMacControl, 0x02);
895 	SIS_W32(RxHashTable, 0x0);
896 	SIS_W32(0x6c, 0x0);
897 	SIS_W32(RxWolCtrl, 0x0);
898 	SIS_W32(RxWolData, 0x0);
899 
900 	SIS_PCI_COMMIT();
901 
902 	sis190_set_rx_mode(dev);
903 
904 	/* Enable all known interrupts by setting the interrupt mask. */
905 	SIS_W32(IntrMask, sis190_intr_mask);
906 
907 	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
908 	SIS_W32(RxControl, 0x1a1d);
909 
910 	netif_start_queue(dev);
911 }
912 
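/*
 * Link management work item: wait for a pending PHY reset to complete,
 * then either report that auto-negotiation is still running or program
 * StationControl with the speed/duplex resolved from the link partner
 * abilities (see the reg31 table below).
 */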
913 static void sis190_phy_task(struct work_struct *work)
914 {
915 	struct sis190_private *tp =
916 		container_of(work, struct sis190_private, phy_task);
917 	struct net_device *dev = tp->dev;
918 	void __iomem *ioaddr = tp->mmio_addr;
919 	int phy_id = tp->mii_if.phy_id;
920 	u16 val;
921 
922 	rtnl_lock();
923 
924 	if (!netif_running(dev))
925 		goto out_unlock;
926 
927 	val = mdio_read(ioaddr, phy_id, MII_BMCR);
928 	if (val & BMCR_RESET) {
929 		// FIXME: needlessly high ?  -- FR 02/07/2005
930 		mod_timer(&tp->timer, jiffies + HZ/10);
931 		goto out_unlock;
932 	}
933 
934 	val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
935 	if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
936 		netif_carrier_off(dev);
937 		netif_warn(tp, link, dev, "auto-negotiating...\n");
938 		tp->link_status = LNK_AUTONEG;
939 	} else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
940 		/* Rejoice ! */
941 		struct {
942 			int val;
943 			u32 ctl;
944 			const char *msg;
945 		} reg31[] = {
946 			{ LPA_1000FULL, 0x07000c00 | 0x00001000,
947 				"1000 Mbps Full Duplex" },
948 			{ LPA_1000HALF, 0x07000c00,
949 				"1000 Mbps Half Duplex" },
950 			{ LPA_100FULL, 0x04000800 | 0x00001000,
951 				"100 Mbps Full Duplex" },
952 			{ LPA_100HALF, 0x04000800,
953 				"100 Mbps Half Duplex" },
954 			{ LPA_10FULL, 0x04000400 | 0x00001000,
955 				"10 Mbps Full Duplex" },
956 			{ LPA_10HALF, 0x04000400,
957 				"10 Mbps Half Duplex" },
958 			{ 0, 0x04000400, "unknown" }
959 		}, *p = NULL;
960 		u16 adv, autoexp, gigadv, gigrec;
961 
962 		val = mdio_read(ioaddr, phy_id, 0x1f);
963 		netif_info(tp, link, dev, "mii ext = %04x\n", val);
964 
965 		val = mdio_read(ioaddr, phy_id, MII_LPA);
966 		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
967 		autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
968 		netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
969 			   val, adv, autoexp);
970 
971 		if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
972 			/* check for gigabit speed */
973 			gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
974 			gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
975 			val = (gigadv & (gigrec >> 2));
976 			if (val & ADVERTISE_1000FULL)
977 				p = reg31;
978 			else if (val & ADVERTISE_1000HALF)
979 				p = reg31 + 1;
980 		}
981 		if (!p) {
982 			val &= adv;
983 
984 			for (p = reg31; p->val; p++) {
985 				if ((val & p->val) == p->val)
986 					break;
987 			}
988 		}
989 
990 		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
991 
992 		if ((tp->features & F_HAS_RGMII) &&
993 		    (tp->features & F_PHY_BCM5461)) {
994 			// Set Tx Delay in RGMII mode.
995 			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
996 			udelay(200);
997 			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
998 			p->ctl |= 0x03000000;
999 		}
1000 
1001 		SIS_W32(StationControl, p->ctl);
1002 
1003 		if (tp->features & F_HAS_RGMII) {
1004 			SIS_W32(RGDelay, 0x0441);
1005 			SIS_W32(RGDelay, 0x0440);
1006 		}
1007 
1008 		tp->negotiated_lpa = p->val;
1009 
1010 		netif_info(tp, link, dev, "link on %s mode\n", p->msg);
1011 		netif_carrier_on(dev);
1012 		tp->link_status = LNK_ON;
1013 	} else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
1014 		tp->link_status = LNK_OFF;
1015 	mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
1016 
1017 out_unlock:
1018 	rtnl_unlock();
1019 }
1020 
1021 static void sis190_phy_timer(struct timer_list *t)
1022 {
1023 	struct sis190_private *tp = from_timer(tp, t, timer);
1024 	struct net_device *dev = tp->dev;
1025 
1026 	if (likely(netif_running(dev)))
1027 		schedule_work(&tp->phy_task);
1028 }
1029 
1030 static inline void sis190_delete_timer(struct net_device *dev)
1031 {
1032 	struct sis190_private *tp = netdev_priv(dev);
1033 
1034 	del_timer_sync(&tp->timer);
1035 }
1036 
1037 static inline void sis190_request_timer(struct net_device *dev)
1038 {
1039 	struct sis190_private *tp = netdev_priv(dev);
1040 	struct timer_list *timer = &tp->timer;
1041 
1042 	timer_setup(timer, sis190_phy_timer, 0);
1043 	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1044 	add_timer(timer);
1045 }
1046 
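/*
 * The chip apparently ignores the low bits of RxDesc.size (see
 * RX_BUF_MASK), so round the receive buffer size up to a multiple of 8
 * when a large MTU makes it odd-sized.
 */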
1047 static void sis190_set_rxbufsize(struct sis190_private *tp,
1048 				 struct net_device *dev)
1049 {
1050 	unsigned int mtu = dev->mtu;
1051 
1052 	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1053 	/* RxDesc->size has a licence to kill the lower bits */
1054 	if (tp->rx_buf_sz & 0x07) {
1055 		tp->rx_buf_sz += 8;
1056 		tp->rx_buf_sz &= RX_BUF_MASK;
1057 	}
1058 }
1059 
1060 static int sis190_open(struct net_device *dev)
1061 {
1062 	struct sis190_private *tp = netdev_priv(dev);
1063 	struct pci_dev *pdev = tp->pci_dev;
1064 	int rc = -ENOMEM;
1065 
1066 	sis190_set_rxbufsize(tp, dev);
1067 
1068 	/*
1069 	 * Rx and Tx descriptors need 256 bytes alignment.
1070 	 * pci_alloc_consistent() guarantees a stronger alignment.
1071 	 */
1072 	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1073 	if (!tp->TxDescRing)
1074 		goto out;
1075 
1076 	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1077 	if (!tp->RxDescRing)
1078 		goto err_free_tx_0;
1079 
1080 	rc = sis190_init_ring(dev);
1081 	if (rc < 0)
1082 		goto err_free_rx_1;
1083 
1084 	sis190_request_timer(dev);
1085 
1086 	rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
1087 	if (rc < 0)
1088 		goto err_release_timer_2;
1089 
1090 	sis190_hw_start(dev);
1091 out:
1092 	return rc;
1093 
1094 err_release_timer_2:
1095 	sis190_delete_timer(dev);
1096 	sis190_rx_clear(tp);
1097 err_free_rx_1:
1098 	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1099 err_free_tx_0:
1100 	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1101 	goto out;
1102 }
1103 
1104 static void sis190_tx_clear(struct sis190_private *tp)
1105 {
1106 	unsigned int i;
1107 
1108 	for (i = 0; i < NUM_TX_DESC; i++) {
1109 		struct sk_buff *skb = tp->Tx_skbuff[i];
1110 
1111 		if (!skb)
1112 			continue;
1113 
1114 		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1115 		tp->Tx_skbuff[i] = NULL;
1116 		dev_kfree_skb(skb);
1117 
1118 		tp->dev->stats.tx_dropped++;
1119 	}
1120 	tp->cur_tx = tp->dirty_tx = 0;
1121 }
1122 
1123 static void sis190_down(struct net_device *dev)
1124 {
1125 	struct sis190_private *tp = netdev_priv(dev);
1126 	void __iomem *ioaddr = tp->mmio_addr;
1127 	unsigned int poll_locked = 0;
1128 
1129 	sis190_delete_timer(dev);
1130 
1131 	netif_stop_queue(dev);
1132 
1133 	do {
1134 		spin_lock_irq(&tp->lock);
1135 
1136 		sis190_asic_down(ioaddr);
1137 
1138 		spin_unlock_irq(&tp->lock);
1139 
1140 		synchronize_irq(tp->pci_dev->irq);
1141 
1142 		if (!poll_locked)
1143 			poll_locked++;
1144 
1145 		synchronize_sched();
1146 
1147 	} while (SIS_R32(IntrMask));
1148 
1149 	sis190_tx_clear(tp);
1150 	sis190_rx_clear(tp);
1151 }
1152 
1153 static int sis190_close(struct net_device *dev)
1154 {
1155 	struct sis190_private *tp = netdev_priv(dev);
1156 	struct pci_dev *pdev = tp->pci_dev;
1157 
1158 	sis190_down(dev);
1159 
1160 	free_irq(pdev->irq, dev);
1161 
1162 	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1163 	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1164 
1165 	tp->TxDescRing = NULL;
1166 	tp->RxDescRing = NULL;
1167 
1168 	return 0;
1169 }
1170 
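/*
 * Queue one frame: pad runts to ETH_ZLEN, map the data, fill in the
 * descriptor and set OWNbit last (after the wmb()) before kicking the
 * transmitter through TxControl. The half-duplex flags mirror what
 * sis190_phy_task() negotiated.
 */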
1171 static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1172 				     struct net_device *dev)
1173 {
1174 	struct sis190_private *tp = netdev_priv(dev);
1175 	void __iomem *ioaddr = tp->mmio_addr;
1176 	u32 len, entry, dirty_tx;
1177 	struct TxDesc *desc;
1178 	dma_addr_t mapping;
1179 
1180 	if (unlikely(skb->len < ETH_ZLEN)) {
1181 		if (skb_padto(skb, ETH_ZLEN)) {
1182 			dev->stats.tx_dropped++;
1183 			goto out;
1184 		}
1185 		len = ETH_ZLEN;
1186 	} else {
1187 		len = skb->len;
1188 	}
1189 
1190 	entry = tp->cur_tx % NUM_TX_DESC;
1191 	desc = tp->TxDescRing + entry;
1192 
1193 	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1194 		netif_stop_queue(dev);
1195 		netif_err(tp, tx_err, dev,
1196 			  "BUG! Tx Ring full when queue awake!\n");
1197 		return NETDEV_TX_BUSY;
1198 	}
1199 
1200 	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1201 	if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
1202 		netif_err(tp, tx_err, dev,
1203 				"PCI mapping failed, dropping packet");
1204 		return NETDEV_TX_BUSY;
1205 	}
1206 
1207 	tp->Tx_skbuff[entry] = skb;
1208 
1209 	desc->PSize = cpu_to_le32(len);
1210 	desc->addr = cpu_to_le32(mapping);
1211 
1212 	desc->size = cpu_to_le32(len);
1213 	if (entry == (NUM_TX_DESC - 1))
1214 		desc->size |= cpu_to_le32(RingEnd);
1215 
1216 	wmb();
1217 
1218 	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1219 	if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
1220 		/* Half Duplex */
1221 		desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
1222 		if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
1223 			desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
1224 	}
1225 
1226 	tp->cur_tx++;
1227 
1228 	smp_wmb();
1229 
1230 	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1231 
1232 	dirty_tx = tp->dirty_tx;
1233 	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1234 		netif_stop_queue(dev);
1235 		smp_rmb();
1236 		if (dirty_tx != tp->dirty_tx)
1237 			netif_wake_queue(dev);
1238 	}
1239 out:
1240 	return NETDEV_TX_OK;
1241 }
1242 
1243 static void sis190_free_phy(struct list_head *first_phy)
1244 {
1245 	struct sis190_phy *cur, *next;
1246 
1247 	list_for_each_entry_safe(cur, next, first_phy, list) {
1248 		kfree(cur);
1249 	}
1250 }
1251 
1252 /**
1253  *	sis190_default_phy - Select default PHY for sis190 mac.
1254  *	@dev: the net device to probe for
1255  *
1256  *	Select the first detected PHY with link up as the default.
1257  *	If none has link, select a PHY whose type is HOME as the default.
1258  *	If no HOME PHY exists, select a LAN one.
1259  */
1260 static u16 sis190_default_phy(struct net_device *dev)
1261 {
1262 	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1263 	struct sis190_private *tp = netdev_priv(dev);
1264 	struct mii_if_info *mii_if = &tp->mii_if;
1265 	void __iomem *ioaddr = tp->mmio_addr;
1266 	u16 status;
1267 
1268 	phy_home = phy_default = phy_lan = NULL;
1269 
1270 	list_for_each_entry(phy, &tp->first_phy, list) {
1271 		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1272 
1273 		// Link ON & Not select default PHY & not ghost PHY.
1274 		if ((status & BMSR_LSTATUS) &&
1275 		    !phy_default &&
1276 		    (phy->type != UNKNOWN)) {
1277 			phy_default = phy;
1278 		} else {
1279 			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1280 			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1281 				   status | BMCR_ANENABLE | BMCR_ISOLATE);
1282 			if (phy->type == HOME)
1283 				phy_home = phy;
1284 			else if (phy->type == LAN)
1285 				phy_lan = phy;
1286 		}
1287 	}
1288 
1289 	if (!phy_default) {
1290 		if (phy_home)
1291 			phy_default = phy_home;
1292 		else if (phy_lan)
1293 			phy_default = phy_lan;
1294 		else
1295 			phy_default = list_first_entry(&tp->first_phy,
1296 						 struct sis190_phy, list);
1297 	}
1298 
1299 	if (mii_if->phy_id != phy_default->phy_id) {
1300 		mii_if->phy_id = phy_default->phy_id;
1301 		if (netif_msg_probe(tp))
1302 			pr_info("%s: Using transceiver at address %d as default\n",
1303 				pci_name(tp->pci_dev), mii_if->phy_id);
1304 	}
1305 
1306 	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1307 	status &= (~BMCR_ISOLATE);
1308 
1309 	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1310 	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1311 
1312 	return status;
1313 }
1314 
1315 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1316 			    struct sis190_phy *phy, unsigned int phy_id,
1317 			    u16 mii_status)
1318 {
1319 	void __iomem *ioaddr = tp->mmio_addr;
1320 	struct mii_chip_info *p;
1321 
1322 	INIT_LIST_HEAD(&phy->list);
1323 	phy->status = mii_status;
1324 	phy->phy_id = phy_id;
1325 
1326 	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1327 	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1328 
1329 	for (p = mii_chip_table; p->type; p++) {
1330 		if ((p->id[0] == phy->id[0]) &&
1331 		    (p->id[1] == (phy->id[1] & 0xfff0))) {
1332 			break;
1333 		}
1334 	}
1335 
1336 	if (p->id[1]) {
1337 		phy->type = (p->type == MIX) ?
1338 			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1339 				LAN : HOME) : p->type;
1340 		tp->features |= p->feature;
1341 		if (netif_msg_probe(tp))
1342 			pr_info("%s: %s transceiver at address %d\n",
1343 				pci_name(tp->pci_dev), p->name, phy_id);
1344 	} else {
1345 		phy->type = UNKNOWN;
1346 		if (netif_msg_probe(tp))
1347 			pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1348 				pci_name(tp->pci_dev),
1349 				phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1350 	}
1351 }
1352 
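/*
 * Marvell 88E1111 fixup. The values written to PHY registers 0x1b and
 * 0x14 below come from the vendor driver and presumably select the
 * proper (R)GMII interface mode; only the constants differ between the
 * RGMII and GMII cases.
 */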
1353 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1354 {
1355 	if (tp->features & F_PHY_88E1111) {
1356 		void __iomem *ioaddr = tp->mmio_addr;
1357 		int phy_id = tp->mii_if.phy_id;
1358 		u16 reg[2][2] = {
1359 			{ 0x808b, 0x0ce1 },
1360 			{ 0x808f, 0x0c60 }
1361 		}, *p;
1362 
1363 		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1364 
1365 		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1366 		udelay(200);
1367 		mdio_write(ioaddr, phy_id, 0x14, p[1]);
1368 		udelay(200);
1369 	}
1370 }
1371 
1372 /**
1373  *	sis190_mii_probe - Probe MII PHY for sis190
1374  *	@dev: the net device to probe for
1375  *
1376  *	Search all 32 possible MII PHY addresses.
1377  *	Identify and select the current PHY if one is found,
1378  *	or return an error if none is found.
1379  */
1380 static int sis190_mii_probe(struct net_device *dev)
1381 {
1382 	struct sis190_private *tp = netdev_priv(dev);
1383 	struct mii_if_info *mii_if = &tp->mii_if;
1384 	void __iomem *ioaddr = tp->mmio_addr;
1385 	int phy_id;
1386 	int rc = 0;
1387 
1388 	INIT_LIST_HEAD(&tp->first_phy);
1389 
1390 	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1391 		struct sis190_phy *phy;
1392 		u16 status;
1393 
1394 		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1395 
1396 		// Try next mii if the current one is not accessible.
1397 		if (status == 0xffff || status == 0x0000)
1398 			continue;
1399 
1400 		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1401 		if (!phy) {
1402 			sis190_free_phy(&tp->first_phy);
1403 			rc = -ENOMEM;
1404 			goto out;
1405 		}
1406 
1407 		sis190_init_phy(dev, tp, phy, phy_id, status);
1408 
1409 		list_add(&tp->first_phy, &phy->list);
1410 	}
1411 
1412 	if (list_empty(&tp->first_phy)) {
1413 		if (netif_msg_probe(tp))
1414 			pr_info("%s: No MII transceivers found!\n",
1415 				pci_name(tp->pci_dev));
1416 		rc = -EIO;
1417 		goto out;
1418 	}
1419 
1420 	/* Select default PHY for mac */
1421 	sis190_default_phy(dev);
1422 
1423 	sis190_mii_probe_88e1111_fixup(tp);
1424 
1425 	mii_if->dev = dev;
1426 	mii_if->mdio_read = __mdio_read;
1427 	mii_if->mdio_write = __mdio_write;
1428 	mii_if->phy_id_mask = PHY_ID_ANY;
1429 	mii_if->reg_num_mask = MII_REG_ANY;
1430 out:
1431 	return rc;
1432 }
1433 
1434 static void sis190_mii_remove(struct net_device *dev)
1435 {
1436 	struct sis190_private *tp = netdev_priv(dev);
1437 
1438 	sis190_free_phy(&tp->first_phy);
1439 }
1440 
1441 static void sis190_release_board(struct pci_dev *pdev)
1442 {
1443 	struct net_device *dev = pci_get_drvdata(pdev);
1444 	struct sis190_private *tp = netdev_priv(dev);
1445 
1446 	iounmap(tp->mmio_addr);
1447 	pci_release_regions(pdev);
1448 	pci_disable_device(pdev);
1449 	free_netdev(dev);
1450 }
1451 
1452 static struct net_device *sis190_init_board(struct pci_dev *pdev)
1453 {
1454 	struct sis190_private *tp;
1455 	struct net_device *dev;
1456 	void __iomem *ioaddr;
1457 	int rc;
1458 
1459 	dev = alloc_etherdev(sizeof(*tp));
1460 	if (!dev) {
1461 		rc = -ENOMEM;
1462 		goto err_out_0;
1463 	}
1464 
1465 	SET_NETDEV_DEV(dev, &pdev->dev);
1466 
1467 	tp = netdev_priv(dev);
1468 	tp->dev = dev;
1469 	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1470 
1471 	rc = pci_enable_device(pdev);
1472 	if (rc < 0) {
1473 		if (netif_msg_probe(tp))
1474 			pr_err("%s: enable failure\n", pci_name(pdev));
1475 		goto err_free_dev_1;
1476 	}
1477 
1478 	rc = -ENODEV;
1479 
1480 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1481 		if (netif_msg_probe(tp))
1482 			pr_err("%s: region #0 is no MMIO resource\n",
1483 			       pci_name(pdev));
1484 		goto err_pci_disable_2;
1485 	}
1486 	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1487 		if (netif_msg_probe(tp))
1488 			pr_err("%s: invalid PCI region size(s)\n",
1489 			       pci_name(pdev));
1490 		goto err_pci_disable_2;
1491 	}
1492 
1493 	rc = pci_request_regions(pdev, DRV_NAME);
1494 	if (rc < 0) {
1495 		if (netif_msg_probe(tp))
1496 			pr_err("%s: could not request regions\n",
1497 			       pci_name(pdev));
1498 		goto err_pci_disable_2;
1499 	}
1500 
1501 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1502 	if (rc < 0) {
1503 		if (netif_msg_probe(tp))
1504 			pr_err("%s: DMA configuration failed\n",
1505 			       pci_name(pdev));
1506 		goto err_free_res_3;
1507 	}
1508 
1509 	pci_set_master(pdev);
1510 
1511 	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1512 	if (!ioaddr) {
1513 		if (netif_msg_probe(tp))
1514 			pr_err("%s: cannot remap MMIO, aborting\n",
1515 			       pci_name(pdev));
1516 		rc = -EIO;
1517 		goto err_free_res_3;
1518 	}
1519 
1520 	tp->pci_dev = pdev;
1521 	tp->mmio_addr = ioaddr;
1522 	tp->link_status = LNK_OFF;
1523 
1524 	sis190_irq_mask_and_ack(ioaddr);
1525 
1526 	sis190_soft_reset(ioaddr);
1527 out:
1528 	return dev;
1529 
1530 err_free_res_3:
1531 	pci_release_regions(pdev);
1532 err_pci_disable_2:
1533 	pci_disable_device(pdev);
1534 err_free_dev_1:
1535 	free_netdev(dev);
1536 err_out_0:
1537 	dev = ERR_PTR(rc);
1538 	goto out;
1539 }
1540 
1541 static void sis190_tx_timeout(struct net_device *dev)
1542 {
1543 	struct sis190_private *tp = netdev_priv(dev);
1544 	void __iomem *ioaddr = tp->mmio_addr;
1545 	u8 tmp8;
1546 
1547 	/* Disable Tx, if not already */
1548 	tmp8 = SIS_R8(TxControl);
1549 	if (tmp8 & CmdTxEnb)
1550 		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1551 
1552 	netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
1553 		   SIS_R32(TxControl), SIS_R32(TxSts));
1554 
1555 	/* Disable interrupts by clearing the interrupt mask. */
1556 	SIS_W32(IntrMask, 0x0000);
1557 
1558 	/* Stop a shared interrupt from scavenging while we are. */
1559 	spin_lock_irq(&tp->lock);
1560 	sis190_tx_clear(tp);
1561 	spin_unlock_irq(&tp->lock);
1562 
1563 	/* ...and finally, reset everything. */
1564 	sis190_hw_start(dev);
1565 
1566 	netif_wake_queue(dev);
1567 }
1568 
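/* Bit 7 of the EEPROM info word (or APC register 0x12) apparently selects RGMII mode. */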
1569 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1570 {
1571 	tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1572 }
1573 
1574 static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1575 					   struct net_device *dev)
1576 {
1577 	struct sis190_private *tp = netdev_priv(dev);
1578 	void __iomem *ioaddr = tp->mmio_addr;
1579 	u16 sig;
1580 	int i;
1581 
1582 	if (netif_msg_probe(tp))
1583 		pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
1584 
1585 	/* Check to see if there is a sane EEPROM */
1586 	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1587 
1588 	if ((sig == 0xffff) || (sig == 0x0000)) {
1589 		if (netif_msg_probe(tp))
1590 			pr_info("%s: Error EEPROM read %x\n",
1591 				pci_name(pdev), sig);
1592 		return -EIO;
1593 	}
1594 
1595 	/* Get MAC address from EEPROM */
1596 	for (i = 0; i < ETH_ALEN / 2; i++) {
1597 		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1598 
1599 		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1600 	}
1601 
1602 	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1603 
1604 	return 0;
1605 }
1606 
1607 /**
1608  *	sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1609  *	@pdev: PCI device
1610  *	@dev:  network device to get address for
1611  *
1612  *	SiS96x models store the MAC address in APC CMOS RAM, which is
1613  *	accessed through the ISA bridge.
1614  *	The MAC address is read into @dev->dev_addr.
1615  */
1616 static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1617 					struct net_device *dev)
1618 {
1619 	static const u16 ids[] = { 0x0965, 0x0966, 0x0968 };
1620 	struct sis190_private *tp = netdev_priv(dev);
1621 	struct pci_dev *isa_bridge;
1622 	u8 reg, tmp8;
1623 	unsigned int i;
1624 
1625 	if (netif_msg_probe(tp))
1626 		pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
1627 
1628 	for (i = 0; i < ARRAY_SIZE(ids); i++) {
1629 		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1630 		if (isa_bridge)
1631 			break;
1632 	}
1633 
1634 	if (!isa_bridge) {
1635 		if (netif_msg_probe(tp))
1636 			pr_info("%s: Can not find ISA bridge\n",
1637 				pci_name(pdev));
1638 		return -EIO;
1639 	}
1640 
1641 	/* Enable port 78h & 79h to access APC Registers. */
1642 	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1643 	reg = (tmp8 & ~0x02);
1644 	pci_write_config_byte(isa_bridge, 0x48, reg);
1645 	udelay(50);
1646 	pci_read_config_byte(isa_bridge, 0x48, &reg);
1647 
1648 	for (i = 0; i < ETH_ALEN; i++) {
1649 		outb(0x9 + i, 0x78);
1650 		dev->dev_addr[i] = inb(0x79);
1651 	}
1652 
1653 	outb(0x12, 0x78);
1654 	reg = inb(0x79);
1655 
1656 	sis190_set_rgmii(tp, reg);
1657 
1658 	/* Restore the value to ISA Bridge */
1659 	pci_write_config_byte(isa_bridge, 0x48, tmp8);
1660 	pci_dev_put(isa_bridge);
1661 
1662 	return 0;
1663 }
1664 
1665 /**
1666  *      sis190_init_rxfilter - Initialize the Rx filter
1667  *      @dev: network device to initialize
1668  *
1669  *      Set receive filter address to our MAC address
1670  *      and enable packet filtering.
1671  */
1672 static inline void sis190_init_rxfilter(struct net_device *dev)
1673 {
1674 	struct sis190_private *tp = netdev_priv(dev);
1675 	void __iomem *ioaddr = tp->mmio_addr;
1676 	u16 ctl;
1677 	int i;
1678 
1679 	ctl = SIS_R16(RxMacControl);
1680 	/*
1681 	 * Disable packet filtering before setting filter.
1682 	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1683 	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1684 	 */
1685 	SIS_W16(RxMacControl, ctl & ~0x0f00);
1686 
1687 	for (i = 0; i < ETH_ALEN; i++)
1688 		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1689 
1690 	SIS_W16(RxMacControl, ctl);
1691 	SIS_PCI_COMMIT();
1692 }
1693 
1694 static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1695 {
1696 	int rc;
1697 
1698 	rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1699 	if (rc < 0) {
1700 		u8 reg;
1701 
1702 		pci_read_config_byte(pdev, 0x73, &reg);
1703 
1704 		if (reg & 0x00000001)
1705 			rc = sis190_get_mac_addr_from_apc(pdev, dev);
1706 	}
1707 	return rc;
1708 }
1709 
1710 static void sis190_set_speed_auto(struct net_device *dev)
1711 {
1712 	struct sis190_private *tp = netdev_priv(dev);
1713 	void __iomem *ioaddr = tp->mmio_addr;
1714 	int phy_id = tp->mii_if.phy_id;
1715 	int val;
1716 
1717 	netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
1718 
1719 	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1720 
1721 	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1722 	// unchanged.
1723 	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1724 		   ADVERTISE_100FULL | ADVERTISE_10FULL |
1725 		   ADVERTISE_100HALF | ADVERTISE_10HALF);
1726 
1727 	// Enable 1000 Full Mode.
1728 	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1729 
1730 	// Enable auto-negotiation and restart auto-negotiation.
1731 	mdio_write(ioaddr, phy_id, MII_BMCR,
1732 		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1733 }
1734 
1735 static int sis190_get_link_ksettings(struct net_device *dev,
1736 				     struct ethtool_link_ksettings *cmd)
1737 {
1738 	struct sis190_private *tp = netdev_priv(dev);
1739 
1740 	mii_ethtool_get_link_ksettings(&tp->mii_if, cmd);
1741 
1742 	return 0;
1743 }
1744 
1745 static int sis190_set_link_ksettings(struct net_device *dev,
1746 				     const struct ethtool_link_ksettings *cmd)
1747 {
1748 	struct sis190_private *tp = netdev_priv(dev);
1749 
1750 	return mii_ethtool_set_link_ksettings(&tp->mii_if, cmd);
1751 }
1752 
1753 static void sis190_get_drvinfo(struct net_device *dev,
1754 			       struct ethtool_drvinfo *info)
1755 {
1756 	struct sis190_private *tp = netdev_priv(dev);
1757 
1758 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1759 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1760 	strlcpy(info->bus_info, pci_name(tp->pci_dev),
1761 		sizeof(info->bus_info));
1762 }
1763 
1764 static int sis190_get_regs_len(struct net_device *dev)
1765 {
1766 	return SIS190_REGS_SIZE;
1767 }
1768 
1769 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1770 			    void *p)
1771 {
1772 	struct sis190_private *tp = netdev_priv(dev);
1773 	unsigned long flags;
1774 
1775 	spin_lock_irqsave(&tp->lock, flags);
1776 	memcpy_fromio(p, tp->mmio_addr, regs->len);
1777 	spin_unlock_irqrestore(&tp->lock, flags);
1778 }
1779 
1780 static int sis190_nway_reset(struct net_device *dev)
1781 {
1782 	struct sis190_private *tp = netdev_priv(dev);
1783 
1784 	return mii_nway_restart(&tp->mii_if);
1785 }
1786 
1787 static u32 sis190_get_msglevel(struct net_device *dev)
1788 {
1789 	struct sis190_private *tp = netdev_priv(dev);
1790 
1791 	return tp->msg_enable;
1792 }
1793 
1794 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1795 {
1796 	struct sis190_private *tp = netdev_priv(dev);
1797 
1798 	tp->msg_enable = value;
1799 }
1800 
1801 static const struct ethtool_ops sis190_ethtool_ops = {
1802 	.get_drvinfo	= sis190_get_drvinfo,
1803 	.get_regs_len	= sis190_get_regs_len,
1804 	.get_regs	= sis190_get_regs,
1805 	.get_link	= ethtool_op_get_link,
1806 	.get_msglevel	= sis190_get_msglevel,
1807 	.set_msglevel	= sis190_set_msglevel,
1808 	.nway_reset	= sis190_nway_reset,
1809 	.get_link_ksettings = sis190_get_link_ksettings,
1810 	.set_link_ksettings = sis190_set_link_ksettings,
1811 };
1812 
1813 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1814 {
1815 	struct sis190_private *tp = netdev_priv(dev);
1816 
1817 	return !netif_running(dev) ? -EINVAL :
1818 		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1819 }
1820 
1821 static int sis190_mac_addr(struct net_device *dev, void *p)
1822 {
1823 	int rc;
1824 
1825 	rc = eth_mac_addr(dev, p);
1826 	if (!rc)
1827 		sis190_init_rxfilter(dev);
1828 	return rc;
1829 }
1830 
1831 static const struct net_device_ops sis190_netdev_ops = {
1832 	.ndo_open		= sis190_open,
1833 	.ndo_stop		= sis190_close,
1834 	.ndo_do_ioctl		= sis190_ioctl,
1835 	.ndo_start_xmit		= sis190_start_xmit,
1836 	.ndo_tx_timeout		= sis190_tx_timeout,
1837 	.ndo_set_rx_mode	= sis190_set_rx_mode,
1838 	.ndo_set_mac_address	= sis190_mac_addr,
1839 	.ndo_validate_addr	= eth_validate_addr,
1840 #ifdef CONFIG_NET_POLL_CONTROLLER
1841 	.ndo_poll_controller	 = sis190_netpoll,
1842 #endif
1843 };
1844 
1845 static int sis190_init_one(struct pci_dev *pdev,
1846 			   const struct pci_device_id *ent)
1847 {
1848 	static int printed_version = 0;
1849 	struct sis190_private *tp;
1850 	struct net_device *dev;
1851 	void __iomem *ioaddr;
1852 	int rc;
1853 
1854 	if (!printed_version) {
1855 		if (netif_msg_drv(&debug))
1856 			pr_info(SIS190_DRIVER_NAME " loaded\n");
1857 		printed_version = 1;
1858 	}
1859 
1860 	dev = sis190_init_board(pdev);
1861 	if (IS_ERR(dev)) {
1862 		rc = PTR_ERR(dev);
1863 		goto out;
1864 	}
1865 
1866 	pci_set_drvdata(pdev, dev);
1867 
1868 	tp = netdev_priv(dev);
1869 	ioaddr = tp->mmio_addr;
1870 
1871 	rc = sis190_get_mac_addr(pdev, dev);
1872 	if (rc < 0)
1873 		goto err_release_board;
1874 
1875 	sis190_init_rxfilter(dev);
1876 
1877 	INIT_WORK(&tp->phy_task, sis190_phy_task);
1878 
1879 	dev->netdev_ops = &sis190_netdev_ops;
1880 
1881 	dev->ethtool_ops = &sis190_ethtool_ops;
1882 	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1883 
1884 	spin_lock_init(&tp->lock);
1885 
1886 	rc = sis190_mii_probe(dev);
1887 	if (rc < 0)
1888 		goto err_release_board;
1889 
1890 	rc = register_netdev(dev);
1891 	if (rc < 0)
1892 		goto err_remove_mii;
1893 
1894 	if (netif_msg_probe(tp)) {
1895 		netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
1896 			    pci_name(pdev),
1897 			    sis_chip_info[ent->driver_data].name,
1898 			    ioaddr, pdev->irq, dev->dev_addr);
1899 		netdev_info(dev, "%s mode.\n",
1900 			    (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1901 	}
1902 
1903 	netif_carrier_off(dev);
1904 
1905 	sis190_set_speed_auto(dev);
1906 out:
1907 	return rc;
1908 
1909 err_remove_mii:
1910 	sis190_mii_remove(dev);
1911 err_release_board:
1912 	sis190_release_board(pdev);
1913 	goto out;
1914 }
1915 
1916 static void sis190_remove_one(struct pci_dev *pdev)
1917 {
1918 	struct net_device *dev = pci_get_drvdata(pdev);
1919 	struct sis190_private *tp = netdev_priv(dev);
1920 
1921 	sis190_mii_remove(dev);
1922 	cancel_work_sync(&tp->phy_task);
1923 	unregister_netdev(dev);
1924 	sis190_release_board(pdev);
1925 }
1926 
1927 static struct pci_driver sis190_pci_driver = {
1928 	.name		= DRV_NAME,
1929 	.id_table	= sis190_pci_tbl,
1930 	.probe		= sis190_init_one,
1931 	.remove		= sis190_remove_one,
1932 };
1933 
1934 module_pci_driver(sis190_pci_driver);
1935