1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3 	Written 1998-2001 by Donald Becker.
4 
5 	Current Maintainer: Roger Luethi <rl@hellgate.ch>
6 
7 	This software may be used and distributed according to the terms of
8 	the GNU General Public License (GPL), incorporated herein by reference.
9 	Drivers based on or derived from this code fall under the GPL and must
10 	retain the authorship, copyright and license notice.  This file is not
11 	a complete program and may only be used when the entire operating
12 	system is licensed under the GPL.
13 
14 	This driver is designed for the VIA VT86C100A Rhine-I.
15 	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 	and management NIC 6105M).
17 
18 	The author may be reached as becker@scyld.com, or C/O
19 	Scyld Computing Corporation
20 	410 Severn Ave., Suite 210
21 	Annapolis MD 21403
22 
23 
24 	This driver contains some changes from the original Donald Becker
25 	version. He may or may not be interested in bug reports on this
26 	code. You can find his versions at:
27 	http://www.scyld.com/network/via-rhine.html
28 	[link no longer provides useful info -jgarzik]
29 
30 */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #define DRV_NAME	"via-rhine"
35 #define DRV_VERSION	"1.5.1"
36 #define DRV_RELDATE	"2010-10-09"
37 
38 #include <linux/types.h>
39 
40 /* A few user-configurable values.
41    These may be modified when a driver module is loaded. */
42 static int debug = 0;
43 #define RHINE_MSG_DEFAULT \
44         (0x0000)
45 
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47    Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
50 	defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
52 #else
53 static int rx_copybreak;
54 #endif
55 
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 static bool avoid_D3;
59 
60 /*
61  * In case you are looking for 'options[]' or 'full_duplex[]', they
62  * are gone. Use ethtool(8) instead.
63  */
64 
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66    The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
68 
69 
70 /* Operational parameters that are set at compile time. */
71 
72 /* Keep the ring sizes a power of two for compile efficiency.
73  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74  * Making the Tx ring too large decreases the effectiveness of channel
75  * bonding and packet priority.
76  * With BQL support, we can increase TX ring safely.
77  * There are no ill effects from too-large receive rings.
78  */
79 #define TX_RING_SIZE	64
80 #define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
81 #define RX_RING_SIZE	64
82 
83 /* Operational parameters that usually are not changed. */
84 
85 /* Time in jiffies before concluding the transmitter is hung. */
86 #define TX_TIMEOUT	(2*HZ)
87 
88 #define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
89 
90 #include <linux/module.h>
91 #include <linux/moduleparam.h>
92 #include <linux/kernel.h>
93 #include <linux/string.h>
94 #include <linux/timer.h>
95 #include <linux/errno.h>
96 #include <linux/ioport.h>
97 #include <linux/interrupt.h>
98 #include <linux/pci.h>
99 #include <linux/of_device.h>
100 #include <linux/of_irq.h>
101 #include <linux/platform_device.h>
102 #include <linux/dma-mapping.h>
103 #include <linux/netdevice.h>
104 #include <linux/etherdevice.h>
105 #include <linux/skbuff.h>
106 #include <linux/init.h>
107 #include <linux/delay.h>
108 #include <linux/mii.h>
109 #include <linux/ethtool.h>
110 #include <linux/crc32.h>
111 #include <linux/if_vlan.h>
112 #include <linux/bitops.h>
113 #include <linux/workqueue.h>
114 #include <asm/processor.h>	/* Processor type for cache alignment. */
115 #include <asm/io.h>
116 #include <asm/irq.h>
117 #include <linux/uaccess.h>
118 #include <linux/dmi.h>
119 
120 /* These identify the driver base version and may not be removed. */
121 static const char version[] =
122 	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
123 
124 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
125 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
126 MODULE_LICENSE("GPL");
127 
128 module_param(debug, int, 0);
129 module_param(rx_copybreak, int, 0);
130 module_param(avoid_D3, bool, 0);
131 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
132 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
133 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
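/* Example module load (illustrative values): "modprobe via-rhine rx_copybreak=200 avoid_D3=1"
 * copies received frames shorter than 200 bytes and skips the D3 power state at shutdown. */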
134 
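/* Number of entries in the multicast and VLAN CAM filter tables. */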
135 #define MCAM_SIZE	32
136 #define VCAM_SIZE	32
137 
138 /*
139 		Theory of Operation
140 
141 I. Board Compatibility
142 
143 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
144 controller.
145 
146 II. Board-specific settings
147 
148 Boards with this chip are functional only in a bus-master PCI slot.
149 
150 Many operational settings are loaded from the EEPROM to the Config word at
151 offset 0x78. For most of these settings, this driver assumes that they are
152 correct.
153 If this driver is compiled to use PCI memory space operations the EEPROM
154 must be configured to enable memory ops.
155 
156 III. Driver operation
157 
158 IIIa. Ring buffers
159 
160 This driver uses two statically allocated fixed-size descriptor lists
161 formed into rings by a branch from the final descriptor to the beginning of
162 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
163 
164 IIIb/c. Transmit/Receive Structure
165 
166 This driver attempts to use a zero-copy receive and transmit scheme.
167 
168 Alas, all data buffers are required to start on a 32 bit boundary, so
169 the driver must often copy transmit packets into bounce buffers.
170 
171 The driver allocates full frame size skbuffs for the Rx ring buffers at
172 open() time and passes the skb->data field to the chip as receive data
173 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
174 a fresh skbuff is allocated and the frame is copied to the new skbuff.
175 When the incoming frame is larger, the skbuff is passed directly up the
176 protocol stack. Buffers consumed this way are replaced by newly allocated
177 skbuffs in the last phase of rhine_rx().
178 
179 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
180 using a full-sized skbuff for small frames vs. the copying costs of larger
181 frames. New boards are typically used in generously configured machines
182 and the underfilled buffers have negligible impact compared to the benefit of
183 a single allocation size, so the default value of zero results in never
184 copying packets. When copying is done, the cost is usually mitigated by using
185 a combined copy/checksum routine. Copying also preloads the cache, which is
186 most useful with small frames.
187 
188 Since the VIA chips are only able to transfer data to buffers on 32 bit
189 boundaries, the IP header at offset 14 in an ethernet frame isn't
190 longword aligned for further processing. Copying these unaligned buffers
191 has the beneficial effect of 16-byte aligning the IP header.
192 
193 IIId. Synchronization
194 
195 The driver runs as two independent, single-threaded flows of control. One
196 is the send-packet routine, which enforces single-threaded use by the
197 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
198 which is single threaded by the hardware and interrupt handling software.
199 
200 The send packet thread has partial control over the Tx ring. It locks the
201 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
202 the ring is not available it stops the transmit queue by
203 calling netif_stop_queue.
204 
205 The interrupt handler has exclusive control over the Rx ring and records stats
206 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
207 empty by incrementing the dirty_tx mark. If at least half of the entries in
208 the Tx ring are available the transmit queue is woken up if it was stopped.
209 
210 IV. Notes
211 
212 IVb. References
213 
214 Preliminary VT86C100A manual from http://www.via.com.tw/
215 http://www.scyld.com/expert/100mbps.html
216 http://www.scyld.com/expert/NWay.html
217 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
218 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
219 
220 
221 IVc. Errata
222 
223 The VT86C100A manual is not a reliable source of information.
224 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
225 in significant performance degradation for bounce buffer copies on transmit
226 and unaligned IP headers on receive.
227 The chip does not pad to minimum transmit length.
228 
229 */
230 
231 
232 /* This table drives the PCI probe routines. It's mostly boilerplate in all
233    of the drivers, and will likely be provided by some future kernel.
234    Note the matching code -- the first table entry matches all 56** cards but
235    the second only the 1234 card.
236 */
237 
238 enum rhine_revs {
239 	VT86C100A	= 0x00,
240 	VTunknown0	= 0x20,
241 	VT6102		= 0x40,
242 	VT8231		= 0x50,	/* Integrated MAC */
243 	VT8233		= 0x60,	/* Integrated MAC */
244 	VT8235		= 0x74,	/* Integrated MAC */
245 	VT8237		= 0x78,	/* Integrated MAC */
246 	VTunknown1	= 0x7C,
247 	VT6105		= 0x80,
248 	VT6105_B0	= 0x83,
249 	VT6105L		= 0x8A,
250 	VT6107		= 0x8C,
251 	VTunknown2	= 0x8E,
252 	VT6105M		= 0x90,	/* Management adapter */
253 };
254 
255 enum rhine_quirks {
256 	rqWOL		= 0x0001,	/* Wake-On-LAN support */
257 	rqForceReset	= 0x0002,
258 	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
259 	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
260 	rqRhineI	= 0x0100,	/* See comment below */
261 	rqIntPHY	= 0x0200,	/* Integrated PHY */
262 	rqMgmt		= 0x0400,	/* Management adapter */
263 	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
264 					 * switched from PIO mode to MMIO
265 					 * (only applies to PCI)
266 					 */
267 };
268 /*
269  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
270  * MMIO as well as for the collision counter and the Tx FIFO underflow
271  * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
272  */
273 
274 /* Beware of PCI posted writes */
275 #define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
276 
277 static const struct pci_device_id rhine_pci_tbl[] = {
278 	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
279 	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
280 	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
281 	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
282 	{ }	/* terminate list */
283 };
284 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
285 
286 /* OpenFirmware identifiers for platform-bus devices
287  * The .data field is currently only used to store quirks
288  */
289 static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
290 static const struct of_device_id rhine_of_tbl[] = {
291 	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
292 	{ }	/* terminate list */
293 };
294 MODULE_DEVICE_TABLE(of, rhine_of_tbl);
295 
296 /* Offsets to the device registers. */
297 enum register_offsets {
298 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
299 	ChipCmd1=0x09, TQWake=0x0A,
300 	IntrStatus=0x0C, IntrEnable=0x0E,
301 	MulticastFilter0=0x10, MulticastFilter1=0x14,
302 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
303 	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
304 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
305 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
306 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
307 	StickyHW=0x83, IntrStatus2=0x84,
308 	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
309 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
310 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
311 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
312 };
313 
314 /* Bits in ConfigD */
315 enum backoff_bits {
316 	BackOptional=0x01, BackModify=0x02,
317 	BackCaptureEffect=0x04, BackRandom=0x08
318 };
319 
320 /* Bits in the TxConfig (TCR) register */
321 enum tcr_bits {
322 	TCR_PQEN=0x01,
323 	TCR_LB0=0x02,		/* loopback[0] */
324 	TCR_LB1=0x04,		/* loopback[1] */
325 	TCR_OFSET=0x08,
326 	TCR_RTGOPT=0x10,
327 	TCR_RTFT0=0x20,
328 	TCR_RTFT1=0x40,
329 	TCR_RTSF=0x80,
330 };
331 
332 /* Bits in the CamCon (CAMC) register */
333 enum camcon_bits {
334 	CAMC_CAMEN=0x01,
335 	CAMC_VCAMSL=0x02,
336 	CAMC_CAMWR=0x04,
337 	CAMC_CAMRD=0x08,
338 };
339 
340 /* Bits in the PCIBusConfig1 (BCR1) register */
341 enum bcr1_bits {
342 	BCR1_POT0=0x01,
343 	BCR1_POT1=0x02,
344 	BCR1_POT2=0x04,
345 	BCR1_CTFT0=0x08,
346 	BCR1_CTFT1=0x10,
347 	BCR1_CTSF=0x20,
348 	BCR1_TXQNOBK=0x40,	/* for VT6105 */
349 	BCR1_VIDFR=0x80,	/* for VT6105 */
350 	BCR1_MED0=0x40,		/* for VT6102 */
351 	BCR1_MED1=0x80,		/* for VT6102 */
352 };
353 
354 /* Registers we check to verify that MMIO and PIO accesses read the same values. */
355 static const int mmio_verify_registers[] = {
356 	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
357 	0
358 };
359 
360 /* Bits in the interrupt status/mask registers. */
361 enum intr_status_bits {
362 	IntrRxDone	= 0x0001,
363 	IntrTxDone	= 0x0002,
364 	IntrRxErr	= 0x0004,
365 	IntrTxError	= 0x0008,
366 	IntrRxEmpty	= 0x0020,
367 	IntrPCIErr	= 0x0040,
368 	IntrStatsMax	= 0x0080,
369 	IntrRxEarly	= 0x0100,
370 	IntrTxUnderrun	= 0x0210,
371 	IntrRxOverflow	= 0x0400,
372 	IntrRxDropped	= 0x0800,
373 	IntrRxNoBuf	= 0x1000,
374 	IntrTxAborted	= 0x2000,
375 	IntrLinkChange	= 0x4000,
376 	IntrRxWakeUp	= 0x8000,
377 	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
378 	IntrNormalSummary	= IntrRxDone | IntrTxDone,
379 	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
380 				  IntrTxUnderrun,
381 };
382 
383 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
384 enum wol_bits {
385 	WOLucast	= 0x10,
386 	WOLmagic	= 0x20,
387 	WOLbmcast	= 0x30,
388 	WOLlnkon	= 0x40,
389 	WOLlnkoff	= 0x80,
390 };
391 
392 /* The Rx and Tx buffer descriptors. */
393 struct rx_desc {
394 	__le32 rx_status;
395 	__le32 desc_length; /* Chain flag, Buffer/frame length */
396 	__le32 addr;
397 	__le32 next_desc;
398 };
399 struct tx_desc {
400 	__le32 tx_status;
401 	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
402 	__le32 addr;
403 	__le32 next_desc;
404 };
405 
406 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
407 #define TXDESC		0x00e08000
408 
409 enum rx_status_bits {
410 	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
411 };
412 
413 /* Bits in *_desc.*_status */
414 enum desc_status_bits {
415 	DescOwn=0x80000000
416 };
417 
418 /* Bits in *_desc.*_length */
419 enum desc_length_bits {
420 	DescTag=0x00010000
421 };
422 
423 /* Bits in ChipCmd. */
424 enum chip_cmd_bits {
425 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
426 	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
427 	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
428 	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
429 };
430 
431 struct rhine_stats {
432 	u64		packets;
433 	u64		bytes;
434 	struct u64_stats_sync syncp;
435 };
436 
437 struct rhine_private {
438 	/* Bit mask for configured VLAN ids */
439 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
440 
441 	/* Descriptor rings */
442 	struct rx_desc *rx_ring;
443 	struct tx_desc *tx_ring;
444 	dma_addr_t rx_ring_dma;
445 	dma_addr_t tx_ring_dma;
446 
447 	/* The addresses of receive-in-place skbuffs. */
448 	struct sk_buff *rx_skbuff[RX_RING_SIZE];
449 	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
450 
451 	/* The saved address of a sent-in-place packet/buffer, for later free(). */
452 	struct sk_buff *tx_skbuff[TX_RING_SIZE];
453 	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
454 
455 	/* Tx bounce buffers (Rhine-I only) */
456 	unsigned char *tx_buf[TX_RING_SIZE];
457 	unsigned char *tx_bufs;
458 	dma_addr_t tx_bufs_dma;
459 
460 	int irq;
461 	long pioaddr;
462 	struct net_device *dev;
463 	struct napi_struct napi;
464 	spinlock_t lock;
465 	struct mutex task_lock;
466 	bool task_enable;
467 	struct work_struct slow_event_task;
468 	struct work_struct reset_task;
469 
470 	u32 msg_enable;
471 
472 	/* Frequently used values: keep some adjacent for cache effect. */
473 	u32 quirks;
474 	unsigned int cur_rx;
475 	unsigned int cur_tx, dirty_tx;
476 	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
477 	struct rhine_stats rx_stats;
478 	struct rhine_stats tx_stats;
479 	u8 wolopts;
480 
481 	u8 tx_thresh, rx_thresh;
482 
483 	struct mii_if_info mii_if;
484 	void __iomem *base;
485 };
486 
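/* Read-modify-write helpers for byte/word/dword device registers. */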
487 #define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
488 #define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
489 #define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
490 
491 #define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
492 #define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
493 #define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
494 
495 #define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
496 #define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
497 #define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
498 
499 #define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
500 #define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
501 #define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
502 
503 
504 static int  mdio_read(struct net_device *dev, int phy_id, int location);
505 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
506 static int  rhine_open(struct net_device *dev);
507 static void rhine_reset_task(struct work_struct *work);
508 static void rhine_slow_event_task(struct work_struct *work);
509 static void rhine_tx_timeout(struct net_device *dev);
510 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
511 				  struct net_device *dev);
512 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
513 static void rhine_tx(struct net_device *dev);
514 static int rhine_rx(struct net_device *dev, int limit);
515 static void rhine_set_rx_mode(struct net_device *dev);
516 static void rhine_get_stats64(struct net_device *dev,
517 			      struct rtnl_link_stats64 *stats);
518 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
519 static const struct ethtool_ops netdev_ethtool_ops;
520 static int  rhine_close(struct net_device *dev);
521 static int rhine_vlan_rx_add_vid(struct net_device *dev,
522 				 __be16 proto, u16 vid);
523 static int rhine_vlan_rx_kill_vid(struct net_device *dev,
524 				  __be16 proto, u16 vid);
525 static void rhine_restart_tx(struct net_device *dev);
526 
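/* Poll a byte register until the masked bits go high (low == false) or
 * low (low == true), giving up after 1024 * 10us. */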
527 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
528 {
529 	void __iomem *ioaddr = rp->base;
530 	int i;
531 
532 	for (i = 0; i < 1024; i++) {
533 		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
534 
535 		if (low ^ has_mask_bits)
536 			break;
537 		udelay(10);
538 	}
539 	if (i > 64) {
540 		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
541 			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
542 	}
543 }
544 
545 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
546 {
547 	rhine_wait_bit(rp, reg, mask, false);
548 }
549 
550 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
551 {
552 	rhine_wait_bit(rp, reg, mask, true);
553 }
554 
555 static u32 rhine_get_events(struct rhine_private *rp)
556 {
557 	void __iomem *ioaddr = rp->base;
558 	u32 intr_status;
559 
560 	intr_status = ioread16(ioaddr + IntrStatus);
561 	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
562 	if (rp->quirks & rqStatusWBRace)
563 		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
564 	return intr_status;
565 }
566 
567 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
568 {
569 	void __iomem *ioaddr = rp->base;
570 
571 	if (rp->quirks & rqStatusWBRace)
572 		iowrite8(mask >> 16, ioaddr + IntrStatus2);
573 	iowrite16(mask, ioaddr + IntrStatus);
574 	mmiowb();
575 }
576 
577 /*
578  * Get power related registers into sane state.
579  * Notify user about past WOL event.
580  */
581 static void rhine_power_init(struct net_device *dev)
582 {
583 	struct rhine_private *rp = netdev_priv(dev);
584 	void __iomem *ioaddr = rp->base;
585 	u16 wolstat;
586 
587 	if (rp->quirks & rqWOL) {
588 		/* Make sure chip is in power state D0 */
589 		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
590 
591 		/* Disable "force PME-enable" */
592 		iowrite8(0x80, ioaddr + WOLcgClr);
593 
594 		/* Clear power-event config bits (WOL) */
595 		iowrite8(0xFF, ioaddr + WOLcrClr);
596 		/* More recent cards can manage two additional patterns */
597 		if (rp->quirks & rq6patterns)
598 			iowrite8(0x03, ioaddr + WOLcrClr1);
599 
600 		/* Save power-event status bits */
601 		wolstat = ioread8(ioaddr + PwrcsrSet);
602 		if (rp->quirks & rq6patterns)
603 			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
604 
605 		/* Clear power-event status bits */
606 		iowrite8(0xFF, ioaddr + PwrcsrClr);
607 		if (rp->quirks & rq6patterns)
608 			iowrite8(0x03, ioaddr + PwrcsrClr1);
609 
610 		if (wolstat) {
611 			char *reason;
612 			switch (wolstat) {
613 			case WOLmagic:
614 				reason = "Magic packet";
615 				break;
616 			case WOLlnkon:
617 				reason = "Link went up";
618 				break;
619 			case WOLlnkoff:
620 				reason = "Link went down";
621 				break;
622 			case WOLucast:
623 				reason = "Unicast packet";
624 				break;
625 			case WOLbmcast:
626 				reason = "Multicast/broadcast packet";
627 				break;
628 			default:
629 				reason = "Unknown";
630 			}
631 			netdev_info(dev, "Woke system up. Reason: %s\n",
632 				    reason);
633 		}
634 	}
635 }
636 
637 static void rhine_chip_reset(struct net_device *dev)
638 {
639 	struct rhine_private *rp = netdev_priv(dev);
640 	void __iomem *ioaddr = rp->base;
641 	u8 cmd1;
642 
643 	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
644 	IOSYNC;
645 
646 	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
647 		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
648 
649 		/* Force reset */
650 		if (rp->quirks & rqForceReset)
651 			iowrite8(0x40, ioaddr + MiscCmd);
652 
653 		/* Reset can take somewhat longer (rare) */
654 		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
655 	}
656 
657 	cmd1 = ioread8(ioaddr + ChipCmd1);
658 	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
659 		   "failed" : "succeeded");
660 }
661 
662 static void enable_mmio(long pioaddr, u32 quirks)
663 {
664 	int n;
665 
666 	if (quirks & rqNeedEnMMIO) {
667 		if (quirks & rqRhineI) {
668 			/* More recent docs say that this bit is reserved */
669 			n = inb(pioaddr + ConfigA) | 0x20;
670 			outb(n, pioaddr + ConfigA);
671 		} else {
672 			n = inb(pioaddr + ConfigD) | 0x80;
673 			outb(n, pioaddr + ConfigD);
674 		}
675 	}
676 }
677 
678 static inline int verify_mmio(struct device *hwdev,
679 			      long pioaddr,
680 			      void __iomem *ioaddr,
681 			      u32 quirks)
682 {
683 	if (quirks & rqNeedEnMMIO) {
684 		int i = 0;
685 
686 		/* Check that selected MMIO registers match the PIO ones */
687 		while (mmio_verify_registers[i]) {
688 			int reg = mmio_verify_registers[i++];
689 			unsigned char a = inb(pioaddr+reg);
690 			unsigned char b = readb(ioaddr+reg);
691 
692 			if (a != b) {
693 				dev_err(hwdev,
694 					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
695 					reg, a, b);
696 				return -EIO;
697 			}
698 		}
699 	}
700 	return 0;
701 }
702 
703 /*
704  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
705  * (plus 0x6C for Rhine-I/II)
706  */
707 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
708 {
709 	struct rhine_private *rp = netdev_priv(dev);
710 	void __iomem *ioaddr = rp->base;
711 	int i;
712 
713 	outb(0x20, pioaddr + MACRegEEcsr);
714 	for (i = 0; i < 1024; i++) {
715 		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
716 			break;
717 	}
718 	if (i > 512)
719 		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
720 
721 	/*
722 	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
723 	 * MMIO. If reloading EEPROM was done first this could be avoided, but
724 	 * it is not known if that still works with the "win98-reboot" problem.
725 	 */
726 	enable_mmio(pioaddr, rp->quirks);
727 
728 	/* Turn off EEPROM-controlled wake-up (magic packet) */
729 	if (rp->quirks & rqWOL)
730 		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
731 
732 }
733 
734 #ifdef CONFIG_NET_POLL_CONTROLLER
735 static void rhine_poll(struct net_device *dev)
736 {
737 	struct rhine_private *rp = netdev_priv(dev);
738 	const int irq = rp->irq;
739 
740 	disable_irq(irq);
741 	rhine_interrupt(irq, dev);
742 	enable_irq(irq);
743 }
744 #endif
745 
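/* Bump the Tx FIFO threshold one step (up to 0xe0); used after Tx underruns
 * and unspecified Tx errors. */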
746 static void rhine_kick_tx_threshold(struct rhine_private *rp)
747 {
748 	if (rp->tx_thresh < 0xe0) {
749 		void __iomem *ioaddr = rp->base;
750 
751 		rp->tx_thresh += 0x20;
752 		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
753 	}
754 }
755 
756 static void rhine_tx_err(struct rhine_private *rp, u32 status)
757 {
758 	struct net_device *dev = rp->dev;
759 
760 	if (status & IntrTxAborted) {
761 		netif_info(rp, tx_err, dev,
762 			   "Abort %08x, frame dropped\n", status);
763 	}
764 
765 	if (status & IntrTxUnderrun) {
766 		rhine_kick_tx_threshold(rp);
767 		netif_info(rp, tx_err, dev, "Transmitter underrun, "
768 			   "Tx threshold now %02x\n", rp->tx_thresh);
769 	}
770 
771 	if (status & IntrTxDescRace)
772 		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
773 
774 	if ((status & IntrTxError) &&
775 	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
776 		rhine_kick_tx_threshold(rp);
777 		netif_info(rp, tx_err, dev, "Unspecified error. "
778 			   "Tx threshold now %02x\n", rp->tx_thresh);
779 	}
780 
781 	rhine_restart_tx(dev);
782 }
783 
784 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
785 {
786 	void __iomem *ioaddr = rp->base;
787 	struct net_device_stats *stats = &rp->dev->stats;
788 
789 	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
790 	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
791 
792 	/*
793 	 * Clears the "tally counters" for CRC errors and missed frames(?).
794 	 * It has been reported that some chips need a write of 0 to clear
795 	 * these, for others the counters are set to 1 when written to and
796 	 * instead cleared when read. So we clear them both ways ...
797 	 */
798 	iowrite32(0, ioaddr + RxMissed);
799 	ioread16(ioaddr + RxCRCErrs);
800 	ioread16(ioaddr + RxMissed);
801 }
802 
803 #define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
804 				 IntrRxErr | \
805 				 IntrRxEmpty | \
806 				 IntrRxOverflow	| \
807 				 IntrRxDropped | \
808 				 IntrRxNoBuf | \
809 				 IntrRxWakeUp)
810 
811 #define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
812 				 IntrTxAborted | \
813 				 IntrTxUnderrun | \
814 				 IntrTxDescRace)
815 #define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
816 
817 #define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
818 				 RHINE_EVENT_NAPI_TX | \
819 				 IntrStatsMax)
820 #define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
821 #define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
822 
823 static int rhine_napipoll(struct napi_struct *napi, int budget)
824 {
825 	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
826 	struct net_device *dev = rp->dev;
827 	void __iomem *ioaddr = rp->base;
828 	u16 enable_mask = RHINE_EVENT & 0xffff;
829 	int work_done = 0;
830 	u32 status;
831 
832 	status = rhine_get_events(rp);
833 	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
834 
835 	if (status & RHINE_EVENT_NAPI_RX)
836 		work_done += rhine_rx(dev, budget);
837 
838 	if (status & RHINE_EVENT_NAPI_TX) {
839 		if (status & RHINE_EVENT_NAPI_TX_ERR) {
840 			/* Avoid scavenging before Tx engine turned off */
841 			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
842 			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
843 				netif_warn(rp, tx_err, dev, "Tx still on\n");
844 		}
845 
846 		rhine_tx(dev);
847 
848 		if (status & RHINE_EVENT_NAPI_TX_ERR)
849 			rhine_tx_err(rp, status);
850 	}
851 
852 	if (status & IntrStatsMax) {
853 		spin_lock(&rp->lock);
854 		rhine_update_rx_crc_and_missed_errord(rp);
855 		spin_unlock(&rp->lock);
856 	}
857 
858 	if (status & RHINE_EVENT_SLOW) {
859 		enable_mask &= ~RHINE_EVENT_SLOW;
860 		schedule_work(&rp->slow_event_task);
861 	}
862 
863 	if (work_done < budget) {
864 		napi_complete_done(napi, work_done);
865 		iowrite16(enable_mask, ioaddr + IntrEnable);
866 		mmiowb();
867 	}
868 	return work_done;
869 }
870 
871 static void rhine_hw_init(struct net_device *dev, long pioaddr)
872 {
873 	struct rhine_private *rp = netdev_priv(dev);
874 
875 	/* Reset the chip to erase previous misconfiguration. */
876 	rhine_chip_reset(dev);
877 
878 	/* Rhine-I needs extra time to recuperate before EEPROM reload */
879 	if (rp->quirks & rqRhineI)
880 		msleep(5);
881 
882 	/* Reload EEPROM controlled bytes cleared by soft reset */
883 	if (dev_is_pci(dev->dev.parent))
884 		rhine_reload_eeprom(pioaddr, dev);
885 }
886 
887 static const struct net_device_ops rhine_netdev_ops = {
888 	.ndo_open		 = rhine_open,
889 	.ndo_stop		 = rhine_close,
890 	.ndo_start_xmit		 = rhine_start_tx,
891 	.ndo_get_stats64	 = rhine_get_stats64,
892 	.ndo_set_rx_mode	 = rhine_set_rx_mode,
893 	.ndo_validate_addr	 = eth_validate_addr,
894 	.ndo_set_mac_address 	 = eth_mac_addr,
895 	.ndo_do_ioctl		 = netdev_ioctl,
896 	.ndo_tx_timeout 	 = rhine_tx_timeout,
897 	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
898 	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
899 #ifdef CONFIG_NET_POLL_CONTROLLER
900 	.ndo_poll_controller	 = rhine_poll,
901 #endif
902 };
903 
904 static int rhine_init_one_common(struct device *hwdev, u32 quirks,
905 				 long pioaddr, void __iomem *ioaddr, int irq)
906 {
907 	struct net_device *dev;
908 	struct rhine_private *rp;
909 	int i, rc, phy_id;
910 	const char *name;
911 
912 	/* this should always be supported */
913 	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
914 	if (rc) {
915 		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
916 		goto err_out;
917 	}
918 
919 	dev = alloc_etherdev(sizeof(struct rhine_private));
920 	if (!dev) {
921 		rc = -ENOMEM;
922 		goto err_out;
923 	}
924 	SET_NETDEV_DEV(dev, hwdev);
925 
926 	rp = netdev_priv(dev);
927 	rp->dev = dev;
928 	rp->quirks = quirks;
929 	rp->pioaddr = pioaddr;
930 	rp->base = ioaddr;
931 	rp->irq = irq;
932 	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
933 
934 	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
935 
936 	u64_stats_init(&rp->tx_stats.syncp);
937 	u64_stats_init(&rp->rx_stats.syncp);
938 
939 	/* Get chip registers into a sane state */
940 	rhine_power_init(dev);
941 	rhine_hw_init(dev, pioaddr);
942 
943 	for (i = 0; i < 6; i++)
944 		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
945 
946 	if (!is_valid_ether_addr(dev->dev_addr)) {
947 		/* Report it and use a random ethernet address instead */
948 		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
949 		eth_hw_addr_random(dev);
950 		netdev_info(dev, "Using random MAC address: %pM\n",
951 			    dev->dev_addr);
952 	}
953 
954 	/* For Rhine-I/II, phy_id is loaded from EEPROM */
955 	if (!phy_id)
956 		phy_id = ioread8(ioaddr + 0x6C);
957 
958 	spin_lock_init(&rp->lock);
959 	mutex_init(&rp->task_lock);
960 	INIT_WORK(&rp->reset_task, rhine_reset_task);
961 	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
962 
963 	rp->mii_if.dev = dev;
964 	rp->mii_if.mdio_read = mdio_read;
965 	rp->mii_if.mdio_write = mdio_write;
966 	rp->mii_if.phy_id_mask = 0x1f;
967 	rp->mii_if.reg_num_mask = 0x1f;
968 
969 	/* The chip-specific entries in the device structure. */
970 	dev->netdev_ops = &rhine_netdev_ops;
971 	dev->ethtool_ops = &netdev_ethtool_ops;
972 	dev->watchdog_timeo = TX_TIMEOUT;
973 
974 	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
975 
976 	if (rp->quirks & rqRhineI)
977 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
978 
979 	if (rp->quirks & rqMgmt)
980 		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
981 				 NETIF_F_HW_VLAN_CTAG_RX |
982 				 NETIF_F_HW_VLAN_CTAG_FILTER;
983 
984 	/* dev->name not defined before register_netdev()! */
985 	rc = register_netdev(dev);
986 	if (rc)
987 		goto err_out_free_netdev;
988 
989 	if (rp->quirks & rqRhineI)
990 		name = "Rhine";
991 	else if (rp->quirks & rqStatusWBRace)
992 		name = "Rhine II";
993 	else if (rp->quirks & rqMgmt)
994 		name = "Rhine III (Management Adapter)";
995 	else
996 		name = "Rhine III";
997 
998 	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
999 		    name, ioaddr, dev->dev_addr, rp->irq);
1000 
1001 	dev_set_drvdata(hwdev, dev);
1002 
1003 	{
1004 		u16 mii_cmd;
1005 		int mii_status = mdio_read(dev, phy_id, 1);
1006 		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1007 		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1008 		if (mii_status != 0xffff && mii_status != 0x0000) {
1009 			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1010 			netdev_info(dev,
1011 				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1012 				    phy_id,
1013 				    mii_status, rp->mii_if.advertising,
1014 				    mdio_read(dev, phy_id, 5));
1015 
1016 			/* set IFF_RUNNING */
1017 			if (mii_status & BMSR_LSTATUS)
1018 				netif_carrier_on(dev);
1019 			else
1020 				netif_carrier_off(dev);
1021 
1022 		}
1023 	}
1024 	rp->mii_if.phy_id = phy_id;
1025 	if (avoid_D3)
1026 		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1027 
1028 	return 0;
1029 
1030 err_out_free_netdev:
1031 	free_netdev(dev);
1032 err_out:
1033 	return rc;
1034 }
1035 
1036 static int rhine_init_one_pci(struct pci_dev *pdev,
1037 			      const struct pci_device_id *ent)
1038 {
1039 	struct device *hwdev = &pdev->dev;
1040 	int rc;
1041 	long pioaddr, memaddr;
1042 	void __iomem *ioaddr;
1043 	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1044 
1045 /* This driver was written to use PCI memory space. Some early versions
1046  * of the Rhine may only work correctly with I/O space accesses.
1047  * TODO: determine for which revisions this is true and assign the flag
1048  *	 in code as opposed to this Kconfig option (???)
1049  */
1050 #ifdef CONFIG_VIA_RHINE_MMIO
1051 	u32 quirks = rqNeedEnMMIO;
1052 #else
1053 	u32 quirks = 0;
1054 #endif
1055 
1056 /* when built into the kernel, we only print version if device is found */
1057 #ifndef MODULE
1058 	pr_info_once("%s\n", version);
1059 #endif
1060 
1061 	rc = pci_enable_device(pdev);
1062 	if (rc)
1063 		goto err_out;
1064 
1065 	if (pdev->revision < VTunknown0) {
1066 		quirks |= rqRhineI;
1067 	} else if (pdev->revision >= VT6102) {
1068 		quirks |= rqWOL | rqForceReset;
1069 		if (pdev->revision < VT6105) {
1070 			quirks |= rqStatusWBRace;
1071 		} else {
1072 			quirks |= rqIntPHY;
1073 			if (pdev->revision >= VT6105_B0)
1074 				quirks |= rq6patterns;
1075 			if (pdev->revision >= VT6105M)
1076 				quirks |= rqMgmt;
1077 		}
1078 	}
1079 
1080 	/* sanity check */
1081 	if ((pci_resource_len(pdev, 0) < io_size) ||
1082 	    (pci_resource_len(pdev, 1) < io_size)) {
1083 		rc = -EIO;
1084 		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1085 		goto err_out_pci_disable;
1086 	}
1087 
1088 	pioaddr = pci_resource_start(pdev, 0);
1089 	memaddr = pci_resource_start(pdev, 1);
1090 
1091 	pci_set_master(pdev);
1092 
1093 	rc = pci_request_regions(pdev, DRV_NAME);
1094 	if (rc)
1095 		goto err_out_pci_disable;
1096 
1097 	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1098 	if (!ioaddr) {
1099 		rc = -EIO;
1100 		dev_err(hwdev,
1101 			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1102 			dev_name(hwdev), io_size, memaddr);
1103 		goto err_out_free_res;
1104 	}
1105 
1106 	enable_mmio(pioaddr, quirks);
1107 
1108 	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1109 	if (rc)
1110 		goto err_out_unmap;
1111 
1112 	rc = rhine_init_one_common(&pdev->dev, quirks,
1113 				   pioaddr, ioaddr, pdev->irq);
1114 	if (!rc)
1115 		return 0;
1116 
1117 err_out_unmap:
1118 	pci_iounmap(pdev, ioaddr);
1119 err_out_free_res:
1120 	pci_release_regions(pdev);
1121 err_out_pci_disable:
1122 	pci_disable_device(pdev);
1123 err_out:
1124 	return rc;
1125 }
1126 
1127 static int rhine_init_one_platform(struct platform_device *pdev)
1128 {
1129 	const struct of_device_id *match;
1130 	const u32 *quirks;
1131 	int irq;
1132 	struct resource *res;
1133 	void __iomem *ioaddr;
1134 
1135 	match = of_match_device(rhine_of_tbl, &pdev->dev);
1136 	if (!match)
1137 		return -EINVAL;
1138 
1139 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1140 	ioaddr = devm_ioremap_resource(&pdev->dev, res);
1141 	if (IS_ERR(ioaddr))
1142 		return PTR_ERR(ioaddr);
1143 
1144 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1145 	if (!irq)
1146 		return -EINVAL;
1147 
1148 	quirks = match->data;
1149 	if (!quirks)
1150 		return -EINVAL;
1151 
1152 	return rhine_init_one_common(&pdev->dev, *quirks,
1153 				     (long)ioaddr, ioaddr, irq);
1154 }
1155 
1156 static int alloc_ring(struct net_device* dev)
1157 {
1158 	struct rhine_private *rp = netdev_priv(dev);
1159 	struct device *hwdev = dev->dev.parent;
1160 	void *ring;
1161 	dma_addr_t ring_dma;
1162 
1163 	ring = dma_alloc_coherent(hwdev,
1164 				  RX_RING_SIZE * sizeof(struct rx_desc) +
1165 				  TX_RING_SIZE * sizeof(struct tx_desc),
1166 				  &ring_dma,
1167 				  GFP_ATOMIC);
1168 	if (!ring) {
1169 		netdev_err(dev, "Could not allocate DMA memory\n");
1170 		return -ENOMEM;
1171 	}
1172 	if (rp->quirks & rqRhineI) {
1173 		rp->tx_bufs = dma_alloc_coherent(hwdev,
1174 						 PKT_BUF_SZ * TX_RING_SIZE,
1175 						 &rp->tx_bufs_dma,
1176 						 GFP_ATOMIC);
1177 		if (rp->tx_bufs == NULL) {
1178 			dma_free_coherent(hwdev,
1179 					  RX_RING_SIZE * sizeof(struct rx_desc) +
1180 					  TX_RING_SIZE * sizeof(struct tx_desc),
1181 					  ring, ring_dma);
1182 			return -ENOMEM;
1183 		}
1184 	}
1185 
1186 	rp->rx_ring = ring;
1187 	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1188 	rp->rx_ring_dma = ring_dma;
1189 	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1190 
1191 	return 0;
1192 }
1193 
1194 static void free_ring(struct net_device* dev)
1195 {
1196 	struct rhine_private *rp = netdev_priv(dev);
1197 	struct device *hwdev = dev->dev.parent;
1198 
1199 	dma_free_coherent(hwdev,
1200 			  RX_RING_SIZE * sizeof(struct rx_desc) +
1201 			  TX_RING_SIZE * sizeof(struct tx_desc),
1202 			  rp->rx_ring, rp->rx_ring_dma);
1203 	rp->tx_ring = NULL;
1204 
1205 	if (rp->tx_bufs)
1206 		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1207 				  rp->tx_bufs, rp->tx_bufs_dma);
1208 
1209 	rp->tx_bufs = NULL;
1210 
1211 }
1212 
1213 struct rhine_skb_dma {
1214 	struct sk_buff *skb;
1215 	dma_addr_t dma;
1216 };
1217 
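/* Allocate an Rx skb of rx_buf_sz bytes and map it for DMA from the device. */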
1218 static inline int rhine_skb_dma_init(struct net_device *dev,
1219 				     struct rhine_skb_dma *sd)
1220 {
1221 	struct rhine_private *rp = netdev_priv(dev);
1222 	struct device *hwdev = dev->dev.parent;
1223 	const int size = rp->rx_buf_sz;
1224 
1225 	sd->skb = netdev_alloc_skb(dev, size);
1226 	if (!sd->skb)
1227 		return -ENOMEM;
1228 
1229 	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1230 	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1231 		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1232 		dev_kfree_skb_any(sd->skb);
1233 		return -EIO;
1234 	}
1235 
1236 	return 0;
1237 }
1238 
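/* Return ownership of all Rx descriptors to the NIC and rewind cur_rx. */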
1239 static void rhine_reset_rbufs(struct rhine_private *rp)
1240 {
1241 	int i;
1242 
1243 	rp->cur_rx = 0;
1244 
1245 	for (i = 0; i < RX_RING_SIZE; i++)
1246 		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1247 }
1248 
1249 static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1250 					   struct rhine_skb_dma *sd, int entry)
1251 {
1252 	rp->rx_skbuff_dma[entry] = sd->dma;
1253 	rp->rx_skbuff[entry] = sd->skb;
1254 
1255 	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1256 	dma_wmb();
1257 }
1258 
1259 static void free_rbufs(struct net_device* dev);
1260 
1261 static int alloc_rbufs(struct net_device *dev)
1262 {
1263 	struct rhine_private *rp = netdev_priv(dev);
1264 	dma_addr_t next;
1265 	int rc, i;
1266 
1267 	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1268 	next = rp->rx_ring_dma;
1269 
1270 	/* Init the ring entries */
1271 	for (i = 0; i < RX_RING_SIZE; i++) {
1272 		rp->rx_ring[i].rx_status = 0;
1273 		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1274 		next += sizeof(struct rx_desc);
1275 		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1276 		rp->rx_skbuff[i] = NULL;
1277 	}
1278 	/* Mark the last entry as wrapping the ring. */
1279 	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1280 
1281 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1282 	for (i = 0; i < RX_RING_SIZE; i++) {
1283 		struct rhine_skb_dma sd;
1284 
1285 		rc = rhine_skb_dma_init(dev, &sd);
1286 		if (rc < 0) {
1287 			free_rbufs(dev);
1288 			goto out;
1289 		}
1290 
1291 		rhine_skb_dma_nic_store(rp, &sd, i);
1292 	}
1293 
1294 	rhine_reset_rbufs(rp);
1295 out:
1296 	return rc;
1297 }
1298 
1299 static void free_rbufs(struct net_device* dev)
1300 {
1301 	struct rhine_private *rp = netdev_priv(dev);
1302 	struct device *hwdev = dev->dev.parent;
1303 	int i;
1304 
1305 	/* Free all the skbuffs in the Rx queue. */
1306 	for (i = 0; i < RX_RING_SIZE; i++) {
1307 		rp->rx_ring[i].rx_status = 0;
1308 		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1309 		if (rp->rx_skbuff[i]) {
1310 			dma_unmap_single(hwdev,
1311 					 rp->rx_skbuff_dma[i],
1312 					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1313 			dev_kfree_skb(rp->rx_skbuff[i]);
1314 		}
1315 		rp->rx_skbuff[i] = NULL;
1316 	}
1317 }
1318 
1319 static void alloc_tbufs(struct net_device* dev)
1320 {
1321 	struct rhine_private *rp = netdev_priv(dev);
1322 	dma_addr_t next;
1323 	int i;
1324 
1325 	rp->dirty_tx = rp->cur_tx = 0;
1326 	next = rp->tx_ring_dma;
1327 	for (i = 0; i < TX_RING_SIZE; i++) {
1328 		rp->tx_skbuff[i] = NULL;
1329 		rp->tx_ring[i].tx_status = 0;
1330 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1331 		next += sizeof(struct tx_desc);
1332 		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1333 		if (rp->quirks & rqRhineI)
1334 			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1335 	}
1336 	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1337 
1338 	netdev_reset_queue(dev);
1339 }
1340 
1341 static void free_tbufs(struct net_device* dev)
1342 {
1343 	struct rhine_private *rp = netdev_priv(dev);
1344 	struct device *hwdev = dev->dev.parent;
1345 	int i;
1346 
1347 	for (i = 0; i < TX_RING_SIZE; i++) {
1348 		rp->tx_ring[i].tx_status = 0;
1349 		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1350 		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1351 		if (rp->tx_skbuff[i]) {
1352 			if (rp->tx_skbuff_dma[i]) {
1353 				dma_unmap_single(hwdev,
1354 						 rp->tx_skbuff_dma[i],
1355 						 rp->tx_skbuff[i]->len,
1356 						 DMA_TO_DEVICE);
1357 			}
1358 			dev_kfree_skb(rp->tx_skbuff[i]);
1359 		}
1360 		rp->tx_skbuff[i] = NULL;
1361 		rp->tx_buf[i] = NULL;
1362 	}
1363 }
1364 
1365 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1366 {
1367 	struct rhine_private *rp = netdev_priv(dev);
1368 	void __iomem *ioaddr = rp->base;
1369 
1370 	if (!rp->mii_if.force_media)
1371 		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1372 
1373 	if (rp->mii_if.full_duplex)
1374 	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1375 		   ioaddr + ChipCmd1);
1376 	else
1377 	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1378 		   ioaddr + ChipCmd1);
1379 
1380 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1381 		   rp->mii_if.force_media, netif_carrier_ok(dev));
1382 }
1383 
1384 /* Called after status of force_media possibly changed */
1385 static void rhine_set_carrier(struct mii_if_info *mii)
1386 {
1387 	struct net_device *dev = mii->dev;
1388 	struct rhine_private *rp = netdev_priv(dev);
1389 
1390 	if (mii->force_media) {
1391 		/* autoneg is off: Link is always assumed to be up */
1392 		if (!netif_carrier_ok(dev))
1393 			netif_carrier_on(dev);
1394 	}
1395 
1396 	rhine_check_media(dev, 0);
1397 
1398 	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1399 		   mii->force_media, netif_carrier_ok(dev));
1400 }
1401 
1402 /**
1403  * rhine_set_cam - set CAM multicast filters
1404  * @ioaddr: register block of this Rhine
1405  * @idx: multicast CAM index [0..MCAM_SIZE-1]
1406  * @addr: multicast address (6 bytes)
1407  *
1408  * Load addresses into multicast filters.
1409  */
1410 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1411 {
1412 	int i;
1413 
1414 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1415 	wmb();
1416 
1417 	/* Paranoid -- idx out of range should never happen */
1418 	idx &= (MCAM_SIZE - 1);
1419 
1420 	iowrite8((u8) idx, ioaddr + CamAddr);
1421 
1422 	for (i = 0; i < 6; i++, addr++)
1423 		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1424 	udelay(10);
1425 	wmb();
1426 
1427 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1428 	udelay(10);
1429 
1430 	iowrite8(0, ioaddr + CamCon);
1431 }
1432 
1433 /**
1434  * rhine_set_vlan_cam - set CAM VLAN filters
1435  * @ioaddr: register block of this Rhine
1436  * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1437  * @addr: VLAN ID (2 bytes)
1438  *
1439  * Load addresses into VLAN filters.
1440  */
1441 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1442 {
1443 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1444 	wmb();
1445 
1446 	/* Paranoid -- idx out of range should never happen */
1447 	idx &= (VCAM_SIZE - 1);
1448 
1449 	iowrite8((u8) idx, ioaddr + CamAddr);
1450 
1451 	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1452 	udelay(10);
1453 	wmb();
1454 
1455 	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1456 	udelay(10);
1457 
1458 	iowrite8(0, ioaddr + CamCon);
1459 }
1460 
1461 /**
1462  * rhine_set_cam_mask - set multicast CAM mask
1463  * @ioaddr: register block of this Rhine
1464  * @mask: multicast CAM mask
1465  *
1466  * Mask sets multicast filters active/inactive.
1467  */
1468 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1469 {
1470 	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1471 	wmb();
1472 
1473 	/* write mask */
1474 	iowrite32(mask, ioaddr + CamMask);
1475 
1476 	/* disable CAMEN */
1477 	iowrite8(0, ioaddr + CamCon);
1478 }
1479 
1480 /**
1481  * rhine_set_vlan_cam_mask - set VLAN CAM mask
1482  * @ioaddr: register block of this Rhine
1483  * @mask: VLAN CAM mask
1484  *
1485  * Mask sets VLAN filters active/inactive.
1486  */
1487 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1488 {
1489 	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1490 	wmb();
1491 
1492 	/* write mask */
1493 	iowrite32(mask, ioaddr + CamMask);
1494 
1495 	/* disable CAMEN */
1496 	iowrite8(0, ioaddr + CamCon);
1497 }
1498 
1499 /**
1500  * rhine_init_cam_filter - initialize CAM filters
1501  * @dev: network device
1502  *
1503  * Initialize (disable) hardware VLAN and multicast support on this
1504  * Rhine.
1505  */
1506 static void rhine_init_cam_filter(struct net_device *dev)
1507 {
1508 	struct rhine_private *rp = netdev_priv(dev);
1509 	void __iomem *ioaddr = rp->base;
1510 
1511 	/* Disable all CAMs */
1512 	rhine_set_vlan_cam_mask(ioaddr, 0);
1513 	rhine_set_cam_mask(ioaddr, 0);
1514 
1515 	/* disable hardware VLAN support */
1516 	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1517 	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1518 }
1519 
1520 /**
1521  * rhine_update_vcam - update VLAN CAM filters
1522  * @dev: network device
1523  *
1524  * Update VLAN CAM filters to match configuration change.
1525  */
1526 static void rhine_update_vcam(struct net_device *dev)
1527 {
1528 	struct rhine_private *rp = netdev_priv(dev);
1529 	void __iomem *ioaddr = rp->base;
1530 	u16 vid;
1531 	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1532 	unsigned int i = 0;
1533 
1534 	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1535 		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1536 		vCAMmask |= 1 << i;
1537 		if (++i >= VCAM_SIZE)
1538 			break;
1539 	}
1540 	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1541 }
1542 
1543 static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1544 {
1545 	struct rhine_private *rp = netdev_priv(dev);
1546 
1547 	spin_lock_bh(&rp->lock);
1548 	set_bit(vid, rp->active_vlans);
1549 	rhine_update_vcam(dev);
1550 	spin_unlock_bh(&rp->lock);
1551 	return 0;
1552 }
1553 
1554 static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1555 {
1556 	struct rhine_private *rp = netdev_priv(dev);
1557 
1558 	spin_lock_bh(&rp->lock);
1559 	clear_bit(vid, rp->active_vlans);
1560 	rhine_update_vcam(dev);
1561 	spin_unlock_bh(&rp->lock);
1562 	return 0;
1563 }
1564 
1565 static void init_registers(struct net_device *dev)
1566 {
1567 	struct rhine_private *rp = netdev_priv(dev);
1568 	void __iomem *ioaddr = rp->base;
1569 	int i;
1570 
1571 	for (i = 0; i < 6; i++)
1572 		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1573 
1574 	/* Initialize other registers. */
1575 	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1576 	/* Configure initial FIFO thresholds. */
1577 	iowrite8(0x20, ioaddr + TxConfig);
1578 	rp->tx_thresh = 0x20;
1579 	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1580 
1581 	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1582 	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1583 
1584 	rhine_set_rx_mode(dev);
1585 
1586 	if (rp->quirks & rqMgmt)
1587 		rhine_init_cam_filter(dev);
1588 
1589 	napi_enable(&rp->napi);
1590 
1591 	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1592 
1593 	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1594 	       ioaddr + ChipCmd);
1595 	rhine_check_media(dev, 1);
1596 }
1597 
1598 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1599 static void rhine_enable_linkmon(struct rhine_private *rp)
1600 {
1601 	void __iomem *ioaddr = rp->base;
1602 
1603 	iowrite8(0, ioaddr + MIICmd);
1604 	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1605 	iowrite8(0x80, ioaddr + MIICmd);
1606 
1607 	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1608 
1609 	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1610 }
1611 
1612 /* Disable MII link status auto-polling (required for MDIO access) */
1613 static void rhine_disable_linkmon(struct rhine_private *rp)
1614 {
1615 	void __iomem *ioaddr = rp->base;
1616 
1617 	iowrite8(0, ioaddr + MIICmd);
1618 
1619 	if (rp->quirks & rqRhineI) {
1620 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1621 
1622 		/* Can be called from ISR. Evil. */
1623 		mdelay(1);
1624 
1625 		/* 0x80 must be set immediately before turning it off */
1626 		iowrite8(0x80, ioaddr + MIICmd);
1627 
1628 		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1629 
1630 		/* Heh. Now clear 0x80 again. */
1631 		iowrite8(0, ioaddr + MIICmd);
1632 	}
1633 	else
1634 		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1635 }
1636 
1637 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1638 
1639 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1640 {
1641 	struct rhine_private *rp = netdev_priv(dev);
1642 	void __iomem *ioaddr = rp->base;
1643 	int result;
1644 
1645 	rhine_disable_linkmon(rp);
1646 
1647 	/* rhine_disable_linkmon already cleared MIICmd */
1648 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1649 	iowrite8(regnum, ioaddr + MIIRegAddr);
1650 	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1651 	rhine_wait_bit_low(rp, MIICmd, 0x40);
1652 	result = ioread16(ioaddr + MIIData);
1653 
1654 	rhine_enable_linkmon(rp);
1655 	return result;
1656 }
1657 
1658 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1659 {
1660 	struct rhine_private *rp = netdev_priv(dev);
1661 	void __iomem *ioaddr = rp->base;
1662 
1663 	rhine_disable_linkmon(rp);
1664 
1665 	/* rhine_disable_linkmon already cleared MIICmd */
1666 	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1667 	iowrite8(regnum, ioaddr + MIIRegAddr);
1668 	iowrite16(value, ioaddr + MIIData);
1669 	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1670 	rhine_wait_bit_low(rp, MIICmd, 0x20);
1671 
1672 	rhine_enable_linkmon(rp);
1673 }
1674 
1675 static void rhine_task_disable(struct rhine_private *rp)
1676 {
1677 	mutex_lock(&rp->task_lock);
1678 	rp->task_enable = false;
1679 	mutex_unlock(&rp->task_lock);
1680 
1681 	cancel_work_sync(&rp->slow_event_task);
1682 	cancel_work_sync(&rp->reset_task);
1683 }
1684 
1685 static void rhine_task_enable(struct rhine_private *rp)
1686 {
1687 	mutex_lock(&rp->task_lock);
1688 	rp->task_enable = true;
1689 	mutex_unlock(&rp->task_lock);
1690 }
1691 
1692 static int rhine_open(struct net_device *dev)
1693 {
1694 	struct rhine_private *rp = netdev_priv(dev);
1695 	void __iomem *ioaddr = rp->base;
1696 	int rc;
1697 
1698 	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1699 	if (rc)
1700 		goto out;
1701 
1702 	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1703 
1704 	rc = alloc_ring(dev);
1705 	if (rc < 0)
1706 		goto out_free_irq;
1707 
1708 	rc = alloc_rbufs(dev);
1709 	if (rc < 0)
1710 		goto out_free_ring;
1711 
1712 	alloc_tbufs(dev);
1713 	rhine_chip_reset(dev);
1714 	rhine_task_enable(rp);
1715 	init_registers(dev);
1716 
1717 	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1718 		  __func__, ioread16(ioaddr + ChipCmd),
1719 		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1720 
1721 	netif_start_queue(dev);
1722 
1723 out:
1724 	return rc;
1725 
1726 out_free_ring:
1727 	free_ring(dev);
1728 out_free_irq:
1729 	free_irq(rp->irq, dev);
1730 	goto out;
1731 }
1732 
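/*
 * Deferred reset, run from the workqueue (scheduled by rhine_tx_timeout).
 * task_lock and task_enable keep it from racing with interface shutdown;
 * it rebuilds both descriptor rings and reinitializes the chip.
 */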
static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	napi_disable(&rp->napi);
	netif_tx_disable(dev);
	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	alloc_tbufs(dev);

	rhine_reset_rbufs(rp);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock_bh(&rp->lock);

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	schedule_work(&rp->reset_task);
}

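/*
 * cur_tx and dirty_tx are free-running counters, so their difference is the
 * number of descriptors currently handed to the chip.  The queue is treated
 * as full once that difference reaches TX_QUEUE_LEN, which is kept a few
 * entries below TX_RING_SIZE.
 */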
static inline bool rhine_tx_queue_full(struct rhine_private *rp)
{
	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
}

static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
	     skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb_any(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			dma_map_single(hwdev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
			dev_kfree_skb_any(skb);
			rp->tx_skbuff_dma[entry] = 0;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(skb_vlan_tag_present(skb))) {
		u16 vid_pcp = skb_vlan_tag_get(skb);

		/* drop CFI/DEI bit, register needs VID and PCP */
		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	} else {
		rp->tx_ring[entry].tx_status = 0;
	}

	netdev_sent_queue(dev, skb->len);
	/* Make sure all descriptor fields are written before handing
	   ownership to the NIC. */
	dma_wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;
	/*
	 * Publish the new cur_tx before the NIC gets to see the transmit
	 * request, so the transmit completion handler cannot miss this
	 * descriptor.  Pairs with the smp_rmb() in rhine_tx().
	 */
	smp_wmb();

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (skb_vlan_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
	if (rhine_tx_queue_full(rp)) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (!rhine_tx_queue_full(rp))
			netif_wake_queue(dev);
	}

	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
		  rp->cur_tx - 1, entry);

	return NETDEV_TX_OK;
}

static void rhine_irq_disable(struct rhine_private *rp)
{
	iowrite16(0x0000, rp->base + IntrEnable);
	mmiowb();
}

/* The interrupt handler merely disables further interrupts and schedules
   NAPI; the NAPI poll routine then does the Rx work and cleans up after
   the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	u32 status;
	int handled = 0;

	status = rhine_get_events(rp);

	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);

	if (status & RHINE_EVENT) {
		handled = 1;

		rhine_irq_disable(rp);
		napi_schedule(&rp->napi);
	}

	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
			  status);
	}

	return IRQ_RETVAL(handled);
}

/* Tx completion. Logically part of interrupt handling, but run from the
   NAPI poll routine and isolated here for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int dirty_tx = rp->dirty_tx;
	unsigned int cur_tx;
	struct sk_buff *skb;

	/*
	 * The race with rhine_start_tx does not matter here as long as the
	 * driver enforces a value of cur_tx that was relevant when the
	 * packet was scheduled to the network chipset.
	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
	 */
	smp_rmb();
	cur_tx = rp->cur_tx;
	/* find and cleanup dirty tx descriptors */
	while (dirty_tx != cur_tx) {
		unsigned int entry = dirty_tx % TX_RING_SIZE;
		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);

		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
			  entry, txstatus);
		if (txstatus & DescOwn)
			break;
		skb = rp->tx_skbuff[entry];
		if (txstatus & 0x8000) {
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += skb->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			dma_unmap_single(hwdev,
					 rp->tx_skbuff_dma[entry],
					 skb->len,
					 DMA_TO_DEVICE);
		}
		bytes_compl += skb->len;
		pkts_compl++;
		dev_consume_skb_any(skb);
		rp->tx_skbuff[entry] = NULL;
		dirty_tx++;
	}

	rp->dirty_tx = dirty_tx;
	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
	smp_wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (rhine_tx_queue_full(rp))
			netif_stop_queue(dev);
	}
}

/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;

	return be16_to_cpup((__be16 *)trailer);
}

static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
				     int data_size)
{
	/* Read desc_length only after the ownership check in rhine_rx() */
	dma_rmb();
	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
		u16 vlan_tci;

		vlan_tci = rhine_get_vlan_tci(skb, data_size);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	}
}

/* Process up to limit frames from receive ring */
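/*
 * Short frames (below rx_copybreak) are copied into a freshly allocated,
 * IP-aligned skb and the original receive buffer stays in the ring; larger
 * frames are passed up directly and the ring slot gets a new buffer.
 */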
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int entry = rp->cur_rx % RX_RING_SIZE;
	int count;

	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_ring + entry;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
			  desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				netif_dbg(rp, rx_err, dev,
					  "%s() Rx error %08x\n", __func__,
					  desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak) {
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
				if (unlikely(!skb))
					goto drop;

				dma_sync_single_for_cpu(hwdev,
							rp->rx_skbuff_dma[entry],
							rp->rx_buf_sz,
							DMA_FROM_DEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);

				dma_sync_single_for_device(hwdev,
							   rp->rx_skbuff_dma[entry],
							   rp->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				struct rhine_skb_dma sd;

				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
					goto drop;

				skb = rp->rx_skbuff[entry];

				dma_unmap_single(hwdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 DMA_FROM_DEVICE);
				rhine_skb_dma_nic_store(rp, &sd, entry);
			}

			skb_put(skb, pkt_len);

			rhine_rx_vlan_tag(skb, desc, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			netif_receive_skb(skb);

			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
		}
give_descriptor_to_nic:
		desc->rx_status = cpu_to_le32(DescOwn);
		entry = (++rp->cur_rx) % RX_RING_SIZE;
	}

	return count;

drop:
	dev->stats.rx_dropped++;
	goto give_descriptor_to_nic;
}

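/*
 * Restart transmission after a Tx error: point the chip at the first
 * descriptor it has not completed yet and turn the transmitter back on,
 * unless further errors are already pending (in which case the ISR will
 * bring us back here anyway).
 */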
static void rhine_restart_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	} else {
		/* This should never happen */
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
			   intr_status);
	}
}

static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);
	struct net_device *dev = rp->dev;
	u32 intr_status;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	intr_status = rhine_get_events(rp);
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);

	if (intr_status & IntrPCIErr)
		netif_warn(rp, hw, dev, "PCI error\n");

	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

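/*
 * The software-maintained Rx/Tx packet and byte counters are read under
 * their u64_stats seqcount so the 64-bit values stay consistent on 32-bit
 * machines; the remaining counters come from dev->stats, refreshed from the
 * chip's error counters under rp->lock.
 */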
static void
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rhine_private *rp = netdev_priv(dev);
	unsigned int start;

	spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errord(rp);
	spin_unlock_bh(&rp->lock);

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
}

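/*
 * Receive filter setup.  Promiscuous mode and "too many multicast groups /
 * IFF_ALLMULTI" open the 64-bit multicast hash completely; management chips
 * (rqMgmt) program their multicast CAM entries instead, and everything else
 * falls back to the CRC-based hash filter.
 */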
static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->quirks & rqMgmt) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */

		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->quirks & rqMgmt) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct device *hwdev = dev->dev.parent;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);

	mutex_lock(&rp->task_lock);
	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return rp->msg_enable;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct rhine_private *rp = netdev_priv(dev);

	rp->msg_enable = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	mutex_lock(&rp->task_lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}

static void rhine_remove_one_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}

static int rhine_remove_one_platform(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	iounmap(rp->base);

	free_netdev(dev);

	return 0;
}

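/*
 * Arm Wake-on-LAN according to rp->wolopts before power-off and, unless the
 * avoid_D3 workaround is active, put the chip into D3hot.  Non-WOL adapters
 * need no shutdown handling at all.
 */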
static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));

	return 0;
}

static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	enable_mmio(rp->pioaddr, rp->quirks);
	rhine_power_init(dev);
	free_tbufs(dev);
	alloc_tbufs(dev);
	rhine_reset_rbufs(rp);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* !CONFIG_PM_SLEEP */

static struct pci_driver rhine_driver_pci = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one_pci,
	.remove		= rhine_remove_one_pci,
	.shutdown	= rhine_shutdown_pci,
	.driver.pm	= RHINE_PM_OPS,
};

static struct platform_driver rhine_driver_platform = {
	.probe		= rhine_init_one_platform,
	.remove		= rhine_remove_one_platform,
	.driver = {
		.name	= DRV_NAME,
		.of_match_table	= rhine_of_tbl,
		.pm		= RHINE_PM_OPS,
	}
};

static const struct dmi_system_id rhine_dmi_table[] __initconst = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

static int __init rhine_init(void)
{
	int ret_pci, ret_platform;

/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3) {
		pr_info("avoid_D3 set\n");
	}

	ret_pci = pci_register_driver(&rhine_driver_pci);
	ret_platform = platform_driver_register(&rhine_driver_platform);
	if ((ret_pci < 0) && (ret_platform < 0))
		return ret_pci;

	return 0;
}

static void __exit rhine_cleanup(void)
{
	platform_driver_unregister(&rhine_driver_platform);
	pci_unregister_driver(&rhine_driver_pci);
}

module_init(rhine_init);
module_exit(rhine_cleanup);