1 /*
2  *	This program is free software; you can redistribute it and/or
3  *	modify it under the terms of the GNU General Public License
4  *	as published by the Free Software Foundation; either version
5  *	2 of the License, or (at your option) any later version.
6  *
7  *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
8  *	(c) Copyright 2000, 2001 Red Hat Inc
9  *
10  *	Development of this driver was funded by Equiinet Ltd
11  *			http://www.equiinet.com
12  *
13  *	ChangeLog:
14  *
15  *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
16  *	unification of all the Z85x30 asynchronous drivers for real.
17  *
18  *	DMA now uses get_free_page as kmalloc buffers may span a 64K
19  *	boundary.
20  *
21  *	Modified for SMP safety and SMP locking by Alan Cox
22  *					<alan@lxorguk.ukuu.org.uk>
23  *
24  *	Performance
25  *
26  *	Z85230:
27  *	Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
28  *	X.25 is not unrealistic on all machines. DMA mode can in theory
29  *	handle T1/E1 quite nicely. In practice the limit seems to be about
30  *	512Kbit->1Mbit depending on motherboard.
31  *
32  *	Z85C30:
33  *	64K will take DMA, 9600 baud X.25 should be ok.
34  *
35  *	Z8530:
36  *	Synchronous mode without DMA is unlikely to get past about 2400 baud.
37  */
38 
39 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 
41 #include <linux/module.h>
42 #include <linux/kernel.h>
43 #include <linux/mm.h>
44 #include <linux/net.h>
45 #include <linux/skbuff.h>
46 #include <linux/netdevice.h>
47 #include <linux/if_arp.h>
48 #include <linux/delay.h>
49 #include <linux/hdlc.h>
50 #include <linux/ioport.h>
51 #include <linux/init.h>
52 #include <linux/gfp.h>
53 #include <asm/dma.h>
54 #include <asm/io.h>
55 #define RT_LOCK
56 #define RT_UNLOCK
57 #include <linux/spinlock.h>
58 
59 #include "z85230.h"
60 
61 
62 /**
63  *	z8530_read_port - Architecture specific interface function
64  *	@p: port to read
65  *
66  *	Provided port access methods. The Comtrol SV11 requires no delays
67  *	between accesses and uses PC I/O. Some drivers may need a 5uS delay
68  *
69  *	In the longer term this should become an architecture specific
70  *	section so that this can become a generic driver interface for all
71  *	platforms. For now we only handle PC I/O ports with or without the
72  *	dread 5uS sanity delay.
73  *
74  *	The caller must hold sufficient locks to avoid violating the horrible
75  *	5uS delay rule.
76  */
77 
78 static inline int z8530_read_port(unsigned long p)
79 {
80 	u8 r=inb(Z8530_PORT_OF(p));
81 	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
82 		udelay(5);
83 	return r;
84 }
85 
86 /**
87  *	z8530_write_port - Architecture specific interface function
88  *	@p: port to write
89  *	@d: value to write
90  *
91  *	Write a value to a port with delays if need be. Note that the
92  *	caller must hold locks to avoid read/writes from other contexts
93  *	violating the 5uS rule
94  *
95  *	In the longer term this should become an architecture specific
96  *	section so that this can become a generic driver interface for all
97  *	platforms. For now we only handle PC I/O ports with or without the
98  *	dread 5uS sanity delay.
99  */
100 
101 
102 static inline void z8530_write_port(unsigned long p, u8 d)
103 {
104 	outb(d,Z8530_PORT_OF(p));
105 	if(p&Z8530_PORT_SLEEP)
106 		udelay(5);
107 }
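
/*
 *	Usage sketch (assumption, not part of this driver): a card driver
 *	fills in the channel I/O addresses and may OR Z8530_PORT_SLEEP into
 *	them on boards that need the 5uS recovery time, e.g.
 *
 *		dev->chanA.ctrlio = iobase + 1;
 *		dev->chanA.dataio = iobase + 3;
 *
 *	or, for a slow board, (iobase + 1) | Z8530_PORT_SLEEP. The helpers
 *	above strip the flag with Z8530_PORT_OF() and only add the udelay(5)
 *	when it is set. "iobase" and the offsets are invented for the
 *	example.
 */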
108 
109 
110 
111 static void z8530_rx_done(struct z8530_channel *c);
112 static void z8530_tx_done(struct z8530_channel *c);
113 
114 
115 /**
116  *	read_zsreg - Read a register from a Z85230
117  *	@c: Z8530 channel to read from (2 per chip)
118  *	@reg: Register to read
119  *	FIXME: Use a spinlock.
120  *
121  *	Most of the Z8530 registers are indexed off the control registers.
122  *	A read is done by writing to the control register and reading the
123  *	register back.  The caller must hold the lock
124  */
125 
126 static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
127 {
128 	if(reg)
129 		z8530_write_port(c->ctrlio, reg);
130 	return z8530_read_port(c->ctrlio);
131 }
132 
133 /**
134  *	read_zsdata - Read the data port of a Z8530 channel
135  *	@c: The Z8530 channel to read the data port from
136  *
137  *	The data port provides fast access to some things. We still
138  *	have all the 5uS delays to worry about.
139  */
140 
141 static inline u8 read_zsdata(struct z8530_channel *c)
142 {
143 	u8 r;
144 	r=z8530_read_port(c->dataio);
145 	return r;
146 }
147 
148 /**
149  *	write_zsreg - Write to a Z8530 channel register
150  *	@c: The Z8530 channel
151  *	@reg: Register number
152  *	@val: Value to write
153  *
154  *	Write a value to an indexed register. The caller must hold the lock
155  *	to honour the irritating delay rules. We know about register 0
156  *	being fast to access.
157  *
158  *      Assumes c->lock is held.
159  */
160 static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
161 {
162 	if(reg)
163 		z8530_write_port(c->ctrlio, reg);
164 	z8530_write_port(c->ctrlio, val);
165 
166 }
167 
168 /**
169  *	write_zsctrl - Write to a Z8530 control register
170  *	@c: The Z8530 channel
171  *	@val: Value to write
172  *
173  *	Write directly to the control register on the Z8530
174  */
175 
176 static inline void write_zsctrl(struct z8530_channel *c, u8 val)
177 {
178 	z8530_write_port(c->ctrlio, val);
179 }
180 
181 /**
182  *	write_zsdata - Write to a Z8530 data register
183  *	@c: The Z8530 channel
184  *	@val: Value to write
185  *
186  *	Write directly to the data register on the Z8530
187  */
188 
189 
190 static inline void write_zsdata(struct z8530_channel *c, u8 val)
191 {
192 	z8530_write_port(c->dataio, val);
193 }
194 
195 /*
196  *	Register loading parameters for a dead port
197  */
198 
199 u8 z8530_dead_port[]=
200 {
201 	255
202 };
203 
204 EXPORT_SYMBOL(z8530_dead_port);
205 
206 /*
207  *	Register loading parameters for currently supported circuit types
208  */
209 
210 
211 /*
212  *	Data clocked by telco end. This is the correct data for the UK
213  *	"kilostream" service, and most other similar services.
214  */
215 
216 u8 z8530_hdlc_kilostream[]=
217 {
218 	4,	SYNC_ENAB|SDLC|X1CLK,
219 	2,	0,	/* No vector */
220 	1,	0,
221 	3,	ENT_HM|RxCRC_ENAB|Rx8,
222 	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
223 	9,	0,		/* Disable interrupts */
224 	6,	0xFF,
225 	7,	FLAG,
226 	10,	ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
227 	11,	TCTRxCP,
228 	14,	DISDPLL,
229 	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
230 	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
231 	9,	NV|MIE|NORESET,
232 	255
233 };
234 
235 EXPORT_SYMBOL(z8530_hdlc_kilostream);
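
/*
 *	These tables are flat lists of (register, value) pairs terminated
 *	by 255. A board driver typically feeds one to z8530_channel_load()
 *	when it brings the link up, for example (sketch only, "sv" is an
 *	invented card structure):
 *
 *		z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
 */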
236 
237 /*
238  *	As above but for enhanced chips.
239  */
240 
241 u8 z8530_hdlc_kilostream_85230[]=
242 {
243 	4,	SYNC_ENAB|SDLC|X1CLK,
244 	2,	0,	/* No vector */
245 	1,	0,
246 	3,	ENT_HM|RxCRC_ENAB|Rx8,
247 	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
248 	9,	0,		/* Disable interrupts */
249 	6,	0xFF,
250 	7,	FLAG,
251 	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
252 	11,	TCTRxCP,
253 	14,	DISDPLL,
254 	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
255 	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
256 	9,	NV|MIE|NORESET,
257 	23,	3,		/* Extended mode AUTO TX and EOM*/
258 
259 	255
260 };
261 
262 EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
263 
264 /**
265  *	z8530_flush_fifo - Flush on chip RX FIFO
266  *	@c: Channel to flush
267  *
268  *	Flush the receive FIFO. There is no specific option for this, we
269  *	blindly read bytes and discard them. Reading when there is no data
270  *	is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
271  *
272  *	All locking is handled for the caller. On return data may still be
273  *	present if it arrived during the flush.
274  */
275 
276 static void z8530_flush_fifo(struct z8530_channel *c)
277 {
278 	read_zsreg(c, R1);
279 	read_zsreg(c, R1);
280 	read_zsreg(c, R1);
281 	read_zsreg(c, R1);
282 	if(c->dev->type==Z85230)
283 	{
284 		read_zsreg(c, R1);
285 		read_zsreg(c, R1);
286 		read_zsreg(c, R1);
287 		read_zsreg(c, R1);
288 	}
289 }
290 
291 /**
292  *	z8530_rtsdtr - Control the outgoing DTR/RTS lines
293  *	@c: The Z8530 channel to control
294  *	@set: 1 to set, 0 to clear
295  *
296  *	Sets or clears DTR/RTS on the requested line. All locking is handled
297  *	by the caller. For now we assume all boards use the actual RTS/DTR
298  *	on the chip. Apparently one or two don't. We'll scream about them
299  *	later.
300  */
301 
302 static void z8530_rtsdtr(struct z8530_channel *c, int set)
303 {
304 	if (set)
305 		c->regs[5] |= (RTS | DTR);
306 	else
307 		c->regs[5] &= ~(RTS | DTR);
308 	write_zsreg(c, R5, c->regs[5]);
309 }
310 
311 /**
312  *	z8530_rx - Handle a PIO receive event
313  *	@c: Z8530 channel to process
314  *
315  *	Receive handler for receiving in PIO mode. This is much like the
316  *	async one but not quite the same or as complex
317  *
318  *	Note: It's intended that this handler can easily be separated from
319  *	the main code to run realtime. That'll be needed for some machines
320  *	(eg to ever clock 64kbits on a sparc ;)).
321  *
322  *	The RT_LOCK macros don't do anything now. Keep the code covered
323  *	by them as short as possible in all circumstances - clocks cost
324  *	baud. The interrupt handler is assumed to be atomic w.r.t. to
325  *	other code - this is true in the RT case too.
326  *
327  *	We only cover the sync cases for this. If you want 2Mbit async
328  *	do it yourself but consider medical assistance first. This non DMA
329  *	synchronous mode is portable code. The DMA mode assumes PCI like
330  *	ISA DMA
331  *
332  *	Called with the device lock held
333  */
334 
335 static void z8530_rx(struct z8530_channel *c)
336 {
337 	u8 ch,stat;
338 
339 	while(1)
340 	{
341 		/* FIFO empty ? */
342 		if(!(read_zsreg(c, R0)&1))
343 			break;
344 		ch=read_zsdata(c);
345 		stat=read_zsreg(c, R1);
346 
347 		/*
348 		 *	Overrun ?
349 		 */
350 		if(c->count < c->max)
351 		{
352 			*c->dptr++=ch;
353 			c->count++;
354 		}
355 
356 		if(stat&END_FR)
357 		{
358 
359 			/*
360 			 *	Error ?
361 			 */
362 			if(stat&(Rx_OVR|CRC_ERR))
363 			{
364 				/* Rewind the buffer and return */
365 				if(c->skb)
366 					c->dptr=c->skb->data;
367 				c->count=0;
368 				if(stat&Rx_OVR)
369 				{
370 					pr_warn("%s: overrun\n", c->dev->name);
371 					c->rx_overrun++;
372 				}
373 				if(stat&CRC_ERR)
374 				{
375 					c->rx_crc_err++;
376 					/* printk("crc error\n"); */
377 				}
378 				/* Shove the frame upstream */
379 			}
380 			else
381 			{
382 				/*
383 				 *	Drop the lock for RX processing, or
384 		 		 *	there are deadlocks
385 		 		 */
386 				z8530_rx_done(c);
387 				write_zsctrl(c, RES_Rx_CRC);
388 			}
389 		}
390 	}
391 	/*
392 	 *	Clear irq
393 	 */
394 	write_zsctrl(c, ERR_RES);
395 	write_zsctrl(c, RES_H_IUS);
396 }
397 
398 
399 /**
400  *	z8530_tx - Handle a PIO transmit event
401  *	@c: Z8530 channel to process
402  *
403  *	Z8530 transmit interrupt handler for the PIO mode. The basic
404  *	idea is to attempt to keep the FIFO fed. We fill in as many bytes
405  *	as possible; otherwise it's quite possible that we won't keep up
406  *	with the data rate.
407  */
408 
409 static void z8530_tx(struct z8530_channel *c)
410 {
411 	while(c->txcount) {
412 		/* FIFO full ? */
413 		if(!(read_zsreg(c, R0)&4))
414 			return;
415 		c->txcount--;
416 		/*
417 		 *	Shovel out the byte
418 		 */
419 		write_zsreg(c, R8, *c->tx_ptr++);
420 		write_zsctrl(c, RES_H_IUS);
421 		/* We are about to underflow */
422 		if(c->txcount==0)
423 		{
424 			write_zsctrl(c, RES_EOM_L);
425 			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
426 		}
427 	}
428 
429 
430 	/*
431 	 *	End of frame TX - fire another one
432 	 */
433 
434 	write_zsctrl(c, RES_Tx_P);
435 
436 	z8530_tx_done(c);
437 	write_zsctrl(c, RES_H_IUS);
438 }
439 
440 /**
441  *	z8530_status - Handle a PIO status exception
442  *	@chan: Z8530 channel to process
443  *
444  *	A status event occurred in PIO synchronous mode. There are several
445  *	reasons the chip will bother us here. A transmit underrun means we
446  *	failed to feed the chip fast enough and just broke a packet. A DCD
447  *	change is a line up or down.
448  */
449 
450 static void z8530_status(struct z8530_channel *chan)
451 {
452 	u8 status, altered;
453 
454 	status = read_zsreg(chan, R0);
455 	altered = chan->status ^ status;
456 
457 	chan->status = status;
458 
459 	if (status & TxEOM) {
460 /*		printk("%s: Tx underrun.\n", chan->dev->name); */
461 		chan->netdevice->stats.tx_fifo_errors++;
462 		write_zsctrl(chan, ERR_RES);
463 		z8530_tx_done(chan);
464 	}
465 
466 	if (altered & chan->dcdcheck)
467 	{
468 		if (status & chan->dcdcheck) {
469 			pr_info("%s: DCD raised\n", chan->dev->name);
470 			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
471 			if (chan->netdevice)
472 				netif_carrier_on(chan->netdevice);
473 		} else {
474 			pr_info("%s: DCD lost\n", chan->dev->name);
475 			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
476 			z8530_flush_fifo(chan);
477 			if (chan->netdevice)
478 				netif_carrier_off(chan->netdevice);
479 		}
480 
481 	}
482 	write_zsctrl(chan, RES_EXT_INT);
483 	write_zsctrl(chan, RES_H_IUS);
484 }
485 
486 struct z8530_irqhandler z8530_sync = {
487 	.rx = z8530_rx,
488 	.tx = z8530_tx,
489 	.status = z8530_status,
490 };
491 
492 EXPORT_SYMBOL(z8530_sync);
493 
494 /**
495  *	z8530_dma_rx - Handle a DMA RX event
496  *	@chan: Channel to handle
497  *
498  *	Non bus mastering DMA interfaces for the Z8x30 devices. This
499  *	is really pretty PC specific. The DMA mode means that most receive
500  *	events are handled by the DMA hardware. We get a kick here only if
501  *	a frame ended.
502  */
503 
504 static void z8530_dma_rx(struct z8530_channel *chan)
505 {
506 	if(chan->rxdma_on)
507 	{
508 		/* Special condition check only */
509 		u8 status;
510 
511 		read_zsreg(chan, R7);
512 		read_zsreg(chan, R6);
513 
514 		status=read_zsreg(chan, R1);
515 
516 		if(status&END_FR)
517 		{
518 			z8530_rx_done(chan);	/* Fire up the next one */
519 		}
520 		write_zsctrl(chan, ERR_RES);
521 		write_zsctrl(chan, RES_H_IUS);
522 	}
523 	else
524 	{
525 		/* DMA is off right now, drain the slow way */
526 		z8530_rx(chan);
527 	}
528 }
529 
530 /**
531  *	z8530_dma_tx - Handle a DMA TX event
532  *	@chan:	The Z8530 channel to handle
533  *
534  *	We have received an interrupt while doing DMA transmissions. It
535  *	shouldn't happen. Scream loudly if it does.
536  */
537 
538 static void z8530_dma_tx(struct z8530_channel *chan)
539 {
540 	if(!chan->dma_tx)
541 	{
542 		pr_warn("Hey who turned the DMA off?\n");
543 		z8530_tx(chan);
544 		return;
545 	}
546 	/* This shouldn't occur in DMA mode */
547 	pr_err("DMA tx - bogus event!\n");
548 	z8530_tx(chan);
549 }
550 
551 /**
552  *	z8530_dma_status - Handle a DMA status exception
553  *	@chan: Z8530 channel to process
554  *
555  *	A status event occurred on the Z8530. We receive these for two reasons
556  *	when in DMA mode. Firstly if we finished a packet transfer we get one
557  *	and kick the next packet out. Secondly we may see a DCD change.
558  *
559  */
560 
561 static void z8530_dma_status(struct z8530_channel *chan)
562 {
563 	u8 status, altered;
564 
565 	status=read_zsreg(chan, R0);
566 	altered=chan->status^status;
567 
568 	chan->status=status;
569 
570 
571 	if(chan->dma_tx)
572 	{
573 		if(status&TxEOM)
574 		{
575 			unsigned long flags;
576 
577 			flags=claim_dma_lock();
578 			disable_dma(chan->txdma);
579 			clear_dma_ff(chan->txdma);
580 			chan->txdma_on=0;
581 			release_dma_lock(flags);
582 			z8530_tx_done(chan);
583 		}
584 	}
585 
586 	if (altered & chan->dcdcheck)
587 	{
588 		if (status & chan->dcdcheck) {
589 			pr_info("%s: DCD raised\n", chan->dev->name);
590 			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
591 			if (chan->netdevice)
592 				netif_carrier_on(chan->netdevice);
593 		} else {
594 			pr_info("%s: DCD lost\n", chan->dev->name);
595 			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
596 			z8530_flush_fifo(chan);
597 			if (chan->netdevice)
598 				netif_carrier_off(chan->netdevice);
599 		}
600 	}
601 
602 	write_zsctrl(chan, RES_EXT_INT);
603 	write_zsctrl(chan, RES_H_IUS);
604 }
605 
606 static struct z8530_irqhandler z8530_dma_sync = {
607 	.rx = z8530_dma_rx,
608 	.tx = z8530_dma_tx,
609 	.status = z8530_dma_status,
610 };
611 
612 static struct z8530_irqhandler z8530_txdma_sync = {
613 	.rx = z8530_rx,
614 	.tx = z8530_dma_tx,
615 	.status = z8530_dma_status,
616 };
617 
618 /**
619  *	z8530_rx_clear - Handle RX events from a stopped chip
620  *	@c: Z8530 channel to shut up
621  *
622  *	Receive interrupt vectors for a Z8530 that is in 'parked' mode.
623  *	For machines with PCI Z85x30 cards, or level triggered interrupts
624  *	(eg the MacII) we must clear the interrupt cause or die.
625  */
626 
627 
628 static void z8530_rx_clear(struct z8530_channel *c)
629 {
630 	/*
631 	 *	Data and status bytes
632 	 */
633 	u8 stat;
634 
635 	read_zsdata(c);
636 	stat=read_zsreg(c, R1);
637 
638 	if(stat&END_FR)
639 		write_zsctrl(c, RES_Rx_CRC);
640 	/*
641 	 *	Clear irq
642 	 */
643 	write_zsctrl(c, ERR_RES);
644 	write_zsctrl(c, RES_H_IUS);
645 }
646 
647 /**
648  *	z8530_tx_clear - Handle TX events from a stopped chip
649  *	@c: Z8530 channel to shut up
650  *
651  *	Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
652  *	For machines with PCI Z85x30 cards, or level triggered interrupts
653  *	(eg the MacII) we must clear the interrupt cause or die.
654  */
655 
656 static void z8530_tx_clear(struct z8530_channel *c)
657 {
658 	write_zsctrl(c, RES_Tx_P);
659 	write_zsctrl(c, RES_H_IUS);
660 }
661 
662 /**
663  *	z8530_status_clear - Handle status events from a stopped chip
664  *	@chan: Z8530 channel to shut up
665  *
666  *	Status interrupt vectors for a Z8530 that is in 'parked' mode.
667  *	For machines with PCI Z85x30 cards, or level triggered interrupts
668  *	(eg the MacII) we must clear the interrupt cause or die.
669  */
670 
671 static void z8530_status_clear(struct z8530_channel *chan)
672 {
673 	u8 status=read_zsreg(chan, R0);
674 	if(status&TxEOM)
675 		write_zsctrl(chan, ERR_RES);
676 	write_zsctrl(chan, RES_EXT_INT);
677 	write_zsctrl(chan, RES_H_IUS);
678 }
679 
680 struct z8530_irqhandler z8530_nop = {
681 	.rx = z8530_rx_clear,
682 	.tx = z8530_tx_clear,
683 	.status = z8530_status_clear,
684 };
685 
686 
687 EXPORT_SYMBOL(z8530_nop);
688 
689 /**
690  *	z8530_interrupt - Handle an interrupt from a Z8530
691  *	@irq: 	Interrupt number
692  *	@dev_id: The Z8530 device that is interrupting.
693  *
694  *	A Z85[2]30 device has stuck its hand in the air for attention.
695  *	We scan both the channels on the chip for events and then call
696  *	the channel specific call backs for each channel that has events.
697  *	We have to use callback functions because the two channels can be
698  *	in different modes.
699  *
700  *	Locking is done for the handlers. Note that locking is done
701  *	at the chip level (the 5uS delay issue is per chip not per
702  *	channel). c->lock for both channels points to dev->lock
703  */
704 
705 irqreturn_t z8530_interrupt(int irq, void *dev_id)
706 {
707 	struct z8530_dev *dev=dev_id;
708 	u8 uninitialized_var(intr);
709 	static volatile int locker=0;
710 	int work=0;
711 	struct z8530_irqhandler *irqs;
712 
713 	if(locker)
714 	{
715 		pr_err("IRQ re-enter\n");
716 		return IRQ_NONE;
717 	}
718 	locker=1;
719 
720 	spin_lock(&dev->lock);
721 
722 	while(++work<5000)
723 	{
724 
725 		intr = read_zsreg(&dev->chanA, R3);
726 		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
727 			break;
728 
729 		/* This holds the IRQ status. On the 8530 you must read it from chan
730 		   A even though it applies to the whole chip */
731 
732 		/* Now walk the chip and see what it is wanting - it may be
733 		   an IRQ for someone else remember */
734 
735 		irqs=dev->chanA.irqs;
736 
737 		if(intr & (CHARxIP|CHATxIP|CHAEXT))
738 		{
739 			if(intr&CHARxIP)
740 				irqs->rx(&dev->chanA);
741 			if(intr&CHATxIP)
742 				irqs->tx(&dev->chanA);
743 			if(intr&CHAEXT)
744 				irqs->status(&dev->chanA);
745 		}
746 
747 		irqs=dev->chanB.irqs;
748 
749 		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
750 		{
751 			if(intr&CHBRxIP)
752 				irqs->rx(&dev->chanB);
753 			if(intr&CHBTxIP)
754 				irqs->tx(&dev->chanB);
755 			if(intr&CHBEXT)
756 				irqs->status(&dev->chanB);
757 		}
758 	}
759 	spin_unlock(&dev->lock);
760 	if(work==5000)
761 		pr_err("%s: interrupt jammed - abort(0x%X)!\n",
762 		       dev->name, intr);
763 	/* Ok all done */
764 	locker=0;
765 	return IRQ_HANDLED;
766 }
767 
768 EXPORT_SYMBOL(z8530_interrupt);
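
/*
 *	Hook-up sketch (illustrative only, names invented): a card driver
 *	registers this handler with the struct z8530_dev as the cookie so
 *	both channels can be walked:
 *
 *		if (request_irq(dev->irq, z8530_interrupt, 0,
 *				"z85230-card", dev))
 *			goto fail;
 *
 *	Boards on a shared line would pass IRQF_SHARED instead of 0.
 */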
769 
770 static const u8 reg_init[16]=
771 {
772 	0,0,0,0,
773 	0,0,0,0,
774 	0,0,0,0,
775 	0x55,0,0,0
776 };
777 
778 
779 /**
780  *	z8530_sync_open - Open a Z8530 channel for PIO
781  *	@dev:	The network interface we are using
782  *	@c:	The Z8530 channel to open in synchronous PIO mode
783  *
784  *	Switch a Z8530 into synchronous mode without DMA assist. We
785  *	raise the RTS/DTR and commence network operation.
786  */
787 
788 int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
789 {
790 	unsigned long flags;
791 
792 	spin_lock_irqsave(c->lock, flags);
793 
794 	c->sync = 1;
795 	c->mtu = dev->mtu+64;
796 	c->count = 0;
797 	c->skb = NULL;
798 	c->skb2 = NULL;
799 	c->irqs = &z8530_sync;
800 
801 	/* This loads the double buffer up */
802 	z8530_rx_done(c);	/* Load the frame ring */
803 	z8530_rx_done(c);	/* Load the backup frame */
804 	z8530_rtsdtr(c,1);
805 	c->dma_tx = 0;
806 	c->regs[R1]|=TxINT_ENAB;
807 	write_zsreg(c, R1, c->regs[R1]);
808 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
809 
810 	spin_unlock_irqrestore(c->lock, flags);
811 	return 0;
812 }
813 
814 
815 EXPORT_SYMBOL(z8530_sync_open);
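
/*
 *	Typical PIO bring-up from a card driver (sketch, invented names):
 *
 *		z8530_sync_open(netdev, &dev->chanA);
 *		z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
 *		netif_start_queue(netdev);
 *
 *	The matching shutdown path calls z8530_sync_close() before the
 *	IRQ and I/O region are released.
 */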
816 
817 /**
818  *	z8530_sync_close - Close a PIO Z8530 channel
819  *	@dev: Network device to close
820  *	@c: Z8530 channel to disassociate and move to idle
821  *
822  *	Close down a Z8530 interface and switch its interrupt handlers
823  *	to discard future events.
824  */
825 
826 int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
827 {
828 	u8 chk;
829 	unsigned long flags;
830 
831 	spin_lock_irqsave(c->lock, flags);
832 	c->irqs = &z8530_nop;
833 	c->max = 0;
834 	c->sync = 0;
835 
836 	chk=read_zsreg(c,R0);
837 	write_zsreg(c, R3, c->regs[R3]);
838 	z8530_rtsdtr(c,0);
839 
840 	spin_unlock_irqrestore(c->lock, flags);
841 	return 0;
842 }
843 
844 EXPORT_SYMBOL(z8530_sync_close);
845 
846 /**
847  *	z8530_sync_dma_open - Open a Z8530 for DMA I/O
848  *	@dev: The network device to attach
849  *	@c: The Z8530 channel to configure in sync DMA mode.
850  *
851  *	Set up a Z85x30 device for synchronous DMA in both directions. Two
852  *	ISA DMA channels must be available for this to work. We assume ISA
853  *	DMA driven I/O and PC limits on access.
854  */
855 
856 int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
857 {
858 	unsigned long cflags, dflags;
859 
860 	c->sync = 1;
861 	c->mtu = dev->mtu+64;
862 	c->count = 0;
863 	c->skb = NULL;
864 	c->skb2 = NULL;
865 	/*
866 	 *	Load the DMA interfaces up
867 	 */
868 	c->rxdma_on = 0;
869 	c->txdma_on = 0;
870 
871 	/*
872 	 *	Allocate the DMA flip buffers. Limit by page size.
873 	 *	Everyone runs 1500 mtu or less on wan links so this
874 	 *	should be fine.
875 	 */
876 
877 	if(c->mtu  > PAGE_SIZE/2)
878 		return -EMSGSIZE;
879 
880 	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
881 	if(c->rx_buf[0]==NULL)
882 		return -ENOBUFS;
883 	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
884 
885 	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
886 	if(c->tx_dma_buf[0]==NULL)
887 	{
888 		free_page((unsigned long)c->rx_buf[0]);
889 		c->rx_buf[0]=NULL;
890 		return -ENOBUFS;
891 	}
892 	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
893 
894 	c->tx_dma_used=0;
895 	c->dma_tx = 1;
896 	c->dma_num=0;
897 	c->dma_ready=1;
898 
899 	/*
900 	 *	Enable DMA control mode
901 	 */
902 
903 	spin_lock_irqsave(c->lock, cflags);
904 
905 	/*
906 	 *	TX DMA via DIR/REQ
907 	 */
908 
909 	c->regs[R14]|= DTRREQ;
910 	write_zsreg(c, R14, c->regs[R14]);
911 
912 	c->regs[R1]&= ~TxINT_ENAB;
913 	write_zsreg(c, R1, c->regs[R1]);
914 
915 	/*
916 	 *	RX DMA via W/Req
917 	 */
918 
919 	c->regs[R1]|= WT_FN_RDYFN;
920 	c->regs[R1]|= WT_RDY_RT;
921 	c->regs[R1]|= INT_ERR_Rx;
922 	c->regs[R1]&= ~TxINT_ENAB;
923 	write_zsreg(c, R1, c->regs[R1]);
924 	c->regs[R1]|= WT_RDY_ENAB;
925 	write_zsreg(c, R1, c->regs[R1]);
926 
927 	/*
928 	 *	DMA interrupts
929 	 */
930 
931 	/*
932 	 *	Set up the DMA configuration
933 	 */
934 
935 	dflags=claim_dma_lock();
936 
937 	disable_dma(c->rxdma);
938 	clear_dma_ff(c->rxdma);
939 	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
940 	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
941 	set_dma_count(c->rxdma, c->mtu);
942 	enable_dma(c->rxdma);
943 
944 	disable_dma(c->txdma);
945 	clear_dma_ff(c->txdma);
946 	set_dma_mode(c->txdma, DMA_MODE_WRITE);
947 	disable_dma(c->txdma);
948 
949 	release_dma_lock(dflags);
950 
951 	/*
952 	 *	Select the DMA interrupt handlers
953 	 */
954 
955 	c->rxdma_on = 1;
956 	c->txdma_on = 1;
957 	c->tx_dma_used = 1;
958 
959 	c->irqs = &z8530_dma_sync;
960 	z8530_rtsdtr(c,1);
961 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
962 
963 	spin_unlock_irqrestore(c->lock, cflags);
964 
965 	return 0;
966 }
967 
968 EXPORT_SYMBOL(z8530_sync_dma_open);
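
/*
 *	DMA mode additionally needs the two ISA channels claimed and the
 *	channel numbers filled in before this is called (sketch, invented
 *	names and error labels):
 *
 *		dev->chanA.rxdma = rx_dma;
 *		dev->chanA.txdma = tx_dma;
 *		if (request_dma(rx_dma, "z85230 rx"))
 *			goto fail;
 *		if (request_dma(tx_dma, "z85230 tx"))
 *			goto fail_rx;
 *		z8530_sync_dma_open(netdev, &dev->chanA);
 */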
969 
970 /**
971  *	z8530_sync_dma_close - Close down DMA I/O
972  *	@dev: Network device to detach
973  *	@c: Z8530 channel to move into discard mode
974  *
975  *	Shut down a DMA mode synchronous interface. Halt the DMA, and
976  *	free the buffers.
977  */
978 
979 int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
980 {
981 	u8 chk;
982 	unsigned long flags;
983 
984 	c->irqs = &z8530_nop;
985 	c->max = 0;
986 	c->sync = 0;
987 
988 	/*
989 	 *	Disable the PC DMA channels
990 	 */
991 
992 	flags=claim_dma_lock();
993 	disable_dma(c->rxdma);
994 	clear_dma_ff(c->rxdma);
995 
996 	c->rxdma_on = 0;
997 
998 	disable_dma(c->txdma);
999 	clear_dma_ff(c->txdma);
1000 	release_dma_lock(flags);
1001 
1002 	c->txdma_on = 0;
1003 	c->tx_dma_used = 0;
1004 
1005 	spin_lock_irqsave(c->lock, flags);
1006 
1007 	/*
1008 	 *	Disable DMA control mode
1009 	 */
1010 
1011 	c->regs[R1]&= ~WT_RDY_ENAB;
1012 	write_zsreg(c, R1, c->regs[R1]);
1013 	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1014 	c->regs[R1]|= INT_ALL_Rx;
1015 	write_zsreg(c, R1, c->regs[R1]);
1016 	c->regs[R14]&= ~DTRREQ;
1017 	write_zsreg(c, R14, c->regs[R14]);
1018 
1019 	if(c->rx_buf[0])
1020 	{
1021 		free_page((unsigned long)c->rx_buf[0]);
1022 		c->rx_buf[0]=NULL;
1023 	}
1024 	if(c->tx_dma_buf[0])
1025 	{
1026 		free_page((unsigned  long)c->tx_dma_buf[0]);
1027 		c->tx_dma_buf[0]=NULL;
1028 	}
1029 	chk=read_zsreg(c,R0);
1030 	write_zsreg(c, R3, c->regs[R3]);
1031 	z8530_rtsdtr(c,0);
1032 
1033 	spin_unlock_irqrestore(c->lock, flags);
1034 
1035 	return 0;
1036 }
1037 
1038 EXPORT_SYMBOL(z8530_sync_dma_close);
1039 
1040 /**
1041  *	z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1042  *	@dev: The network device to attach
1043  *	@c: The Z8530 channel to configure in sync DMA mode.
1044  *
1045  *	Set up a Z85x30 device for synchronous DMA transmission. One
1046  *	ISA DMA channel must be available for this to work. The receive
1047  *	side is run in PIO mode, but then it has the bigger FIFO.
1048  */
1049 
1050 int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1051 {
1052 	unsigned long cflags, dflags;
1053 
1054 	printk("Opening sync interface for TX-DMA\n");
1055 	c->sync = 1;
1056 	c->mtu = dev->mtu+64;
1057 	c->count = 0;
1058 	c->skb = NULL;
1059 	c->skb2 = NULL;
1060 
1061 	/*
1062 	 *	Allocate the DMA flip buffers. Limit by page size.
1063 	 *	Everyone runs 1500 mtu or less on wan links so this
1064 	 *	should be fine.
1065 	 */
1066 
1067 	if(c->mtu  > PAGE_SIZE/2)
1068 		return -EMSGSIZE;
1069 
1070 	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1071 	if(c->tx_dma_buf[0]==NULL)
1072 		return -ENOBUFS;
1073 
1074 	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1075 
1076 
1077 	spin_lock_irqsave(c->lock, cflags);
1078 
1079 	/*
1080 	 *	Load the PIO receive ring
1081 	 */
1082 
1083 	z8530_rx_done(c);
1084 	z8530_rx_done(c);
1085 
1086  	/*
1087 	 *	Load the DMA interfaces up
1088 	 */
1089 
1090 	c->rxdma_on = 0;
1091 	c->txdma_on = 0;
1092 
1093 	c->tx_dma_used=0;
1094 	c->dma_num=0;
1095 	c->dma_ready=1;
1096 	c->dma_tx = 1;
1097 
1098  	/*
1099 	 *	Enable DMA control mode
1100 	 */
1101 
1102  	/*
1103 	 *	TX DMA via DIR/REQ
1104  	 */
1105 	c->regs[R14]|= DTRREQ;
1106 	write_zsreg(c, R14, c->regs[R14]);
1107 
1108 	c->regs[R1]&= ~TxINT_ENAB;
1109 	write_zsreg(c, R1, c->regs[R1]);
1110 
1111 	/*
1112 	 *	Set up the DMA configuration
1113 	 */
1114 
1115 	dflags = claim_dma_lock();
1116 
1117 	disable_dma(c->txdma);
1118 	clear_dma_ff(c->txdma);
1119 	set_dma_mode(c->txdma, DMA_MODE_WRITE);
1120 	disable_dma(c->txdma);
1121 
1122 	release_dma_lock(dflags);
1123 
1124 	/*
1125 	 *	Select the DMA interrupt handlers
1126 	 */
1127 
1128 	c->rxdma_on = 0;
1129 	c->txdma_on = 1;
1130 	c->tx_dma_used = 1;
1131 
1132 	c->irqs = &z8530_txdma_sync;
1133 	z8530_rtsdtr(c,1);
1134 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1135 	spin_unlock_irqrestore(c->lock, cflags);
1136 
1137 	return 0;
1138 }
1139 
1140 EXPORT_SYMBOL(z8530_sync_txdma_open);
1141 
1142 /**
1143  *	z8530_sync_txdma_close - Close down a TX driven DMA channel
1144  *	@dev: Network device to detach
1145  *	@c: Z8530 channel to move into discard mode
1146  *
1147  *	Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1148  *	and  free the buffers.
1149  */
1150 
1151 int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1152 {
1153 	unsigned long dflags, cflags;
1154 	u8 chk;
1155 
1156 
1157 	spin_lock_irqsave(c->lock, cflags);
1158 
1159 	c->irqs = &z8530_nop;
1160 	c->max = 0;
1161 	c->sync = 0;
1162 
1163 	/*
1164 	 *	Disable the PC DMA channels
1165 	 */
1166 
1167 	dflags = claim_dma_lock();
1168 
1169 	disable_dma(c->txdma);
1170 	clear_dma_ff(c->txdma);
1171 	c->txdma_on = 0;
1172 	c->tx_dma_used = 0;
1173 
1174 	release_dma_lock(dflags);
1175 
1176 	/*
1177 	 *	Disable DMA control mode
1178 	 */
1179 
1180 	c->regs[R1]&= ~WT_RDY_ENAB;
1181 	write_zsreg(c, R1, c->regs[R1]);
1182 	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1183 	c->regs[R1]|= INT_ALL_Rx;
1184 	write_zsreg(c, R1, c->regs[R1]);
1185 	c->regs[R14]&= ~DTRREQ;
1186 	write_zsreg(c, R14, c->regs[R14]);
1187 
1188 	if(c->tx_dma_buf[0])
1189 	{
1190 		free_page((unsigned long)c->tx_dma_buf[0]);
1191 		c->tx_dma_buf[0]=NULL;
1192 	}
1193 	chk=read_zsreg(c,R0);
1194 	write_zsreg(c, R3, c->regs[R3]);
1195 	z8530_rtsdtr(c,0);
1196 
1197 	spin_unlock_irqrestore(c->lock, cflags);
1198 	return 0;
1199 }
1200 
1201 
1202 EXPORT_SYMBOL(z8530_sync_txdma_close);
1203 
1204 
1205 /*
1206  *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1207  *	it exists...
1208  */
1209 
1210 static const char *z8530_type_name[]={
1211 	"Z8530",
1212 	"Z85C30",
1213 	"Z85230"
1214 };
1215 
1216 /**
1217  *	z8530_describe - Uniformly describe a Z8530 port
1218  *	@dev: Z8530 device to describe
1219  *	@mapping: string holding mapping type (eg "I/O" or "Mem")
1220  *	@io: the port value in question
1221  *
1222  *	Describe a Z8530 in a standard format. We must pass the I/O as
1223  *	the port offset isn't predictable. The main reason for this function
1224  *	is to try and get a common format of report.
1225  */
1226 
1227 void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1228 {
1229 	pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
1230 		dev->name,
1231 		z8530_type_name[dev->type],
1232 		mapping,
1233 		Z8530_PORT_OF(io),
1234 		dev->irq);
1235 }
1236 
1237 EXPORT_SYMBOL(z8530_describe);
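
/*
 *	With the pr_fmt() prefix above, the report comes out along the
 *	lines of (illustrative values only):
 *
 *		z85230: hostess0: Z85230 found at I/O 0x210, IRQ 5
 */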
1238 
1239 /*
1240  *	Locked operation part of the z8530 init code
1241  */
1242 
1243 static inline int do_z8530_init(struct z8530_dev *dev)
1244 {
1245 	/* NOP the interrupt handlers first - we might get a
1246 	   floating IRQ transition when we reset the chip */
1247 	dev->chanA.irqs=&z8530_nop;
1248 	dev->chanB.irqs=&z8530_nop;
1249 	dev->chanA.dcdcheck=DCD;
1250 	dev->chanB.dcdcheck=DCD;
1251 
1252 	/* Reset the chip */
1253 	write_zsreg(&dev->chanA, R9, 0xC0);
1254 	udelay(200);
1255 	/* Now check its valid */
1256 	write_zsreg(&dev->chanA, R12, 0xAA);
1257 	if(read_zsreg(&dev->chanA, R12)!=0xAA)
1258 		return -ENODEV;
1259 	write_zsreg(&dev->chanA, R12, 0x55);
1260 	if(read_zsreg(&dev->chanA, R12)!=0x55)
1261 		return -ENODEV;
1262 
1263 	dev->type=Z8530;
1264 
1265 	/*
1266 	 *	See the application note.
1267 	 */
1268 
1269 	write_zsreg(&dev->chanA, R15, 0x01);
1270 
1271 	/*
1272 	 *	If we can set the low bit of R15 then
1273 	 *	the chip is enhanced.
1274 	 */
1275 
1276 	if(read_zsreg(&dev->chanA, R15)==0x01)
1277 	{
1278 		/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1279 		/* Put a char in the fifo */
1280 		write_zsreg(&dev->chanA, R8, 0);
1281 		if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1282 			dev->type = Z85230;	/* Has a FIFO */
1283 		else
1284 			dev->type = Z85C30;	/* Z85C30, 1 byte FIFO */
1285 	}
1286 
1287 	/*
1288 	 *	The code assumes R7' and friends are
1289 	 *	off. Use write_zsext() for these and keep
1290 	 *	this bit clear.
1291 	 */
1292 
1293 	write_zsreg(&dev->chanA, R15, 0);
1294 
1295 	/*
1296 	 *	At this point it looks like the chip is behaving
1297 	 */
1298 
1299 	memcpy(dev->chanA.regs, reg_init, 16);
1300 	memcpy(dev->chanB.regs, reg_init ,16);
1301 
1302 	return 0;
1303 }
1304 
1305 /**
1306  *	z8530_init - Initialise a Z8530 device
1307  *	@dev: Z8530 device to initialise.
1308  *
1309  *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1310  *	is present, identify the type and then program it to hopefully
1311  *	keep quiet and behave. This matters a lot, a Z8530 in the wrong
1312  *	state will sometimes get into stupid modes generating 10kHz
1313  *	interrupt streams and the like.
1314  *
1315  *	We set the interrupt handler up to discard any events, in case
1316  *	we get them during reset or setup.
1317  *
1318  *	Return 0 for success, or a negative value indicating the problem
1319  *	in errno form.
1320  */
1321 
1322 int z8530_init(struct z8530_dev *dev)
1323 {
1324 	unsigned long flags;
1325 	int ret;
1326 
1327 	/* Set up the chip level lock */
1328 	spin_lock_init(&dev->lock);
1329 	dev->chanA.lock = &dev->lock;
1330 	dev->chanB.lock = &dev->lock;
1331 
1332 	spin_lock_irqsave(&dev->lock, flags);
1333 	ret = do_z8530_init(dev);
1334 	spin_unlock_irqrestore(&dev->lock, flags);
1335 
1336 	return ret;
1337 }
1338 
1339 
1340 EXPORT_SYMBOL(z8530_init);
1341 
1342 /**
1343  *	z8530_shutdown - Shutdown a Z8530 device
1344  *	@dev: The Z8530 chip to shutdown
1345  *
1346  *	We set the interrupt handlers to silence any interrupts. We then
1347  *	reset the chip and wait 100uS to be sure the reset completed. Just
1348  *	in case the caller then tries to do stuff.
1349  *
1350  *	This is called without the lock held
1351  */
1352 
1353 int z8530_shutdown(struct z8530_dev *dev)
1354 {
1355 	unsigned long flags;
1356 	/* Reset the chip */
1357 
1358 	spin_lock_irqsave(&dev->lock, flags);
1359 	dev->chanA.irqs=&z8530_nop;
1360 	dev->chanB.irqs=&z8530_nop;
1361 	write_zsreg(&dev->chanA, R9, 0xC0);
1362 	/* We must lock the udelay, the chip is offlimits here */
1363 	udelay(100);
1364 	spin_unlock_irqrestore(&dev->lock, flags);
1365 	return 0;
1366 }
1367 
1368 EXPORT_SYMBOL(z8530_shutdown);
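
/*
 *	Teardown order as a card driver would normally use it (sketch,
 *	invented names):
 *
 *		netif_stop_queue(netdev);
 *		z8530_sync_close(netdev, &dev->chanA);
 *		z8530_shutdown(dev);
 *		free_irq(dev->irq, dev);
 */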
1369 
1370 /**
1371  *	z8530_channel_load - Load channel data
1372  *	@c: Z8530 channel to configure
1373  *	@rtable: table of register, value pairs
1374  *	FIXME: ioctl to allow user uploaded tables
1375  *
1376  *	Load a Z8530 channel up from the system data. We use +16 to
1377  *	indicate the "prime" registers. The value 255 terminates the
1378  *	table.
1379  */
1380 
1381 int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1382 {
1383 	unsigned long flags;
1384 
1385 	spin_lock_irqsave(c->lock, flags);
1386 
1387 	while(*rtable!=255)
1388 	{
1389 		int reg=*rtable++;
1390 		if(reg>0x0F)
1391 			write_zsreg(c, R15, c->regs[15]|1);
1392 		write_zsreg(c, reg&0x0F, *rtable);
1393 		if(reg>0x0F)
1394 			write_zsreg(c, R15, c->regs[15]&~1);
1395 		c->regs[reg]=*rtable++;
1396 	}
1397 	c->rx_function=z8530_null_rx;
1398 	c->skb=NULL;
1399 	c->tx_skb=NULL;
1400 	c->tx_next_skb=NULL;
1401 	c->mtu=1500;
1402 	c->max=0;
1403 	c->count=0;
1404 	c->status=read_zsreg(c, R0);
1405 	c->sync=1;
1406 	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1407 
1408 	spin_unlock_irqrestore(c->lock, flags);
1409 	return 0;
1410 }
1411 
1412 EXPORT_SYMBOL(z8530_channel_load);
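
/*
 *	Example of the "prime" register encoding (a sketch, not a table
 *	used by this driver): an entry of 23 means 16 + 7, i.e. WR7' on an
 *	85230, so
 *
 *		static u8 ex_table[] = { 23, 3, 255 };
 *
 *	sets bit 0 of R15, writes 3 to register 7 (now WR7'), clears the
 *	bit again and stops at the 255 terminator.
 */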
1413 
1414 
1415 /**
1416  *	z8530_tx_begin - Begin packet transmission
1417  *	@c: The Z8530 channel to kick
1418  *
1419  *	This is the speed sensitive side of transmission. If we are called
1420  *	and no buffer is being transmitted we commence the next buffer. If
1421  *	nothing is queued we idle the sync.
1422  *
1423  *	Note: We are handling this code path in the interrupt path, keep it
1424  *	fast or bad things will happen.
1425  *
1426  *	Called with the lock held.
1427  */
1428 
1429 static void z8530_tx_begin(struct z8530_channel *c)
1430 {
1431 	unsigned long flags;
1432 	if(c->tx_skb)
1433 		return;
1434 
1435 	c->tx_skb=c->tx_next_skb;
1436 	c->tx_next_skb=NULL;
1437 	c->tx_ptr=c->tx_next_ptr;
1438 
1439 	if(c->tx_skb==NULL)
1440 	{
1441 		/* Idle on */
1442 		if(c->dma_tx)
1443 		{
1444 			flags=claim_dma_lock();
1445 			disable_dma(c->txdma);
1446 			/*
1447 			 *	Check if we crapped out.
1448 			 */
1449 			if (get_dma_residue(c->txdma))
1450 			{
1451 				c->netdevice->stats.tx_dropped++;
1452 				c->netdevice->stats.tx_fifo_errors++;
1453 			}
1454 			release_dma_lock(flags);
1455 		}
1456 		c->txcount=0;
1457 	}
1458 	else
1459 	{
1460 		c->txcount=c->tx_skb->len;
1461 
1462 
1463 		if(c->dma_tx)
1464 		{
1465 			/*
1466 			 *	FIXME. DMA is broken for the original 8530,
1467 			 *	on the older parts we need to set a flag and
1468 			 *	wait for a further TX interrupt to fire this
1469 			 *	stage off
1470 			 */
1471 
1472 			flags=claim_dma_lock();
1473 			disable_dma(c->txdma);
1474 
1475 			/*
1476 			 *	These two are needed by the 8530/85C30
1477 			 *	and must be issued when idling.
1478 			 */
1479 
1480 			if(c->dev->type!=Z85230)
1481 			{
1482 				write_zsctrl(c, RES_Tx_CRC);
1483 				write_zsctrl(c, RES_EOM_L);
1484 			}
1485 			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1486 			clear_dma_ff(c->txdma);
1487 			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1488 			set_dma_count(c->txdma, c->txcount);
1489 			enable_dma(c->txdma);
1490 			release_dma_lock(flags);
1491 			write_zsctrl(c, RES_EOM_L);
1492 			write_zsreg(c, R5, c->regs[R5]|TxENAB);
1493 		}
1494 		else
1495 		{
1496 
1497 			/* ABUNDER off */
1498 			write_zsreg(c, R10, c->regs[10]);
1499 			write_zsctrl(c, RES_Tx_CRC);
1500 
1501 			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1502 			{
1503 				write_zsreg(c, R8, *c->tx_ptr++);
1504 				c->txcount--;
1505 			}
1506 
1507 		}
1508 	}
1509 	/*
1510 	 *	Since we emptied tx_skb we can ask for more
1511 	 */
1512 	netif_wake_queue(c->netdevice);
1513 }
1514 
1515 /**
1516  *	z8530_tx_done - TX complete callback
1517  *	@c: The channel that completed a transmit.
1518  *
1519  *	This is called when we complete a packet send. We wake the queue,
1520  *	start the next packet going and then free the buffer of the existing
1521  *	packet. This code is fairly timing sensitive.
1522  *
1523  *	Called with the register lock held.
1524  */
1525 
1526 static void z8530_tx_done(struct z8530_channel *c)
1527 {
1528 	struct sk_buff *skb;
1529 
1530 	/* Actually this can happen.*/
1531 	if (c->tx_skb == NULL)
1532 		return;
1533 
1534 	skb = c->tx_skb;
1535 	c->tx_skb = NULL;
1536 	z8530_tx_begin(c);
1537 	c->netdevice->stats.tx_packets++;
1538 	c->netdevice->stats.tx_bytes += skb->len;
1539 	dev_kfree_skb_irq(skb);
1540 }
1541 
1542 /**
1543  *	z8530_null_rx - Discard a packet
1544  *	@c: The channel the packet arrived on
1545  *	@skb: The buffer
1546  *
1547  *	We point the receive handler at this function when idle. Instead
1548  *	of processing the frames we get to throw them away.
1549  */
1550 
1551 void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1552 {
1553 	dev_kfree_skb_any(skb);
1554 }
1555 
1556 EXPORT_SYMBOL(z8530_null_rx);
1557 
1558 /**
1559  *	z8530_rx_done - Receive completion callback
1560  *	@c: The channel that completed a receive
1561  *
1562  *	A new packet is complete. Our goal here is to get back into receive
1563  *	mode as fast as possible. On the Z85230 we could change to using
1564  *	ESCC mode, but on the older chips we have no choice. We flip to the
1565  *	new buffer immediately in DMA mode so that the DMA of the next
1566  *	frame can occur while we are copying the previous buffer to an sk_buff
1567  *
1568  *	Called with the lock held
1569  */
1570 
1571 static void z8530_rx_done(struct z8530_channel *c)
1572 {
1573 	struct sk_buff *skb;
1574 	int ct;
1575 
1576 	/*
1577 	 *	Is our receive engine in DMA mode
1578 	 */
1579 
1580 	if(c->rxdma_on)
1581 	{
1582 		/*
1583 		 *	Save the ready state and the buffer currently
1584 		 *	being used as the DMA target
1585 		 */
1586 
1587 		int ready=c->dma_ready;
1588 		unsigned char *rxb=c->rx_buf[c->dma_num];
1589 		unsigned long flags;
1590 
1591 		/*
1592 		 *	Complete this DMA. Necessary to find the length
1593 		 */
1594 
1595 		flags=claim_dma_lock();
1596 
1597 		disable_dma(c->rxdma);
1598 		clear_dma_ff(c->rxdma);
1599 		c->rxdma_on=0;
1600 		ct=c->mtu-get_dma_residue(c->rxdma);
1601 		if(ct<0)
1602 			ct=2;	/* Shit happens.. */
1603 		c->dma_ready=0;
1604 
1605 		/*
1606 		 *	Normal case: the other slot is free, start the next DMA
1607 		 *	into it immediately.
1608 		 */
1609 
1610 		if(ready)
1611 		{
1612 			c->dma_num^=1;
1613 			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1614 			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1615 			set_dma_count(c->rxdma, c->mtu);
1616 			c->rxdma_on = 1;
1617 			enable_dma(c->rxdma);
1618 			/* Stop any frames that we missed the head of
1619 			   from passing */
1620 			write_zsreg(c, R0, RES_Rx_CRC);
1621 		}
1622 		else
1623 			/* Can't occur as we don't re-enable the DMA irq until
1624 			   after the flip is done */
1625 			netdev_warn(c->netdevice, "DMA flip overrun!\n");
1626 
1627 		release_dma_lock(flags);
1628 
1629 		/*
1630 		 *	Shove the old buffer into an sk_buff. We can't DMA
1631 		 *	directly into one on a PC - it might be above the 16Mb
1632 		 *	boundary. Optimisation - we could check to see if we
1633 		 *	can avoid the copy. Optimisation 2 - make the memcpy
1634 		 *	a copychecksum.
1635 		 */
1636 
1637 		skb = dev_alloc_skb(ct);
1638 		if (skb == NULL) {
1639 			c->netdevice->stats.rx_dropped++;
1640 			netdev_warn(c->netdevice, "Memory squeeze\n");
1641 		} else {
1642 			skb_put(skb, ct);
1643 			skb_copy_to_linear_data(skb, rxb, ct);
1644 			c->netdevice->stats.rx_packets++;
1645 			c->netdevice->stats.rx_bytes += ct;
1646 		}
1647 		c->dma_ready = 1;
1648 	} else {
1649 		RT_LOCK;
1650 		skb = c->skb;
1651 
1652 		/*
1653 		 *	The game we play for non DMA is similar. We want to
1654 		 *	get the controller set up for the next packet as fast
1655 		 *	as possible. We potentially only have one byte + the
1656 		 *	fifo length for this. Thus we want to flip to the new
1657 		 *	buffer and then mess around copying and allocating
1658 		 *	things. For the current case it doesn't matter but
1659 		 *	if you build a system where the sync irq isn't blocked
1660 		 *	by the kernel IRQ disable then you need only block the
1661 		 *	sync IRQ for the RT_LOCK area.
1662 		 *
1663 		 */
1664 		ct=c->count;
1665 
1666 		c->skb = c->skb2;
1667 		c->count = 0;
1668 		c->max = c->mtu;
1669 		if (c->skb) {
1670 			c->dptr = c->skb->data;
1671 			c->max = c->mtu;
1672 		} else {
1673 			c->count = 0;
1674 			c->max = 0;
1675 		}
1676 		RT_UNLOCK;
1677 
1678 		c->skb2 = dev_alloc_skb(c->mtu);
1679 		if (c->skb2 == NULL)
1680 			netdev_warn(c->netdevice, "memory squeeze\n");
1681 		else
1682 			skb_put(c->skb2, c->mtu);
1683 		c->netdevice->stats.rx_packets++;
1684 		c->netdevice->stats.rx_bytes += ct;
1685 	}
1686 	/*
1687 	 *	If we received a frame we must now process it.
1688 	 */
1689 	if (skb) {
1690 		skb_trim(skb, ct);
1691 		c->rx_function(c, skb);
1692 	} else {
1693 		c->netdevice->stats.rx_dropped++;
1694 		netdev_err(c->netdevice, "Lost a frame\n");
1695 	}
1696 }
1697 
1698 /**
1699  *	spans_boundary - Check a packet can be ISA DMA'd
1700  *	@skb: The buffer to check
1701  *
1702  *	Returns true if the buffer cross a DMA boundary on a PC. The poor
1703  *	thing can only DMA within a 64K block not across the edges of it.
1704  */
1705 
1706 static inline int spans_boundary(struct sk_buff *skb)
1707 {
1708 	unsigned long a=(unsigned long)skb->data;
1709 	a^=(a+skb->len);
1710 	if(a&0x00010000)	/* If the 64K bit is different.. */
1711 		return 1;
1712 	return 0;
1713 }
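
/*
 *	Worked example: a 300 byte frame starting at bus address 0xFF80
 *	ends at 0x100AC. 0xFF80 ^ 0x100AC = 0x1FF2C, which has bit 16 set,
 *	so the frame straddles a 64K edge and must go via the flip buffer.
 */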
1714 
1715 /**
1716  *	z8530_queue_xmit - Queue a packet
1717  *	@c: The channel to use
1718  *	@skb: The packet to kick down the channel
1719  *
1720  *	Queue a packet for transmission. Because we have rather
1721  *	hard to hit interrupt latencies for the Z85230 per packet
1722  *	even in DMA mode we do the flip to DMA buffer if needed here
1723  *	not in the IRQ.
1724  *
1725  *	Called from the network code. The lock is not held at this
1726  *	point.
1727  */
1728 
1729 netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1730 {
1731 	unsigned long flags;
1732 
1733 	netif_stop_queue(c->netdevice);
1734 	if(c->tx_next_skb)
1735 		return NETDEV_TX_BUSY;
1736 
1737 
1738 	/* PC SPECIFIC - DMA limits */
1739 
1740 	/*
1741 	 *	If we will DMA the transmit and it's gone over the ISA bus
1742 	 *	limit, then copy to the flip buffer
1743 	 */
1744 
1745 	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1746 	{
1747 		/*
1748 		 *	Send the flip buffer, and flip the flippy bit.
1749 		 *	We don't care which is used when just so long as
1750 		 *	we never use the same buffer twice in a row. Since
1751 		 *	only one buffer can be going out at a time the other
1752 		 *	has to be safe.
1753 		 */
1754 		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1755 		c->tx_dma_used^=1;	/* Flip temp buffer */
1756 		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
1757 	}
1758 	else
1759 		c->tx_next_ptr=skb->data;
1760 	RT_LOCK;
1761 	c->tx_next_skb=skb;
1762 	RT_UNLOCK;
1763 
1764 	spin_lock_irqsave(c->lock, flags);
1765 	z8530_tx_begin(c);
1766 	spin_unlock_irqrestore(c->lock, flags);
1767 
1768 	return NETDEV_TX_OK;
1769 }
1770 
1771 EXPORT_SYMBOL(z8530_queue_xmit);
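
/*
 *	A card driver normally wraps this in its ndo_start_xmit hook
 *	(sketch, names invented; dev_to_card() stands in for however the
 *	driver finds its struct z8530_dev):
 *
 *	static netdev_tx_t card_queue_xmit(struct sk_buff *skb,
 *					   struct net_device *d)
 *	{
 *		struct z8530_dev *dev = dev_to_card(d);
 *
 *		return z8530_queue_xmit(&dev->chanA, skb);
 *	}
 */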
1772 
1773 /*
1774  *	Module support
1775  */
1776 static const char banner[] __initconst =
1777 	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1778 
1779 static int __init z85230_init_driver(void)
1780 {
1781 	printk(banner);
1782 	return 0;
1783 }
1784 module_init(z85230_init_driver);
1785 
1786 static void __exit z85230_cleanup_driver(void)
1787 {
1788 }
1789 module_exit(z85230_cleanup_driver);
1790 
1791 MODULE_AUTHOR("Red Hat Inc.");
1792 MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1793 MODULE_LICENSE("GPL");
1794