1 // SPDX-License-Identifier: GPL-1.0+
2 /*
3 * $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
4 *
5 * Device driver for Microgate SyncLink ISA and PCI
6 * high speed multiprotocol serial adapters.
7 *
8 * written by Paul Fulghum for Microgate Corporation
9 * paulkf@microgate.com
10 *
11 * Microgate and SyncLink are trademarks of Microgate Corporation
12 *
13 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
14 *
15 * Original release 01/11/99
16 *
17 * This driver is primarily intended for use in synchronous
18 * HDLC mode. Asynchronous mode is also provided.
19 *
20 * When operating in synchronous mode, each call to mgsl_write()
21 * contains exactly one complete HDLC frame. Calling mgsl_put_char
22 * will start assembling an HDLC frame that will not be sent until
23 * mgsl_flush_chars or mgsl_write is called.
24 *
25 * Synchronous receive data is reported as complete frames. To accomplish
26 * this, the TTY flip buffer is bypassed (too small to hold largest
27 * frame and may fragment frames) and the line discipline
28 * receive entry point is called directly.
29 *
30 * This driver has been tested with a slightly modified ppp.c driver
31 * for synchronous PPP.
32 *
33 * 2000/02/16
34 * Added interface for syncppp.c driver (an alternate synchronous PPP
35 * implementation that also supports Cisco HDLC). Each device instance
36 * registers as a tty device AND a network device (if dosyncppp option
37 * is set for the device). The functionality is determined by which
38 * device interface is opened.
39 *
40 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
41 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
42 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
43 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
44 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
45 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
46 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
48 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
49 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
50 * OF THE POSSIBILITY OF SUCH DAMAGE.
51 */
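/*
 * Minimal user-space sketch of the synchronous write semantics described
 * above: exactly one complete HDLC frame per write() call. The device node
 * name "/dev/ttySL0" and the frame contents are illustrative assumptions;
 * MGSL_PARAMS, MGSL_MODE_HDLC and the MGSL_IOCGPARAMS/MGSL_IOCSPARAMS
 * ioctls come from <linux/synclink.h>.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/synclink.h>
 *
 *	int send_one_frame(const unsigned char *frame, int len)
 *	{
 *		MGSL_PARAMS params;
 *		int fd = open("/dev/ttySL0", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *		ioctl(fd, MGSL_IOCGPARAMS, &params);	   read current settings
 *		params.mode = MGSL_MODE_HDLC;
 *		ioctl(fd, MGSL_IOCSPARAMS, &params);	   select HDLC mode
 *		write(fd, frame, len);			   send exactly one frame
 *		close(fd);
 *		return 0;
 *	}
 */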
52
53 #if defined(__i386__)
54 # define BREAKPOINT() asm(" int $3");
55 #else
56 # define BREAKPOINT() { }
57 #endif
58
59 #define MAX_ISA_DEVICES 10
60 #define MAX_PCI_DEVICES 10
61 #define MAX_TOTAL_DEVICES 20
62
63 #include <linux/module.h>
64 #include <linux/errno.h>
65 #include <linux/signal.h>
66 #include <linux/sched.h>
67 #include <linux/timer.h>
68 #include <linux/interrupt.h>
69 #include <linux/pci.h>
70 #include <linux/tty.h>
71 #include <linux/tty_flip.h>
72 #include <linux/serial.h>
73 #include <linux/major.h>
74 #include <linux/string.h>
75 #include <linux/fcntl.h>
76 #include <linux/ptrace.h>
77 #include <linux/ioport.h>
78 #include <linux/mm.h>
79 #include <linux/seq_file.h>
80 #include <linux/slab.h>
81 #include <linux/delay.h>
82 #include <linux/netdevice.h>
83 #include <linux/vmalloc.h>
84 #include <linux/init.h>
85 #include <linux/ioctl.h>
86 #include <linux/synclink.h>
87
88 #include <asm/io.h>
89 #include <asm/irq.h>
90 #include <asm/dma.h>
91 #include <linux/bitops.h>
92 #include <asm/types.h>
93 #include <linux/termios.h>
94 #include <linux/workqueue.h>
95 #include <linux/hdlc.h>
96 #include <linux/dma-mapping.h>
97
98 #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
99 #define SYNCLINK_GENERIC_HDLC 1
100 #else
101 #define SYNCLINK_GENERIC_HDLC 0
102 #endif
103
104 #define GET_USER(error,value,addr) error = get_user(value,addr)
105 #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
106 #define PUT_USER(error,value,addr) error = put_user(value,addr)
107 #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
108
109 #include <linux/uaccess.h>
110
111 #define RCLRVALUE 0xffff
112
113 static MGSL_PARAMS default_params = {
114 MGSL_MODE_HDLC, /* unsigned long mode */
115 0, /* unsigned char loopback; */
116 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
117 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
118 0, /* unsigned long clock_speed; */
119 0xff, /* unsigned char addr_filter; */
120 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
121 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
122 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
123 9600, /* unsigned long data_rate; */
124 8, /* unsigned char data_bits; */
125 1, /* unsigned char stop_bits; */
126 ASYNC_PARITY_NONE /* unsigned char parity; */
127 };
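/*
 * For reference, the same defaults written with designated initializers
 * (readability sketch only; field names are taken from the comments above,
 * the driver keeps the positional form):
 *
 *	static MGSL_PARAMS default_params = {
 *		.mode            = MGSL_MODE_HDLC,
 *		.flags           = HDLC_FLAG_UNDERRUN_ABORT15,
 *		.encoding        = HDLC_ENCODING_NRZI_SPACE,
 *		.addr_filter     = 0xff,
 *		.crc_type        = HDLC_CRC_16_CCITT,
 *		.preamble_length = HDLC_PREAMBLE_LENGTH_8BITS,
 *		.preamble        = HDLC_PREAMBLE_PATTERN_NONE,
 *		.data_rate       = 9600,
 *		.data_bits       = 8,
 *		.stop_bits       = 1,
 *		.parity          = ASYNC_PARITY_NONE,
 *	};
 */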
128
129 #define SHARED_MEM_ADDRESS_SIZE 0x40000
130 #define BUFFERLISTSIZE 4096
131 #define DMABUFFERSIZE 4096
132 #define MAXRXFRAMES 7
133
134 typedef struct _DMABUFFERENTRY
135 {
136 u32 phys_addr; /* 32-bit flat physical address of data buffer */
137 volatile u16 count; /* buffer size/data count */
138 volatile u16 status; /* Control/status field */
139 volatile u16 rcc; /* character count field */
140 u16 reserved; /* padding required by 16C32 */
141 u32 link; /* 32-bit flat link to next buffer entry */
142 char *virt_addr; /* virtual address of data buffer */
143 u32 phys_entry; /* physical address of this buffer entry */
144 dma_addr_t dma_addr;
145 } DMABUFFERENTRY, *DMAPBUFFERENTRY;
146
147 /* The queue of BH actions to be performed */
148
149 #define BH_RECEIVE 1
150 #define BH_TRANSMIT 2
151 #define BH_STATUS 4
152
153 #define IO_PIN_SHUTDOWN_LIMIT 100
154
155 struct _input_signal_events {
156 int ri_up;
157 int ri_down;
158 int dsr_up;
159 int dsr_down;
160 int dcd_up;
161 int dcd_down;
162 int cts_up;
163 int cts_down;
164 };
165
166 /* transmit holding buffer definitions*/
167 #define MAX_TX_HOLDING_BUFFERS 5
168 struct tx_holding_buffer {
169 int buffer_size;
170 unsigned char * buffer;
171 };
172
173
174 /*
175 * Device instance data structure
176 */
177
178 struct mgsl_struct {
179 int magic;
180 struct tty_port port;
181 int line;
182 int hw_version;
183
184 struct mgsl_icount icount;
185
186 int timeout;
187 int x_char; /* xon/xoff character */
188 u16 read_status_mask;
189 u16 ignore_status_mask;
190 unsigned char *xmit_buf;
191 int xmit_head;
192 int xmit_tail;
193 int xmit_cnt;
194
195 wait_queue_head_t status_event_wait_q;
196 wait_queue_head_t event_wait_q;
197 struct timer_list tx_timer; /* HDLC transmit timeout timer */
198 struct mgsl_struct *next_device; /* device list link */
199
200 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
201 struct work_struct task; /* task structure for scheduling bh */
202
203 u32 EventMask; /* event trigger mask */
204 u32 RecordedEvents; /* pending events */
205
206 u32 max_frame_size; /* as set by device config */
207
208 u32 pending_bh;
209
210 bool bh_running; /* Protection from multiple */
211 int isr_overflow;
212 bool bh_requested;
213
214 int dcd_chkcount; /* check counts to prevent */
215 int cts_chkcount; /* too many IRQs if a signal */
216 int dsr_chkcount; /* is floating */
217 int ri_chkcount;
218
219 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
220 u32 buffer_list_phys;
221 dma_addr_t buffer_list_dma_addr;
222
223 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
224 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
225 unsigned int current_rx_buffer;
226
227 int num_tx_dma_buffers; /* number of tx dma frames required */
228 int tx_dma_buffers_used;
229 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
230 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
231 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
232 int current_tx_buffer; /* next tx dma buffer to be loaded */
233
234 unsigned char *intermediate_rxbuffer;
235
236 int num_tx_holding_buffers; /* number of tx holding buffers allocated */
237 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
238 int put_tx_holding_index; /* next tx holding buffer to store user request */
239 int tx_holding_count; /* number of tx holding buffers waiting */
240 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
241
242 bool rx_enabled;
243 bool rx_overflow;
244 bool rx_rcc_underrun;
245
246 bool tx_enabled;
247 bool tx_active;
248 u32 idle_mode;
249
250 u16 cmr_value;
251 u16 tcsr_value;
252
253 char device_name[25]; /* device instance name */
254
255 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
256 unsigned char bus; /* expansion bus number (zero based) */
257 unsigned char function; /* PCI device number */
258
259 unsigned int io_base; /* base I/O address of adapter */
260 unsigned int io_addr_size; /* size of the I/O address range */
261 bool io_addr_requested; /* true if I/O address requested */
262
263 unsigned int irq_level; /* interrupt level */
264 unsigned long irq_flags;
265 bool irq_requested; /* true if IRQ requested */
266
267 unsigned int dma_level; /* DMA channel */
268 bool dma_requested; /* true if dma channel requested */
269
270 u16 mbre_bit;
271 u16 loopback_bits;
272 u16 usc_idle_mode;
273
274 MGSL_PARAMS params; /* communications parameters */
275
276 unsigned char serial_signals; /* current serial signal states */
277
278 bool irq_occurred; /* for diagnostics use */
279 unsigned int init_error; /* Initialization startup error (DIAGS) */
280 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
281
282 u32 last_mem_alloc;
283 unsigned char* memory_base; /* shared memory address (PCI only) */
284 u32 phys_memory_base;
285 bool shared_mem_requested;
286
287 unsigned char* lcr_base; /* local config registers (PCI only) */
288 u32 phys_lcr_base;
289 u32 lcr_offset;
290 bool lcr_mem_requested;
291
292 u32 misc_ctrl_value;
293 char *flag_buf;
294 bool drop_rts_on_tx_done;
295
296 bool loopmode_insert_requested;
297 bool loopmode_send_done_requested;
298
299 struct _input_signal_events input_signal_events;
300
301 /* generic HDLC device parts */
302 int netcount;
303 spinlock_t netlock;
304
305 #if SYNCLINK_GENERIC_HDLC
306 struct net_device *netdev;
307 #endif
308 };
309
310 #define MGSL_MAGIC 0x5401
311
312 /*
313 * The size of the serial xmit buffer is 1 page, or 4096 bytes
314 */
315 #ifndef SERIAL_XMIT_SIZE
316 #define SERIAL_XMIT_SIZE 4096
317 #endif
318
319 /*
320 * These macros define the offsets used in calculating the
321 * I/O address of the specified USC registers.
322 */
323
324
325 #define DCPIN 2 /* Bit 1 of I/O address */
326 #define SDPIN 4 /* Bit 2 of I/O address */
327
328 #define DCAR 0 /* DMA command/address register */
329 #define CCAR SDPIN /* channel command/address register */
330 #define DATAREG (DCPIN + SDPIN) /* serial data register */
331 #define MSBONLY 0x41
332 #define LSBONLY 0x40
333
334 /*
335 * These macros define the register address (ordinal number)
336 * used for writing address/value pairs to the USC.
337 */
338
339 #define CMR 0x02 /* Channel mode Register */
340 #define CCSR 0x04 /* Channel Command/status Register */
341 #define CCR 0x06 /* Channel Control Register */
342 #define PSR 0x08 /* Port status Register */
343 #define PCR 0x0a /* Port Control Register */
344 #define TMDR 0x0c /* Test mode Data Register */
345 #define TMCR 0x0e /* Test mode Control Register */
346 #define CMCR 0x10 /* Clock mode Control Register */
347 #define HCR 0x12 /* Hardware Configuration Register */
348 #define IVR 0x14 /* Interrupt Vector Register */
349 #define IOCR 0x16 /* Input/Output Control Register */
350 #define ICR 0x18 /* Interrupt Control Register */
351 #define DCCR 0x1a /* Daisy Chain Control Register */
352 #define MISR 0x1c /* Misc Interrupt status Register */
353 #define SICR 0x1e /* status Interrupt Control Register */
354 #define RDR 0x20 /* Receive Data Register */
355 #define RMR 0x22 /* Receive mode Register */
356 #define RCSR 0x24 /* Receive Command/status Register */
357 #define RICR 0x26 /* Receive Interrupt Control Register */
358 #define RSR 0x28 /* Receive Sync Register */
359 #define RCLR 0x2a /* Receive count Limit Register */
360 #define RCCR 0x2c /* Receive Character count Register */
361 #define TC0R 0x2e /* Time Constant 0 Register */
362 #define TDR 0x30 /* Transmit Data Register */
363 #define TMR 0x32 /* Transmit mode Register */
364 #define TCSR 0x34 /* Transmit Command/status Register */
365 #define TICR 0x36 /* Transmit Interrupt Control Register */
366 #define TSR 0x38 /* Transmit Sync Register */
367 #define TCLR 0x3a /* Transmit count Limit Register */
368 #define TCCR 0x3c /* Transmit Character count Register */
369 #define TC1R 0x3e /* Time Constant 1 Register */
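/*
 * Example of how the register ordinals above are used with the
 * usc_InReg()/usc_OutReg() accessors declared later in this file
 * (the same pattern appears throughout the ISRs below):
 *
 *	u16 status = usc_InReg(info, RCSR);	   read Receive Command/status
 *	usc_OutReg(info, SICR, 0);		   write Status Interrupt Control
 */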
370
371
372 /*
373 * MACRO DEFINITIONS FOR DMA REGISTERS
374 */
375
376 #define DCR 0x06 /* DMA Control Register (shared) */
377 #define DACR 0x08 /* DMA Array count Register (shared) */
378 #define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
379 #define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
380 #define DICR 0x18 /* DMA Interrupt Control Register (shared) */
381 #define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
382 #define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
383
384 #define TDMR 0x02 /* Transmit DMA mode Register */
385 #define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
386 #define TBCR 0x2a /* Transmit Byte count Register */
387 #define TARL 0x2c /* Transmit Address Register (low) */
388 #define TARU 0x2e /* Transmit Address Register (high) */
389 #define NTBCR 0x3a /* Next Transmit Byte count Register */
390 #define NTARL 0x3c /* Next Transmit Address Register (low) */
391 #define NTARU 0x3e /* Next Transmit Address Register (high) */
392
393 #define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
394 #define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
395 #define RBCR 0xaa /* Receive Byte count Register */
396 #define RARL 0xac /* Receive Address Register (low) */
397 #define RARU 0xae /* Receive Address Register (high) */
398 #define NRBCR 0xba /* Next Receive Byte count Register */
399 #define NRARL 0xbc /* Next Receive Address Register (low) */
400 #define NRARU 0xbe /* Next Receive Address Register (high) */
401
402
403 /*
404 * MACRO DEFINITIONS FOR MODEM STATUS BITS
405 */
406
407 #define MODEMSTATUS_DTR 0x80
408 #define MODEMSTATUS_DSR 0x40
409 #define MODEMSTATUS_RTS 0x20
410 #define MODEMSTATUS_CTS 0x10
411 #define MODEMSTATUS_RI 0x04
412 #define MODEMSTATUS_DCD 0x01
413
414
415 /*
416 * Channel Command/Address Register (CCAR) Command Codes
417 */
418
419 #define RTCmd_Null 0x0000
420 #define RTCmd_ResetHighestIus 0x1000
421 #define RTCmd_TriggerChannelLoadDma 0x2000
422 #define RTCmd_TriggerRxDma 0x2800
423 #define RTCmd_TriggerTxDma 0x3000
424 #define RTCmd_TriggerRxAndTxDma 0x3800
425 #define RTCmd_PurgeRxFifo 0x4800
426 #define RTCmd_PurgeTxFifo 0x5000
427 #define RTCmd_PurgeRxAndTxFifo 0x5800
428 #define RTCmd_LoadRcc 0x6800
429 #define RTCmd_LoadTcc 0x7000
430 #define RTCmd_LoadRccAndTcc 0x7800
431 #define RTCmd_LoadTC0 0x8800
432 #define RTCmd_LoadTC1 0x9000
433 #define RTCmd_LoadTC0AndTC1 0x9800
434 #define RTCmd_SerialDataLSBFirst 0xa000
435 #define RTCmd_SerialDataMSBFirst 0xa800
436 #define RTCmd_SelectBigEndian 0xb000
437 #define RTCmd_SelectLittleEndian 0xb800
438
439
440 /*
441 * DMA Command/Address Register (DCAR) Command Codes
442 */
443
444 #define DmaCmd_Null 0x0000
445 #define DmaCmd_ResetTxChannel 0x1000
446 #define DmaCmd_ResetRxChannel 0x1200
447 #define DmaCmd_StartTxChannel 0x2000
448 #define DmaCmd_StartRxChannel 0x2200
449 #define DmaCmd_ContinueTxChannel 0x3000
450 #define DmaCmd_ContinueRxChannel 0x3200
451 #define DmaCmd_PauseTxChannel 0x4000
452 #define DmaCmd_PauseRxChannel 0x4200
453 #define DmaCmd_AbortTxChannel 0x5000
454 #define DmaCmd_AbortRxChannel 0x5200
455 #define DmaCmd_InitTxChannel 0x7000
456 #define DmaCmd_InitRxChannel 0x7200
457 #define DmaCmd_ResetHighestDmaIus 0x8000
458 #define DmaCmd_ResetAllChannels 0x9000
459 #define DmaCmd_StartAllChannels 0xa000
460 #define DmaCmd_ContinueAllChannels 0xb000
461 #define DmaCmd_PauseAllChannels 0xc000
462 #define DmaCmd_AbortAllChannels 0xd000
463 #define DmaCmd_InitAllChannels 0xf000
464
465 #define TCmd_Null 0x0000
466 #define TCmd_ClearTxCRC 0x2000
467 #define TCmd_SelectTicrTtsaData 0x4000
468 #define TCmd_SelectTicrTxFifostatus 0x5000
469 #define TCmd_SelectTicrIntLevel 0x6000
470 #define TCmd_SelectTicrdma_level 0x7000
471 #define TCmd_SendFrame 0x8000
472 #define TCmd_SendAbort 0x9000
473 #define TCmd_EnableDleInsertion 0xc000
474 #define TCmd_DisableDleInsertion 0xd000
475 #define TCmd_ClearEofEom 0xe000
476 #define TCmd_SetEofEom 0xf000
477
478 #define RCmd_Null 0x0000
479 #define RCmd_ClearRxCRC 0x2000
480 #define RCmd_EnterHuntmode 0x3000
481 #define RCmd_SelectRicrRtsaData 0x4000
482 #define RCmd_SelectRicrRxFifostatus 0x5000
483 #define RCmd_SelectRicrIntLevel 0x6000
484 #define RCmd_SelectRicrdma_level 0x7000
485
486 /*
487 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
488 */
489
490 #define RECEIVE_STATUS BIT5
491 #define RECEIVE_DATA BIT4
492 #define TRANSMIT_STATUS BIT3
493 #define TRANSMIT_DATA BIT2
494 #define IO_PIN BIT1
495 #define MISC BIT0
496
497
498 /*
499 * Receive status Bits in Receive Command/status Register RCSR
500 */
501
502 #define RXSTATUS_SHORT_FRAME BIT8
503 #define RXSTATUS_CODE_VIOLATION BIT8
504 #define RXSTATUS_EXITED_HUNT BIT7
505 #define RXSTATUS_IDLE_RECEIVED BIT6
506 #define RXSTATUS_BREAK_RECEIVED BIT5
507 #define RXSTATUS_ABORT_RECEIVED BIT5
508 #define RXSTATUS_RXBOUND BIT4
509 #define RXSTATUS_CRC_ERROR BIT3
510 #define RXSTATUS_FRAMING_ERROR BIT3
511 #define RXSTATUS_ABORT BIT2
512 #define RXSTATUS_PARITY_ERROR BIT2
513 #define RXSTATUS_OVERRUN BIT1
514 #define RXSTATUS_DATA_AVAILABLE BIT0
515 #define RXSTATUS_ALL 0x01f6
516 #define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
517
518 /*
519 * Values for setting transmit idle mode in
520 * Transmit Control/status Register (TCSR)
521 */
522 #define IDLEMODE_FLAGS 0x0000
523 #define IDLEMODE_ALT_ONE_ZERO 0x0100
524 #define IDLEMODE_ZERO 0x0200
525 #define IDLEMODE_ONE 0x0300
526 #define IDLEMODE_ALT_MARK_SPACE 0x0500
527 #define IDLEMODE_SPACE 0x0600
528 #define IDLEMODE_MARK 0x0700
529 #define IDLEMODE_MASK 0x0700
530
531 /*
532 * IUSC revision identifiers
533 */
534 #define IUSC_SL1660 0x4d44
535 #define IUSC_PRE_SL1660 0x4553
536
537 /*
538 * Transmit status Bits in Transmit Command/status Register (TCSR)
539 */
540
541 #define TCSR_PRESERVE 0x0F00
542
543 #define TCSR_UNDERWAIT BIT11
544 #define TXSTATUS_PREAMBLE_SENT BIT7
545 #define TXSTATUS_IDLE_SENT BIT6
546 #define TXSTATUS_ABORT_SENT BIT5
547 #define TXSTATUS_EOF_SENT BIT4
548 #define TXSTATUS_EOM_SENT BIT4
549 #define TXSTATUS_CRC_SENT BIT3
550 #define TXSTATUS_ALL_SENT BIT2
551 #define TXSTATUS_UNDERRUN BIT1
552 #define TXSTATUS_FIFO_EMPTY BIT0
553 #define TXSTATUS_ALL 0x00fa
554 #define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
555
556
557 #define MISCSTATUS_RXC_LATCHED BIT15
558 #define MISCSTATUS_RXC BIT14
559 #define MISCSTATUS_TXC_LATCHED BIT13
560 #define MISCSTATUS_TXC BIT12
561 #define MISCSTATUS_RI_LATCHED BIT11
562 #define MISCSTATUS_RI BIT10
563 #define MISCSTATUS_DSR_LATCHED BIT9
564 #define MISCSTATUS_DSR BIT8
565 #define MISCSTATUS_DCD_LATCHED BIT7
566 #define MISCSTATUS_DCD BIT6
567 #define MISCSTATUS_CTS_LATCHED BIT5
568 #define MISCSTATUS_CTS BIT4
569 #define MISCSTATUS_RCC_UNDERRUN BIT3
570 #define MISCSTATUS_DPLL_NO_SYNC BIT2
571 #define MISCSTATUS_BRG1_ZERO BIT1
572 #define MISCSTATUS_BRG0_ZERO BIT0
573
574 #define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
575 #define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
576
577 #define SICR_RXC_ACTIVE BIT15
578 #define SICR_RXC_INACTIVE BIT14
579 #define SICR_RXC (BIT15|BIT14)
580 #define SICR_TXC_ACTIVE BIT13
581 #define SICR_TXC_INACTIVE BIT12
582 #define SICR_TXC (BIT13|BIT12)
583 #define SICR_RI_ACTIVE BIT11
584 #define SICR_RI_INACTIVE BIT10
585 #define SICR_RI (BIT11|BIT10)
586 #define SICR_DSR_ACTIVE BIT9
587 #define SICR_DSR_INACTIVE BIT8
588 #define SICR_DSR (BIT9|BIT8)
589 #define SICR_DCD_ACTIVE BIT7
590 #define SICR_DCD_INACTIVE BIT6
591 #define SICR_DCD (BIT7|BIT6)
592 #define SICR_CTS_ACTIVE BIT5
593 #define SICR_CTS_INACTIVE BIT4
594 #define SICR_CTS (BIT5|BIT4)
595 #define SICR_RCC_UNDERFLOW BIT3
596 #define SICR_DPLL_NO_SYNC BIT2
597 #define SICR_BRG1_ZERO BIT1
598 #define SICR_BRG0_ZERO BIT0
599
600 void usc_DisableMasterIrqBit( struct mgsl_struct *info );
601 void usc_EnableMasterIrqBit( struct mgsl_struct *info );
602 void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
603 void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
604 void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
605
606 #define usc_EnableInterrupts( a, b ) \
607 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
608
609 #define usc_DisableInterrupts( a, b ) \
610 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
611
612 #define usc_EnableMasterIrqBit(a) \
613 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
614
615 #define usc_DisableMasterIrqBit(a) \
616 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
617
618 #define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
619
620 /*
621 * Transmit status Bits in Transmit Control status Register (TCSR)
622 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
623 */
624
625 #define TXSTATUS_PREAMBLE_SENT BIT7
626 #define TXSTATUS_IDLE_SENT BIT6
627 #define TXSTATUS_ABORT_SENT BIT5
628 #define TXSTATUS_EOF BIT4
629 #define TXSTATUS_CRC_SENT BIT3
630 #define TXSTATUS_ALL_SENT BIT2
631 #define TXSTATUS_UNDERRUN BIT1
632 #define TXSTATUS_FIFO_EMPTY BIT0
633
634 #define DICR_MASTER BIT15
635 #define DICR_TRANSMIT BIT0
636 #define DICR_RECEIVE BIT1
637
638 #define usc_EnableDmaInterrupts(a,b) \
639 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
640
641 #define usc_DisableDmaInterrupts(a,b) \
642 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
643
644 #define usc_EnableStatusIrqs(a,b) \
645 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
646
647 #define usc_DisablestatusIrqs(a,b) \
648 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
649
650 /* Transmit status Bits in Transmit Control status Register (TCSR) */
651 /* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
652
653
654 #define DISABLE_UNCONDITIONAL 0
655 #define DISABLE_END_OF_FRAME 1
656 #define ENABLE_UNCONDITIONAL 2
657 #define ENABLE_AUTO_CTS 3
658 #define ENABLE_AUTO_DCD 3
659 #define usc_EnableTransmitter(a,b) \
660 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
661 #define usc_EnableReceiver(a,b) \
662 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
663
664 static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
665 static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
666 static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
667
668 static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
669 static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
670 static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
671 void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
672 void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
673
674 #define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
675 #define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
676
677 #define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
678
679 static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
680 static void usc_start_receiver( struct mgsl_struct *info );
681 static void usc_stop_receiver( struct mgsl_struct *info );
682
683 static void usc_start_transmitter( struct mgsl_struct *info );
684 static void usc_stop_transmitter( struct mgsl_struct *info );
685 static void usc_set_txidle( struct mgsl_struct *info );
686 static void usc_load_txfifo( struct mgsl_struct *info );
687
688 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
689 static void usc_enable_loopback( struct mgsl_struct *info, int enable );
690
691 static void usc_get_serial_signals( struct mgsl_struct *info );
692 static void usc_set_serial_signals( struct mgsl_struct *info );
693
694 static void usc_reset( struct mgsl_struct *info );
695
696 static void usc_set_sync_mode( struct mgsl_struct *info );
697 static void usc_set_sdlc_mode( struct mgsl_struct *info );
698 static void usc_set_async_mode( struct mgsl_struct *info );
699 static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
700
701 static void usc_loopback_frame( struct mgsl_struct *info );
702
703 static void mgsl_tx_timeout(struct timer_list *t);
704
705
706 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
707 static void usc_loopmode_insert_request( struct mgsl_struct * info );
708 static int usc_loopmode_active( struct mgsl_struct * info);
709 static void usc_loopmode_send_done( struct mgsl_struct * info );
710
711 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
712
713 #if SYNCLINK_GENERIC_HDLC
714 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
715 static void hdlcdev_tx_done(struct mgsl_struct *info);
716 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
717 static int hdlcdev_init(struct mgsl_struct *info);
718 static void hdlcdev_exit(struct mgsl_struct *info);
719 #endif
720
721 /*
722 * Defines a BUS descriptor value for the PCI adapter
723 * local bus address ranges.
724 */
725
726 #define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
727 (0x00400020 + \
728 ((WrHold) << 30) + \
729 ((WrDly) << 28) + \
730 ((RdDly) << 26) + \
731 ((Nwdd) << 20) + \
732 ((Nwad) << 15) + \
733 ((Nxda) << 13) + \
734 ((Nrdd) << 11) + \
735 ((Nrad) << 6) )
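/*
 * Field layout produced by BUS_DESCRIPTOR(), derived from the shifts above
 * (each field is added to the constant 0x00400020):
 *
 *	bits 31:30 WrHold    bits 29:28 WrDly    bits 27:26 RdDly
 *	bits 25:20 Nwdd      bits 19:15 Nwad     bits 14:13 Nxda
 *	bits 12:11 Nrdd      bits 10:6  Nrad
 */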
736
737 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
738
739 /*
740 * Adapter diagnostic routines
741 */
742 static bool mgsl_register_test( struct mgsl_struct *info );
743 static bool mgsl_irq_test( struct mgsl_struct *info );
744 static bool mgsl_dma_test( struct mgsl_struct *info );
745 static bool mgsl_memory_test( struct mgsl_struct *info );
746 static int mgsl_adapter_test( struct mgsl_struct *info );
747
748 /*
749 * device and resource management routines
750 */
751 static int mgsl_claim_resources(struct mgsl_struct *info);
752 static void mgsl_release_resources(struct mgsl_struct *info);
753 static void mgsl_add_device(struct mgsl_struct *info);
754 static struct mgsl_struct* mgsl_allocate_device(void);
755
756 /*
757 * DMA buffer manipulation functions.
758 */
759 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
760 static bool mgsl_get_rx_frame( struct mgsl_struct *info );
761 static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
762 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
763 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
764 static int num_free_tx_dma_buffers(struct mgsl_struct *info);
765 static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
766 static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
767
768 /*
769 * DMA and Shared Memory buffer allocation and formatting
770 */
771 static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
772 static void mgsl_free_dma_buffers(struct mgsl_struct *info);
773 static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
774 static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
775 static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
776 static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
777 static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
778 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
779 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
780 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
781 static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
782 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
783
784 /*
785 * Bottom half interrupt handlers
786 */
787 static void mgsl_bh_handler(struct work_struct *work);
788 static void mgsl_bh_receive(struct mgsl_struct *info);
789 static void mgsl_bh_transmit(struct mgsl_struct *info);
790 static void mgsl_bh_status(struct mgsl_struct *info);
791
792 /*
793 * Interrupt handler routines and dispatch table.
794 */
795 static void mgsl_isr_null( struct mgsl_struct *info );
796 static void mgsl_isr_transmit_data( struct mgsl_struct *info );
797 static void mgsl_isr_receive_data( struct mgsl_struct *info );
798 static void mgsl_isr_receive_status( struct mgsl_struct *info );
799 static void mgsl_isr_transmit_status( struct mgsl_struct *info );
800 static void mgsl_isr_io_pin( struct mgsl_struct *info );
801 static void mgsl_isr_misc( struct mgsl_struct *info );
802 static void mgsl_isr_receive_dma( struct mgsl_struct *info );
803 static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
804
805 typedef void (*isr_dispatch_func)(struct mgsl_struct *);
806
807 static isr_dispatch_func UscIsrTable[7] =
808 {
809 mgsl_isr_null,
810 mgsl_isr_misc,
811 mgsl_isr_io_pin,
812 mgsl_isr_transmit_data,
813 mgsl_isr_transmit_status,
814 mgsl_isr_receive_data,
815 mgsl_isr_receive_status
816 };
817
818 /*
819 * ioctl call handlers
820 */
821 static int tiocmget(struct tty_struct *tty);
822 static int tiocmset(struct tty_struct *tty,
823 unsigned int set, unsigned int clear);
824 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
825 __user *user_icount);
826 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
827 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
828 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
829 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
830 static int mgsl_txenable(struct mgsl_struct * info, int enable);
831 static int mgsl_txabort(struct mgsl_struct * info);
832 static int mgsl_rxenable(struct mgsl_struct * info, int enable);
833 static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
834 static int mgsl_loopmode_send_done( struct mgsl_struct * info );
835
836 /* set true on successful registration with PCI subsystem */
837 static bool pci_registered;
838
839 /*
840 * Global linked list of SyncLink devices
841 */
842 static struct mgsl_struct *mgsl_device_list;
843 static int mgsl_device_count;
844
845 /*
846 * Set this param to true to load eax with the
847 * .text section address and breakpoint on module load.
848 * This is useful with gdb and the add-symbol-file command.
849 */
850 static bool break_on_load;
851
852 /*
853 * Driver major number, defaults to zero to get auto
854 * assigned major number. May be forced as module parameter.
855 */
856 static int ttymajor;
857
858 /*
859 * Array of user specified options for ISA adapters.
860 */
861 static int io[MAX_ISA_DEVICES];
862 static int irq[MAX_ISA_DEVICES];
863 static int dma[MAX_ISA_DEVICES];
864 static int debug_level;
865 static int maxframe[MAX_TOTAL_DEVICES];
866 static int txdmabufs[MAX_TOTAL_DEVICES];
867 static int txholdbufs[MAX_TOTAL_DEVICES];
868
869 module_param(break_on_load, bool, 0);
870 module_param(ttymajor, int, 0);
871 module_param_hw_array(io, int, ioport, NULL, 0);
872 module_param_hw_array(irq, int, irq, NULL, 0);
873 module_param_hw_array(dma, int, dma, NULL, 0);
874 module_param(debug_level, int, 0);
875 module_param_array(maxframe, int, NULL, 0);
876 module_param_array(txdmabufs, int, NULL, 0);
877 module_param_array(txholdbufs, int, NULL, 0);
878
879 static char *driver_name = "SyncLink serial driver";
880 static char *driver_version = "$Revision: 4.38 $";
881
882 static int synclink_init_one (struct pci_dev *dev,
883 const struct pci_device_id *ent);
884 static void synclink_remove_one (struct pci_dev *dev);
885
886 static const struct pci_device_id synclink_pci_tbl[] = {
887 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
888 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
889 { 0, }, /* terminate list */
890 };
891 MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
892
893 MODULE_LICENSE("GPL");
894
895 static struct pci_driver synclink_pci_driver = {
896 .name = "synclink",
897 .id_table = synclink_pci_tbl,
898 .probe = synclink_init_one,
899 .remove = synclink_remove_one,
900 };
901
902 static struct tty_driver *serial_driver;
903
904 /* number of characters left in xmit buffer before we ask for more */
905 #define WAKEUP_CHARS 256
906
907
908 static void mgsl_change_params(struct mgsl_struct *info);
909 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
910
911 /*
912 * 1st function defined in .text section. Calling this function in
913 * init_module() followed by a breakpoint allows a remote debugger
914 * (gdb) to get the .text address for the add-symbol-file command.
915 * This allows remote debugging of dynamically loadable modules.
916 */
917 static void* mgsl_get_text_ptr(void)
918 {
919 return mgsl_get_text_ptr;
920 }
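/*
 * Debugging sketch using the address returned above (the module object name
 * "synclink.o" is an assumption; see also the break_on_load option):
 *
 *	(gdb) add-symbol-file synclink.o <value returned by mgsl_get_text_ptr()>
 */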
921
922 static inline int mgsl_paranoia_check(struct mgsl_struct *info,
923 char *name, const char *routine)
924 {
925 #ifdef MGSL_PARANOIA_CHECK
926 static const char *badmagic =
927 "Warning: bad magic number for mgsl struct (%s) in %s\n";
928 static const char *badinfo =
929 "Warning: null mgsl_struct for (%s) in %s\n";
930
931 if (!info) {
932 printk(badinfo, name, routine);
933 return 1;
934 }
935 if (info->magic != MGSL_MAGIC) {
936 printk(badmagic, name, routine);
937 return 1;
938 }
939 #else
940 if (!info)
941 return 1;
942 #endif
943 return 0;
944 }
945
946 /**
947 * line discipline callback wrappers
948 *
949 * The wrappers maintain line discipline references
950 * while calling into the line discipline.
951 *
952 * ldisc_receive_buf - pass receive data to line discipline
953 */
954
955 static void ldisc_receive_buf(struct tty_struct *tty,
956 const __u8 *data, char *flags, int count)
957 {
958 struct tty_ldisc *ld;
959 if (!tty)
960 return;
961 ld = tty_ldisc_ref(tty);
962 if (ld) {
963 if (ld->ops->receive_buf)
964 ld->ops->receive_buf(tty, data, flags, count);
965 tty_ldisc_deref(ld);
966 }
967 }
968
969 /* mgsl_stop() throttle (stop) transmitter
970 *
971 * Arguments: tty pointer to tty info structure
972 * Return Value: None
973 */
974 static void mgsl_stop(struct tty_struct *tty)
975 {
976 struct mgsl_struct *info = tty->driver_data;
977 unsigned long flags;
978
979 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
980 return;
981
982 if ( debug_level >= DEBUG_LEVEL_INFO )
983 printk("mgsl_stop(%s)\n",info->device_name);
984
985 spin_lock_irqsave(&info->irq_spinlock,flags);
986 if (info->tx_enabled)
987 usc_stop_transmitter(info);
988 spin_unlock_irqrestore(&info->irq_spinlock,flags);
989
990 } /* end of mgsl_stop() */
991
992 /* mgsl_start() release (start) transmitter
993 *
994 * Arguments: tty pointer to tty info structure
995 * Return Value: None
996 */
997 static void mgsl_start(struct tty_struct *tty)
998 {
999 struct mgsl_struct *info = tty->driver_data;
1000 unsigned long flags;
1001
1002 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1003 return;
1004
1005 if ( debug_level >= DEBUG_LEVEL_INFO )
1006 printk("mgsl_start(%s)\n",info->device_name);
1007
1008 spin_lock_irqsave(&info->irq_spinlock,flags);
1009 if (!info->tx_enabled)
1010 usc_start_transmitter(info);
1011 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1012
1013 } /* end of mgsl_start() */
1014
1015 /*
1016 * Bottom half work queue access functions
1017 */
1018
1019 /* mgsl_bh_action() Return next bottom half action to perform.
1020 * Return Value: BH action code or 0 if nothing to do.
1021 */
1022 static int mgsl_bh_action(struct mgsl_struct *info)
1023 {
1024 unsigned long flags;
1025 int rc = 0;
1026
1027 spin_lock_irqsave(&info->irq_spinlock,flags);
1028
1029 if (info->pending_bh & BH_RECEIVE) {
1030 info->pending_bh &= ~BH_RECEIVE;
1031 rc = BH_RECEIVE;
1032 } else if (info->pending_bh & BH_TRANSMIT) {
1033 info->pending_bh &= ~BH_TRANSMIT;
1034 rc = BH_TRANSMIT;
1035 } else if (info->pending_bh & BH_STATUS) {
1036 info->pending_bh &= ~BH_STATUS;
1037 rc = BH_STATUS;
1038 }
1039
1040 if (!rc) {
1041 /* Mark BH routine as complete */
1042 info->bh_running = false;
1043 info->bh_requested = false;
1044 }
1045
1046 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1047
1048 return rc;
1049 }
1050
1051 /*
1052 * Perform bottom half processing of work items queued by ISR.
1053 */
1054 static void mgsl_bh_handler(struct work_struct *work)
1055 {
1056 struct mgsl_struct *info =
1057 container_of(work, struct mgsl_struct, task);
1058 int action;
1059
1060 if ( debug_level >= DEBUG_LEVEL_BH )
1061 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1062 __FILE__,__LINE__,info->device_name);
1063
1064 info->bh_running = true;
1065
1066 while((action = mgsl_bh_action(info)) != 0) {
1067
1068 /* Process work item */
1069 if ( debug_level >= DEBUG_LEVEL_BH )
1070 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1071 __FILE__,__LINE__,action);
1072
1073 switch (action) {
1074
1075 case BH_RECEIVE:
1076 mgsl_bh_receive(info);
1077 break;
1078 case BH_TRANSMIT:
1079 mgsl_bh_transmit(info);
1080 break;
1081 case BH_STATUS:
1082 mgsl_bh_status(info);
1083 break;
1084 default:
1085 /* unknown work item ID */
1086 printk("Unknown work item ID=%08X!\n", action);
1087 break;
1088 }
1089 }
1090
1091 if ( debug_level >= DEBUG_LEVEL_BH )
1092 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1093 __FILE__,__LINE__,info->device_name);
1094 }
1095
1096 static void mgsl_bh_receive(struct mgsl_struct *info)
1097 {
1098 bool (*get_rx_frame)(struct mgsl_struct *info) =
1099 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1100
1101 if ( debug_level >= DEBUG_LEVEL_BH )
1102 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1103 __FILE__,__LINE__,info->device_name);
1104
1105 do
1106 {
1107 if (info->rx_rcc_underrun) {
1108 unsigned long flags;
1109 spin_lock_irqsave(&info->irq_spinlock,flags);
1110 usc_start_receiver(info);
1111 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1112 return;
1113 }
1114 } while(get_rx_frame(info));
1115 }
1116
1117 static void mgsl_bh_transmit(struct mgsl_struct *info)
1118 {
1119 struct tty_struct *tty = info->port.tty;
1120 unsigned long flags;
1121
1122 if ( debug_level >= DEBUG_LEVEL_BH )
1123 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1124 __FILE__,__LINE__,info->device_name);
1125
1126 if (tty)
1127 tty_wakeup(tty);
1128
1129 /* if transmitter idle and loopmode_send_done_requested
1130 * then start echoing RxD to TxD
1131 */
1132 spin_lock_irqsave(&info->irq_spinlock,flags);
1133 if ( !info->tx_active && info->loopmode_send_done_requested )
1134 usc_loopmode_send_done( info );
1135 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1136 }
1137
1138 static void mgsl_bh_status(struct mgsl_struct *info)
1139 {
1140 if ( debug_level >= DEBUG_LEVEL_BH )
1141 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1142 __FILE__,__LINE__,info->device_name);
1143
1144 info->ri_chkcount = 0;
1145 info->dsr_chkcount = 0;
1146 info->dcd_chkcount = 0;
1147 info->cts_chkcount = 0;
1148 }
1149
1150 /* mgsl_isr_receive_status()
1151 *
1152 * Service a receive status interrupt. The type of status
1153 * interrupt is indicated by the state of the RCSR.
1154 * This is only used for HDLC mode.
1155 *
1156 * Arguments: info pointer to device instance data
1157 * Return Value: None
1158 */
1159 static void mgsl_isr_receive_status( struct mgsl_struct *info )
1160 {
1161 u16 status = usc_InReg( info, RCSR );
1162
1163 if ( debug_level >= DEBUG_LEVEL_ISR )
1164 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1165 __FILE__,__LINE__,status);
1166
1167 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1168 info->loopmode_insert_requested &&
1169 usc_loopmode_active(info) )
1170 {
1171 ++info->icount.rxabort;
1172 info->loopmode_insert_requested = false;
1173
1174 /* clear CMR:13 to start echoing RxD to TxD */
1175 info->cmr_value &= ~BIT13;
1176 usc_OutReg(info, CMR, info->cmr_value);
1177
1178 /* disable received abort irq (no longer required) */
1179 usc_OutReg(info, RICR,
1180 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1181 }
1182
1183 if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
1184 if (status & RXSTATUS_EXITED_HUNT)
1185 info->icount.exithunt++;
1186 if (status & RXSTATUS_IDLE_RECEIVED)
1187 info->icount.rxidle++;
1188 wake_up_interruptible(&info->event_wait_q);
1189 }
1190
1191 if (status & RXSTATUS_OVERRUN){
1192 info->icount.rxover++;
1193 usc_process_rxoverrun_sync( info );
1194 }
1195
1196 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1197 usc_UnlatchRxstatusBits( info, status );
1198
1199 } /* end of mgsl_isr_receive_status() */
1200
1201 /* mgsl_isr_transmit_status()
1202 *
1203 * Service a transmit status interrupt.
1204 * HDLC mode: end of transmit frame
1205 * Async mode: all data is sent
1206 * Transmit status is indicated by bits in the TCSR.
1207 *
1208 * Arguments: info pointer to device instance data
1209 * Return Value: None
1210 */
1211 static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1212 {
1213 u16 status = usc_InReg( info, TCSR );
1214
1215 if ( debug_level >= DEBUG_LEVEL_ISR )
1216 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1217 __FILE__,__LINE__,status);
1218
1219 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1220 usc_UnlatchTxstatusBits( info, status );
1221
1222 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1223 {
1224 /* finished sending HDLC abort. This may leave */
1225 /* the TxFifo with data from the aborted frame */
1226 /* so purge the TxFifo. Also shutdown the DMA */
1227 /* channel in case there is data remaining in */
1228 /* the DMA buffer */
1229 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1230 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1231 }
1232
1233 if ( status & TXSTATUS_EOF_SENT )
1234 info->icount.txok++;
1235 else if ( status & TXSTATUS_UNDERRUN )
1236 info->icount.txunder++;
1237 else if ( status & TXSTATUS_ABORT_SENT )
1238 info->icount.txabort++;
1239 else
1240 info->icount.txunder++;
1241
1242 info->tx_active = false;
1243 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1244 del_timer(&info->tx_timer);
1245
1246 if ( info->drop_rts_on_tx_done ) {
1247 usc_get_serial_signals( info );
1248 if ( info->serial_signals & SerialSignal_RTS ) {
1249 info->serial_signals &= ~SerialSignal_RTS;
1250 usc_set_serial_signals( info );
1251 }
1252 info->drop_rts_on_tx_done = false;
1253 }
1254
1255 #if SYNCLINK_GENERIC_HDLC
1256 if (info->netcount)
1257 hdlcdev_tx_done(info);
1258 else
1259 #endif
1260 {
1261 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1262 usc_stop_transmitter(info);
1263 return;
1264 }
1265 info->pending_bh |= BH_TRANSMIT;
1266 }
1267
1268 } /* end of mgsl_isr_transmit_status() */
1269
1270 /* mgsl_isr_io_pin()
1271 *
1272 * Service an Input/Output pin interrupt. The type of
1273 * interrupt is indicated by bits in the MISR
1274 *
1275 * Arguments: info pointer to device instance data
1276 * Return Value: None
1277 */
1278 static void mgsl_isr_io_pin( struct mgsl_struct *info )
1279 {
1280 struct mgsl_icount *icount;
1281 u16 status = usc_InReg( info, MISR );
1282
1283 if ( debug_level >= DEBUG_LEVEL_ISR )
1284 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1285 __FILE__,__LINE__,status);
1286
1287 usc_ClearIrqPendingBits( info, IO_PIN );
1288 usc_UnlatchIostatusBits( info, status );
1289
1290 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1291 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1292 icount = &info->icount;
1293 /* update input line counters */
1294 if (status & MISCSTATUS_RI_LATCHED) {
1295 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1296 usc_DisablestatusIrqs(info,SICR_RI);
1297 icount->rng++;
1298 if ( status & MISCSTATUS_RI )
1299 info->input_signal_events.ri_up++;
1300 else
1301 info->input_signal_events.ri_down++;
1302 }
1303 if (status & MISCSTATUS_DSR_LATCHED) {
1304 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1305 usc_DisablestatusIrqs(info,SICR_DSR);
1306 icount->dsr++;
1307 if ( status & MISCSTATUS_DSR )
1308 info->input_signal_events.dsr_up++;
1309 else
1310 info->input_signal_events.dsr_down++;
1311 }
1312 if (status & MISCSTATUS_DCD_LATCHED) {
1313 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1314 usc_DisablestatusIrqs(info,SICR_DCD);
1315 icount->dcd++;
1316 if (status & MISCSTATUS_DCD) {
1317 info->input_signal_events.dcd_up++;
1318 } else
1319 info->input_signal_events.dcd_down++;
1320 #if SYNCLINK_GENERIC_HDLC
1321 if (info->netcount) {
1322 if (status & MISCSTATUS_DCD)
1323 netif_carrier_on(info->netdev);
1324 else
1325 netif_carrier_off(info->netdev);
1326 }
1327 #endif
1328 }
1329 if (status & MISCSTATUS_CTS_LATCHED)
1330 {
1331 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1332 usc_DisablestatusIrqs(info,SICR_CTS);
1333 icount->cts++;
1334 if ( status & MISCSTATUS_CTS )
1335 info->input_signal_events.cts_up++;
1336 else
1337 info->input_signal_events.cts_down++;
1338 }
1339 wake_up_interruptible(&info->status_event_wait_q);
1340 wake_up_interruptible(&info->event_wait_q);
1341
1342 if (tty_port_check_carrier(&info->port) &&
1343 (status & MISCSTATUS_DCD_LATCHED) ) {
1344 if ( debug_level >= DEBUG_LEVEL_ISR )
1345 printk("%s CD now %s...", info->device_name,
1346 (status & MISCSTATUS_DCD) ? "on" : "off");
1347 if (status & MISCSTATUS_DCD)
1348 wake_up_interruptible(&info->port.open_wait);
1349 else {
1350 if ( debug_level >= DEBUG_LEVEL_ISR )
1351 printk("doing serial hangup...");
1352 if (info->port.tty)
1353 tty_hangup(info->port.tty);
1354 }
1355 }
1356
1357 if (tty_port_cts_enabled(&info->port) &&
1358 (status & MISCSTATUS_CTS_LATCHED) ) {
1359 if (info->port.tty->hw_stopped) {
1360 if (status & MISCSTATUS_CTS) {
1361 if ( debug_level >= DEBUG_LEVEL_ISR )
1362 printk("CTS tx start...");
1363 info->port.tty->hw_stopped = 0;
1364 usc_start_transmitter(info);
1365 info->pending_bh |= BH_TRANSMIT;
1366 return;
1367 }
1368 } else {
1369 if (!(status & MISCSTATUS_CTS)) {
1370 if ( debug_level >= DEBUG_LEVEL_ISR )
1371 printk("CTS tx stop...");
1372 if (info->port.tty)
1373 info->port.tty->hw_stopped = 1;
1374 usc_stop_transmitter(info);
1375 }
1376 }
1377 }
1378 }
1379
1380 info->pending_bh |= BH_STATUS;
1381
1382 /* for diagnostics set IRQ flag */
1383 if ( status & MISCSTATUS_TXC_LATCHED ){
1384 usc_OutReg( info, SICR,
1385 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1386 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1387 info->irq_occurred = true;
1388 }
1389
1390 } /* end of mgsl_isr_io_pin() */
1391
1392 /* mgsl_isr_transmit_data()
1393 *
1394 * Service a transmit data interrupt (async mode only).
1395 *
1396 * Arguments: info pointer to device instance data
1397 * Return Value: None
1398 */
1399 static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1400 {
1401 if ( debug_level >= DEBUG_LEVEL_ISR )
1402 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1403 __FILE__,__LINE__,info->xmit_cnt);
1404
1405 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1406
1407 if (info->port.tty->stopped || info->port.tty->hw_stopped) {
1408 usc_stop_transmitter(info);
1409 return;
1410 }
1411
1412 if ( info->xmit_cnt )
1413 usc_load_txfifo( info );
1414 else
1415 info->tx_active = false;
1416
1417 if (info->xmit_cnt < WAKEUP_CHARS)
1418 info->pending_bh |= BH_TRANSMIT;
1419
1420 } /* end of mgsl_isr_transmit_data() */
1421
1422 /* mgsl_isr_receive_data()
1423 *
1424 * Service a receive data interrupt. This occurs
1425 * when operating in asynchronous interrupt transfer mode.
1426 * The receive data FIFO is flushed to the receive data buffers.
1427 *
1428 * Arguments: info pointer to device instance data
1429 * Return Value: None
1430 */
1431 static void mgsl_isr_receive_data( struct mgsl_struct *info )
1432 {
1433 int Fifocount;
1434 u16 status;
1435 int work = 0;
1436 unsigned char DataByte;
1437 struct mgsl_icount *icount = &info->icount;
1438
1439 if ( debug_level >= DEBUG_LEVEL_ISR )
1440 printk("%s(%d):mgsl_isr_receive_data\n",
1441 __FILE__,__LINE__);
1442
1443 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1444
1445 /* select FIFO status for RICR readback */
1446 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1447
1448 /* clear the Wordstatus bit so that status readback */
1449 /* only reflects the status of this byte */
1450 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1451
1452 /* flush the receive FIFO */
1453
1454 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1455 int flag;
1456
1457 /* read one byte from RxFIFO */
1458 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1459 info->io_base + CCAR );
1460 DataByte = inb( info->io_base + CCAR );
1461
1462 /* get the status of the received byte */
1463 status = usc_InReg(info, RCSR);
1464 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1465 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
1466 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1467
1468 icount->rx++;
1469
1470 flag = 0;
1471 if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
1472 RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
1473 printk("rxerr=%04X\n",status);
1474 /* update error statistics */
1475 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1476 status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
1477 icount->brk++;
1478 } else if (status & RXSTATUS_PARITY_ERROR)
1479 icount->parity++;
1480 else if (status & RXSTATUS_FRAMING_ERROR)
1481 icount->frame++;
1482 else if (status & RXSTATUS_OVERRUN) {
1483 /* must issue purge fifo cmd before */
1484 /* 16C32 accepts more receive chars */
1485 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1486 icount->overrun++;
1487 }
1488
1489 /* discard char if tty control flags say so */
1490 if (status & info->ignore_status_mask)
1491 continue;
1492
1493 status &= info->read_status_mask;
1494
1495 if (status & RXSTATUS_BREAK_RECEIVED) {
1496 flag = TTY_BREAK;
1497 if (info->port.flags & ASYNC_SAK)
1498 do_SAK(info->port.tty);
1499 } else if (status & RXSTATUS_PARITY_ERROR)
1500 flag = TTY_PARITY;
1501 else if (status & RXSTATUS_FRAMING_ERROR)
1502 flag = TTY_FRAME;
1503 } /* end of if (error) */
1504 tty_insert_flip_char(&info->port, DataByte, flag);
1505 if (status & RXSTATUS_OVERRUN) {
1506 /* Overrun is special, since it's
1507 * reported immediately, and doesn't
1508 * affect the current character
1509 */
1510 work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
1511 }
1512 }
1513
1514 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1515 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1516 __FILE__,__LINE__,icount->rx,icount->brk,
1517 icount->parity,icount->frame,icount->overrun);
1518 }
1519
1520 if(work)
1521 tty_flip_buffer_push(&info->port);
1522 }
1523
1524 /* mgsl_isr_misc()
1525 *
1526 * Service a miscellaneous interrupt source.
1527 *
1528 * Arguments: info pointer to device extension (instance data)
1529 * Return Value: None
1530 */
1531 static void mgsl_isr_misc( struct mgsl_struct *info )
1532 {
1533 u16 status = usc_InReg( info, MISR );
1534
1535 if ( debug_level >= DEBUG_LEVEL_ISR )
1536 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1537 __FILE__,__LINE__,status);
1538
1539 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1540 (info->params.mode == MGSL_MODE_HDLC)) {
1541
1542 /* turn off receiver and rx DMA */
1543 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1544 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1545 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1546 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
1547 usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);
1548
1549 /* schedule BH handler to restart receiver */
1550 info->pending_bh |= BH_RECEIVE;
1551 info->rx_rcc_underrun = true;
1552 }
1553
1554 usc_ClearIrqPendingBits( info, MISC );
1555 usc_UnlatchMiscstatusBits( info, status );
1556
1557 } /* end of mgsl_isr_misc() */
1558
1559 /* mgsl_isr_null()
1560 *
1561 * Services undefined interrupt vectors from the
1562 * USC. (hence this function SHOULD never be called)
1563 *
1564 * Arguments: info pointer to device extension (instance data)
1565 * Return Value: None
1566 */
1567 static void mgsl_isr_null( struct mgsl_struct *info )
1568 {
1569
1570 } /* end of mgsl_isr_null() */
1571
1572 /* mgsl_isr_receive_dma()
1573 *
1574 * Service a receive DMA channel interrupt.
1575 * For this driver there are two sources of receive DMA interrupts
1576 * as identified in the Receive DMA mode Register (RDMR):
1577 *
1578 * BIT3 EOA/EOL End of List, all receive buffers in receive
1579 * buffer list have been filled (no more free buffers
1580 * available). The DMA controller has shut down.
1581 *
1582 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1583 * DMA buffer is terminated in response to completion
1584 * of a good frame or a frame with errors. The status
1585 * of the frame is stored in the buffer entry in the
1586 * list of receive buffer entries.
1587 *
1588 * Arguments: info pointer to device instance data
1589 * Return Value: None
1590 */
1591 static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1592 {
1593 u16 status;
1594
1595 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1596 usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );
1597
1598 /* Read the receive DMA status to identify interrupt type. */
1599 /* This also clears the status bits. */
1600 status = usc_InDmaReg( info, RDMR );
1601
1602 if ( debug_level >= DEBUG_LEVEL_ISR )
1603 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1604 __FILE__,__LINE__,info->device_name,status);
1605
1606 info->pending_bh |= BH_RECEIVE;
1607
1608 if ( status & BIT3 ) {
1609 info->rx_overflow = true;
1610 info->icount.buf_overrun++;
1611 }
1612
1613 } /* end of mgsl_isr_receive_dma() */
1614
1615 /* mgsl_isr_transmit_dma()
1616 *
1617 * This function services a transmit DMA channel interrupt.
1618 *
1619 * For this driver there is one source of transmit DMA interrupts
1620 * as identified in the Transmit DMA Mode Register (TDMR):
1621 *
1622 * BIT2 EOB End of Buffer. This interrupt occurs when a
1623 * transmit DMA buffer has been emptied.
1624 *
1625 * The driver maintains enough transmit DMA buffers to hold at least
1626 * one max frame size transmit frame. When operating in a buffered
1627 * transmit mode, there may be enough transmit DMA buffers to hold at
1628 * least two or more max frame size frames. On an EOB condition,
1629 * determine if there are any queued transmit buffers and copy into
1630 * transmit DMA buffers if we have room.
1631 *
1632 * Arguments: info pointer to device instance data
1633 * Return Value: None
1634 */
1635 static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1636 {
1637 u16 status;
1638
1639 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1640 usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );
1641
1642 /* Read the transmit DMA status to identify interrupt type. */
1643 /* This also clears the status bits. */
1644
1645 status = usc_InDmaReg( info, TDMR );
1646
1647 if ( debug_level >= DEBUG_LEVEL_ISR )
1648 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1649 __FILE__,__LINE__,info->device_name,status);
1650
1651 if ( status & BIT2 ) {
1652 --info->tx_dma_buffers_used;
1653
1654 /* if there are transmit frames queued,
1655 * try to load the next one
1656 */
1657 if ( load_next_tx_holding_buffer(info) ) {
1658 /* if call returns non-zero value, we have
1659 * at least one free tx holding buffer
1660 */
1661 info->pending_bh |= BH_TRANSMIT;
1662 }
1663 }
1664
1665 } /* end of mgsl_isr_transmit_dma() */
1666
1667 /* mgsl_interrupt()
1668 *
1669 * Interrupt service routine entry point.
1670 *
1671 * Arguments:
1672 *
1673 * irq interrupt number that caused interrupt
1674 * dev_id device ID supplied during interrupt registration
1675 *
1676 * Return Value: IRQ_HANDLED
1677 */
1678 static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
1679 {
1680 struct mgsl_struct *info = dev_id;
1681 u16 UscVector;
1682 u16 DmaVector;
1683
1684 if ( debug_level >= DEBUG_LEVEL_ISR )
1685 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
1686 __FILE__, __LINE__, info->irq_level);
1687
1688 spin_lock(&info->irq_spinlock);
1689
1690 for(;;) {
1691 /* Read the interrupt vectors from hardware. */
1692 UscVector = usc_InReg(info, IVR) >> 9;
1693 DmaVector = usc_InDmaReg(info, DIVR);
1694
1695 if ( debug_level >= DEBUG_LEVEL_ISR )
1696 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1697 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1698
1699 if ( !UscVector && !DmaVector )
1700 break;
1701
1702 /* Dispatch interrupt vector */
1703 if ( UscVector )
1704 (*UscIsrTable[UscVector])(info);
1705 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1706 mgsl_isr_transmit_dma(info);
1707 else
1708 mgsl_isr_receive_dma(info);
1709
1710 if ( info->isr_overflow ) {
1711 printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
1712 __FILE__, __LINE__, info->device_name, info->irq_level);
1713 usc_DisableMasterIrqBit(info);
1714 usc_DisableDmaInterrupts(info,DICR_MASTER);
1715 break;
1716 }
1717 }
1718
1719 /* Request bottom half processing if there's something
1720 * for it to do and the bh is not already running
1721 */
1722
1723 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1724 if ( debug_level >= DEBUG_LEVEL_ISR )
1725 printk("%s(%d):%s queueing bh task.\n",
1726 __FILE__,__LINE__,info->device_name);
1727 schedule_work(&info->task);
1728 info->bh_requested = true;
1729 }
1730
1731 spin_unlock(&info->irq_spinlock);
1732
1733 if ( debug_level >= DEBUG_LEVEL_ISR )
1734 printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
1735 __FILE__, __LINE__, info->irq_level);
1736
1737 return IRQ_HANDLED;
1738 } /* end of mgsl_interrupt() */
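
/*
 * Illustrative sketch only (not part of the interrupt logic above): a handler
 * such as mgsl_interrupt() is attached when the adapter's resources are
 * claimed, with the device instance passed as dev_id so the ISR can recover
 * its mgsl_struct. The flags used at registration are an assumption in this
 * example.
 *
 *	if (request_irq(info->irq_level, mgsl_interrupt, IRQF_SHARED,
 *			info->device_name, info))
 *		printk(KERN_ERR "can't claim irq %d\n", info->irq_level);
 */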
1739
1740 /* startup()
1741 *
1742 * Initialize and start device.
1743 *
1744 * Arguments: info pointer to device instance data
1745 * Return Value: 0 if success, otherwise error code
1746 */
1747 static int startup(struct mgsl_struct * info)
1748 {
1749 int retval = 0;
1750
1751 if ( debug_level >= DEBUG_LEVEL_INFO )
1752 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1753
1754 if (tty_port_initialized(&info->port))
1755 return 0;
1756
1757 if (!info->xmit_buf) {
1758 /* allocate a page of memory for a transmit buffer */
1759 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1760 if (!info->xmit_buf) {
1761 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1762 __FILE__,__LINE__,info->device_name);
1763 return -ENOMEM;
1764 }
1765 }
1766
1767 info->pending_bh = 0;
1768
1769 memset(&info->icount, 0, sizeof(info->icount));
1770
1771 timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);
1772
1773 /* Allocate and claim adapter resources */
1774 retval = mgsl_claim_resources(info);
1775
1776 /* perform existence check and diagnostics */
1777 if ( !retval )
1778 retval = mgsl_adapter_test(info);
1779
1780 if ( retval ) {
1781 if (capable(CAP_SYS_ADMIN) && info->port.tty)
1782 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1783 mgsl_release_resources(info);
1784 return retval;
1785 }
1786
1787 /* program hardware for current parameters */
1788 mgsl_change_params(info);
1789
1790 if (info->port.tty)
1791 clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
1792
1793 tty_port_set_initialized(&info->port, 1);
1794
1795 return 0;
1796 } /* end of startup() */
1797
1798 /* shutdown()
1799 *
1800 * Called by mgsl_close() and mgsl_hangup() to shut down the hardware
1801 *
1802 * Arguments: info pointer to device instance data
1803 * Return Value: None
1804 */
1805 static void shutdown(struct mgsl_struct * info)
1806 {
1807 unsigned long flags;
1808
1809 if (!tty_port_initialized(&info->port))
1810 return;
1811
1812 if (debug_level >= DEBUG_LEVEL_INFO)
1813 printk("%s(%d):mgsl_shutdown(%s)\n",
1814 __FILE__,__LINE__, info->device_name );
1815
1816 /* clear status wait queue because status changes */
1817 /* can't happen after shutting down the hardware */
1818 wake_up_interruptible(&info->status_event_wait_q);
1819 wake_up_interruptible(&info->event_wait_q);
1820
1821 del_timer_sync(&info->tx_timer);
1822
1823 if (info->xmit_buf) {
1824 free_page((unsigned long) info->xmit_buf);
1825 info->xmit_buf = NULL;
1826 }
1827
1828 spin_lock_irqsave(&info->irq_spinlock,flags);
1829 usc_DisableMasterIrqBit(info);
1830 usc_stop_receiver(info);
1831 usc_stop_transmitter(info);
1832 usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS |
1833 TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC );
1834 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1835
1836 /* Disable DMAEN (Port 7, Bit 14) */
1837 /* This disconnects the DMA request signal from the ISA bus */
1838 /* on the ISA adapter. This has no effect for the PCI adapter */
1839 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1840
1841 /* Disable INTEN (Port 6, Bit12) */
1842 /* This disconnects the IRQ request signal to the ISA bus */
1843 /* on the ISA adapter. This has no effect for the PCI adapter */
1844 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1845
1846 if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
1847 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1848 usc_set_serial_signals(info);
1849 }
1850
1851 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1852
1853 mgsl_release_resources(info);
1854
1855 if (info->port.tty)
1856 set_bit(TTY_IO_ERROR, &info->port.tty->flags);
1857
1858 tty_port_set_initialized(&info->port, 0);
1859 } /* end of shutdown() */
1860
1861 static void mgsl_program_hw(struct mgsl_struct *info)
1862 {
1863 unsigned long flags;
1864
1865 spin_lock_irqsave(&info->irq_spinlock,flags);
1866
1867 usc_stop_receiver(info);
1868 usc_stop_transmitter(info);
1869 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1870
1871 if (info->params.mode == MGSL_MODE_HDLC ||
1872 info->params.mode == MGSL_MODE_RAW ||
1873 info->netcount)
1874 usc_set_sync_mode(info);
1875 else
1876 usc_set_async_mode(info);
1877
1878 usc_set_serial_signals(info);
1879
1880 info->dcd_chkcount = 0;
1881 info->cts_chkcount = 0;
1882 info->ri_chkcount = 0;
1883 info->dsr_chkcount = 0;
1884
1885 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1886 usc_EnableInterrupts(info, IO_PIN);
1887 usc_get_serial_signals(info);
1888
1889 if (info->netcount || info->port.tty->termios.c_cflag & CREAD)
1890 usc_start_receiver(info);
1891
1892 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1893 }
1894
1895 /* Reconfigure adapter based on new parameters
1896 */
1897 static void mgsl_change_params(struct mgsl_struct *info)
1898 {
1899 unsigned cflag;
1900 int bits_per_char;
1901
1902 if (!info->port.tty)
1903 return;
1904
1905 if (debug_level >= DEBUG_LEVEL_INFO)
1906 printk("%s(%d):mgsl_change_params(%s)\n",
1907 __FILE__,__LINE__, info->device_name );
1908
1909 cflag = info->port.tty->termios.c_cflag;
1910
1911 /* if B0 rate (hangup) specified then negate RTS and DTR */
1912 /* otherwise assert RTS and DTR */
1913 if (cflag & CBAUD)
1914 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
1915 else
1916 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
1917
1918 /* byte size and parity */
1919
1920 switch (cflag & CSIZE) {
1921 case CS5: info->params.data_bits = 5; break;
1922 case CS6: info->params.data_bits = 6; break;
1923 case CS7: info->params.data_bits = 7; break;
1924 case CS8: info->params.data_bits = 8; break;
1925 /* Never happens, but GCC is too dumb to figure it out */
1926 default: info->params.data_bits = 7; break;
1927 }
1928
1929 if (cflag & CSTOPB)
1930 info->params.stop_bits = 2;
1931 else
1932 info->params.stop_bits = 1;
1933
1934 info->params.parity = ASYNC_PARITY_NONE;
1935 if (cflag & PARENB) {
1936 if (cflag & PARODD)
1937 info->params.parity = ASYNC_PARITY_ODD;
1938 else
1939 info->params.parity = ASYNC_PARITY_EVEN;
1940 #ifdef CMSPAR
1941 if (cflag & CMSPAR)
1942 info->params.parity = ASYNC_PARITY_SPACE;
1943 #endif
1944 }
1945
1946 /* calculate number of jiffies to transmit a full
1947 * FIFO (32 bytes) at specified data rate
1948 */
1949 bits_per_char = info->params.data_bits +
1950 info->params.stop_bits + 1;
1951
1952 /* if port data rate is set to 460800 or less then
1953 * allow tty settings to override, otherwise keep the
1954 * current data rate.
1955 */
1956 if (info->params.data_rate <= 460800)
1957 info->params.data_rate = tty_get_baud_rate(info->port.tty);
1958
1959 if ( info->params.data_rate ) {
1960 info->timeout = (32*HZ*bits_per_char) /
1961 info->params.data_rate;
1962 }
1963 info->timeout += HZ/50; /* Add .02 seconds of slop */
1964
1965 tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
1966 tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
1967
1968 /* process tty input control flags */
1969
1970 info->read_status_mask = RXSTATUS_OVERRUN;
1971 if (I_INPCK(info->port.tty))
1972 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1973 if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
1974 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
1975
1976 if (I_IGNPAR(info->port.tty))
1977 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
1978 if (I_IGNBRK(info->port.tty)) {
1979 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
1980 /* If ignoring parity and break indicators, ignore
1981 * overruns too. (For real raw support).
1982 */
1983 if (I_IGNPAR(info->port.tty))
1984 info->ignore_status_mask |= RXSTATUS_OVERRUN;
1985 }
1986
1987 mgsl_program_hw(info);
1988
1989 } /* end of mgsl_change_params() */
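
/*
 * Worked example of the timeout computed above (values are assumptions for
 * illustration only): with 8 data bits and 1 stop bit, bits_per_char is
 * 8 + 1 + 1 = 10. At data_rate = 9600 and HZ = 100, the FIFO drain time is
 * (32 * 100 * 10) / 9600 = 3 jiffies (integer division), and the HZ/50 slop
 * adds 2 more, giving info->timeout = 5 jiffies, roughly 50 ms.
 */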
1990
1991 /* mgsl_put_char()
1992 *
1993 * Add a character to the transmit buffer.
1994 *
1995 * Arguments: tty pointer to tty information structure
1996 * ch character to add to transmit buffer
1997 *
1998 * Return Value: 1 if the character was queued, 0 otherwise
1999 */
2000 static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2001 {
2002 struct mgsl_struct *info = tty->driver_data;
2003 unsigned long flags;
2004 int ret = 0;
2005
2006 if (debug_level >= DEBUG_LEVEL_INFO) {
2007 printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
2008 __FILE__, __LINE__, ch, info->device_name);
2009 }
2010
2011 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2012 return 0;
2013
2014 if (!info->xmit_buf)
2015 return 0;
2016
2017 spin_lock_irqsave(&info->irq_spinlock, flags);
2018
2019 if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
2020 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2021 info->xmit_buf[info->xmit_head++] = ch;
2022 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2023 info->xmit_cnt++;
2024 ret = 1;
2025 }
2026 }
2027 spin_unlock_irqrestore(&info->irq_spinlock, flags);
2028 return ret;
2029
2030 } /* end of mgsl_put_char() */
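
/*
 * Note on the index arithmetic above: xmit_head is wrapped with a bitwise
 * AND, which only works because SERIAL_XMIT_SIZE is a power of two. A small
 * sketch (assuming SERIAL_XMIT_SIZE == 4096 purely for illustration):
 *
 *	head = 4095;
 *	head = (head + 1) & (SERIAL_XMIT_SIZE - 1);	 result: head == 0
 *
 * so the head index wraps back to the start of xmit_buf without a modulo.
 */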
2031
2032 /* mgsl_flush_chars()
2033 *
2034 * Enable transmitter so remaining characters in the
2035 * transmit buffer are sent.
2036 *
2037 * Arguments: tty pointer to tty information structure
2038 * Return Value: None
2039 */
2040 static void mgsl_flush_chars(struct tty_struct *tty)
2041 {
2042 struct mgsl_struct *info = tty->driver_data;
2043 unsigned long flags;
2044
2045 if ( debug_level >= DEBUG_LEVEL_INFO )
2046 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2047 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2048
2049 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2050 return;
2051
2052 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2053 !info->xmit_buf)
2054 return;
2055
2056 if ( debug_level >= DEBUG_LEVEL_INFO )
2057 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2058 __FILE__,__LINE__,info->device_name );
2059
2060 spin_lock_irqsave(&info->irq_spinlock,flags);
2061
2062 if (!info->tx_active) {
2063 if ( (info->params.mode == MGSL_MODE_HDLC ||
2064 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2065 /* operating in synchronous (frame oriented) mode */
2066 /* copy data from circular xmit_buf to */
2067 /* transmit DMA buffer. */
2068 mgsl_load_tx_dma_buffer(info,
2069 info->xmit_buf,info->xmit_cnt);
2070 }
2071 usc_start_transmitter(info);
2072 }
2073
2074 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2075
2076 } /* end of mgsl_flush_chars() */
2077
2078 /* mgsl_write()
2079 *
2080 * Send a block of data
2081 *
2082 * Arguments:
2083 *
2084 * tty pointer to tty information structure
2085 * buf pointer to buffer containing send data
2086 * count size of send data in bytes
2087 *
2088 * Return Value: number of characters written
2089 */
2090 static int mgsl_write(struct tty_struct * tty,
2091 const unsigned char *buf, int count)
2092 {
2093 int c, ret = 0;
2094 struct mgsl_struct *info = tty->driver_data;
2095 unsigned long flags;
2096
2097 if ( debug_level >= DEBUG_LEVEL_INFO )
2098 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2099 __FILE__,__LINE__,info->device_name,count);
2100
2101 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2102 goto cleanup;
2103
2104 if (!info->xmit_buf)
2105 goto cleanup;
2106
2107 if ( info->params.mode == MGSL_MODE_HDLC ||
2108 info->params.mode == MGSL_MODE_RAW ) {
2109 /* operating in synchronous (frame oriented) mode */
2110 if (info->tx_active) {
2111
2112 if ( info->params.mode == MGSL_MODE_HDLC ) {
2113 ret = 0;
2114 goto cleanup;
2115 }
2116 /* transmitter is actively sending data -
2117 * if we have multiple transmit dma and
2118 * holding buffers, attempt to queue this
2119 * frame for transmission at a later time.
2120 */
2121 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2122 /* no tx holding buffers available */
2123 ret = 0;
2124 goto cleanup;
2125 }
2126
2127 /* queue transmit frame request */
2128 ret = count;
2129 save_tx_buffer_request(info,buf,count);
2130
2131 /* if we have sufficient tx dma buffers,
2132 * load the next buffered tx request
2133 */
2134 spin_lock_irqsave(&info->irq_spinlock,flags);
2135 load_next_tx_holding_buffer(info);
2136 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2137 goto cleanup;
2138 }
2139
2140 /* if operating in HDLC LoopMode and the adapter */
2141 /* has yet to be inserted into the loop, we can't */
2142 /* transmit */
2143
2144 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2145 !usc_loopmode_active(info) )
2146 {
2147 ret = 0;
2148 goto cleanup;
2149 }
2150
2151 if ( info->xmit_cnt ) {
2152 /* Send data accumulated from mgsl_put_char() calls */
2153 /* as a frame and wait before accepting more data. */
2154 ret = 0;
2155
2156 /* copy data from circular xmit_buf to */
2157 /* transmit DMA buffer. */
2158 mgsl_load_tx_dma_buffer(info,
2159 info->xmit_buf,info->xmit_cnt);
2160 if ( debug_level >= DEBUG_LEVEL_INFO )
2161 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2162 __FILE__,__LINE__,info->device_name);
2163 } else {
2164 if ( debug_level >= DEBUG_LEVEL_INFO )
2165 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2166 __FILE__,__LINE__,info->device_name);
2167 ret = count;
2168 info->xmit_cnt = count;
2169 mgsl_load_tx_dma_buffer(info,buf,count);
2170 }
2171 } else {
2172 while (1) {
2173 spin_lock_irqsave(&info->irq_spinlock,flags);
2174 c = min_t(int, count,
2175 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2176 SERIAL_XMIT_SIZE - info->xmit_head));
2177 if (c <= 0) {
2178 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2179 break;
2180 }
2181 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2182 info->xmit_head = ((info->xmit_head + c) &
2183 (SERIAL_XMIT_SIZE-1));
2184 info->xmit_cnt += c;
2185 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2186 buf += c;
2187 count -= c;
2188 ret += c;
2189 }
2190 }
2191
2192 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2193 spin_lock_irqsave(&info->irq_spinlock,flags);
2194 if (!info->tx_active)
2195 usc_start_transmitter(info);
2196 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2197 }
2198 cleanup:
2199 if ( debug_level >= DEBUG_LEVEL_INFO )
2200 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2201 __FILE__,__LINE__,info->device_name,ret);
2202
2203 return ret;
2204
2205 } /* end of mgsl_write() */
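
/*
 * Illustrative userspace sketch of the synchronous-mode contract implemented
 * above (error handling and helper name are assumptions): in HDLC mode each
 * write() carries one complete frame, and a return value of 0 means the
 * transmitter is still busy with a previous frame and the caller should wait
 * and retry.
 *
 *	int n = write(fd, frame, frame_len);
 *	if (n == 0)
 *		poll_for_write_ready_and_retry(fd);	 hypothetical helper
 */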
2206
2207 /* mgsl_write_room()
2208 *
2209 * Return the count of free bytes in transmit buffer
2210 *
2211 * Arguments: tty pointer to tty info structure
2212 * Return Value: count of free bytes in the transmit buffer
2213 */
2214 static int mgsl_write_room(struct tty_struct *tty)
2215 {
2216 struct mgsl_struct *info = tty->driver_data;
2217 int ret;
2218
2219 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2220 return 0;
2221 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2222 if (ret < 0)
2223 ret = 0;
2224
2225 if (debug_level >= DEBUG_LEVEL_INFO)
2226 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2227 __FILE__,__LINE__, info->device_name,ret );
2228
2229 if ( info->params.mode == MGSL_MODE_HDLC ||
2230 info->params.mode == MGSL_MODE_RAW ) {
2231 /* operating in synchronous (frame oriented) mode */
2232 if ( info->tx_active )
2233 return 0;
2234 else
2235 return HDLC_MAX_FRAME_SIZE;
2236 }
2237
2238 return ret;
2239
2240 } /* end of mgsl_write_room() */
2241
2242 /* mgsl_chars_in_buffer()
2243 *
2244 * Return the count of bytes in transmit buffer
2245 *
2246 * Arguments: tty pointer to tty info structure
2247 * Return Value: count of bytes in the transmit buffer
2248 */
2249 static int mgsl_chars_in_buffer(struct tty_struct *tty)
2250 {
2251 struct mgsl_struct *info = tty->driver_data;
2252
2253 if (debug_level >= DEBUG_LEVEL_INFO)
2254 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2255 __FILE__,__LINE__, info->device_name );
2256
2257 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2258 return 0;
2259
2260 if (debug_level >= DEBUG_LEVEL_INFO)
2261 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2262 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2263
2264 if ( info->params.mode == MGSL_MODE_HDLC ||
2265 info->params.mode == MGSL_MODE_RAW ) {
2266 /* operating in synchronous (frame oriented) mode */
2267 if ( info->tx_active )
2268 return info->max_frame_size;
2269 else
2270 return 0;
2271 }
2272
2273 return info->xmit_cnt;
2274 } /* end of mgsl_chars_in_buffer() */
2275
2276 /* mgsl_flush_buffer()
2277 *
2278 * Discard all data in the send buffer
2279 *
2280 * Arguments: tty pointer to tty info structure
2281 * Return Value: None
2282 */
2283 static void mgsl_flush_buffer(struct tty_struct *tty)
2284 {
2285 struct mgsl_struct *info = tty->driver_data;
2286 unsigned long flags;
2287
2288 if (debug_level >= DEBUG_LEVEL_INFO)
2289 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2290 __FILE__,__LINE__, info->device_name );
2291
2292 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2293 return;
2294
2295 spin_lock_irqsave(&info->irq_spinlock,flags);
2296 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2297 del_timer(&info->tx_timer);
2298 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2299
2300 tty_wakeup(tty);
2301 }
2302
2303 /* mgsl_send_xchar()
2304 *
2305 * Send a high-priority XON/XOFF character
2306 *
2307 * Arguments: tty pointer to tty info structure
2308 * ch character to send
2309 * Return Value: None
2310 */
2311 static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2312 {
2313 struct mgsl_struct *info = tty->driver_data;
2314 unsigned long flags;
2315
2316 if (debug_level >= DEBUG_LEVEL_INFO)
2317 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2318 __FILE__,__LINE__, info->device_name, ch );
2319
2320 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2321 return;
2322
2323 info->x_char = ch;
2324 if (ch) {
2325 /* Make sure transmit interrupts are on */
2326 spin_lock_irqsave(&info->irq_spinlock,flags);
2327 if (!info->tx_enabled)
2328 usc_start_transmitter(info);
2329 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2330 }
2331 } /* end of mgsl_send_xchar() */
2332
2333 /* mgsl_throttle()
2334 *
2335 * Signal remote device to throttle send data (our receive data)
2336 *
2337 * Arguments: tty pointer to tty info structure
2338 * Return Value: None
2339 */
2340 static void mgsl_throttle(struct tty_struct * tty)
2341 {
2342 struct mgsl_struct *info = tty->driver_data;
2343 unsigned long flags;
2344
2345 if (debug_level >= DEBUG_LEVEL_INFO)
2346 printk("%s(%d):mgsl_throttle(%s) entry\n",
2347 __FILE__,__LINE__, info->device_name );
2348
2349 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2350 return;
2351
2352 if (I_IXOFF(tty))
2353 mgsl_send_xchar(tty, STOP_CHAR(tty));
2354
2355 if (C_CRTSCTS(tty)) {
2356 spin_lock_irqsave(&info->irq_spinlock,flags);
2357 info->serial_signals &= ~SerialSignal_RTS;
2358 usc_set_serial_signals(info);
2359 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2360 }
2361 } /* end of mgsl_throttle() */
2362
2363 /* mgsl_unthrottle()
2364 *
2365 * Signal remote device to stop throttling send data (our receive data)
2366 *
2367 * Arguments: tty pointer to tty info structure
2368 * Return Value: None
2369 */
2370 static void mgsl_unthrottle(struct tty_struct * tty)
2371 {
2372 struct mgsl_struct *info = tty->driver_data;
2373 unsigned long flags;
2374
2375 if (debug_level >= DEBUG_LEVEL_INFO)
2376 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2377 __FILE__,__LINE__, info->device_name );
2378
2379 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2380 return;
2381
2382 if (I_IXOFF(tty)) {
2383 if (info->x_char)
2384 info->x_char = 0;
2385 else
2386 mgsl_send_xchar(tty, START_CHAR(tty));
2387 }
2388
2389 if (C_CRTSCTS(tty)) {
2390 spin_lock_irqsave(&info->irq_spinlock,flags);
2391 info->serial_signals |= SerialSignal_RTS;
2392 usc_set_serial_signals(info);
2393 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2394 }
2395
2396 } /* end of mgsl_unthrottle() */
2397
2398 /* mgsl_get_stats()
2399 *
2400 * get the current statistics (error and line event counters)
2401 *
2402 * Arguments: info pointer to device instance data
2403 * user_icount pointer to buffer to hold returned stats
2404 *
2405 * Return Value: 0 if success, otherwise error code
2406 */
2407 static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2408 {
2409 int err;
2410
2411 if (debug_level >= DEBUG_LEVEL_INFO)
2412 printk("%s(%d):mgsl_get_params(%s)\n",
2413 __FILE__,__LINE__, info->device_name);
2414
2415 if (!user_icount) {
2416 memset(&info->icount, 0, sizeof(info->icount));
2417 } else {
2418 mutex_lock(&info->port.mutex);
2419 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
2420 mutex_unlock(&info->port.mutex);
2421 if (err)
2422 return -EFAULT;
2423 }
2424
2425 return 0;
2426
2427 } /* end of mgsl_get_stats() */
2428
2429 /* mgsl_get_params()
2430 *
2431 * get the current serial parameters information
2432 *
2433 * Arguments: info pointer to device instance data
2434 * user_params pointer to buffer to hold returned params
2435 *
2436 * Return Value: 0 if success, otherwise error code
2437 */
2438 static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2439 {
2440 int err;
2441 if (debug_level >= DEBUG_LEVEL_INFO)
2442 printk("%s(%d):mgsl_get_params(%s)\n",
2443 __FILE__,__LINE__, info->device_name);
2444
2445 mutex_lock(&info->port.mutex);
2446 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2447 mutex_unlock(&info->port.mutex);
2448 if (err) {
2449 if ( debug_level >= DEBUG_LEVEL_INFO )
2450 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2451 __FILE__,__LINE__,info->device_name);
2452 return -EFAULT;
2453 }
2454
2455 return 0;
2456
2457 } /* end of mgsl_get_params() */
2458
2459 /* mgsl_set_params()
2460 *
2461 * set the serial parameters
2462 *
2463 * Arguments:
2464 *
2465 * info pointer to device instance data
2466 * new_params user buffer containing new serial params
2467 *
2468 * Return Value: 0 if success, otherwise error code
2469 */
2470 static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2471 {
2472 unsigned long flags;
2473 MGSL_PARAMS tmp_params;
2474 int err;
2475
2476 if (debug_level >= DEBUG_LEVEL_INFO)
2477 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2478 info->device_name );
2479 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2480 if (err) {
2481 if ( debug_level >= DEBUG_LEVEL_INFO )
2482 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2483 __FILE__,__LINE__,info->device_name);
2484 return -EFAULT;
2485 }
2486
2487 mutex_lock(&info->port.mutex);
2488 spin_lock_irqsave(&info->irq_spinlock,flags);
2489 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2490 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2491
2492 mgsl_change_params(info);
2493 mutex_unlock(&info->port.mutex);
2494
2495 return 0;
2496
2497 } /* end of mgsl_set_params() */
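
/*
 * Minimal userspace sketch of the get/set parameter ioctls serviced by
 * mgsl_get_params() and mgsl_set_params() above (error handling omitted; the
 * open file descriptor fd is assumed):
 *
 *	MGSL_PARAMS params;
 *
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);	 read current parameters
 *	params.mode = MGSL_MODE_HDLC;		 modify as needed
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);	 apply; triggers mgsl_change_params()
 */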
2498
2499 /* mgsl_get_txidle()
2500 *
2501 * get the current transmit idle mode
2502 *
2503 * Arguments: info pointer to device instance data
2504 * idle_mode pointer to buffer to hold returned idle mode
2505 *
2506 * Return Value: 0 if success, otherwise error code
2507 */
2508 static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2509 {
2510 int err;
2511
2512 if (debug_level >= DEBUG_LEVEL_INFO)
2513 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2514 __FILE__,__LINE__, info->device_name, info->idle_mode);
2515
2516 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2517 if (err) {
2518 if ( debug_level >= DEBUG_LEVEL_INFO )
2519 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2520 __FILE__,__LINE__,info->device_name);
2521 return -EFAULT;
2522 }
2523
2524 return 0;
2525
2526 } /* end of mgsl_get_txidle() */
2527
2528 /* mgsl_set_txidle() service ioctl to set transmit idle mode
2529 *
2530 * Arguments: info pointer to device instance data
2531 * idle_mode new idle mode
2532 *
2533 * Return Value: 0 if success, otherwise error code
2534 */
2535 static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2536 {
2537 unsigned long flags;
2538
2539 if (debug_level >= DEBUG_LEVEL_INFO)
2540 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2541 info->device_name, idle_mode );
2542
2543 spin_lock_irqsave(&info->irq_spinlock,flags);
2544 info->idle_mode = idle_mode;
2545 usc_set_txidle( info );
2546 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2547 return 0;
2548
2549 } /* end of mgsl_set_txidle() */
2550
2551 /* mgsl_txenable()
2552 *
2553 * enable or disable the transmitter
2554 *
2555 * Arguments:
2556 *
2557 * info pointer to device instance data
2558 * enable 1 = enable, 0 = disable
2559 *
2560 * Return Value: 0 if success, otherwise error code
2561 */
2562 static int mgsl_txenable(struct mgsl_struct * info, int enable)
2563 {
2564 unsigned long flags;
2565
2566 if (debug_level >= DEBUG_LEVEL_INFO)
2567 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2568 info->device_name, enable);
2569
2570 spin_lock_irqsave(&info->irq_spinlock,flags);
2571 if ( enable ) {
2572 if ( !info->tx_enabled ) {
2573
2574 usc_start_transmitter(info);
2575 /*--------------------------------------------------
2576 * if HDLC/SDLC Loop mode, attempt to insert the
2577 * station in the 'loop' by setting CMR:13. Upon
2578 * receipt of the next GoAhead (RxAbort) sequence,
2579 * the OnLoop indicator (CCSR:7) should go active
2580 * to indicate that we are on the loop
2581 *--------------------------------------------------*/
2582 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2583 usc_loopmode_insert_request( info );
2584 }
2585 } else {
2586 if ( info->tx_enabled )
2587 usc_stop_transmitter(info);
2588 }
2589 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2590 return 0;
2591
2592 } /* end of mgsl_txenable() */
2593
2594 /* mgsl_txabort() abort transmission of the current HDLC frame
2595 *
2596 * Arguments: info pointer to device instance data
2597 * Return Value: 0 if success, otherwise error code
2598 */
2599 static int mgsl_txabort(struct mgsl_struct * info)
2600 {
2601 unsigned long flags;
2602
2603 if (debug_level >= DEBUG_LEVEL_INFO)
2604 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2605 info->device_name);
2606
2607 spin_lock_irqsave(&info->irq_spinlock,flags);
2608 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2609 {
2610 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2611 usc_loopmode_cancel_transmit( info );
2612 else
2613 usc_TCmd(info,TCmd_SendAbort);
2614 }
2615 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2616 return 0;
2617
2618 } /* end of mgsl_txabort() */
2619
2620 /* mgsl_rxenable() enable or disable the receiver
2621 *
2622 * Arguments: info pointer to device instance data
2623 * enable 1 = enable, 0 = disable
2624 * Return Value: 0 if success, otherwise error code
2625 */
2626 static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2627 {
2628 unsigned long flags;
2629
2630 if (debug_level >= DEBUG_LEVEL_INFO)
2631 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2632 info->device_name, enable);
2633
2634 spin_lock_irqsave(&info->irq_spinlock,flags);
2635 if ( enable ) {
2636 if ( !info->rx_enabled )
2637 usc_start_receiver(info);
2638 } else {
2639 if ( info->rx_enabled )
2640 usc_stop_receiver(info);
2641 }
2642 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2643 return 0;
2644
2645 } /* end of mgsl_rxenable() */
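
/*
 * Illustrative userspace sketch for the enable ioctls above (fd is an assumed
 * open descriptor; the argument is passed by value, 1 = enable, 0 = disable):
 *
 *	ioctl(fd, MGSL_IOCRXENABLE, 1);		 start the receiver
 *	ioctl(fd, MGSL_IOCTXENABLE, 1);		 start the transmitter
 */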
2646
2647 /* mgsl_wait_event() wait for specified event to occur
2648 *
2649 * Arguments: info pointer to device instance data
2650 * mask pointer to bitmask of events to wait for
2651 * Return Value: 0 if successful, with the bit mask updated to
2652 * the set of events that triggered,
2653 * otherwise error code
2654 */
2655 static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2656 {
2657 unsigned long flags;
2658 int s;
2659 int rc=0;
2660 struct mgsl_icount cprev, cnow;
2661 int events;
2662 int mask;
2663 struct _input_signal_events oldsigs, newsigs;
2664 DECLARE_WAITQUEUE(wait, current);
2665
2666 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2667 if (rc) {
2668 return -EFAULT;
2669 }
2670
2671 if (debug_level >= DEBUG_LEVEL_INFO)
2672 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2673 info->device_name, mask);
2674
2675 spin_lock_irqsave(&info->irq_spinlock,flags);
2676
2677 /* return immediately if state matches requested events */
2678 usc_get_serial_signals(info);
2679 s = info->serial_signals;
2680 events = mask &
2681 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2682 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2683 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2684 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2685 if (events) {
2686 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2687 goto exit;
2688 }
2689
2690 /* save current irq counts */
2691 cprev = info->icount;
2692 oldsigs = info->input_signal_events;
2693
2694 /* enable hunt and idle irqs if needed */
2695 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2696 u16 oldreg = usc_InReg(info,RICR);
2697 u16 newreg = oldreg +
2698 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2699 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2700 if (oldreg != newreg)
2701 usc_OutReg(info, RICR, newreg);
2702 }
2703
2704 set_current_state(TASK_INTERRUPTIBLE);
2705 add_wait_queue(&info->event_wait_q, &wait);
2706
2707 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2708
2709
2710 for(;;) {
2711 schedule();
2712 if (signal_pending(current)) {
2713 rc = -ERESTARTSYS;
2714 break;
2715 }
2716
2717 /* get current irq counts */
2718 spin_lock_irqsave(&info->irq_spinlock,flags);
2719 cnow = info->icount;
2720 newsigs = info->input_signal_events;
2721 set_current_state(TASK_INTERRUPTIBLE);
2722 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2723
2724 /* if no change, wait aborted for some reason */
2725 if (newsigs.dsr_up == oldsigs.dsr_up &&
2726 newsigs.dsr_down == oldsigs.dsr_down &&
2727 newsigs.dcd_up == oldsigs.dcd_up &&
2728 newsigs.dcd_down == oldsigs.dcd_down &&
2729 newsigs.cts_up == oldsigs.cts_up &&
2730 newsigs.cts_down == oldsigs.cts_down &&
2731 newsigs.ri_up == oldsigs.ri_up &&
2732 newsigs.ri_down == oldsigs.ri_down &&
2733 cnow.exithunt == cprev.exithunt &&
2734 cnow.rxidle == cprev.rxidle) {
2735 rc = -EIO;
2736 break;
2737 }
2738
2739 events = mask &
2740 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2741 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2742 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2743 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2744 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2745 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2746 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2747 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2748 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2749 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2750 if (events)
2751 break;
2752
2753 cprev = cnow;
2754 oldsigs = newsigs;
2755 }
2756
2757 remove_wait_queue(&info->event_wait_q, &wait);
2758 set_current_state(TASK_RUNNING);
2759
2760 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2761 spin_lock_irqsave(&info->irq_spinlock,flags);
2762 if (!waitqueue_active(&info->event_wait_q)) {
2763 /* disable exit hunt mode/idle received IRQs */
2764 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2765 ~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED));
2766 }
2767 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2768 }
2769 exit:
2770 if ( rc == 0 )
2771 PUT_USER(rc, events, mask_ptr);
2772
2773 return rc;
2774
2775 } /* end of mgsl_wait_event() */
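
/*
 * Illustrative userspace sketch for MGSL_IOCWAITEVENT (fd is an assumed open
 * descriptor). The mask is passed by pointer; on success it is overwritten
 * with the subset of events that actually occurred:
 *
 *	int events = MgslEvent_DcdActive | MgslEvent_DcdInactive;
 *
 *	if (ioctl(fd, MGSL_IOCWAITEVENT, &events) == 0)
 *		handle_dcd_change(events);	 hypothetical handler
 */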
2776
2777 static int modem_input_wait(struct mgsl_struct *info,int arg)
2778 {
2779 unsigned long flags;
2780 int rc;
2781 struct mgsl_icount cprev, cnow;
2782 DECLARE_WAITQUEUE(wait, current);
2783
2784 /* save current irq counts */
2785 spin_lock_irqsave(&info->irq_spinlock,flags);
2786 cprev = info->icount;
2787 add_wait_queue(&info->status_event_wait_q, &wait);
2788 set_current_state(TASK_INTERRUPTIBLE);
2789 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2790
2791 for(;;) {
2792 schedule();
2793 if (signal_pending(current)) {
2794 rc = -ERESTARTSYS;
2795 break;
2796 }
2797
2798 /* get new irq counts */
2799 spin_lock_irqsave(&info->irq_spinlock,flags);
2800 cnow = info->icount;
2801 set_current_state(TASK_INTERRUPTIBLE);
2802 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2803
2804 /* if no change, wait aborted for some reason */
2805 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2806 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2807 rc = -EIO;
2808 break;
2809 }
2810
2811 /* check for change in caller specified modem input */
2812 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2813 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2814 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2815 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2816 rc = 0;
2817 break;
2818 }
2819
2820 cprev = cnow;
2821 }
2822 remove_wait_queue(&info->status_event_wait_q, &wait);
2823 set_current_state(TASK_RUNNING);
2824 return rc;
2825 }
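
/*
 * Illustrative userspace sketch for TIOCMIWAIT, which is routed to
 * modem_input_wait() above. The wait mask is passed by value (fd is an
 * assumed open descriptor):
 *
 *	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) == 0)
 *		;	 DCD or CTS changed state
 */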
2826
2827 /* return the state of the serial control and status signals
2828 */
2829 static int tiocmget(struct tty_struct *tty)
2830 {
2831 struct mgsl_struct *info = tty->driver_data;
2832 unsigned int result;
2833 unsigned long flags;
2834
2835 spin_lock_irqsave(&info->irq_spinlock,flags);
2836 usc_get_serial_signals(info);
2837 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2838
2839 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2840 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2841 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2842 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2843 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2844 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2845
2846 if (debug_level >= DEBUG_LEVEL_INFO)
2847 printk("%s(%d):%s tiocmget() value=%08X\n",
2848 __FILE__,__LINE__, info->device_name, result );
2849 return result;
2850 }
2851
2852 /* set modem control signals (DTR/RTS)
2853 */
2854 static int tiocmset(struct tty_struct *tty,
2855 unsigned int set, unsigned int clear)
2856 {
2857 struct mgsl_struct *info = tty->driver_data;
2858 unsigned long flags;
2859
2860 if (debug_level >= DEBUG_LEVEL_INFO)
2861 printk("%s(%d):%s tiocmset(%x,%x)\n",
2862 __FILE__,__LINE__,info->device_name, set, clear);
2863
2864 if (set & TIOCM_RTS)
2865 info->serial_signals |= SerialSignal_RTS;
2866 if (set & TIOCM_DTR)
2867 info->serial_signals |= SerialSignal_DTR;
2868 if (clear & TIOCM_RTS)
2869 info->serial_signals &= ~SerialSignal_RTS;
2870 if (clear & TIOCM_DTR)
2871 info->serial_signals &= ~SerialSignal_DTR;
2872
2873 spin_lock_irqsave(&info->irq_spinlock,flags);
2874 usc_set_serial_signals(info);
2875 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2876
2877 return 0;
2878 }
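
/*
 * Illustrative userspace sketch for the modem control ioctls backed by
 * tiocmget() and tiocmset() above (fd is an assumed open descriptor):
 *
 *	int bits;
 *
 *	ioctl(fd, TIOCMGET, &bits);		 read DTR, RTS, DCD, RI, DSR, CTS
 *	bits = TIOCM_RTS;
 *	ioctl(fd, TIOCMBIS, &bits);		 assert RTS, leave other signals alone
 */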
2879
2880 /* mgsl_break() Set or clear transmit break condition
2881 *
2882 * Arguments: tty pointer to tty instance data
2883 * break_state -1=set break condition, 0=clear
2884 * Return Value: 0 if success, otherwise error code
2885 */
2886 static int mgsl_break(struct tty_struct *tty, int break_state)
2887 {
2888 struct mgsl_struct * info = tty->driver_data;
2889 unsigned long flags;
2890
2891 if (debug_level >= DEBUG_LEVEL_INFO)
2892 printk("%s(%d):mgsl_break(%s,%d)\n",
2893 __FILE__,__LINE__, info->device_name, break_state);
2894
2895 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2896 return -EINVAL;
2897
2898 spin_lock_irqsave(&info->irq_spinlock,flags);
2899 if (break_state == -1)
2900 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2901 else
2902 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2903 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2904 return 0;
2905
2906 } /* end of mgsl_break() */
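
/*
 * Illustrative userspace sketch: the tty core invokes mgsl_break() above with
 * -1 and then 0 when a break is requested through the standard termios call
 * (fd is an assumed open descriptor):
 *
 *	tcsendbreak(fd, 0);	 transmit a break for the default duration
 */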
2907
2908 /*
2909 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
2910 * Return: writes the counters into the caller-supplied counter struct
2911 * NB: both 1->0 and 0->1 transitions are counted except for
2912 * RI where only 0->1 is counted.
2913 */
2914 static int msgl_get_icount(struct tty_struct *tty,
2915 struct serial_icounter_struct *icount)
2916
2917 {
2918 struct mgsl_struct * info = tty->driver_data;
2919 struct mgsl_icount cnow; /* kernel counter temps */
2920 unsigned long flags;
2921
2922 spin_lock_irqsave(&info->irq_spinlock,flags);
2923 cnow = info->icount;
2924 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2925
2926 icount->cts = cnow.cts;
2927 icount->dsr = cnow.dsr;
2928 icount->rng = cnow.rng;
2929 icount->dcd = cnow.dcd;
2930 icount->rx = cnow.rx;
2931 icount->tx = cnow.tx;
2932 icount->frame = cnow.frame;
2933 icount->overrun = cnow.overrun;
2934 icount->parity = cnow.parity;
2935 icount->brk = cnow.brk;
2936 icount->buf_overrun = cnow.buf_overrun;
2937 return 0;
2938 }
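
/*
 * Illustrative userspace sketch for TIOCGICOUNT, which returns the counters
 * gathered above (fd is an assumed open descriptor):
 *
 *	struct serial_icounter_struct icnt;
 *
 *	if (ioctl(fd, TIOCGICOUNT, &icnt) == 0)
 *		printf("dcd=%d cts=%d\n", icnt.dcd, icnt.cts);
 */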
2939
2940 /* mgsl_ioctl() Service an IOCTL request
2941 *
2942 * Arguments:
2943 *
2944 * tty pointer to tty instance data
2945 * cmd IOCTL command code
2946 * arg command argument/context
2947 *
2948 * Return Value: 0 if success, otherwise error code
2949 */
2950 static int mgsl_ioctl(struct tty_struct *tty,
2951 unsigned int cmd, unsigned long arg)
2952 {
2953 struct mgsl_struct * info = tty->driver_data;
2954
2955 if (debug_level >= DEBUG_LEVEL_INFO)
2956 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2957 info->device_name, cmd );
2958
2959 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2960 return -ENODEV;
2961
2962 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2963 (cmd != TIOCMIWAIT)) {
2964 if (tty_io_error(tty))
2965 return -EIO;
2966 }
2967
2968 return mgsl_ioctl_common(info, cmd, arg);
2969 }
2970
2971 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2972 {
2973 void __user *argp = (void __user *)arg;
2974
2975 switch (cmd) {
2976 case MGSL_IOCGPARAMS:
2977 return mgsl_get_params(info, argp);
2978 case MGSL_IOCSPARAMS:
2979 return mgsl_set_params(info, argp);
2980 case MGSL_IOCGTXIDLE:
2981 return mgsl_get_txidle(info, argp);
2982 case MGSL_IOCSTXIDLE:
2983 return mgsl_set_txidle(info,(int)arg);
2984 case MGSL_IOCTXENABLE:
2985 return mgsl_txenable(info,(int)arg);
2986 case MGSL_IOCRXENABLE:
2987 return mgsl_rxenable(info,(int)arg);
2988 case MGSL_IOCTXABORT:
2989 return mgsl_txabort(info);
2990 case MGSL_IOCGSTATS:
2991 return mgsl_get_stats(info, argp);
2992 case MGSL_IOCWAITEVENT:
2993 return mgsl_wait_event(info, argp);
2994 case MGSL_IOCLOOPTXDONE:
2995 return mgsl_loopmode_send_done(info);
2996 /* Wait for modem input (DCD,RI,DSR,CTS) change
2997 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
2998 */
2999 case TIOCMIWAIT:
3000 return modem_input_wait(info,(int)arg);
3001
3002 default:
3003 return -ENOIOCTLCMD;
3004 }
3005 return 0;
3006 }
3007
3008 /* mgsl_set_termios()
3009 *
3010 * Set new termios settings
3011 *
3012 * Arguments:
3013 *
3014 * tty pointer to tty structure
3015 * old_termios pointer to the previous termios settings
3016 *
3017 * Return Value: None
3018 */
3019 static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
3020 {
3021 struct mgsl_struct *info = tty->driver_data;
3022 unsigned long flags;
3023
3024 if (debug_level >= DEBUG_LEVEL_INFO)
3025 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3026 tty->driver->name );
3027
3028 mgsl_change_params(info);
3029
3030 /* Handle transition to B0 status */
3031 if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
3032 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3033 spin_lock_irqsave(&info->irq_spinlock,flags);
3034 usc_set_serial_signals(info);
3035 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3036 }
3037
3038 /* Handle transition away from B0 status */
3039 if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
3040 info->serial_signals |= SerialSignal_DTR;
3041 if (!C_CRTSCTS(tty) || !tty_throttled(tty))
3042 info->serial_signals |= SerialSignal_RTS;
3043 spin_lock_irqsave(&info->irq_spinlock,flags);
3044 usc_set_serial_signals(info);
3045 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3046 }
3047
3048 /* Handle turning off CRTSCTS */
3049 if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
3050 tty->hw_stopped = 0;
3051 mgsl_start(tty);
3052 }
3053
3054 } /* end of mgsl_set_termios() */
3055
3056 /* mgsl_close()
3057 *
3058 * Called when port is closed. Wait for remaining data to be
3059 * sent. Disable port and free resources.
3060 *
3061 * Arguments:
3062 *
3063 * tty pointer to open tty structure
3064 * filp pointer to open file object
3065 *
3066 * Return Value: None
3067 */
3068 static void mgsl_close(struct tty_struct *tty, struct file * filp)
3069 {
3070 struct mgsl_struct * info = tty->driver_data;
3071
3072 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3073 return;
3074
3075 if (debug_level >= DEBUG_LEVEL_INFO)
3076 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3077 __FILE__,__LINE__, info->device_name, info->port.count);
3078
3079 if (tty_port_close_start(&info->port, tty, filp) == 0)
3080 goto cleanup;
3081
3082 mutex_lock(&info->port.mutex);
3083 if (tty_port_initialized(&info->port))
3084 mgsl_wait_until_sent(tty, info->timeout);
3085 mgsl_flush_buffer(tty);
3086 tty_ldisc_flush(tty);
3087 shutdown(info);
3088 mutex_unlock(&info->port.mutex);
3089
3090 tty_port_close_end(&info->port, tty);
3091 info->port.tty = NULL;
3092 cleanup:
3093 if (debug_level >= DEBUG_LEVEL_INFO)
3094 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3095 tty->driver->name, info->port.count);
3096
3097 } /* end of mgsl_close() */
3098
3099 /* mgsl_wait_until_sent()
3100 *
3101 * Wait until the transmitter is empty.
3102 *
3103 * Arguments:
3104 *
3105 * tty pointer to tty info structure
3106 * timeout time to wait for send completion
3107 *
3108 * Return Value: None
3109 */
3110 static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3111 {
3112 struct mgsl_struct * info = tty->driver_data;
3113 unsigned long orig_jiffies, char_time;
3114
3115 if (!info )
3116 return;
3117
3118 if (debug_level >= DEBUG_LEVEL_INFO)
3119 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3120 __FILE__,__LINE__, info->device_name );
3121
3122 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3123 return;
3124
3125 if (!tty_port_initialized(&info->port))
3126 goto exit;
3127
3128 orig_jiffies = jiffies;
3129
3130 /* Set check interval to 1/5 of estimated time to
3131 * send a character, and make it at least 1. The check
3132 * interval should also be less than the timeout.
3133 * Note: use tight timings here to satisfy the NIST-PCTS.
3134 */
3135
3136 if ( info->params.data_rate ) {
3137 char_time = info->timeout/(32 * 5);
3138 if (!char_time)
3139 char_time++;
3140 } else
3141 char_time = 1;
3142
3143 if (timeout)
3144 char_time = min_t(unsigned long, char_time, timeout);
3145
3146 if ( info->params.mode == MGSL_MODE_HDLC ||
3147 info->params.mode == MGSL_MODE_RAW ) {
3148 while (info->tx_active) {
3149 msleep_interruptible(jiffies_to_msecs(char_time));
3150 if (signal_pending(current))
3151 break;
3152 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3153 break;
3154 }
3155 } else {
3156 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3157 info->tx_enabled) {
3158 msleep_interruptible(jiffies_to_msecs(char_time));
3159 if (signal_pending(current))
3160 break;
3161 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3162 break;
3163 }
3164 }
3165
3166 exit:
3167 if (debug_level >= DEBUG_LEVEL_INFO)
3168 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3169 __FILE__,__LINE__, info->device_name );
3170
3171 } /* end of mgsl_wait_until_sent() */
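
/*
 * Worked example of the polling interval computed above (values are
 * assumptions for illustration): with info->timeout = 5 jiffies, char_time is
 * 5 / (32 * 5) = 0 by integer division, so the guard bumps it to 1 jiffy,
 * which is the minimum interval the loops above sleep between checks.
 */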
3172
3173 /* mgsl_hangup()
3174 *
3175 * Called by tty_hangup() when a hangup is signaled.
3176 * This is the same as closing all open files for the port.
3177 *
3178 * Arguments: tty pointer to associated tty object
3179 * Return Value: None
3180 */
3181 static void mgsl_hangup(struct tty_struct *tty)
3182 {
3183 struct mgsl_struct * info = tty->driver_data;
3184
3185 if (debug_level >= DEBUG_LEVEL_INFO)
3186 printk("%s(%d):mgsl_hangup(%s)\n",
3187 __FILE__,__LINE__, info->device_name );
3188
3189 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3190 return;
3191
3192 mgsl_flush_buffer(tty);
3193 shutdown(info);
3194
3195 info->port.count = 0;
3196 tty_port_set_active(&info->port, 0);
3197 info->port.tty = NULL;
3198
3199 wake_up_interruptible(&info->port.open_wait);
3200
3201 } /* end of mgsl_hangup() */
3202
3203 /*
3204 * carrier_raised()
3205 *
3206 * Return true if carrier is raised
3207 */
3208
3209 static int carrier_raised(struct tty_port *port)
3210 {
3211 unsigned long flags;
3212 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3213
3214 spin_lock_irqsave(&info->irq_spinlock, flags);
3215 usc_get_serial_signals(info);
3216 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3217 return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
3218 }
3219
3220 static void dtr_rts(struct tty_port *port, int on)
3221 {
3222 struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
3223 unsigned long flags;
3224
3225 spin_lock_irqsave(&info->irq_spinlock,flags);
3226 if (on)
3227 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
3228 else
3229 info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
3230 usc_set_serial_signals(info);
3231 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3232 }
3233
3234
3235 /* block_til_ready()
3236 *
3237 * Block the current process until the specified port
3238 * is ready to be opened.
3239 *
3240 * Arguments:
3241 *
3242 * tty pointer to tty info structure
3243 * filp pointer to open file object
3244 * info pointer to device instance data
3245 *
3246 * Return Value: 0 if success, otherwise error code
3247 */
3248 static int block_til_ready(struct tty_struct *tty, struct file * filp,
3249 struct mgsl_struct *info)
3250 {
3251 DECLARE_WAITQUEUE(wait, current);
3252 int retval;
3253 bool do_clocal = false;
3254 unsigned long flags;
3255 int dcd;
3256 struct tty_port *port = &info->port;
3257
3258 if (debug_level >= DEBUG_LEVEL_INFO)
3259 printk("%s(%d):block_til_ready on %s\n",
3260 __FILE__,__LINE__, tty->driver->name );
3261
3262 if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
3263 /* nonblock mode is set or port is not enabled */
3264 tty_port_set_active(port, 1);
3265 return 0;
3266 }
3267
3268 if (C_CLOCAL(tty))
3269 do_clocal = true;
3270
3271 /* Wait for carrier detect and the line to become
3272 * free (i.e., not in use by the callout). While we are in
3273 * this loop, port->count is dropped by one, so that
3274 * mgsl_close() knows when to free things. We restore it upon
3275 * exit, either normal or abnormal.
3276 */
3277
3278 retval = 0;
3279 add_wait_queue(&port->open_wait, &wait);
3280
3281 if (debug_level >= DEBUG_LEVEL_INFO)
3282 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3283 __FILE__,__LINE__, tty->driver->name, port->count );
3284
3285 spin_lock_irqsave(&info->irq_spinlock, flags);
3286 port->count--;
3287 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3288 port->blocked_open++;
3289
3290 while (1) {
3291 if (C_BAUD(tty) && tty_port_initialized(port))
3292 tty_port_raise_dtr_rts(port);
3293
3294 set_current_state(TASK_INTERRUPTIBLE);
3295
3296 if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
3297 retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3298 -EAGAIN : -ERESTARTSYS;
3299 break;
3300 }
3301
3302 dcd = tty_port_carrier_raised(&info->port);
3303 if (do_clocal || dcd)
3304 break;
3305
3306 if (signal_pending(current)) {
3307 retval = -ERESTARTSYS;
3308 break;
3309 }
3310
3311 if (debug_level >= DEBUG_LEVEL_INFO)
3312 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3313 __FILE__,__LINE__, tty->driver->name, port->count );
3314
3315 tty_unlock(tty);
3316 schedule();
3317 tty_lock(tty);
3318 }
3319
3320 set_current_state(TASK_RUNNING);
3321 remove_wait_queue(&port->open_wait, &wait);
3322
3323 /* FIXME: Racy on hangup during close wait */
3324 if (!tty_hung_up_p(filp))
3325 port->count++;
3326 port->blocked_open--;
3327
3328 if (debug_level >= DEBUG_LEVEL_INFO)
3329 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3330 __FILE__,__LINE__, tty->driver->name, port->count );
3331
3332 if (!retval)
3333 tty_port_set_active(port, 1);
3334
3335 return retval;
3336
3337 } /* end of block_til_ready() */
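
/*
 * Illustrative userspace sketch of the non-blocking path handled above: an
 * open with O_NONBLOCK returns immediately instead of waiting for carrier
 * detect (the device node name is an assumption for the example):
 *
 *	int fd = open("/dev/ttySL0", O_RDWR | O_NONBLOCK | O_NOCTTY);
 */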
3338
3339 static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty)
3340 {
3341 struct mgsl_struct *info;
3342 int line = tty->index;
3343
3344 /* verify range of specified line number */
3345 if (line >= mgsl_device_count) {
3346 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3347 __FILE__, __LINE__, line);
3348 return -ENODEV;
3349 }
3350
3351 /* find the info structure for the specified line */
3352 info = mgsl_device_list;
3353 while (info && info->line != line)
3354 info = info->next_device;
3355 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3356 return -ENODEV;
3357 tty->driver_data = info;
3358
3359 return tty_port_install(&info->port, driver, tty);
3360 }
3361
3362 /* mgsl_open()
3363 *
3364 * Called when a port is opened. Init and enable port.
3365 * Perform serial-specific initialization for the tty structure.
3366 *
3367 * Arguments: tty pointer to tty info structure
3368 * filp associated file pointer
3369 *
3370 * Return Value: 0 if success, otherwise error code
3371 */
3372 static int mgsl_open(struct tty_struct *tty, struct file * filp)
3373 {
3374 struct mgsl_struct *info = tty->driver_data;
3375 unsigned long flags;
3376 int retval;
3377
3378 info->port.tty = tty;
3379
3380 if (debug_level >= DEBUG_LEVEL_INFO)
3381 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3382 __FILE__,__LINE__,tty->driver->name, info->port.count);
3383
3384 info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3385
3386 spin_lock_irqsave(&info->netlock, flags);
3387 if (info->netcount) {
3388 retval = -EBUSY;
3389 spin_unlock_irqrestore(&info->netlock, flags);
3390 goto cleanup;
3391 }
3392 info->port.count++;
3393 spin_unlock_irqrestore(&info->netlock, flags);
3394
3395 if (info->port.count == 1) {
3396 /* 1st open on this device, init hardware */
3397 retval = startup(info);
3398 if (retval < 0)
3399 goto cleanup;
3400 }
3401
3402 retval = block_til_ready(tty, filp, info);
3403 if (retval) {
3404 if (debug_level >= DEBUG_LEVEL_INFO)
3405 printk("%s(%d):block_til_ready(%s) returned %d\n",
3406 __FILE__,__LINE__, info->device_name, retval);
3407 goto cleanup;
3408 }
3409
3410 if (debug_level >= DEBUG_LEVEL_INFO)
3411 printk("%s(%d):mgsl_open(%s) success\n",
3412 __FILE__,__LINE__, info->device_name);
3413 retval = 0;
3414
3415 cleanup:
3416 if (retval) {
3417 if (tty->count == 1)
3418 info->port.tty = NULL; /* tty layer will release tty struct */
3419 if(info->port.count)
3420 info->port.count--;
3421 }
3422
3423 return retval;
3424
3425 } /* end of mgsl_open() */
3426
3427 /*
3428 * /proc fs routines....
3429 */
3430
3431 static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
3432 {
3433 char stat_buf[30];
3434 unsigned long flags;
3435
3436 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3437 seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3438 info->device_name, info->io_base, info->irq_level,
3439 info->phys_memory_base, info->phys_lcr_base);
3440 } else {
3441 seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
3442 info->device_name, info->io_base,
3443 info->irq_level, info->dma_level);
3444 }
3445
3446 /* output current serial signal states */
3447 spin_lock_irqsave(&info->irq_spinlock,flags);
3448 usc_get_serial_signals(info);
3449 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3450
3451 stat_buf[0] = 0;
3452 stat_buf[1] = 0;
3453 if (info->serial_signals & SerialSignal_RTS)
3454 strcat(stat_buf, "|RTS");
3455 if (info->serial_signals & SerialSignal_CTS)
3456 strcat(stat_buf, "|CTS");
3457 if (info->serial_signals & SerialSignal_DTR)
3458 strcat(stat_buf, "|DTR");
3459 if (info->serial_signals & SerialSignal_DSR)
3460 strcat(stat_buf, "|DSR");
3461 if (info->serial_signals & SerialSignal_DCD)
3462 strcat(stat_buf, "|CD");
3463 if (info->serial_signals & SerialSignal_RI)
3464 strcat(stat_buf, "|RI");
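
	/* Note: each detected signal is appended as "|NAME", so stat_buf
	 * begins with a '|' separator whenever any signal is active. The
	 * summary printed below uses stat_buf+1 to skip that leading '|';
	 * if no signals are active, stat_buf+1 is still a valid empty
	 * string because both stat_buf[0] and stat_buf[1] were cleared.
	 */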
3465
3466 if (info->params.mode == MGSL_MODE_HDLC ||
3467 info->params.mode == MGSL_MODE_RAW ) {
3468 seq_printf(m, " HDLC txok:%d rxok:%d",
3469 info->icount.txok, info->icount.rxok);
3470 if (info->icount.txunder)
3471 seq_printf(m, " txunder:%d", info->icount.txunder);
3472 if (info->icount.txabort)
3473 seq_printf(m, " txabort:%d", info->icount.txabort);
3474 if (info->icount.rxshort)
3475 seq_printf(m, " rxshort:%d", info->icount.rxshort);
3476 if (info->icount.rxlong)
3477 seq_printf(m, " rxlong:%d", info->icount.rxlong);
3478 if (info->icount.rxover)
3479 seq_printf(m, " rxover:%d", info->icount.rxover);
3480 if (info->icount.rxcrc)
3481 seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
3482 } else {
3483 seq_printf(m, " ASYNC tx:%d rx:%d",
3484 info->icount.tx, info->icount.rx);
3485 if (info->icount.frame)
3486 seq_printf(m, " fe:%d", info->icount.frame);
3487 if (info->icount.parity)
3488 seq_printf(m, " pe:%d", info->icount.parity);
3489 if (info->icount.brk)
3490 seq_printf(m, " brk:%d", info->icount.brk);
3491 if (info->icount.overrun)
3492 seq_printf(m, " oe:%d", info->icount.overrun);
3493 }
3494
3495 /* Append serial signal status to end */
3496 seq_printf(m, " %s\n", stat_buf+1);
3497
3498 seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3499 info->tx_active,info->bh_requested,info->bh_running,
3500 info->pending_bh);
3501
3502 spin_lock_irqsave(&info->irq_spinlock,flags);
3503 {
3504 u16 Tcsr = usc_InReg( info, TCSR );
3505 u16 Tdmr = usc_InDmaReg( info, TDMR );
3506 u16 Ticr = usc_InReg( info, TICR );
3507 u16 Rscr = usc_InReg( info, RCSR );
3508 u16 Rdmr = usc_InDmaReg( info, RDMR );
3509 u16 Ricr = usc_InReg( info, RICR );
3510 u16 Icr = usc_InReg( info, ICR );
3511 u16 Dccr = usc_InReg( info, DCCR );
3512 u16 Tmr = usc_InReg( info, TMR );
3513 u16 Tccr = usc_InReg( info, TCCR );
3514 u16 Ccar = inw( info->io_base + CCAR );
3515 seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3516 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3517 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3518 }
3519 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3520 }
3521
3522 /* Called to print information about devices */
static int mgsl_proc_show(struct seq_file *m, void *v)
3524 {
3525 struct mgsl_struct *info;
3526
3527 seq_printf(m, "synclink driver:%s\n", driver_version);
3528
3529 info = mgsl_device_list;
3530 while( info ) {
3531 line_info(m, info);
3532 info = info->next_device;
3533 }
3534 return 0;
3535 }
3536
3537 /* mgsl_allocate_dma_buffers()
3538 *
3539 * Allocate and format DMA buffers (ISA adapter)
3540 * or format shared memory buffers (PCI adapter).
3541 *
3542 * Arguments: info pointer to device instance data
3543 * Return Value: 0 if success, otherwise error
3544 */
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3546 {
3547 unsigned short BuffersPerFrame;
3548
3549 info->last_mem_alloc = 0;
3550
3551 /* Calculate the number of DMA buffers necessary to hold the */
3552 /* largest allowable frame size. Note: If the max frame size is */
3553 /* not an even multiple of the DMA buffer size then we need to */
3554 /* round the buffer count per frame up one. */
3555
3556 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3557 if ( info->max_frame_size % DMABUFFERSIZE )
3558 BuffersPerFrame++;
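
	/* Worked example (assuming DMABUFFERSIZE is 4096, i.e. one page on
	 * most systems): a 4096 byte max_frame_size needs one buffer per
	 * frame, while a 4097 byte max_frame_size needs two, because any
	 * remainder forces the per-frame buffer count to round up.
	 */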
3559
3560 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3561 /*
3562 * The PCI adapter has 256KBytes of shared memory to use.
3563 * This is 64 PAGE_SIZE buffers.
3564 *
3565 * The first page is used for padding at this time so the
3566 * buffer list does not begin at offset 0 of the PCI
3567 * adapter's shared memory.
3568 *
3569 * The 2nd page is used for the buffer list. A 4K buffer
3570 * list can hold 128 DMA_BUFFER structures at 32 bytes
3571 * each.
3572 *
3573 * This leaves 62 4K pages.
3574 *
3575 * The next N pages are used for transmit frame(s). We
3576 * reserve enough 4K page blocks to hold the required
3577 * number of transmit dma buffers (num_tx_dma_buffers),
3578 * each of MaxFrameSize size.
3579 *
3580 * Of the remaining pages (62-N), determine how many can
3581 * be used to receive full MaxFrameSize inbound frames
3582 */
3583 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3584 info->rx_buffer_count = 62 - info->tx_buffer_count;
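		/* Example: with the driver default of num_tx_dma_buffers = 1
		 * and a single DMA buffer per frame, one page is reserved for
		 * transmit and the remaining 61 pages hold receive buffers.
		 */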
3585 } else {
3586 /* Calculate the number of PAGE_SIZE buffers needed for */
3587 /* receive and transmit DMA buffers. */
3588
3589
3590 /* Calculate the number of DMA buffers necessary to */
3591 /* hold 7 max size receive frames and one max size transmit frame. */
3592 /* The receive buffer count is bumped by one so we avoid an */
3593 /* End of List condition if all receive buffers are used when */
3594 /* using linked list DMA buffers. */
3595
3596 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3597 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3598
3599 /*
3600 * limit total TxBuffers & RxBuffers to 62 4K total
3601 * (ala PCI Allocation)
3602 */
3603
3604 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3605 info->rx_buffer_count = 62 - info->tx_buffer_count;
3606
3607 }
3608
3609 if ( debug_level >= DEBUG_LEVEL_INFO )
3610 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3611 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3612
3613 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3614 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3615 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3616 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3617 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3618 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3619 return -ENOMEM;
3620 }
3621
3622 mgsl_reset_rx_dma_buffers( info );
3623 mgsl_reset_tx_dma_buffers( info );
3624
3625 return 0;
3626
3627 } /* end of mgsl_allocate_dma_buffers() */
3628
3629 /*
3630 * mgsl_alloc_buffer_list_memory()
3631 *
3632 * Allocate a common DMA buffer for use as the
3633 * receive and transmit buffer lists.
3634 *
3635 * A buffer list is a set of buffer entries where each entry contains
3636 * a pointer to an actual buffer and a pointer to the next buffer entry
3637 * (plus some other info about the buffer).
3638 *
3639 * The buffer entries for a list are built to form a circular list so
3640 * that when the entire list has been traversed you start back at the
3641 * beginning.
3642 *
3643 * This function allocates memory for just the buffer entries.
3644 * The links (pointer to next entry) are filled in with the physical
3645 * address of the next entry so the adapter can navigate the list
3646 * using bus master DMA. The pointers to the actual buffers are filled
3647 * out later when the actual buffers are allocated.
3648 *
3649 * Arguments: info pointer to device instance data
3650 * Return Value: 0 if success, otherwise error
3651 */
static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3653 {
3654 unsigned int i;
3655
3656 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3657 /* PCI adapter uses shared memory. */
3658 info->buffer_list = info->memory_base + info->last_mem_alloc;
3659 info->buffer_list_phys = info->last_mem_alloc;
3660 info->last_mem_alloc += BUFFERLISTSIZE;
3661 } else {
3662 /* ISA adapter uses system memory. */
3663 /* The buffer lists are allocated as a common buffer that both */
3664 /* the processor and adapter can access. This allows the driver to */
3665 /* inspect portions of the buffer while other portions are being */
3666 /* updated by the adapter using Bus Master DMA. */
3667
3668 info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
3669 if (info->buffer_list == NULL)
3670 return -ENOMEM;
3671 info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
3672 }
3673
3674 /* We got the memory for the buffer entry lists. */
3675 /* Initialize the memory block to all zeros. */
3676 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3677
3678 /* Save virtual address pointers to the receive and */
3679 /* transmit buffer lists. (Receive 1st). These pointers will */
3680 /* be used by the processor to access the lists. */
3681 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3682 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3683 info->tx_buffer_list += info->rx_buffer_count;
3684
3685 /*
3686 * Build the links for the buffer entry lists such that
3687 * two circular lists are built. (Transmit and Receive).
3688 *
3689 * Note: the links are physical addresses
3690 * which are read by the adapter to determine the next
3691 * buffer entry to use.
3692 */
3693
3694 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3695 /* calculate and store physical address of this buffer entry */
3696 info->rx_buffer_list[i].phys_entry =
3697 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3698
3699 /* calculate and store physical address of */
		/* next entry in circular list of entries */
3701
3702 info->rx_buffer_list[i].link = info->buffer_list_phys;
3703
3704 if ( i < info->rx_buffer_count - 1 )
3705 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3706 }
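
	/* Example of the resulting receive list for rx_buffer_count = 3:
	 * entry 0 links to entry 1, entry 1 links to entry 2, and entry 2
	 * keeps the default link back to entry 0, closing the circle.
	 * The transmit list below is built the same way, offset by
	 * rx_buffer_count entries.
	 */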
3707
3708 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3709 /* calculate and store physical address of this buffer entry */
3710 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3711 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3712
3713 /* calculate and store physical address of */
		/* next entry in circular list of entries */
3715
3716 info->tx_buffer_list[i].link = info->buffer_list_phys +
3717 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3718
3719 if ( i < info->tx_buffer_count - 1 )
3720 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3721 }
3722
3723 return 0;
3724
3725 } /* end of mgsl_alloc_buffer_list_memory() */
3726
3727 /* Free DMA buffers allocated for use as the
3728 * receive and transmit buffer lists.
3729 * Warning:
3730 *
3731 * The data transfer buffers associated with the buffer list
3732 * MUST be freed before freeing the buffer list itself because
3733 * the buffer list contains the information necessary to free
3734 * the individual buffers!
3735 */
static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3737 {
3738 if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
3739 dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
3740
3741 info->buffer_list = NULL;
3742 info->rx_buffer_list = NULL;
3743 info->tx_buffer_list = NULL;
3744
3745 } /* end of mgsl_free_buffer_list_memory() */
3746
3747 /*
3748 * mgsl_alloc_frame_memory()
3749 *
3750 * Allocate the frame DMA buffers used by the specified buffer list.
3751 * Each DMA buffer will be one memory page in size. This is necessary
 * because memory can fragment enough that it may be impossible
 * to allocate contiguous pages.
3754 *
3755 * Arguments:
3756 *
3757 * info pointer to device instance data
3758 * BufferList pointer to list of buffer entries
3759 * Buffercount count of buffer entries in buffer list
3760 *
3761 * Return Value: 0 if success, otherwise -ENOMEM
3762 */
static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3764 {
3765 int i;
3766 u32 phys_addr;
3767
3768 /* Allocate page sized buffers for the receive buffer list */
3769
3770 for ( i = 0; i < Buffercount; i++ ) {
3771 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3772 /* PCI adapter uses shared memory buffers. */
3773 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3774 phys_addr = info->last_mem_alloc;
3775 info->last_mem_alloc += DMABUFFERSIZE;
3776 } else {
3777 /* ISA adapter uses system memory. */
3778 BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
3779 if (BufferList[i].virt_addr == NULL)
3780 return -ENOMEM;
3781 phys_addr = (u32)(BufferList[i].dma_addr);
3782 }
3783 BufferList[i].phys_addr = phys_addr;
3784 }
3785
3786 return 0;
3787
3788 } /* end of mgsl_alloc_frame_memory() */
3789
3790 /*
3791 * mgsl_free_frame_memory()
3792 *
3793 * Free the buffers associated with
3794 * each buffer entry of a buffer list.
3795 *
3796 * Arguments:
3797 *
3798 * info pointer to device instance data
3799 * BufferList pointer to list of buffer entries
3800 * Buffercount count of buffer entries in buffer list
3801 *
3802 * Return Value: None
3803 */
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3805 {
3806 int i;
3807
3808 if ( BufferList ) {
3809 for ( i = 0 ; i < Buffercount ; i++ ) {
3810 if ( BufferList[i].virt_addr ) {
3811 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3812 dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
3813 BufferList[i].virt_addr = NULL;
3814 }
3815 }
3816 }
3817
3818 } /* end of mgsl_free_frame_memory() */
3819
3820 /* mgsl_free_dma_buffers()
3821 *
3822 * Free DMA buffers
3823 *
3824 * Arguments: info pointer to device instance data
3825 * Return Value: None
3826 */
static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3828 {
3829 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3830 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3831 mgsl_free_buffer_list_memory( info );
3832
3833 } /* end of mgsl_free_dma_buffers() */
3834
3835
3836 /*
3837 * mgsl_alloc_intermediate_rxbuffer_memory()
3838 *
3839 * Allocate a buffer large enough to hold max_frame_size. This buffer
3840 * is used to pass an assembled frame to the line discipline.
3841 *
3842 * Arguments:
3843 *
3844 * info pointer to device instance data
3845 *
3846 * Return Value: 0 if success, otherwise -ENOMEM
3847 */
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3849 {
3850 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3851 if ( info->intermediate_rxbuffer == NULL )
3852 return -ENOMEM;
3853 /* unused flag buffer to satisfy receive_buf calling interface */
3854 info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
3855 if (!info->flag_buf) {
3856 kfree(info->intermediate_rxbuffer);
3857 info->intermediate_rxbuffer = NULL;
3858 return -ENOMEM;
3859 }
3860 return 0;
3861
3862 } /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
3863
3864 /*
3865 * mgsl_free_intermediate_rxbuffer_memory()
3866 *
3867 *
3868 * Arguments:
3869 *
3870 * info pointer to device instance data
3871 *
3872 * Return Value: None
3873 */
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3875 {
3876 kfree(info->intermediate_rxbuffer);
3877 info->intermediate_rxbuffer = NULL;
3878 kfree(info->flag_buf);
3879 info->flag_buf = NULL;
3880
3881 } /* end of mgsl_free_intermediate_rxbuffer_memory() */
3882
3883 /*
3884 * mgsl_alloc_intermediate_txbuffer_memory()
3885 *
 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
 * These buffers hold transmit frames until there is sufficient space to load
 * them into the adapter's dma transfer buffers.
3889 *
3890 * Arguments:
3891 *
3892 * info pointer to device instance data
3893 *
3894 * Return Value: 0 if success, otherwise -ENOMEM
3895 */
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
3897 {
3898 int i;
3899
3900 if ( debug_level >= DEBUG_LEVEL_INFO )
3901 printk("%s %s(%d) allocating %d tx holding buffers\n",
3902 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
3903
3904 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
3905
3906 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
3907 info->tx_holding_buffers[i].buffer =
3908 kmalloc(info->max_frame_size, GFP_KERNEL);
3909 if (info->tx_holding_buffers[i].buffer == NULL) {
3910 for (--i; i >= 0; i--) {
3911 kfree(info->tx_holding_buffers[i].buffer);
3912 info->tx_holding_buffers[i].buffer = NULL;
3913 }
3914 return -ENOMEM;
3915 }
3916 }
3917
3918 return 0;
3919
3920 } /* end of mgsl_alloc_intermediate_txbuffer_memory() */
3921
3922 /*
3923 * mgsl_free_intermediate_txbuffer_memory()
3924 *
3925 *
3926 * Arguments:
3927 *
3928 * info pointer to device instance data
3929 *
3930 * Return Value: None
3931 */
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
3933 {
3934 int i;
3935
3936 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
3937 kfree(info->tx_holding_buffers[i].buffer);
3938 info->tx_holding_buffers[i].buffer = NULL;
3939 }
3940
3941 info->get_tx_holding_index = 0;
3942 info->put_tx_holding_index = 0;
3943 info->tx_holding_count = 0;
3944
3945 } /* end of mgsl_free_intermediate_txbuffer_memory() */
3946
3947
3948 /*
3949 * load_next_tx_holding_buffer()
3950 *
3951 * attempts to load the next buffered tx request into the
3952 * tx dma buffers
3953 *
3954 * Arguments:
3955 *
3956 * info pointer to device instance data
3957 *
3958 * Return Value: true if next buffered tx request loaded
3959 * into adapter's tx dma buffer,
3960 * false otherwise
3961 */
static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
3963 {
3964 bool ret = false;
3965
3966 if ( info->tx_holding_count ) {
3967 /* determine if we have enough tx dma buffers
3968 * to accommodate the next tx frame
3969 */
3970 struct tx_holding_buffer *ptx =
3971 &info->tx_holding_buffers[info->get_tx_holding_index];
3972 int num_free = num_free_tx_dma_buffers(info);
3973 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
3974 if ( ptx->buffer_size % DMABUFFERSIZE )
3975 ++num_needed;
3976
3977 if (num_needed <= num_free) {
3978 info->xmit_cnt = ptx->buffer_size;
3979 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
3980
3981 --info->tx_holding_count;
3982 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
3983 info->get_tx_holding_index=0;
3984
3985 /* restart transmit timer */
3986 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
3987
3988 ret = true;
3989 }
3990 }
3991
3992 return ret;
3993 }
3994
3995 /*
3996 * save_tx_buffer_request()
3997 *
3998 * attempt to store transmit frame request for later transmission
3999 *
4000 * Arguments:
4001 *
4002 * info pointer to device instance data
4003 * Buffer pointer to buffer containing frame to load
4004 * BufferSize size in bytes of frame in Buffer
4005 *
4006 * Return Value: 1 if able to store, 0 otherwise
4007 */
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4009 {
4010 struct tx_holding_buffer *ptx;
4011
4012 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4013 return 0; /* all buffers in use */
4014 }
4015
4016 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4017 ptx->buffer_size = BufferSize;
4018 memcpy( ptx->buffer, Buffer, BufferSize);
4019
4020 ++info->tx_holding_count;
4021 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4022 info->put_tx_holding_index=0;
4023
4024 return 1;
4025 }
4026
static int mgsl_claim_resources(struct mgsl_struct *info)
4028 {
4029 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4030 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4031 __FILE__,__LINE__,info->device_name, info->io_base);
4032 return -ENODEV;
4033 }
4034 info->io_addr_requested = true;
4035
4036 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4037 info->device_name, info ) < 0 ) {
4038 printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
4039 __FILE__,__LINE__,info->device_name, info->irq_level );
4040 goto errout;
4041 }
4042 info->irq_requested = true;
4043
4044 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4045 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4046 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4047 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4048 goto errout;
4049 }
4050 info->shared_mem_requested = true;
4051 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4052 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4053 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4054 goto errout;
4055 }
4056 info->lcr_mem_requested = true;
4057
4058 info->memory_base = ioremap_nocache(info->phys_memory_base,
4059 0x40000);
4060 if (!info->memory_base) {
4061 printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
4062 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4063 goto errout;
4064 }
4065
4066 if ( !mgsl_memory_test(info) ) {
4067 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4068 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4069 goto errout;
4070 }
4071
4072 info->lcr_base = ioremap_nocache(info->phys_lcr_base,
4073 PAGE_SIZE);
4074 if (!info->lcr_base) {
4075 printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
4076 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4077 goto errout;
4078 }
4079 info->lcr_base += info->lcr_offset;
4080
4081 } else {
4082 /* claim DMA channel */
4083
4084 if (request_dma(info->dma_level,info->device_name) < 0){
4085 printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
4086 __FILE__,__LINE__,info->device_name, info->dma_level );
4087 goto errout;
4088 }
4089 info->dma_requested = true;
4090
4091 /* ISA adapter uses bus master DMA */
4092 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4093 enable_dma(info->dma_level);
4094 }
4095
4096 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4097 printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
4098 __FILE__,__LINE__,info->device_name, info->dma_level );
4099 goto errout;
4100 }
4101
4102 return 0;
4103 errout:
4104 mgsl_release_resources(info);
4105 return -ENODEV;
4106
4107 } /* end of mgsl_claim_resources() */
4108
static void mgsl_release_resources(struct mgsl_struct *info)
4110 {
4111 if ( debug_level >= DEBUG_LEVEL_INFO )
4112 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4113 __FILE__,__LINE__,info->device_name );
4114
4115 if ( info->irq_requested ) {
4116 free_irq(info->irq_level, info);
4117 info->irq_requested = false;
4118 }
4119 if ( info->dma_requested ) {
4120 disable_dma(info->dma_level);
4121 free_dma(info->dma_level);
4122 info->dma_requested = false;
4123 }
4124 mgsl_free_dma_buffers(info);
4125 mgsl_free_intermediate_rxbuffer_memory(info);
4126 mgsl_free_intermediate_txbuffer_memory(info);
4127
4128 if ( info->io_addr_requested ) {
4129 release_region(info->io_base,info->io_addr_size);
4130 info->io_addr_requested = false;
4131 }
4132 if ( info->shared_mem_requested ) {
4133 release_mem_region(info->phys_memory_base,0x40000);
4134 info->shared_mem_requested = false;
4135 }
4136 if ( info->lcr_mem_requested ) {
4137 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4138 info->lcr_mem_requested = false;
4139 }
4140 if (info->memory_base){
4141 iounmap(info->memory_base);
4142 info->memory_base = NULL;
4143 }
4144 if (info->lcr_base){
4145 iounmap(info->lcr_base - info->lcr_offset);
4146 info->lcr_base = NULL;
4147 }
4148
4149 if ( debug_level >= DEBUG_LEVEL_INFO )
4150 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4151 __FILE__,__LINE__,info->device_name );
4152
4153 } /* end of mgsl_release_resources() */
4154
4155 /* mgsl_add_device()
4156 *
4157 * Add the specified device instance data structure to the
4158 * global linked list of devices and increment the device count.
4159 *
4160 * Arguments: info pointer to device instance data
4161 * Return Value: None
4162 */
static void mgsl_add_device( struct mgsl_struct *info )
4164 {
4165 info->next_device = NULL;
4166 info->line = mgsl_device_count;
4167 sprintf(info->device_name,"ttySL%d",info->line);
4168
4169 if (info->line < MAX_TOTAL_DEVICES) {
4170 if (maxframe[info->line])
4171 info->max_frame_size = maxframe[info->line];
4172
4173 if (txdmabufs[info->line]) {
4174 info->num_tx_dma_buffers = txdmabufs[info->line];
4175 if (info->num_tx_dma_buffers < 1)
4176 info->num_tx_dma_buffers = 1;
4177 }
4178
4179 if (txholdbufs[info->line]) {
4180 info->num_tx_holding_buffers = txholdbufs[info->line];
4181 if (info->num_tx_holding_buffers < 1)
4182 info->num_tx_holding_buffers = 1;
4183 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4184 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4185 }
4186 }
4187
4188 mgsl_device_count++;
4189
4190 if ( !mgsl_device_list )
4191 mgsl_device_list = info;
4192 else {
4193 struct mgsl_struct *current_dev = mgsl_device_list;
4194 while( current_dev->next_device )
4195 current_dev = current_dev->next_device;
4196 current_dev->next_device = info;
4197 }
4198
4199 if ( info->max_frame_size < 4096 )
4200 info->max_frame_size = 4096;
4201 else if ( info->max_frame_size > 65535 )
4202 info->max_frame_size = 65535;
4203
4204 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4205 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4206 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4207 info->phys_memory_base, info->phys_lcr_base,
4208 info->max_frame_size );
4209 } else {
4210 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4211 info->device_name, info->io_base, info->irq_level, info->dma_level,
4212 info->max_frame_size );
4213 }
4214
4215 #if SYNCLINK_GENERIC_HDLC
4216 hdlcdev_init(info);
4217 #endif
4218
4219 } /* end of mgsl_add_device() */
4220
4221 static const struct tty_port_operations mgsl_port_ops = {
4222 .carrier_raised = carrier_raised,
4223 .dtr_rts = dtr_rts,
4224 };
4225
4226
4227 /* mgsl_allocate_device()
4228 *
4229 * Allocate and initialize a device instance structure
4230 *
4231 * Arguments: none
4232 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4233 */
static struct mgsl_struct* mgsl_allocate_device(void)
4235 {
4236 struct mgsl_struct *info;
4237
4238 info = kzalloc(sizeof(struct mgsl_struct),
4239 GFP_KERNEL);
4240
4241 if (!info) {
4242 printk("Error can't allocate device instance data\n");
4243 } else {
4244 tty_port_init(&info->port);
4245 info->port.ops = &mgsl_port_ops;
4246 info->magic = MGSL_MAGIC;
4247 INIT_WORK(&info->task, mgsl_bh_handler);
4248 info->max_frame_size = 4096;
4249 info->port.close_delay = 5*HZ/10;
4250 info->port.closing_wait = 30*HZ;
4251 init_waitqueue_head(&info->status_event_wait_q);
4252 init_waitqueue_head(&info->event_wait_q);
4253 spin_lock_init(&info->irq_spinlock);
4254 spin_lock_init(&info->netlock);
4255 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4256 info->idle_mode = HDLC_TXIDLE_FLAGS;
4257 info->num_tx_dma_buffers = 1;
4258 info->num_tx_holding_buffers = 0;
4259 }
4260
4261 return info;
4262
4263 } /* end of mgsl_allocate_device()*/
4264
4265 static const struct tty_operations mgsl_ops = {
4266 .install = mgsl_install,
4267 .open = mgsl_open,
4268 .close = mgsl_close,
4269 .write = mgsl_write,
4270 .put_char = mgsl_put_char,
4271 .flush_chars = mgsl_flush_chars,
4272 .write_room = mgsl_write_room,
4273 .chars_in_buffer = mgsl_chars_in_buffer,
4274 .flush_buffer = mgsl_flush_buffer,
4275 .ioctl = mgsl_ioctl,
4276 .throttle = mgsl_throttle,
4277 .unthrottle = mgsl_unthrottle,
4278 .send_xchar = mgsl_send_xchar,
4279 .break_ctl = mgsl_break,
4280 .wait_until_sent = mgsl_wait_until_sent,
4281 .set_termios = mgsl_set_termios,
4282 .stop = mgsl_stop,
4283 .start = mgsl_start,
4284 .hangup = mgsl_hangup,
4285 .tiocmget = tiocmget,
4286 .tiocmset = tiocmset,
4287 .get_icount = msgl_get_icount,
4288 .proc_show = mgsl_proc_show,
4289 };
4290
4291 /*
4292 * perform tty device initialization
4293 */
static int mgsl_init_tty(void)
4295 {
4296 int rc;
4297
4298 serial_driver = alloc_tty_driver(128);
4299 if (!serial_driver)
4300 return -ENOMEM;
4301
4302 serial_driver->driver_name = "synclink";
4303 serial_driver->name = "ttySL";
4304 serial_driver->major = ttymajor;
4305 serial_driver->minor_start = 64;
4306 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4307 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4308 serial_driver->init_termios = tty_std_termios;
4309 serial_driver->init_termios.c_cflag =
4310 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4311 serial_driver->init_termios.c_ispeed = 9600;
4312 serial_driver->init_termios.c_ospeed = 9600;
4313 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4314 tty_set_operations(serial_driver, &mgsl_ops);
4315 if ((rc = tty_register_driver(serial_driver)) < 0) {
4316 printk("%s(%d):Couldn't register serial driver\n",
4317 __FILE__,__LINE__);
4318 put_tty_driver(serial_driver);
4319 serial_driver = NULL;
4320 return rc;
4321 }
4322
4323 printk("%s %s, tty major#%d\n",
4324 driver_name, driver_version,
4325 serial_driver->major);
4326 return 0;
4327 }
4328
4329 /* enumerate user specified ISA adapters
4330 */
static void mgsl_enum_isa_devices(void)
4332 {
4333 struct mgsl_struct *info;
4334 int i;
4335
4336 /* Check for user specified ISA devices */
4337
4338 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4339 if ( debug_level >= DEBUG_LEVEL_INFO )
4340 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4341 io[i], irq[i], dma[i] );
4342
4343 info = mgsl_allocate_device();
4344 if ( !info ) {
4345 /* error allocating device instance data */
4346 if ( debug_level >= DEBUG_LEVEL_ERROR )
4347 printk( "can't allocate device instance data.\n");
4348 continue;
4349 }
4350
4351 /* Copy user configuration info to device instance data */
4352 info->io_base = (unsigned int)io[i];
4353 info->irq_level = (unsigned int)irq[i];
4354 info->irq_level = irq_canonicalize(info->irq_level);
4355 info->dma_level = (unsigned int)dma[i];
4356 info->bus_type = MGSL_BUS_TYPE_ISA;
4357 info->io_addr_size = 16;
4358 info->irq_flags = 0;
4359
4360 mgsl_add_device( info );
4361 }
4362 }
4363
static void synclink_cleanup(void)
4365 {
4366 int rc;
4367 struct mgsl_struct *info;
4368 struct mgsl_struct *tmp;
4369
4370 printk("Unloading %s: %s\n", driver_name, driver_version);
4371
4372 if (serial_driver) {
4373 rc = tty_unregister_driver(serial_driver);
4374 if (rc)
4375 printk("%s(%d) failed to unregister tty driver err=%d\n",
4376 __FILE__,__LINE__,rc);
4377 put_tty_driver(serial_driver);
4378 }
4379
4380 info = mgsl_device_list;
4381 while(info) {
4382 #if SYNCLINK_GENERIC_HDLC
4383 hdlcdev_exit(info);
4384 #endif
4385 mgsl_release_resources(info);
4386 tmp = info;
4387 info = info->next_device;
4388 tty_port_destroy(&tmp->port);
4389 kfree(tmp);
4390 }
4391
4392 if (pci_registered)
4393 pci_unregister_driver(&synclink_pci_driver);
4394 }
4395
static int __init synclink_init(void)
4397 {
4398 int rc;
4399
4400 if (break_on_load) {
4401 mgsl_get_text_ptr();
4402 BREAKPOINT();
4403 }
4404
4405 printk("%s %s\n", driver_name, driver_version);
4406
4407 mgsl_enum_isa_devices();
4408 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4409 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4410 else
4411 pci_registered = true;
4412
4413 if ((rc = mgsl_init_tty()) < 0)
4414 goto error;
4415
4416 return 0;
4417
4418 error:
4419 synclink_cleanup();
4420 return rc;
4421 }
4422
static void __exit synclink_exit(void)
4424 {
4425 synclink_cleanup();
4426 }
4427
4428 module_init(synclink_init);
4429 module_exit(synclink_exit);
4430
4431 /*
4432 * usc_RTCmd()
4433 *
4434 * Issue a USC Receive/Transmit command to the
4435 * Channel Command/Address Register (CCAR).
4436 *
4437 * Notes:
4438 *
4439 * The command is encoded in the most significant 5 bits <15..11>
4440 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4441 * and Bits <6..0> must be written as zeros.
4442 *
4443 * Arguments:
4444 *
4445 * info pointer to device information structure
4446 * Cmd command mask (use symbolic macros)
4447 *
4448 * Return Value:
4449 *
4450 * None
4451 */
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4453 {
4454 /* output command to CCAR in bits <15..11> */
4455 /* preserve bits <10..7>, bits <6..0> must be zero */
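	/* For example, when internal loopback is active, usc_enable_loopback()
	 * sets info->loopback_bits to 0x0300, so the value written here is
	 * Cmd + 0x0300, keeping the loopback selection in bits <10..7>.
	 */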
4456
4457 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4458
4459 /* Read to flush write to CCAR */
4460 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4461 inw( info->io_base + CCAR );
4462
4463 } /* end of usc_RTCmd() */
4464
4465 /*
4466 * usc_DmaCmd()
4467 *
4468 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4469 *
4470 * Arguments:
4471 *
4472 * info pointer to device information structure
4473 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4474 *
4475 * Return Value:
4476 *
4477 * None
4478 */
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4480 {
4481 /* write command mask to DCAR */
4482 outw( Cmd + info->mbre_bit, info->io_base );
4483
4484 /* Read to flush write to DCAR */
4485 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4486 inw( info->io_base );
4487
4488 } /* end of usc_DmaCmd() */
4489
4490 /*
4491 * usc_OutDmaReg()
4492 *
4493 * Write a 16-bit value to a USC DMA register
4494 *
4495 * Arguments:
4496 *
4497 * info pointer to device info structure
4498 * RegAddr register address (number) for write
4499 * RegValue 16-bit value to write to register
4500 *
4501 * Return Value:
4502 *
4503 * None
4504 *
4505 */
static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4507 {
4508 /* Note: The DCAR is located at the adapter base address */
4509 /* Note: must preserve state of BIT8 in DCAR */
4510
4511 outw( RegAddr + info->mbre_bit, info->io_base );
4512 outw( RegValue, info->io_base );
4513
4514 /* Read to flush write to DCAR */
4515 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4516 inw( info->io_base );
4517
4518 } /* end of usc_OutDmaReg() */
4519
4520 /*
4521 * usc_InDmaReg()
4522 *
4523 * Read a 16-bit value from a DMA register
4524 *
4525 * Arguments:
4526 *
4527 * info pointer to device info structure
4528 * RegAddr register address (number) to read from
4529 *
4530 * Return Value:
4531 *
4532 * The 16-bit value read from register
4533 *
4534 */
static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4536 {
4537 /* Note: The DCAR is located at the adapter base address */
4538 /* Note: must preserve state of BIT8 in DCAR */
4539
4540 outw( RegAddr + info->mbre_bit, info->io_base );
4541 return inw( info->io_base );
4542
4543 } /* end of usc_InDmaReg() */
4544
4545 /*
4546 *
4547 * usc_OutReg()
4548 *
4549 * Write a 16-bit value to a USC serial channel register
4550 *
4551 * Arguments:
4552 *
4553 * info pointer to device info structure
4554 * RegAddr register address (number) to write to
4555 * RegValue 16-bit value to write to register
4556 *
4557 * Return Value:
4558 *
4559 * None
4560 *
4561 */
static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4563 {
4564 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4565 outw( RegValue, info->io_base + CCAR );
4566
4567 /* Read to flush write to CCAR */
4568 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4569 inw( info->io_base + CCAR );
4570
4571 } /* end of usc_OutReg() */
4572
4573 /*
4574 * usc_InReg()
4575 *
4576 * Reads a 16-bit value from a USC serial channel register
4577 *
4578 * Arguments:
4579 *
4580 * info pointer to device extension
4581 * RegAddr register address (number) to read from
4582 *
4583 * Return Value:
4584 *
4585 * 16-bit value read from register
4586 */
static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4588 {
4589 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4590 return inw( info->io_base + CCAR );
4591
4592 } /* end of usc_InReg() */
4593
4594 /* usc_set_sdlc_mode()
4595 *
4596 * Set up the adapter for SDLC DMA communications.
4597 *
4598 * Arguments: info pointer to device instance data
4599 * Return Value: NONE
4600 */
static void usc_set_sdlc_mode( struct mgsl_struct *info )
4602 {
4603 u16 RegValue;
4604 bool PreSL1660;
4605
4606 /*
4607 * determine if the IUSC on the adapter is pre-SL1660. If
4608 * not, take advantage of the UnderWait feature of more
4609 * modern chips. If an underrun occurs and this bit is set,
4610 * the transmitter will idle the programmed idle pattern
4611 * until the driver has time to service the underrun. Otherwise,
4612 * the dma controller may get the cycles previously requested
4613 * and begin transmitting queued tx data.
4614 */
4615 usc_OutReg(info,TMCR,0x1f);
4616 RegValue=usc_InReg(info,TMDR);
4617 PreSL1660 = (RegValue == IUSC_PRE_SL1660);
4618
4619 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4620 {
4621 /*
4622 ** Channel Mode Register (CMR)
4623 **
4624 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4625 ** <13> 0 0 = Transmit Disabled (initially)
4626 ** <12> 0 1 = Consecutive Idles share common 0
4627 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4628 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4629 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4630 **
4631 ** 1000 1110 0000 0110 = 0x8e06
4632 */
4633 RegValue = 0x8e06;
4634
4635 /*--------------------------------------------------
4636 * ignore user options for UnderRun Actions and
4637 * preambles
4638 *--------------------------------------------------*/
4639 }
4640 else
4641 {
4642 /* Channel mode Register (CMR)
4643 *
4644 * <15..14> 00 Tx Sub modes, Underrun Action
4645 * <13> 0 1 = Send Preamble before opening flag
4646 * <12> 0 1 = Consecutive Idles share common 0
4647 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4648 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4649 * <3..0> 0110 Receiver mode = HDLC/SDLC
4650 *
4651 * 0000 0110 0000 0110 = 0x0606
4652 */
4653 if (info->params.mode == MGSL_MODE_RAW) {
4654 RegValue = 0x0001; /* Set Receive mode = external sync */
4655
4656 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4657 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4658
4659 /*
4660 * TxSubMode:
4661 * CMR <15> 0 Don't send CRC on Tx Underrun
4662 * CMR <14> x undefined
		 * CMR <13>	0	Send preamble before opening sync
		 * CMR <12>	0	Send 8-bit syncs, 1=send Syncs per TxLength
		 *
		 * TxMode:
		 * CMR <11..8>	0100	MonoSync
		 *
		 * 0000 0100 xxxx xxxx = 0x04xx
4670 */
4671 RegValue |= 0x0400;
4672 }
4673 else {
4674
4675 RegValue = 0x0606;
4676
4677 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4678 RegValue |= BIT14;
4679 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4680 RegValue |= BIT15;
4681 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4682 RegValue |= BIT15 | BIT14;
4683 }
4684
4685 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4686 RegValue |= BIT13;
4687 }
4688
4689 if ( info->params.mode == MGSL_MODE_HDLC &&
4690 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4691 RegValue |= BIT12;
4692
4693 if ( info->params.addr_filter != 0xff )
4694 {
4695 /* set up receive address filtering */
4696 usc_OutReg( info, RSR, info->params.addr_filter );
4697 RegValue |= BIT4;
4698 }
4699
4700 usc_OutReg( info, CMR, RegValue );
4701 info->cmr_value = RegValue;
4702
4703 /* Receiver mode Register (RMR)
4704 *
4705 * <15..13> 000 encoding
4706 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4707 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4708 * <9> 0 1 = Include Receive chars in CRC
4709 * <8> 1 1 = Use Abort/PE bit as abort indicator
4710 * <7..6> 00 Even parity
4711 * <5> 0 parity disabled
4712 * <4..2> 000 Receive Char Length = 8 bits
4713 * <1..0> 00 Disable Receiver
4714 *
4715 * 0000 0101 0000 0000 = 0x0500
4716 */
4717
4718 RegValue = 0x0500;
4719
4720 switch ( info->params.encoding ) {
4721 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4722 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4723 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4724 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4725 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4726 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4727 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4728 }
4729
4730 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4731 RegValue |= BIT9;
4732 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4733 RegValue |= ( BIT12 | BIT10 | BIT9 );
4734
4735 usc_OutReg( info, RMR, RegValue );
4736
4737 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4738 /* When an opening flag of an SDLC frame is recognized the */
4739 /* Receive Character count (RCC) is loaded with the value in */
4740 /* RCLR. The RCC is decremented for each received byte. The */
4741 /* value of RCC is stored after the closing flag of the frame */
4742 /* allowing the frame size to be computed. */
4743
4744 usc_OutReg( info, RCLR, RCLRVALUE );
4745
4746 usc_RCmd( info, RCmd_SelectRicrdma_level );
4747
4748 /* Receive Interrupt Control Register (RICR)
4749 *
4750 * <15..8> ? RxFIFO DMA Request Level
4751 * <7> 0 Exited Hunt IA (Interrupt Arm)
4752 * <6> 0 Idle Received IA
4753 * <5> 0 Break/Abort IA
4754 * <4> 0 Rx Bound IA
4755 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4756 * <2> 0 Abort/PE IA
4757 * <1> 1 Rx Overrun IA
4758 * <0> 0 Select TC0 value for readback
4759 *
	 * 0000 0000 0000 1010 = 0x000a
4761 */
4762
4763 /* Carry over the Exit Hunt and Idle Received bits */
4764 /* in case they have been armed by usc_ArmEvents. */
4765
4766 RegValue = usc_InReg( info, RICR ) & 0xc0;
4767
4768 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4769 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4770 else
4771 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4772
4773 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4774
4775 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4776 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4777
4778 /* Transmit mode Register (TMR)
4779 *
4780 * <15..13> 000 encoding
4781 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4782 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4783 * <9> 0 1 = Tx CRC Enabled
4784 * <8> 0 1 = Append CRC to end of transmit frame
4785 * <7..6> 00 Transmit parity Even
4786 * <5> 0 Transmit parity Disabled
4787 * <4..2> 000 Tx Char Length = 8 bits
4788 * <1..0> 00 Disable Transmitter
4789 *
4790 * 0000 0100 0000 0000 = 0x0400
4791 */
4792
4793 RegValue = 0x0400;
4794
4795 switch ( info->params.encoding ) {
4796 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4797 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4798 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
4799 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4800 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
4801 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
4802 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
4803 }
4804
4805 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4806 RegValue |= BIT9 | BIT8;
4807 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4808 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4809
4810 usc_OutReg( info, TMR, RegValue );
4811
4812 usc_set_txidle( info );
4813
4814
4815 usc_TCmd( info, TCmd_SelectTicrdma_level );
4816
4817 /* Transmit Interrupt Control Register (TICR)
4818 *
4819 * <15..8> ? Transmit FIFO DMA Level
4820 * <7> 0 Present IA (Interrupt Arm)
4821 * <6> 0 Idle Sent IA
4822 * <5> 1 Abort Sent IA
4823 * <4> 1 EOF/EOM Sent IA
4824 * <3> 0 CRC Sent IA
4825 * <2> 1 1 = Wait for SW Trigger to Start Frame
4826 * <1> 1 Tx Underrun IA
4827 * <0> 0 TC0 constant on read back
4828 *
4829 * 0000 0000 0011 0110 = 0x0036
4830 */
4831
4832 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4833 usc_OutReg( info, TICR, 0x0736 );
4834 else
4835 usc_OutReg( info, TICR, 0x1436 );
4836
4837 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4838 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4839
4840 /*
4841 ** Transmit Command/Status Register (TCSR)
4842 **
4843 ** <15..12> 0000 TCmd
4844 ** <11> 0/1 UnderWait
4845 ** <10..08> 000 TxIdle
4846 ** <7> x PreSent
4847 ** <6> x IdleSent
4848 ** <5> x AbortSent
4849 ** <4> x EOF/EOM Sent
4850 ** <3> x CRC Sent
4851 ** <2> x All Sent
4852 ** <1> x TxUnder
4853 ** <0> x TxEmpty
4854 **
4855 ** 0000 0000 0000 0000 = 0x0000
4856 */
4857 info->tcsr_value = 0;
4858
4859 if ( !PreSL1660 )
4860 info->tcsr_value |= TCSR_UNDERWAIT;
4861
4862 usc_OutReg( info, TCSR, info->tcsr_value );
4863
4864 /* Clock mode Control Register (CMCR)
4865 *
4866 * <15..14> 00 counter 1 Source = Disabled
4867 * <13..12> 00 counter 0 Source = Disabled
4868 * <11..10> 11 BRG1 Input is TxC Pin
4869 * <9..8> 11 BRG0 Input is TxC Pin
4870 * <7..6> 01 DPLL Input is BRG1 Output
4871 * <5..3> XXX TxCLK comes from Port 0
4872 * <2..0> XXX RxCLK comes from Port 1
4873 *
4874 * 0000 1111 0111 0111 = 0x0f77
4875 */
4876
4877 RegValue = 0x0f40;
4878
4879 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
4880 RegValue |= 0x0003; /* RxCLK from DPLL */
4881 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
4882 RegValue |= 0x0004; /* RxCLK from BRG0 */
4883 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4884 RegValue |= 0x0006; /* RxCLK from TXC Input */
4885 else
4886 RegValue |= 0x0007; /* RxCLK from Port1 */
4887
4888 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
4889 RegValue |= 0x0018; /* TxCLK from DPLL */
4890 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
4891 RegValue |= 0x0020; /* TxCLK from BRG0 */
4892 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
		RegValue |= 0x0038;	/* TxCLK from RxC Input */
4894 else
4895 RegValue |= 0x0030; /* TxCLK from Port0 */
4896
4897 usc_OutReg( info, CMCR, RegValue );
4898
4899
4900 /* Hardware Configuration Register (HCR)
4901 *
4902 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
4903 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
4904 * <12> 0 CVOK:0=report code violation in biphase
4905 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
4906 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
4907 * <7..6> 00 reserved
4908 * <5> 0 BRG1 mode:0=continuous,1=single cycle
4909 * <4> X BRG1 Enable
4910 * <3..2> 00 reserved
4911 * <1> 0 BRG0 mode:0=continuous,1=single cycle
4912 * <0> 0 BRG0 Enable
4913 */
4914
4915 RegValue = 0x0000;
4916
4917 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) {
4918 u32 XtalSpeed;
4919 u32 DpllDivisor;
4920 u16 Tc;
4921
4922 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
4923 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
4924
4925 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4926 XtalSpeed = 11059200;
4927 else
4928 XtalSpeed = 14745600;
4929
4930 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
4931 DpllDivisor = 16;
4932 RegValue |= BIT10;
4933 }
4934 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
4935 DpllDivisor = 8;
4936 RegValue |= BIT11;
4937 }
4938 else
4939 DpllDivisor = 32;
4940
4941 /* Tc = (Xtal/Speed) - 1 */
4942 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
4943 /* then rounding up gives a more precise time constant. Instead */
4944 /* of rounding up and then subtracting 1 we just don't subtract */
4945 /* the one in this case. */
4946
4947 /*--------------------------------------------------
4948 * ejz: for DPLL mode, application should use the
4949 * same clock speed as the partner system, even
4950 * though clocking is derived from the input RxData.
4951 * In case the user uses a 0 for the clock speed,
4952 * default to 0xffffffff and don't try to divide by
4953 * zero
4954 *--------------------------------------------------*/
4955 if ( info->params.clock_speed )
4956 {
4957 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
4958 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
4959 / info->params.clock_speed) )
4960 Tc--;
4961 }
4962 else
4963 Tc = -1;
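
	/* Worked example: an ISA adapter (XtalSpeed 14745600) with the
	 * default DpllDivisor of 32 yields a 460800 Hz reference clock.
	 * For an illustrative clock_speed of 9600, 460800/9600 = 48 with
	 * no remainder, so the rounding test above decrements Tc to 47.
	 */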
4964
4965
4966 /* Write 16-bit Time Constant for BRG1 */
4967 usc_OutReg( info, TC1R, Tc );
4968
4969 RegValue |= BIT4; /* enable BRG1 */
4970
4971 switch ( info->params.encoding ) {
4972 case HDLC_ENCODING_NRZ:
4973 case HDLC_ENCODING_NRZB:
4974 case HDLC_ENCODING_NRZI_MARK:
4975 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
4976 case HDLC_ENCODING_BIPHASE_MARK:
4977 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
4978 case HDLC_ENCODING_BIPHASE_LEVEL:
4979 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break;
4980 }
4981 }
4982
4983 usc_OutReg( info, HCR, RegValue );
4984
4985
4986 /* Channel Control/status Register (CCSR)
4987 *
4988 * <15> X RCC FIFO Overflow status (RO)
4989 * <14> X RCC FIFO Not Empty status (RO)
4990 * <13> 0 1 = Clear RCC FIFO (WO)
4991 * <12> X DPLL Sync (RW)
4992 * <11> X DPLL 2 Missed Clocks status (RO)
4993 * <10> X DPLL 1 Missed Clock status (RO)
4994 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
4995 * <7> X SDLC Loop On status (RO)
4996 * <6> X SDLC Loop Send status (RO)
4997 * <5> 1 Bypass counters for TxClk and RxClk (RW)
4998 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
4999 * <1..0> 00 reserved
5000 *
5001 * 0000 0000 0010 0000 = 0x0020
5002 */
5003
5004 usc_OutReg( info, CCSR, 0x1020 );
5005
5006
5007 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5008 usc_OutReg( info, SICR,
5009 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5010 }
5011
5012
5013 /* enable Master Interrupt Enable bit (MIE) */
5014 usc_EnableMasterIrqBit( info );
5015
5016 usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA |
5017 TRANSMIT_STATUS | TRANSMIT_DATA | MISC);
5018
5019 /* arm RCC underflow interrupt */
5020 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5021 usc_EnableInterrupts(info, MISC);
5022
5023 info->mbre_bit = 0;
5024 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5025 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5026 info->mbre_bit = BIT8;
5027 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5028
5029 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5030 /* Enable DMAEN (Port 7, Bit 14) */
5031 /* This connects the DMA request signal to the ISA bus */
5032 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5033 }
5034
5035 /* DMA Control Register (DCR)
5036 *
5037 * <15..14> 10 Priority mode = Alternating Tx/Rx
5038 * 01 Rx has priority
5039 * 00 Tx has priority
5040 *
5041 * <13> 1 Enable Priority Preempt per DCR<15..14>
5042 * (WARNING DCR<11..10> must be 00 when this is 1)
5043 * 0 Choose activate channel per DCR<11..10>
5044 *
5045 * <12> 0 Little Endian for Array/List
5046 * <11..10> 00 Both Channels can use each bus grant
5047 * <9..6> 0000 reserved
5048 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5049 * <4> 0 1 = drive D/C and S/D pins
5050 * <3> 1 1 = Add one wait state to all DMA cycles.
5051 * <2> 0 1 = Strobe /UAS on every transfer.
5052 * <1..0> 11 Addr incrementing only affects LS24 bits
5053 *
5054 * 0110 0000 0000 1011 = 0x600b
5055 */
5056
5057 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5058 /* PCI adapter does not need DMA wait state */
5059 usc_OutDmaReg( info, DCR, 0xa00b );
5060 }
5061 else
5062 usc_OutDmaReg( info, DCR, 0x800b );
5063
5064
5065 /* Receive DMA mode Register (RDMR)
5066 *
5067 * <15..14> 11 DMA mode = Linked List Buffer mode
	 * <13>       1    RSBinA/L = store Rx status Block in Array/List entry
5069 * <12> 1 Clear count of List Entry after fetching
5070 * <11..10> 00 Address mode = Increment
5071 * <9> 1 Terminate Buffer on RxBound
5072 * <8> 0 Bus Width = 16bits
5073 * <7..0> ? status Bits (write as 0s)
5074 *
5075 * 1111 0010 0000 0000 = 0xf200
5076 */
5077
5078 usc_OutDmaReg( info, RDMR, 0xf200 );
5079
5080
5081 /* Transmit DMA mode Register (TDMR)
5082 *
5083 * <15..14> 11 DMA mode = Linked List Buffer mode
5084 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5085 * <12> 1 Clear count of List Entry after fetching
5086 * <11..10> 00 Address mode = Increment
5087 * <9> 1 Terminate Buffer on end of frame
5088 * <8> 0 Bus Width = 16bits
5089 * <7..0> ? status Bits (Read Only so write as 0)
5090 *
5091 * 1111 0010 0000 0000 = 0xf200
5092 */
5093
5094 usc_OutDmaReg( info, TDMR, 0xf200 );
5095
5096
5097 /* DMA Interrupt Control Register (DICR)
5098 *
5099 * <15> 1 DMA Interrupt Enable
5100 * <14> 0 1 = Disable IEO from USC
5101 * <13> 0 1 = Don't provide vector during IntAck
5102 * <12> 1 1 = Include status in Vector
5103 * <10..2> 0 reserved, Must be 0s
5104 * <1> 0 1 = Rx DMA Interrupt Enabled
5105 * <0> 0 1 = Tx DMA Interrupt Enabled
5106 *
5107 * 1001 0000 0000 0000 = 0x9000
5108 */
5109
5110 usc_OutDmaReg( info, DICR, 0x9000 );
5111
5112 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5113 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5114 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5115
5116 /* Channel Control Register (CCR)
5117 *
5118 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5119 * <13> 0 Trigger Tx on SW Command Disabled
5120 * <12> 0 Flag Preamble Disabled
5121 * <11..10> 00 Preamble Length
5122 * <9..8> 00 Preamble Pattern
5123 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5124 * <5> 0 Trigger Rx on SW Command Disabled
5125 * <4..0> 0 reserved
5126 *
5127 * 1000 0000 1000 0000 = 0x8080
5128 */
5129
5130 RegValue = 0x8080;
5131
5132 switch ( info->params.preamble_length ) {
5133 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5134 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5135 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break;
5136 }
5137
5138 switch ( info->params.preamble ) {
5139 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break;
5140 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5141 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5142 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 | BIT8; break;
5143 }
5144
5145 usc_OutReg( info, CCR, RegValue );
5146
5147
5148 /*
5149 * Burst/Dwell Control Register
5150 *
5151 * <15..8> 0x20 Maximum number of transfers per bus grant
5152 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5153 */
5154
5155 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5156 /* don't limit bus occupancy on PCI adapter */
5157 usc_OutDmaReg( info, BDCR, 0x0000 );
5158 }
5159 else
5160 usc_OutDmaReg( info, BDCR, 0x2000 );
5161
5162 usc_stop_transmitter(info);
5163 usc_stop_receiver(info);
5164
5165 } /* end of usc_set_sdlc_mode() */
5166
5167 /* usc_enable_loopback()
5168 *
5169 * Set the 16C32 for internal loopback mode.
5170 * The TxCLK and RxCLK signals are generated from the BRG0 and
5171 * the TxD is looped back to the RxD internally.
5172 *
5173 * Arguments: info pointer to device instance data
5174 * enable 1 = enable loopback, 0 = disable
5175 * Return Value: None
5176 */
static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5178 {
5179 if (enable) {
5180 /* blank external TXD output */
5181 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6));
5182
5183 /* Clock mode Control Register (CMCR)
5184 *
5185 * <15..14> 00 counter 1 Disabled
5186 * <13..12> 00 counter 0 Disabled
5187 * <11..10> 11 BRG1 Input is TxC Pin
5188 * <9..8> 11 BRG0 Input is TxC Pin
5189 * <7..6> 01 DPLL Input is BRG1 Output
5190 * <5..3> 100 TxCLK comes from BRG0
5191 * <2..0> 100 RxCLK comes from BRG0
5192 *
5193 * 0000 1111 0110 0100 = 0x0f64
5194 */
5195
5196 usc_OutReg( info, CMCR, 0x0f64 );
5197
5198 /* Write 16-bit Time Constant for BRG0 */
5199 /* use clock speed if available, otherwise use 8 for diagnostics */
5200 if (info->params.clock_speed) {
5201 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5202 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5203 else
5204 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5205 } else
5206 usc_OutReg(info, TC0R, (u16)8);
5207
5208 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
5209 mode = Continuous Set Bit 0 to enable BRG0. */
5210 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5211
5212 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5213 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5214
5215 /* set Internal Data loopback mode */
5216 info->loopback_bits = 0x300;
5217 outw( 0x0300, info->io_base + CCAR );
5218 } else {
5219 /* enable external TXD output */
5220 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6));
5221
5222 /* clear Internal Data loopback mode */
5223 info->loopback_bits = 0;
5224 outw( 0,info->io_base + CCAR );
5225 }
5226
5227 } /* end of usc_enable_loopback() */
5228
5229 /* usc_enable_aux_clock()
5230 *
5231  * Enable the AUX clock output at the specified frequency.
5232 *
5233 * Arguments:
5234 *
5235 * info pointer to device extension
5236 * data_rate data rate of clock in bits per second
5237 * A data rate of 0 disables the AUX clock.
5238 *
5239 * Return Value: None
5240 */
5241 static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5242 {
5243 u32 XtalSpeed;
5244 u16 Tc;
5245
5246 if ( data_rate ) {
5247 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5248 XtalSpeed = 11059200;
5249 else
5250 XtalSpeed = 14745600;
5251
5252
5253 /* Tc = (Xtal/Speed) - 1 */
5254 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5255 /* then rounding up gives a more precise time constant. Instead */
5256 /* of rounding up and then subtracting 1 we just don't subtract */
5257 /* the one in this case. */
5258
5259
5260 Tc = (u16)(XtalSpeed/data_rate);
5261 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5262 Tc--;
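		/*
		 * Worked example (illustrative only): on a PCI adapter
		 * (XtalSpeed = 11059200) a data_rate of 38400 gives
		 * 11059200/38400 = 288 with remainder 0, so Tc is decremented
		 * to 287. A data_rate of 38000 gives 291 remainder 1200;
		 * twice that remainder is still less than 38000, so Tc is
		 * decremented to 290.
		 */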
5263
5264 /* Write 16-bit Time Constant for BRG0 */
5265 usc_OutReg( info, TC0R, Tc );
5266
5267 /*
5268 * Hardware Configuration Register (HCR)
5269 * Clear Bit 1, BRG0 mode = Continuous
5270 * Set Bit 0 to enable BRG0.
5271 */
5272
5273 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5274
5275 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5276 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5277 } else {
5278 /* data rate == 0 so turn off BRG0 */
5279 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5280 }
5281
5282 } /* end of usc_enable_aux_clock() */
5283
5284 /*
5285 *
5286 * usc_process_rxoverrun_sync()
5287 *
5288 * This function processes a receive overrun by resetting the
5289 * receive DMA buffers and issuing a Purge Rx FIFO command
5290 * to allow the receiver to continue receiving.
5291 *
5292 * Arguments:
5293 *
5294 * info pointer to device extension
5295 *
5296 * Return Value: None
5297 */
5298 static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5299 {
5300 int start_index;
5301 int end_index;
5302 int frame_start_index;
5303 bool start_of_frame_found = false;
5304 bool end_of_frame_found = false;
5305 bool reprogram_dma = false;
5306
5307 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5308 u32 phys_addr;
5309
5310 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5311 usc_RCmd( info, RCmd_EnterHuntmode );
5312 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5313
5314 /* CurrentRxBuffer points to the 1st buffer of the next */
5315 /* possibly available receive frame. */
5316
5317 frame_start_index = start_index = end_index = info->current_rx_buffer;
5318
5319 /* Search for an unfinished string of buffers. This means */
5320 /* that a receive frame started (at least one buffer with */
5321 	/* count set to zero) but there is no terminating buffer  */
5322 /* (status set to non-zero). */
5323
5324 while( !buffer_list[end_index].count )
5325 {
5326 /* Count field has been reset to zero by 16C32. */
5327 /* This buffer is currently in use. */
5328
5329 if ( !start_of_frame_found )
5330 {
5331 start_of_frame_found = true;
5332 frame_start_index = end_index;
5333 end_of_frame_found = false;
5334 }
5335
5336 if ( buffer_list[end_index].status )
5337 {
5338 /* Status field has been set by 16C32. */
5339 /* This is the last buffer of a received frame. */
5340
5341 /* We want to leave the buffers for this frame intact. */
5342 /* Move on to next possible frame. */
5343
5344 start_of_frame_found = false;
5345 end_of_frame_found = true;
5346 }
5347
5348 /* advance to next buffer entry in linked list */
5349 end_index++;
5350 if ( end_index == info->rx_buffer_count )
5351 end_index = 0;
5352
5353 if ( start_index == end_index )
5354 {
5355 /* The entire list has been searched with all Counts == 0 and */
5356 /* all Status == 0. The receive buffers are */
5357 /* completely screwed, reset all receive buffers! */
5358 mgsl_reset_rx_dma_buffers( info );
5359 frame_start_index = 0;
5360 start_of_frame_found = false;
5361 reprogram_dma = true;
5362 break;
5363 }
5364 }
5365
5366 if ( start_of_frame_found && !end_of_frame_found )
5367 {
5368 /* There is an unfinished string of receive DMA buffers */
5369 /* as a result of the receiver overrun. */
5370
5371 /* Reset the buffers for the unfinished frame */
5372 /* and reprogram the receive DMA controller to start */
5373 /* at the 1st buffer of unfinished frame. */
5374
5375 start_index = frame_start_index;
5376
5377 do
5378 {
5379 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5380
5381 /* Adjust index for wrap around. */
5382 if ( start_index == info->rx_buffer_count )
5383 start_index = 0;
5384
5385 } while( start_index != end_index );
5386
5387 reprogram_dma = true;
5388 }
5389
5390 if ( reprogram_dma )
5391 {
5392 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5393 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5394 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5395
5396 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5397
5398 /* This empties the receive FIFO and loads the RCC with RCLR */
5399 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5400
5401 /* program 16C32 with physical address of 1st DMA buffer entry */
5402 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5403 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5404 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5405
5406 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5407 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5408 usc_EnableInterrupts( info, RECEIVE_STATUS );
5409
5410 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5411 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5412
5413 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5414 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5415 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5416 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5417 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5418 else
5419 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5420 }
5421 else
5422 {
5423 /* This empties the receive FIFO and loads the RCC with RCLR */
5424 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5425 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5426 }
5427
5428 } /* end of usc_process_rxoverrun_sync() */
5429
5430 /* usc_stop_receiver()
5431 *
5432 * Disable USC receiver
5433 *
5434 * Arguments: info pointer to device instance data
5435 * Return Value: None
5436 */
5437 static void usc_stop_receiver( struct mgsl_struct *info )
5438 {
5439 if (debug_level >= DEBUG_LEVEL_ISR)
5440 printk("%s(%d):usc_stop_receiver(%s)\n",
5441 __FILE__,__LINE__, info->device_name );
5442
5443 /* Disable receive DMA channel. */
5444 /* This also disables receive DMA channel interrupts */
5445 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5446
5447 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5448 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5449 usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS );
5450
5451 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5452
5453 /* This empties the receive FIFO and loads the RCC with RCLR */
5454 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5455 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5456
5457 info->rx_enabled = false;
5458 info->rx_overflow = false;
5459 info->rx_rcc_underrun = false;
5460
5461 }	/* end of usc_stop_receiver() */
5462
5463 /* usc_start_receiver()
5464 *
5465 * Enable the USC receiver
5466 *
5467 * Arguments: info pointer to device instance data
5468 * Return Value: None
5469 */
5470 static void usc_start_receiver( struct mgsl_struct *info )
5471 {
5472 u32 phys_addr;
5473
5474 if (debug_level >= DEBUG_LEVEL_ISR)
5475 printk("%s(%d):usc_start_receiver(%s)\n",
5476 __FILE__,__LINE__, info->device_name );
5477
5478 mgsl_reset_rx_dma_buffers( info );
5479 usc_stop_receiver( info );
5480
5481 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5482 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5483
5484 if ( info->params.mode == MGSL_MODE_HDLC ||
5485 info->params.mode == MGSL_MODE_RAW ) {
5486 /* DMA mode Transfers */
5487 /* Program the DMA controller. */
5488 /* Enable the DMA controller end of buffer interrupt. */
5489
5490 /* program 16C32 with physical address of 1st DMA buffer entry */
5491 phys_addr = info->rx_buffer_list[0].phys_entry;
5492 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5493 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5494
5495 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5496 usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
5497 usc_EnableInterrupts( info, RECEIVE_STATUS );
5498
5499 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5500 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5501
5502 usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
5503 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5504 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5505 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5506 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5507 else
5508 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5509 } else {
5510 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5511 usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
5512 usc_EnableInterrupts(info, RECEIVE_DATA);
5513
5514 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5515 usc_RCmd( info, RCmd_EnterHuntmode );
5516
5517 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5518 }
5519
5520 usc_OutReg( info, CCSR, 0x1020 );
5521
5522 info->rx_enabled = true;
5523
5524 } /* end of usc_start_receiver() */
5525
5526 /* usc_start_transmitter()
5527 *
5528 * Enable the USC transmitter and send a transmit frame if
5529 * one is loaded in the DMA buffers.
5530 *
5531 * Arguments: info pointer to device instance data
5532 * Return Value: None
5533 */
5534 static void usc_start_transmitter( struct mgsl_struct *info )
5535 {
5536 u32 phys_addr;
5537 unsigned int FrameSize;
5538
5539 if (debug_level >= DEBUG_LEVEL_ISR)
5540 printk("%s(%d):usc_start_transmitter(%s)\n",
5541 __FILE__,__LINE__, info->device_name );
5542
5543 if ( info->xmit_cnt ) {
5544
5545 /* If auto RTS enabled and RTS is inactive, then assert */
5546 /* RTS and set a flag indicating that the driver should */
5547 /* negate RTS when the transmission completes. */
5548
5549 info->drop_rts_on_tx_done = false;
5550
5551 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5552 usc_get_serial_signals( info );
5553 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5554 info->serial_signals |= SerialSignal_RTS;
5555 usc_set_serial_signals( info );
5556 info->drop_rts_on_tx_done = true;
5557 }
5558 }
5559
5560
5561 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5562 if ( !info->tx_active ) {
5563 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5564 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5565 usc_EnableInterrupts(info, TRANSMIT_DATA);
5566 usc_load_txfifo(info);
5567 }
5568 } else {
5569 /* Disable transmit DMA controller while programming. */
5570 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5571
5572 /* Transmit DMA buffer is loaded, so program USC */
5573 /* to send the frame contained in the buffers. */
5574
5575 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5576
5577 /* if operating in Raw sync mode, reset the rcc component
5578 * of the tx dma buffer entry, otherwise, the serial controller
5579 * will send a closing sync char after this count.
5580 */
5581 if ( info->params.mode == MGSL_MODE_RAW )
5582 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5583
5584 /* Program the Transmit Character Length Register (TCLR) */
5585 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5586 usc_OutReg( info, TCLR, (u16)FrameSize );
5587
5588 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5589
5590 /* Program the address of the 1st DMA Buffer Entry in linked list */
5591 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5592 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5593 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5594
5595 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5596 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5597 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5598
5599 if ( info->params.mode == MGSL_MODE_RAW &&
5600 info->num_tx_dma_buffers > 1 ) {
5601 /* When running external sync mode, attempt to 'stream' transmit */
5602 /* by filling tx dma buffers as they become available. To do this */
5603 /* we need to enable Tx DMA EOB Status interrupts : */
5604 /* */
5605 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5606 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5607
5608 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5609 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5610 }
5611
5612 /* Initialize Transmit DMA Channel */
5613 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5614
5615 usc_TCmd( info, TCmd_SendFrame );
5616
5617 mod_timer(&info->tx_timer, jiffies +
5618 msecs_to_jiffies(5000));
5619 }
5620 info->tx_active = true;
5621 }
5622
5623 if ( !info->tx_enabled ) {
5624 info->tx_enabled = true;
5625 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5626 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5627 else
5628 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5629 }
5630
5631 } /* end of usc_start_transmitter() */
5632
5633 /* usc_stop_transmitter()
5634 *
5635 * Stops the transmitter and DMA
5636 *
5637  * Arguments:		info	pointer to device instance data
5638 * Return Value: None
5639 */
5640 static void usc_stop_transmitter( struct mgsl_struct *info )
5641 {
5642 if (debug_level >= DEBUG_LEVEL_ISR)
5643 printk("%s(%d):usc_stop_transmitter(%s)\n",
5644 __FILE__,__LINE__, info->device_name );
5645
5646 del_timer(&info->tx_timer);
5647
5648 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5649 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5650 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5651
5652 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5653 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5654 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5655
5656 info->tx_enabled = false;
5657 info->tx_active = false;
5658
5659 } /* end of usc_stop_transmitter() */
5660
5661 /* usc_load_txfifo()
5662 *
5663 * Fill the transmit FIFO until the FIFO is full or
5664 * there is no more data to load.
5665 *
5666 * Arguments: info pointer to device extension (instance data)
5667 * Return Value: None
5668 */
5669 static void usc_load_txfifo( struct mgsl_struct *info )
5670 {
5671 int Fifocount;
5672 u8 TwoBytes[2];
5673
5674 if ( !info->xmit_cnt && !info->x_char )
5675 return;
5676
5677 /* Select transmit FIFO status readback in TICR */
5678 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5679
5680 /* load the Transmit FIFO until FIFOs full or all data sent */
5681
5682 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5683 /* there is more space in the transmit FIFO and */
5684 /* there is more data in transmit buffer */
5685
5686 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5687 /* write a 16-bit word from transmit buffer to 16C32 */
5688
5689 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5690 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5691 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5692 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
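			/*
			 * The AND with (SERIAL_XMIT_SIZE-1) is the usual
			 * power-of-two ring buffer wrap. For example, assuming
			 * SERIAL_XMIT_SIZE is 4096, a tail of 4095 advances to
			 * 4096 and is masked back to 0.
			 */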
5693
5694 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5695
5696 info->xmit_cnt -= 2;
5697 info->icount.tx += 2;
5698 } else {
5699 /* only 1 byte left to transmit or 1 FIFO slot left */
5700
5701 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5702 info->io_base + CCAR );
5703
5704 if (info->x_char) {
5705 /* transmit pending high priority char */
5706 outw( info->x_char,info->io_base + CCAR );
5707 info->x_char = 0;
5708 } else {
5709 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5710 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5711 info->xmit_cnt--;
5712 }
5713 info->icount.tx++;
5714 }
5715 }
5716
5717 } /* end of usc_load_txfifo() */
5718
5719 /* usc_reset()
5720 *
5721 * Reset the adapter to a known state and prepare it for further use.
5722 *
5723 * Arguments: info pointer to device instance data
5724 * Return Value: None
5725 */
5726 static void usc_reset( struct mgsl_struct *info )
5727 {
5728 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5729 int i;
5730 u32 readval;
5731
5732 /* Set BIT30 of Misc Control Register */
5733 /* (Local Control Register 0x50) to force reset of USC. */
5734
5735 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5736 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5737
5738 info->misc_ctrl_value |= BIT30;
5739 *MiscCtrl = info->misc_ctrl_value;
5740
5741 /*
5742 * Force at least 170ns delay before clearing
5743 * reset bit. Each read from LCR takes at least
5744 * 30ns so 10 times for 300ns to be safe.
5745 */
5746 for(i=0;i<10;i++)
5747 readval = *MiscCtrl;
5748
5749 info->misc_ctrl_value &= ~BIT30;
5750 *MiscCtrl = info->misc_ctrl_value;
5751
5752 *LCR0BRDR = BUS_DESCRIPTOR(
5753 1, // Write Strobe Hold (0-3)
5754 2, // Write Strobe Delay (0-3)
5755 2, // Read Strobe Delay (0-3)
5756 0, // NWDD (Write data-data) (0-3)
5757 4, // NWAD (Write Addr-data) (0-31)
5758 0, // NXDA (Read/Write Data-Addr) (0-3)
5759 0, // NRDD (Read Data-Data) (0-3)
5760 5 // NRAD (Read Addr-Data) (0-31)
5761 );
5762 } else {
5763 /* do HW reset */
5764 outb( 0,info->io_base + 8 );
5765 }
5766
5767 info->mbre_bit = 0;
5768 info->loopback_bits = 0;
5769 info->usc_idle_mode = 0;
5770
5771 /*
5772 * Program the Bus Configuration Register (BCR)
5773 *
5774 * <15> 0 Don't use separate address
5775 * <14..6> 0 reserved
5776 * <5..4> 00 IAckmode = Default, don't care
5777 * <3> 1 Bus Request Totem Pole output
5778 * <2> 1 Use 16 Bit data bus
5779 * <1> 0 IRQ Totem Pole output
5780 * <0> 0 Don't Shift Right Addr
5781 *
5782 * 0000 0000 0000 1100 = 0x000c
5783 *
5784 * By writing to io_base + SDPIN the Wait/Ack pin is
5785 * programmed to work as a Wait pin.
5786 */
5787
5788 outw( 0x000c,info->io_base + SDPIN );
5789
5790
5791 outw( 0,info->io_base );
5792 outw( 0,info->io_base + CCAR );
5793
5794 /* select little endian byte ordering */
5795 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5796
5797
5798 /* Port Control Register (PCR)
5799 *
5800 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5801 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5802 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5803 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5804 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5805 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5806 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5807 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5808 *
5809 * 1111 0000 1111 0101 = 0xf0f5
5810 */
5811
5812 usc_OutReg( info, PCR, 0xf0f5 );
5813
5814
5815 /*
5816 * Input/Output Control Register
5817 *
5818 * <15..14> 00 CTS is active low input
5819 * <13..12> 00 DCD is active low input
5820 * <11..10> 00 TxREQ pin is input (DSR)
5821 * <9..8> 00 RxREQ pin is input (RI)
5822 * <7..6> 00 TxD is output (Transmit Data)
5823  * <5..3>	000	TxC Pin is Input (14.7456MHz Clock)
5824 * <2..0> 100 RxC is Output (drive with BRG0)
5825 *
5826 * 0000 0000 0000 0100 = 0x0004
5827 */
5828
5829 usc_OutReg( info, IOCR, 0x0004 );
5830
5831 } /* end of usc_reset() */
5832
5833 /* usc_set_async_mode()
5834 *
5835 * Program adapter for asynchronous communications.
5836 *
5837 * Arguments: info pointer to device instance data
5838 * Return Value: None
5839 */
5840 static void usc_set_async_mode( struct mgsl_struct *info )
5841 {
5842 u16 RegValue;
5843
5844 /* disable interrupts while programming USC */
5845 usc_DisableMasterIrqBit( info );
5846
5847 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5848 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5849
5850 usc_loopback_frame( info );
5851
5852 /* Channel mode Register (CMR)
5853 *
5854 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5855 * <13..12> 00 00 = 16X Clock
5856 * <11..8> 0000 Transmitter mode = Asynchronous
5857 * <7..6> 00 reserved?
5858 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5859 * <3..0> 0000 Receiver mode = Asynchronous
5860 *
5861 * 0000 0000 0000 0000 = 0x0
5862 */
5863
5864 RegValue = 0;
5865 if ( info->params.stop_bits != 1 )
5866 RegValue |= BIT14;
5867 usc_OutReg( info, CMR, RegValue );
5868
5869
5870 /* Receiver mode Register (RMR)
5871 *
5872 * <15..13> 000 encoding = None
5873 * <12..08> 00000 reserved (Sync Only)
5874 * <7..6> 00 Even parity
5875 * <5> 0 parity disabled
5876 * <4..2> 000 Receive Char Length = 8 bits
5877 * <1..0> 00 Disable Receiver
5878 *
5879 * 0000 0000 0000 0000 = 0x0
5880 */
5881
5882 RegValue = 0;
5883
5884 if ( info->params.data_bits != 8 )
5885 RegValue |= BIT4 | BIT3 | BIT2;
5886
5887 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5888 RegValue |= BIT5;
5889 if ( info->params.parity != ASYNC_PARITY_ODD )
5890 RegValue |= BIT6;
5891 }
5892
5893 usc_OutReg( info, RMR, RegValue );
5894
5895
5896 /* Set IRQ trigger level */
5897
5898 usc_RCmd( info, RCmd_SelectRicrIntLevel );
5899
5900
5901 /* Receive Interrupt Control Register (RICR)
5902 *
5903 * <15..8> ? RxFIFO IRQ Request Level
5904 *
5905 * Note: For async mode the receive FIFO level must be set
5906 * to 0 to avoid the situation where the FIFO contains fewer bytes
5907 * than the trigger level and no more data is expected.
5908 *
5909 * <7> 0 Exited Hunt IA (Interrupt Arm)
5910 * <6> 0 Idle Received IA
5911 * <5> 0 Break/Abort IA
5912 * <4> 0 Rx Bound IA
5913 * <3> 0 Queued status reflects oldest byte in FIFO
5914 * <2> 0 Abort/PE IA
5915 * <1> 0 Rx Overrun IA
5916 * <0> 0 Select TC0 value for readback
5917 *
5918  *	0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
5919 */
5920
5921 usc_OutReg( info, RICR, 0x0000 );
5922
5923 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5924 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
5925
5926
5927 /* Transmit mode Register (TMR)
5928 *
5929 * <15..13> 000 encoding = None
5930 * <12..08> 00000 reserved (Sync Only)
5931 * <7..6> 00 Transmit parity Even
5932 * <5> 0 Transmit parity Disabled
5933 * <4..2> 000 Tx Char Length = 8 bits
5934 * <1..0> 00 Disable Transmitter
5935 *
5936 * 0000 0000 0000 0000 = 0x0
5937 */
5938
5939 RegValue = 0;
5940
5941 if ( info->params.data_bits != 8 )
5942 RegValue |= BIT4 | BIT3 | BIT2;
5943
5944 if ( info->params.parity != ASYNC_PARITY_NONE ) {
5945 RegValue |= BIT5;
5946 if ( info->params.parity != ASYNC_PARITY_ODD )
5947 RegValue |= BIT6;
5948 }
5949
5950 usc_OutReg( info, TMR, RegValue );
5951
5952 usc_set_txidle( info );
5953
5954
5955 /* Set IRQ trigger level */
5956
5957 usc_TCmd( info, TCmd_SelectTicrIntLevel );
5958
5959
5960 /* Transmit Interrupt Control Register (TICR)
5961 *
5962 * <15..8> ? Transmit FIFO IRQ Level
5963 * <7> 0 Present IA (Interrupt Arm)
5964 * <6> 1 Idle Sent IA
5965 * <5> 0 Abort Sent IA
5966 * <4> 0 EOF/EOM Sent IA
5967 * <3> 0 CRC Sent IA
5968 * <2> 0 1 = Wait for SW Trigger to Start Frame
5969 * <1> 0 Tx Underrun IA
5970 * <0> 0 TC0 constant on read back
5971 *
5972  *	0000 0000 0100 0000 = 0x0040 + (FIFOLEVEL in MSB)
5973 */
5974
5975 usc_OutReg( info, TICR, 0x1f40 );
5976
5977 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5978 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5979
5980 usc_enable_async_clock( info, info->params.data_rate );
5981
5982
5983 /* Channel Control/status Register (CCSR)
5984 *
5985 * <15> X RCC FIFO Overflow status (RO)
5986 * <14> X RCC FIFO Not Empty status (RO)
5987 * <13> 0 1 = Clear RCC FIFO (WO)
5988 * <12> X DPLL in Sync status (RO)
5989 * <11> X DPLL 2 Missed Clocks status (RO)
5990 * <10> X DPLL 1 Missed Clock status (RO)
5991 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5992 * <7> X SDLC Loop On status (RO)
5993 * <6> X SDLC Loop Send status (RO)
5994 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5995 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5996 * <1..0> 00 reserved
5997 *
5998 * 0000 0000 0010 0000 = 0x0020
5999 */
6000
6001 usc_OutReg( info, CCSR, 0x0020 );
6002
6003 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6004 RECEIVE_DATA + RECEIVE_STATUS );
6005
6006 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6007 RECEIVE_DATA + RECEIVE_STATUS );
6008
6009 usc_EnableMasterIrqBit( info );
6010
6011 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6012 /* Enable INTEN (Port 6, Bit12) */
6013 /* This connects the IRQ request signal to the ISA bus */
6014 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6015 }
6016
6017 if (info->params.loopback) {
6018 info->loopback_bits = 0x300;
6019 outw(0x0300, info->io_base + CCAR);
6020 }
6021
6022 } /* end of usc_set_async_mode() */
6023
6024 /* usc_loopback_frame()
6025 *
6026 * Loop back a small (2 byte) dummy SDLC frame.
6027 * Interrupts and DMA are NOT used. The purpose of this is to
6028 * clear any 'stale' status info left over from running in async mode.
6029 *
6030 * The 16C32 shows the strange behaviour of marking the 1st
6031 * received SDLC frame with a CRC error even when there is no
6032  * CRC error. To get around this a small dummy frame of 2 bytes
6033 * is looped back when switching from async to sync mode.
6034 *
6035 * Arguments: info pointer to device instance data
6036 * Return Value: None
6037 */
6038 static void usc_loopback_frame( struct mgsl_struct *info )
6039 {
6040 int i;
6041 unsigned long oldmode = info->params.mode;
6042
6043 info->params.mode = MGSL_MODE_HDLC;
6044
6045 usc_DisableMasterIrqBit( info );
6046
6047 usc_set_sdlc_mode( info );
6048 usc_enable_loopback( info, 1 );
6049
6050 /* Write 16-bit Time Constant for BRG0 */
6051 usc_OutReg( info, TC0R, 0 );
6052
6053 /* Channel Control Register (CCR)
6054 *
6055 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6056 * <13> 0 Trigger Tx on SW Command Disabled
6057 * <12> 0 Flag Preamble Disabled
6058 * <11..10> 00 Preamble Length = 8-Bits
6059 * <9..8> 01 Preamble Pattern = flags
6060  * <7..6>	00	Don't use 32-bit Rx status Blocks (RSBs)
6061 * <5> 0 Trigger Rx on SW Command Disabled
6062 * <4..0> 0 reserved
6063 *
6064 * 0000 0001 0000 0000 = 0x0100
6065 */
6066
6067 usc_OutReg( info, CCR, 0x0100 );
6068
6069 /* SETUP RECEIVER */
6070 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6071 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6072
6073 /* SETUP TRANSMITTER */
6074 /* Program the Transmit Character Length Register (TCLR) */
6075 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6076 usc_OutReg( info, TCLR, 2 );
6077 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6078
6079 /* unlatch Tx status bits, and start transmit channel. */
6080 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6081 outw(0,info->io_base + DATAREG);
6082
6083 /* ENABLE TRANSMITTER */
6084 usc_TCmd( info, TCmd_SendFrame );
6085 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6086
6087 /* WAIT FOR RECEIVE COMPLETE */
6088 for (i=0 ; i<1000 ; i++)
6089 if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1))
6090 break;
6091
6092 /* clear Internal Data loopback mode */
6093 usc_enable_loopback(info, 0);
6094
6095 usc_EnableMasterIrqBit(info);
6096
6097 info->params.mode = oldmode;
6098
6099 } /* end of usc_loopback_frame() */
6100
6101 /* usc_set_sync_mode() Programs the USC for SDLC communications.
6102 *
6103 * Arguments: info pointer to adapter info structure
6104 * Return Value: None
6105 */
6106 static void usc_set_sync_mode( struct mgsl_struct *info )
6107 {
6108 usc_loopback_frame( info );
6109 usc_set_sdlc_mode( info );
6110
6111 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6112 /* Enable INTEN (Port 6, Bit12) */
6113 /* This connects the IRQ request signal to the ISA bus */
6114 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6115 }
6116
6117 usc_enable_aux_clock(info, info->params.clock_speed);
6118
6119 if (info->params.loopback)
6120 usc_enable_loopback(info,1);
6121
6122 }	/* end of usc_set_sync_mode() */
6123
6124 /* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6125 *
6126 * Arguments: info pointer to device instance data
6127 * Return Value: None
6128 */
6129 static void usc_set_txidle( struct mgsl_struct *info )
6130 {
6131 u16 usc_idle_mode = IDLEMODE_FLAGS;
6132
6133 /* Map API idle mode to USC register bits */
6134
6135 switch( info->idle_mode ){
6136 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6137 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6138 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6139 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6140 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6141 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6142 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6143 }
6144
6145 info->usc_idle_mode = usc_idle_mode;
6146 //usc_OutReg(info, TCSR, usc_idle_mode);
6147 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6148 info->tcsr_value += usc_idle_mode;
6149 usc_OutReg(info, TCSR, info->tcsr_value);
6150
6151 /*
6152 * if SyncLink WAN adapter is running in external sync mode, the
6153 * transmitter has been set to Monosync in order to try to mimic
6154 * a true raw outbound bit stream. Monosync still sends an open/close
6155 * sync char at the start/end of a frame. Try to match those sync
6156 * patterns to the idle mode set here
6157 */
6158 if ( info->params.mode == MGSL_MODE_RAW ) {
6159 unsigned char syncpat = 0;
6160 switch( info->idle_mode ) {
6161 case HDLC_TXIDLE_FLAGS:
6162 syncpat = 0x7e;
6163 break;
6164 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6165 syncpat = 0x55;
6166 break;
6167 case HDLC_TXIDLE_ZEROS:
6168 case HDLC_TXIDLE_SPACE:
6169 syncpat = 0x00;
6170 break;
6171 case HDLC_TXIDLE_ONES:
6172 case HDLC_TXIDLE_MARK:
6173 syncpat = 0xff;
6174 break;
6175 case HDLC_TXIDLE_ALT_MARK_SPACE:
6176 syncpat = 0xaa;
6177 break;
6178 }
6179
6180 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6181 }
6182
6183 } /* end of usc_set_txidle() */
6184
6185 /* usc_get_serial_signals()
6186 *
6187 * Query the adapter for the state of the V24 status (input) signals.
6188 *
6189 * Arguments: info pointer to device instance data
6190 * Return Value: None
6191 */
6192 static void usc_get_serial_signals( struct mgsl_struct *info )
6193 {
6194 u16 status;
6195
6196 /* clear all serial signals except RTS and DTR */
6197 info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
6198
6199 /* Read the Misc Interrupt status Register (MISR) to get */
6200 /* the V24 status signals. */
6201
6202 status = usc_InReg( info, MISR );
6203
6204 /* set serial signal bits to reflect MISR */
6205
6206 if ( status & MISCSTATUS_CTS )
6207 info->serial_signals |= SerialSignal_CTS;
6208
6209 if ( status & MISCSTATUS_DCD )
6210 info->serial_signals |= SerialSignal_DCD;
6211
6212 if ( status & MISCSTATUS_RI )
6213 info->serial_signals |= SerialSignal_RI;
6214
6215 if ( status & MISCSTATUS_DSR )
6216 info->serial_signals |= SerialSignal_DSR;
6217
6218 } /* end of usc_get_serial_signals() */
6219
6220 /* usc_set_serial_signals()
6221 *
6222 * Set the state of RTS and DTR based on contents of
6223 * serial_signals member of device extension.
6224 *
6225 * Arguments: info pointer to device instance data
6226 * Return Value: None
6227 */
6228 static void usc_set_serial_signals( struct mgsl_struct *info )
6229 {
6230 u16 Control;
6231 unsigned char V24Out = info->serial_signals;
6232
6233 /* get the current value of the Port Control Register (PCR) */
6234
6235 Control = usc_InReg( info, PCR );
6236
6237 if ( V24Out & SerialSignal_RTS )
6238 Control &= ~(BIT6);
6239 else
6240 Control |= BIT6;
6241
6242 if ( V24Out & SerialSignal_DTR )
6243 Control &= ~(BIT4);
6244 else
6245 Control |= BIT4;
6246
6247 usc_OutReg( info, PCR, Control );
6248
6249 } /* end of usc_set_serial_signals() */
6250
6251 /* usc_enable_async_clock()
6252 *
6253 * Enable the async clock at the specified frequency.
6254 *
6255 * Arguments: info pointer to device instance data
6256 * data_rate data rate of clock in bps
6257 * 0 disables the AUX clock.
6258 * Return Value: None
6259 */
6260 static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6261 {
6262 if ( data_rate ) {
6263 /*
6264 * Clock mode Control Register (CMCR)
6265 *
6266 * <15..14> 00 counter 1 Disabled
6267 * <13..12> 00 counter 0 Disabled
6268 * <11..10> 11 BRG1 Input is TxC Pin
6269 * <9..8> 11 BRG0 Input is TxC Pin
6270 * <7..6> 01 DPLL Input is BRG1 Output
6271 * <5..3> 100 TxCLK comes from BRG0
6272 * <2..0> 100 RxCLK comes from BRG0
6273 *
6274 * 0000 1111 0110 0100 = 0x0f64
6275 */
6276
6277 usc_OutReg( info, CMCR, 0x0f64 );
6278
6279
6280 /*
6281 * Write 16-bit Time Constant for BRG0
6282 * Time Constant = (ClkSpeed / data_rate) - 1
6283 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6284 */
6285
6286 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6287 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6288 else
6289 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
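		/*
		 * Worked example (illustrative only): for 9600 bps async the
		 * time constant is 921600/9600 - 1 = 95 on an ISA adapter and
		 * 691200/9600 - 1 = 71 on a PCI adapter.
		 */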
6290
6291
6292 /*
6293 * Hardware Configuration Register (HCR)
6294 * Clear Bit 1, BRG0 mode = Continuous
6295 * Set Bit 0 to enable BRG0.
6296 */
6297
6298 usc_OutReg( info, HCR,
6299 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6300
6301
6302 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6303
6304 usc_OutReg( info, IOCR,
6305 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6306 } else {
6307 /* data rate == 0 so turn off BRG0 */
6308 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6309 }
6310
6311 } /* end of usc_enable_async_clock() */
6312
6313 /*
6314 * Buffer Structures:
6315 *
6316 * Normal memory access uses virtual addresses that can make discontiguous
6317 * physical memory pages appear to be contiguous in the virtual address
6318  * space (the processor's memory mapping handles the conversions).
6319 *
6320 * DMA transfers require physically contiguous memory. This is because
6321 * the DMA system controller and DMA bus masters deal with memory using
6322 * only physical addresses.
6323 *
6324 * This causes a problem under Windows NT when large DMA buffers are
6325 * needed. Fragmentation of the nonpaged pool prevents allocations of
6326 * physically contiguous buffers larger than the PAGE_SIZE.
6327 *
6328 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6329 * allows DMA transfers to physically discontiguous buffers. Information
6330 * about each data transfer buffer is contained in a memory structure
6331 * called a 'buffer entry'. A list of buffer entries is maintained
6332 * to track and control the use of the data transfer buffers.
6333 *
6334 * To support this strategy we will allocate sufficient PAGE_SIZE
6335 * contiguous memory buffers to allow for the total required buffer
6336 * space.
6337 *
6338 * The 16C32 accesses the list of buffer entries using Bus Master
6339 * DMA. Control information is read from the buffer entries by the
6340 * 16C32 to control data transfers. status information is written to
6341 * the buffer entries by the 16C32 to indicate the status of completed
6342 * transfers.
6343 *
6344 * The CPU writes control information to the buffer entries to control
6345 * the 16C32 and reads status information from the buffer entries to
6346 * determine information about received and transmitted frames.
6347 *
6348 * Because the CPU and 16C32 (adapter) both need simultaneous access
6349 * to the buffer entries, the buffer entry memory is allocated with
6350 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6351 * entry list to PAGE_SIZE.
6352 *
6353 * The actual data buffers on the other hand will only be accessed
6354 * by the CPU or the adapter but not by both simultaneously. This allows
6355 * Scatter/Gather packet based DMA procedures for using physically
6356 * discontiguous pages.
6357 */
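/*
 * For reference, a buffer entry of the kind described above carries roughly
 * the following information. This is a sketch only; the real DMABUFFERENTRY
 * layout is defined in the driver headers and the field names here are
 * illustrative:
 *
 *	struct buffer_entry_sketch {
 *		u32 phys_addr;    // physical address of the data buffer
 *		u16 count;        // buffer size/data count (cleared by the 16C32 when in use)
 *		u16 status;       // completion status written by the 16C32
 *		u16 rcc;          // residual character count for the frame
 *		u32 link;         // physical address of the next entry (linked list)
 *		char *virt_addr;  // CPU virtual address of the data buffer
 *		u32 phys_entry;   // physical address of this entry itself
 *	};
 */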
6358
6359 /*
6360 * mgsl_reset_tx_dma_buffers()
6361 *
6362 * Set the count for all transmit buffers to 0 to indicate the
6363 * buffer is available for use and set the current buffer to the
6364 * first buffer. This effectively makes all buffers free and
6365 * discards any data in buffers.
6366 *
6367 * Arguments: info pointer to device instance data
6368 * Return Value: None
6369 */
6370 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6371 {
6372 unsigned int i;
6373
6374 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6375 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6376 }
6377
6378 info->current_tx_buffer = 0;
6379 info->start_tx_dma_buffer = 0;
6380 info->tx_dma_buffers_used = 0;
6381
6382 info->get_tx_holding_index = 0;
6383 info->put_tx_holding_index = 0;
6384 info->tx_holding_count = 0;
6385
6386 } /* end of mgsl_reset_tx_dma_buffers() */
6387
6388 /*
6389 * num_free_tx_dma_buffers()
6390 *
6391 * returns the number of free tx dma buffers available
6392 *
6393 * Arguments: info pointer to device instance data
6394 * Return Value: number of free tx dma buffers
6395 */
6396 static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6397 {
6398 return info->tx_buffer_count - info->tx_dma_buffers_used;
6399 }
6400
6401 /*
6402 * mgsl_reset_rx_dma_buffers()
6403 *
6404 * Set the count for all receive buffers to DMABUFFERSIZE
6405 * and set the current buffer to the first buffer. This effectively
6406 * makes all buffers free and discards any data in buffers.
6407 *
6408 * Arguments: info pointer to device instance data
6409 * Return Value: None
6410 */
6411 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6412 {
6413 unsigned int i;
6414
6415 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6416 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6417 // info->rx_buffer_list[i].count = DMABUFFERSIZE;
6418 // info->rx_buffer_list[i].status = 0;
6419 }
6420
6421 info->current_rx_buffer = 0;
6422
6423 } /* end of mgsl_reset_rx_dma_buffers() */
6424
6425 /*
6426 * mgsl_free_rx_frame_buffers()
6427 *
6428 * Free the receive buffers used by a received SDLC
6429 * frame such that the buffers can be reused.
6430 *
6431 * Arguments:
6432 *
6433 * info pointer to device instance data
6434 * StartIndex index of 1st receive buffer of frame
6435 * EndIndex index of last receive buffer of frame
6436 *
6437 * Return Value: None
6438 */
6439 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6440 {
6441 bool Done = false;
6442 DMABUFFERENTRY *pBufEntry;
6443 unsigned int Index;
6444
6445 /* Starting with 1st buffer entry of the frame clear the status */
6446 /* field and set the count field to DMA Buffer Size. */
6447
6448 Index = StartIndex;
6449
6450 while( !Done ) {
6451 pBufEntry = &(info->rx_buffer_list[Index]);
6452
6453 if ( Index == EndIndex ) {
6454 /* This is the last buffer of the frame! */
6455 Done = true;
6456 }
6457
6458 /* reset current buffer for reuse */
6459 // pBufEntry->status = 0;
6460 // pBufEntry->count = DMABUFFERSIZE;
6461 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6462
6463 /* advance to next buffer entry in linked list */
6464 Index++;
6465 if ( Index == info->rx_buffer_count )
6466 Index = 0;
6467 }
6468
6469 /* set current buffer to next buffer after last buffer of frame */
6470 info->current_rx_buffer = Index;
6471
6472 }	/* end of mgsl_free_rx_frame_buffers() */
6473
6474 /* mgsl_get_rx_frame()
6475 *
6476 * This function attempts to return a received SDLC frame from the
6477 * receive DMA buffers. Only frames received without errors are returned.
6478 *
6479 * Arguments: info pointer to device extension
6480 * Return Value: true if frame returned, otherwise false
6481 */
6482 static bool mgsl_get_rx_frame(struct mgsl_struct *info)
6483 {
6484 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6485 unsigned short status;
6486 DMABUFFERENTRY *pBufEntry;
6487 unsigned int framesize = 0;
6488 bool ReturnCode = false;
6489 unsigned long flags;
6490 struct tty_struct *tty = info->port.tty;
6491 bool return_frame = false;
6492
6493 /*
6494 * current_rx_buffer points to the 1st buffer of the next available
6495 * receive frame. To find the last buffer of the frame look for
6496 * a non-zero status field in the buffer entries. (The status
6497 	 * field is set by the 16C32 after completing a receive frame.)
6498 */
6499
6500 StartIndex = EndIndex = info->current_rx_buffer;
6501
6502 while( !info->rx_buffer_list[EndIndex].status ) {
6503 /*
6504 * If the count field of the buffer entry is non-zero then
6505 * this buffer has not been used. (The 16C32 clears the count
6506 * field when it starts using the buffer.) If an unused buffer
6507 * is encountered then there are no frames available.
6508 */
6509
6510 if ( info->rx_buffer_list[EndIndex].count )
6511 goto Cleanup;
6512
6513 /* advance to next buffer entry in linked list */
6514 EndIndex++;
6515 if ( EndIndex == info->rx_buffer_count )
6516 EndIndex = 0;
6517
6518 /* if entire list searched then no frame available */
6519 if ( EndIndex == StartIndex ) {
6520 /* If this occurs then something bad happened,
6521 * all buffers have been 'used' but none mark
6522 * the end of a frame. Reset buffers and receiver.
6523 */
6524
6525 if ( info->rx_enabled ){
6526 spin_lock_irqsave(&info->irq_spinlock,flags);
6527 usc_start_receiver(info);
6528 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6529 }
6530 goto Cleanup;
6531 }
6532 }
6533
6534
6535 /* check status of receive frame */
6536
6537 status = info->rx_buffer_list[EndIndex].status;
6538
6539 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6540 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6541 if ( status & RXSTATUS_SHORT_FRAME )
6542 info->icount.rxshort++;
6543 else if ( status & RXSTATUS_ABORT )
6544 info->icount.rxabort++;
6545 else if ( status & RXSTATUS_OVERRUN )
6546 info->icount.rxover++;
6547 else {
6548 info->icount.rxcrc++;
6549 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6550 return_frame = true;
6551 }
6552 framesize = 0;
6553 #if SYNCLINK_GENERIC_HDLC
6554 {
6555 info->netdev->stats.rx_errors++;
6556 info->netdev->stats.rx_frame_errors++;
6557 }
6558 #endif
6559 } else
6560 return_frame = true;
6561
6562 if ( return_frame ) {
6563 /* receive frame has no errors, get frame size.
6564 * The frame size is the starting value of the RCC (which was
6565 * set to 0xffff) minus the ending value of the RCC (decremented
6566 * once for each receive character) minus 2 for the 16-bit CRC.
6567 */
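		/*
		 * Worked example (illustrative only): if 14 data bytes plus a
		 * 16-bit CRC were received, the RCC counts down from 0xffff to
		 * 0xffef, so RCLRVALUE - rcc = 16 and subtracting 2 for the
		 * CRC leaves a framesize of 14.
		 */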
6568
6569 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6570
6571 /* adjust frame size for CRC if any */
6572 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6573 framesize -= 2;
6574 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6575 framesize -= 4;
6576 }
6577
6578 if ( debug_level >= DEBUG_LEVEL_BH )
6579 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6580 __FILE__,__LINE__,info->device_name,status,framesize);
6581
6582 if ( debug_level >= DEBUG_LEVEL_DATA )
6583 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6584 min_t(int, framesize, DMABUFFERSIZE),0);
6585
6586 if (framesize) {
6587 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6588 ((framesize+1) > info->max_frame_size) ) ||
6589 (framesize > info->max_frame_size) )
6590 info->icount.rxlong++;
6591 else {
6592 /* copy dma buffer(s) to contiguous intermediate buffer */
6593 int copy_count = framesize;
6594 int index = StartIndex;
6595 unsigned char *ptmp = info->intermediate_rxbuffer;
6596
6597 if ( !(status & RXSTATUS_CRC_ERROR))
6598 info->icount.rxok++;
6599
6600 while(copy_count) {
6601 int partial_count;
6602 if ( copy_count > DMABUFFERSIZE )
6603 partial_count = DMABUFFERSIZE;
6604 else
6605 partial_count = copy_count;
6606
6607 pBufEntry = &(info->rx_buffer_list[index]);
6608 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6609 ptmp += partial_count;
6610 copy_count -= partial_count;
6611
6612 if ( ++index == info->rx_buffer_count )
6613 index = 0;
6614 }
6615
6616 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6617 ++framesize;
6618 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6619 RX_CRC_ERROR :
6620 RX_OK);
6621
6622 if ( debug_level >= DEBUG_LEVEL_DATA )
6623 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6624 __FILE__,__LINE__,info->device_name,
6625 *ptmp);
6626 }
6627
6628 #if SYNCLINK_GENERIC_HDLC
6629 if (info->netcount)
6630 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6631 else
6632 #endif
6633 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6634 }
6635 }
6636 /* Free the buffers used by this frame. */
6637 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6638
6639 ReturnCode = true;
6640
6641 Cleanup:
6642
6643 if ( info->rx_enabled && info->rx_overflow ) {
6644 		/* The receiver needs to be restarted because of
6645 * a receive overflow (buffer or FIFO). If the
6646 * receive buffers are now empty, then restart receiver.
6647 */
6648
6649 if ( !info->rx_buffer_list[EndIndex].status &&
6650 info->rx_buffer_list[EndIndex].count ) {
6651 spin_lock_irqsave(&info->irq_spinlock,flags);
6652 usc_start_receiver(info);
6653 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6654 }
6655 }
6656
6657 return ReturnCode;
6658
6659 } /* end of mgsl_get_rx_frame() */
6660
6661 /* mgsl_get_raw_rx_frame()
6662 *
6663 * This function attempts to return a received frame from the
6664 * receive DMA buffers when running in external loop mode. In this mode,
6665 * we will return at most one DMABUFFERSIZE frame to the application.
6666 * The USC receiver is triggering off of DCD going active to start a new
6667 * frame, and DCD going inactive to terminate the frame (similar to
6668 * processing a closing flag character).
6669 *
6670 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6671 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6672 * status field and the RCC field will indicate the length of the
6673 * entire received frame. We take this RCC field and get the modulus
6674  * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6675 * last Rx DMA buffer and return that last portion of the frame.
6676 *
6677 * Arguments: info pointer to device extension
6678 * Return Value: true if frame returned, otherwise false
6679 */
6680 static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6681 {
6682 unsigned int CurrentIndex, NextIndex;
6683 unsigned short status;
6684 DMABUFFERENTRY *pBufEntry;
6685 unsigned int framesize = 0;
6686 bool ReturnCode = false;
6687 unsigned long flags;
6688 struct tty_struct *tty = info->port.tty;
6689
6690 /*
6691 * current_rx_buffer points to the 1st buffer of the next available
6692 * receive frame. The status field is set by the 16C32 after
6693 * completing a receive frame. If the status field of this buffer
6694 * is zero, either the USC is still filling this buffer or this
6695 * is one of a series of buffers making up a received frame.
6696 *
6697 * If the count field of this buffer is zero, the USC is either
6698 * using this buffer or has used this buffer. Look at the count
6699 * field of the next buffer. If that next buffer's count is
6700 * non-zero, the USC is still actively using the current buffer.
6701 * Otherwise, if the next buffer's count field is zero, the
6702 * current buffer is complete and the USC is using the next
6703 * buffer.
6704 */
6705 CurrentIndex = NextIndex = info->current_rx_buffer;
6706 ++NextIndex;
6707 if ( NextIndex == info->rx_buffer_count )
6708 NextIndex = 0;
6709
6710 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6711 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6712 info->rx_buffer_list[NextIndex].count == 0)) {
6713 /*
6714 * Either the status field of this dma buffer is non-zero
6715 * (indicating the last buffer of a receive frame) or the next
6716 * buffer is marked as in use -- implying this buffer is complete
6717 	 * and is an intermediate buffer for this received frame.
6718 */
6719
6720 status = info->rx_buffer_list[CurrentIndex].status;
6721
6722 if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
6723 RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
6724 if ( status & RXSTATUS_SHORT_FRAME )
6725 info->icount.rxshort++;
6726 else if ( status & RXSTATUS_ABORT )
6727 info->icount.rxabort++;
6728 else if ( status & RXSTATUS_OVERRUN )
6729 info->icount.rxover++;
6730 else
6731 info->icount.rxcrc++;
6732 framesize = 0;
6733 } else {
6734 /*
6735 * A receive frame is available, get frame size and status.
6736 *
6737 * The frame size is the starting value of the RCC (which was
6738 * set to 0xffff) minus the ending value of the RCC (decremented
6739 * once for each receive character) minus 2 or 4 for the 16-bit
6740 * or 32-bit CRC.
6741 *
6742 * If the status field is zero, this is an intermediate buffer.
6743 		 * Its size is 4K.
6744 *
6745 * If the DMA Buffer Entry's Status field is non-zero, the
6746 * receive operation completed normally (ie: DCD dropped). The
6747 * RCC field is valid and holds the received frame size.
6748 * It is possible that the RCC field will be zero on a DMA buffer
6749 * entry with a non-zero status. This can occur if the total
6750 * frame size (number of bytes between the time DCD goes active
6751 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6752 * case the 16C32 has underrun on the RCC count and appears to
6753 * stop updating this counter to let us know the actual received
6754 * frame size. If this happens (non-zero status and zero RCC),
6755 * simply return the entire RxDMA Buffer
6756 */
6757 if ( status ) {
6758 /*
6759 * In the event that the final RxDMA Buffer is
6760 * terminated with a non-zero status and the RCC
6761 * field is zero, we interpret this as the RCC
6762 * having underflowed (received frame > 65535 bytes).
6763 *
6764 * Signal the event to the user by passing back
6765 * a status of RxStatus_CrcError returning the full
6766 * buffer and let the app figure out what data is
6767 * actually valid
6768 */
6769 if ( info->rx_buffer_list[CurrentIndex].rcc )
6770 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6771 else
6772 framesize = DMABUFFERSIZE;
6773 }
6774 else
6775 framesize = DMABUFFERSIZE;
6776 }
6777
6778 if ( framesize > DMABUFFERSIZE ) {
6779 /*
6780 * if running in raw sync mode, ISR handler for
6781 * End Of Buffer events terminates all buffers at 4K.
6782 * If this frame size is said to be >4K, get the
6783 * actual number of bytes of the frame in this buffer.
6784 */
6785 framesize = framesize % DMABUFFERSIZE;
6786 }
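		/*
		 * Worked example (illustrative only, assuming the 4K buffer
		 * size mentioned above): if DCD drops after 9300 bytes, the
		 * RCC gives a total frame size of 9300, and 9300 % 4096 = 1108
		 * is the number of bytes occupying this final DMA buffer.
		 */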
6787
6788
6789 if ( debug_level >= DEBUG_LEVEL_BH )
6790 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6791 __FILE__,__LINE__,info->device_name,status,framesize);
6792
6793 if ( debug_level >= DEBUG_LEVEL_DATA )
6794 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6795 min_t(int, framesize, DMABUFFERSIZE),0);
6796
6797 if (framesize) {
6798 /* copy dma buffer(s) to contiguous intermediate buffer */
6799 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6800
6801 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6802 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6803 info->icount.rxok++;
6804
6805 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6806 }
6807
6808 /* Free the buffers used by this frame. */
6809 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6810
6811 ReturnCode = true;
6812 }
6813
6814
6815 if ( info->rx_enabled && info->rx_overflow ) {
6816 		/* The receiver needs to be restarted because of
6817 * a receive overflow (buffer or FIFO). If the
6818 * receive buffers are now empty, then restart receiver.
6819 */
6820
6821 if ( !info->rx_buffer_list[CurrentIndex].status &&
6822 info->rx_buffer_list[CurrentIndex].count ) {
6823 spin_lock_irqsave(&info->irq_spinlock,flags);
6824 usc_start_receiver(info);
6825 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6826 }
6827 }
6828
6829 return ReturnCode;
6830
6831 } /* end of mgsl_get_raw_rx_frame() */
6832
6833 /* mgsl_load_tx_dma_buffer()
6834 *
6835 * Load the transmit DMA buffer with the specified data.
6836 *
6837 * Arguments:
6838 *
6839 * info pointer to device extension
6840 * Buffer pointer to buffer containing frame to load
6841 * BufferSize size in bytes of frame in Buffer
6842 *
6843 * Return Value: None
6844 */
6845 static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6846 const char *Buffer, unsigned int BufferSize)
6847 {
6848 unsigned short Copycount;
6849 unsigned int i = 0;
6850 DMABUFFERENTRY *pBufEntry;
6851
6852 if ( debug_level >= DEBUG_LEVEL_DATA )
6853 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6854
6855 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6856 /* set CMR:13 to start transmit when
6857 * next GoAhead (abort) is received
6858 */
6859 info->cmr_value |= BIT13;
6860 }
6861
6862 /* begin loading the frame in the next available tx dma
6863 	 * buffer, remember its starting location for setting
6864 * up tx dma operation
6865 */
6866 i = info->current_tx_buffer;
6867 info->start_tx_dma_buffer = i;
6868
6869 /* Setup the status and RCC (Frame Size) fields of the 1st */
6870 /* buffer entry in the transmit DMA buffer list. */
6871
6872 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
6873 info->tx_buffer_list[i].rcc = BufferSize;
6874 info->tx_buffer_list[i].count = BufferSize;
6875
6876 /* Copy frame data from 1st source buffer to the DMA buffers. */
6877 /* The frame data may span multiple DMA buffers. */
6878
6879 while( BufferSize ){
6880 /* Get a pointer to next DMA buffer entry. */
6881 pBufEntry = &info->tx_buffer_list[i++];
6882
6883 if ( i == info->tx_buffer_count )
6884 i=0;
6885
6886 /* Calculate the number of bytes that can be copied from */
6887 /* the source buffer to this DMA buffer. */
6888 if ( BufferSize > DMABUFFERSIZE )
6889 Copycount = DMABUFFERSIZE;
6890 else
6891 Copycount = BufferSize;
6892
6893 /* Actually copy data from source buffer to DMA buffer. */
6894 /* Also set the data count for this individual DMA buffer. */
6895 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6896 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
6897 else
6898 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
6899
6900 pBufEntry->count = Copycount;
6901
6902 /* Advance source pointer and reduce remaining data count. */
6903 Buffer += Copycount;
6904 BufferSize -= Copycount;
6905
6906 ++info->tx_dma_buffers_used;
6907 }
6908
6909 /* remember next available tx dma buffer */
6910 info->current_tx_buffer = i;
6911
6912 } /* end of mgsl_load_tx_dma_buffer() */
6913
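/* Illustrative sketch (not part of the driver): how a caller hands a
 * frame to mgsl_load_tx_dma_buffer() and then kicks the transmitter,
 * mirroring the pattern used by hdlcdev_xmit() later in this file.
 * Assumes 'info' points to an initialized device instance and that
 * DMABUFFERSIZE is 4K, so a 10000 byte frame would be split across
 * three DMA buffer entries holding 4096, 4096 and 1808 bytes.
 *
 *	unsigned long flags;
 *	unsigned char frame[] = { 0xff, 0x03, 0x00, 0x01 };
 *
 *	mgsl_load_tx_dma_buffer(info, frame, sizeof(frame));
 *
 *	spin_lock_irqsave(&info->irq_spinlock, flags);
 *	if (!info->tx_active)
 *		usc_start_transmitter(info);
 *	spin_unlock_irqrestore(&info->irq_spinlock, flags);
 */
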
6914 /*
6915 * mgsl_register_test()
6916 *
6917 * Performs a register test of the 16C32.
6918 *
6919 * Arguments: info pointer to device instance data
6920 * Return Value: true if test passed, otherwise false
6921 */
6922 static bool mgsl_register_test( struct mgsl_struct *info )
6923 {
6924 static unsigned short BitPatterns[] =
6925 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
6926 static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
6927 unsigned int i;
6928 bool rc = true;
6929 unsigned long flags;
6930
6931 spin_lock_irqsave(&info->irq_spinlock,flags);
6932 usc_reset(info);
6933
6934 /* Verify the reset state of some registers. */
6935
6936 if ( (usc_InReg( info, SICR ) != 0) ||
6937 (usc_InReg( info, IVR ) != 0) ||
6938 (usc_InDmaReg( info, DIVR ) != 0) ){
6939 rc = false;
6940 }
6941
6942 if ( rc ){
6943 /* Write bit patterns to various registers (a different pattern */
6944 /* to each register), then read back and verify the values. */
6945
6946 for ( i = 0 ; i < Patterncount ; i++ ) {
6947 usc_OutReg( info, TC0R, BitPatterns[i] );
6948 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
6949 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
6950 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
6951 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
6952 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
6953
6954 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
6955 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
6956 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
6957 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
6958 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
6959 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
6960 rc = false;
6961 break;
6962 }
6963 }
6964 }
6965
6966 usc_reset(info);
6967 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6968
6969 return rc;
6970
6971 } /* end of mgsl_register_test() */
6972
6973 /* mgsl_irq_test() Perform interrupt test of the 16C32.
6974 *
6975 * Arguments: info pointer to device instance data
6976 * Return Value: true if test passed, otherwise false
6977 */
6978 static bool mgsl_irq_test( struct mgsl_struct *info )
6979 {
6980 unsigned long EndTime;
6981 unsigned long flags;
6982
6983 spin_lock_irqsave(&info->irq_spinlock,flags);
6984 usc_reset(info);
6985
6986 /*
6987 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
6988 * The ISR sets irq_occurred to true.
6989 */
6990
6991 info->irq_occurred = false;
6992
6993 /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
6994 /* Enable INTEN (Port 6, Bit12) */
6995 /* This connects the IRQ request signal to the ISA bus */
6996 /* on the ISA adapter. This has no effect for the PCI adapter */
6997 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
6998
6999 usc_EnableMasterIrqBit(info);
7000 usc_EnableInterrupts(info, IO_PIN);
7001 usc_ClearIrqPendingBits(info, IO_PIN);
7002
7003 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7004 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7005
7006 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7007
7008 EndTime=100;
7009 while( EndTime-- && !info->irq_occurred ) {
7010 msleep_interruptible(10);
7011 }
7012
7013 spin_lock_irqsave(&info->irq_spinlock,flags);
7014 usc_reset(info);
7015 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7016
7017 return info->irq_occurred;
7018
7019 } /* end of mgsl_irq_test() */
7020
7021 /* mgsl_dma_test()
7022 *
7023 * Perform a DMA test of the 16C32. A small frame is
7024 * transmitted via DMA from a transmit buffer to a receive buffer
7025 * using single buffer DMA mode.
7026 *
7027 * Arguments: info pointer to device instance data
7028 * Return Value: true if test passed, otherwise false
7029 */
7030 static bool mgsl_dma_test( struct mgsl_struct *info )
7031 {
7032 unsigned short FifoLevel;
7033 unsigned long phys_addr;
7034 unsigned int FrameSize;
7035 unsigned int i;
7036 char *TmpPtr;
7037 bool rc = true;
7038 unsigned short status=0;
7039 unsigned long EndTime;
7040 unsigned long flags;
7041 MGSL_PARAMS tmp_params;
7042
7043 /* save current port options */
7044 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7045 /* load default port options */
7046 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7047
7048 #define TESTFRAMESIZE 40
7049
7050 spin_lock_irqsave(&info->irq_spinlock,flags);
7051
7052 /* setup 16C32 for SDLC DMA transfer mode */
7053
7054 usc_reset(info);
7055 usc_set_sdlc_mode(info);
7056 usc_enable_loopback(info,1);
7057
7058 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7059 * field of the buffer entry after fetching buffer address. This
7060 * way we can detect a DMA failure for a DMA read (which should be
7061 * non-destructive to system memory) before we try and write to
7062 * memory (where a failure could corrupt system memory).
7063 */
7064
7065 /* Receive DMA mode Register (RDMR)
7066 *
7067 * <15..14> 11 DMA mode = Linked List Buffer mode
7068 * <13> 1 RSBinA/L = store Rx status Block in List entry
7069 * <12> 0 1 = Clear count of List Entry after fetching
7070 * <11..10> 00 Address mode = Increment
7071 * <9> 1 Terminate Buffer on RxBound
7072 * <8> 0 Bus Width = 16bits
7073 * <7..0> ? status Bits (write as 0s)
7074 *
7075 * 1110 0010 0000 0000 = 0xe200
7076 */
7077
7078 usc_OutDmaReg( info, RDMR, 0xe200 );
7079
7080 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7081
7082
7083 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7084
7085 FrameSize = TESTFRAMESIZE;
7086
7087 /* setup 1st transmit buffer entry: */
7088 /* with frame size and transmit control word */
7089
7090 info->tx_buffer_list[0].count = FrameSize;
7091 info->tx_buffer_list[0].rcc = FrameSize;
7092 info->tx_buffer_list[0].status = 0x4000;
7093
7094 /* build a transmit frame in 1st transmit DMA buffer */
7095
7096 TmpPtr = info->tx_buffer_list[0].virt_addr;
7097 for (i = 0; i < FrameSize; i++ )
7098 *TmpPtr++ = i;
7099
7100 /* setup 1st receive buffer entry: */
7101 /* clear status, set max receive buffer size */
7102
7103 info->rx_buffer_list[0].status = 0;
7104 info->rx_buffer_list[0].count = FrameSize + 4;
7105
7106 /* zero out the 1st receive buffer */
7107
7108 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7109
7110 /* Set count field of next buffer entries to prevent */
7111 /* 16C32 from using buffers after the 1st one. */
7112
7113 info->tx_buffer_list[1].count = 0;
7114 info->rx_buffer_list[1].count = 0;
7115
7116
7117 /***************************/
7118 /* Program 16C32 receiver. */
7119 /***************************/
7120
7121 spin_lock_irqsave(&info->irq_spinlock,flags);
7122
7123 /* setup DMA transfers */
7124 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7125
7126 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7127 phys_addr = info->rx_buffer_list[0].phys_entry;
7128 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7129 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7130
7131 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7132 usc_InDmaReg( info, RDMR );
7133 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7134
7135 /* Enable Receiver (RMR <1..0> = 10) */
7136 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7137
7138 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7139
7140
7141 /*************************************************************/
7142 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7143 /*************************************************************/
7144
7145 /* Wait 100ms for interrupt. */
7146 EndTime = jiffies + msecs_to_jiffies(100);
7147
7148 for(;;) {
7149 if (time_after(jiffies, EndTime)) {
7150 rc = false;
7151 break;
7152 }
7153
7154 spin_lock_irqsave(&info->irq_spinlock,flags);
7155 status = usc_InDmaReg( info, RDMR );
7156 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7157
7158 if ( !(status & BIT4) && (status & BIT5) ) {
7159 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7160 /* BUSY (BIT 5) is active (channel still active). */
7161 /* This means the buffer entry read has completed. */
7162 break;
7163 }
7164 }
7165
7166
7167 /******************************/
7168 /* Program 16C32 transmitter. */
7169 /******************************/
7170
7171 spin_lock_irqsave(&info->irq_spinlock,flags);
7172
7173 /* Program the Transmit Character Length Register (TCLR) */
7174 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7175
7176 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7177 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7178
7179 /* Program the address of the 1st DMA Buffer Entry in linked list */
7180
7181 phys_addr = info->tx_buffer_list[0].phys_entry;
7182 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7183 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7184
7185 /* unlatch Tx status bits, and start transmit channel. */
7186
7187 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7188 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7189
7190 /* wait for DMA controller to fill transmit FIFO */
7191
7192 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7193
7194 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7195
7196
7197 /**********************************/
7198 /* WAIT FOR TRANSMIT FIFO TO FILL */
7199 /**********************************/
7200
7201 /* Wait 100ms */
7202 EndTime = jiffies + msecs_to_jiffies(100);
7203
7204 for(;;) {
7205 if (time_after(jiffies, EndTime)) {
7206 rc = false;
7207 break;
7208 }
7209
7210 spin_lock_irqsave(&info->irq_spinlock,flags);
7211 FifoLevel = usc_InReg(info, TICR) >> 8;
7212 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7213
7214 if ( FifoLevel < 16 )
7215 break;
7216 else
7217 if ( FrameSize < 32 ) {
7218 /* This frame is smaller than the entire transmit FIFO */
7219 /* so wait for the entire frame to be loaded. */
7220 if ( FifoLevel <= (32 - FrameSize) )
7221 break;
7222 }
7223 }
7224
7225
7226 if ( rc )
7227 {
7228 /* Enable 16C32 transmitter. */
7229
7230 spin_lock_irqsave(&info->irq_spinlock,flags);
7231
7232 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7233 usc_TCmd( info, TCmd_SendFrame );
7234 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7235
7236 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7237
7238
7239 /******************************/
7240 /* WAIT FOR TRANSMIT COMPLETE */
7241 /******************************/
7242
7243 /* Wait 100ms */
7244 EndTime = jiffies + msecs_to_jiffies(100);
7245
7246 /* While timer not expired wait for transmit complete */
7247
7248 spin_lock_irqsave(&info->irq_spinlock,flags);
7249 status = usc_InReg( info, TCSR );
7250 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7251
7252 while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) {
7253 if (time_after(jiffies, EndTime)) {
7254 rc = false;
7255 break;
7256 }
7257
7258 spin_lock_irqsave(&info->irq_spinlock,flags);
7259 status = usc_InReg( info, TCSR );
7260 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7261 }
7262 }
7263
7264
7265 if ( rc ){
7266 /* CHECK FOR TRANSMIT ERRORS */
7267 if ( status & (BIT5 | BIT1) )
7268 rc = false;
7269 }
7270
7271 if ( rc ) {
7272 /* WAIT FOR RECEIVE COMPLETE */
7273
7274 /* Wait 100ms */
7275 EndTime = jiffies + msecs_to_jiffies(100);
7276
7277 /* Wait for 16C32 to write receive status to buffer entry. */
7278 status=info->rx_buffer_list[0].status;
7279 while ( status == 0 ) {
7280 if (time_after(jiffies, EndTime)) {
7281 rc = false;
7282 break;
7283 }
7284 status=info->rx_buffer_list[0].status;
7285 }
7286 }
7287
7288
7289 if ( rc ) {
7290 /* CHECK FOR RECEIVE ERRORS */
7291 status = info->rx_buffer_list[0].status;
7292
7293 if ( status & (BIT8 | BIT3 | BIT1) ) {
7294 /* receive error has occurred */
7295 rc = false;
7296 } else {
7297 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7298 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7299 rc = false;
7300 }
7301 }
7302 }
7303
7304 spin_lock_irqsave(&info->irq_spinlock,flags);
7305 usc_reset( info );
7306 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7307
7308 /* restore current port options */
7309 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7310
7311 return rc;
7312
7313 } /* end of mgsl_dma_test() */
7314
7315 /* mgsl_adapter_test()
7316 *
7317 * Perform the register, IRQ, and DMA tests for the 16C32.
7318 *
7319 * Arguments: info pointer to device instance data
7320 * Return Value: 0 if success, otherwise -ENODEV
7321 */
7322 static int mgsl_adapter_test( struct mgsl_struct *info )
7323 {
7324 if ( debug_level >= DEBUG_LEVEL_INFO )
7325 printk( "%s(%d):Testing device %s\n",
7326 __FILE__,__LINE__,info->device_name );
7327
7328 if ( !mgsl_register_test( info ) ) {
7329 info->init_error = DiagStatus_AddressFailure;
7330 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7331 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7332 return -ENODEV;
7333 }
7334
7335 if ( !mgsl_irq_test( info ) ) {
7336 info->init_error = DiagStatus_IrqFailure;
7337 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7338 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7339 return -ENODEV;
7340 }
7341
7342 if ( !mgsl_dma_test( info ) ) {
7343 info->init_error = DiagStatus_DmaFailure;
7344 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7345 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7346 return -ENODEV;
7347 }
7348
7349 if ( debug_level >= DEBUG_LEVEL_INFO )
7350 printk( "%s(%d):device %s passed diagnostics\n",
7351 __FILE__,__LINE__,info->device_name );
7352
7353 return 0;
7354
7355 } /* end of mgsl_adapter_test() */
7356
7357 /* mgsl_memory_test()
7358 *
7359 * Test the shared memory on a PCI adapter.
7360 *
7361 * Arguments: info pointer to device instance data
7362 * Return Value: true if test passed, otherwise false
7363 */
7364 static bool mgsl_memory_test( struct mgsl_struct *info )
7365 {
7366 static unsigned long BitPatterns[] =
7367 { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7368 unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
7369 unsigned long i;
7370 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7371 unsigned long * TestAddr;
7372
7373 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7374 return true;
7375
7376 TestAddr = (unsigned long *)info->memory_base;
7377
7378 /* Test data lines with test pattern at one location. */
7379
7380 for ( i = 0 ; i < Patterncount ; i++ ) {
7381 *TestAddr = BitPatterns[i];
7382 if ( *TestAddr != BitPatterns[i] )
7383 return false;
7384 }
7385
7386 /* Test address lines with incrementing pattern over */
7387 /* entire address range. */
7388
7389 for ( i = 0 ; i < TestLimit ; i++ ) {
7390 *TestAddr = i * 4;
7391 TestAddr++;
7392 }
7393
7394 TestAddr = (unsigned long *)info->memory_base;
7395
7396 for ( i = 0 ; i < TestLimit ; i++ ) {
7397 if ( *TestAddr != i * 4 )
7398 return false;
7399 TestAddr++;
7400 }
7401
7402 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7403
7404 return true;
7405
7406 } /* End Of mgsl_memory_test() */
7407
7408
7409 /* mgsl_load_pci_memory()
7410 *
7411 * Load a large block of data into the PCI shared memory.
7412 * Use this instead of memcpy() or memmove() to move data
7413 * into the PCI shared memory.
7414 *
7415 * Notes:
7416 *
7417 * This function prevents the PCI9050 interface chip from hogging
7418 * the adapter local bus, which can starve the 16C32 by preventing
7419 * 16C32 bus master cycles.
7420 *
7421 * The PCI9050 documentation says that the 9050 will always release
7422 * control of the local bus after completing the current read
7423 * or write operation.
7424 *
7425 * It appears that as long as the PCI9050 write FIFO is full, the
7426 * PCI9050 treats all of the writes as a single burst transaction
7427 * and will not release the bus. This causes DMA latency problems
7428 * at high speeds when copying large data blocks to the shared
7429 * memory.
7430 *
7431 * This function, in effect, breaks a large shared memory write
7432 * into multiple transactions by interleaving a shared memory read
7433 * that flushes the write FIFO and 'completes' the write
7434 * transaction. This allows any pending DMA request to gain control
7435 * of the local bus in a timely fashion.
7436 *
7437 * Arguments:
7438 *
7439 * TargetPtr pointer to target address in PCI shared memory
7440 * SourcePtr pointer to source buffer for data
7441 * count count in bytes of data to copy
7442 *
7443 * Return Value: None
7444 */
7445 static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7446 unsigned short count )
7447 {
7448 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7449 #define PCI_LOAD_INTERVAL 64
7450
7451 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7452 unsigned short Index;
7453 unsigned long Dummy;
7454
7455 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7456 {
7457 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7458 Dummy = *((volatile unsigned long *)TargetPtr);
7459 TargetPtr += PCI_LOAD_INTERVAL;
7460 SourcePtr += PCI_LOAD_INTERVAL;
7461 }
7462
7463 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7464
7465 } /* End Of mgsl_load_pci_memory() */
7466
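/* Worked example (illustrative): with PCI_LOAD_INTERVAL at 64, a call
 * with count == 200 gives Intervalcount == 3, so three 64 byte copies
 * are made, each followed by a 32-bit read-back of the target to flush
 * the PCI9050 write FIFO, and the final memcpy moves the remaining
 * 200 % 64 == 8 bytes.
 */
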
7467 static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7468 {
7469 int i;
7470 int linecount;
7471 if (xmit)
7472 printk("%s tx data:\n",info->device_name);
7473 else
7474 printk("%s rx data:\n",info->device_name);
7475
7476 while(count) {
7477 if (count > 16)
7478 linecount = 16;
7479 else
7480 linecount = count;
7481
7482 for(i=0;i<linecount;i++)
7483 printk("%02X ",(unsigned char)data[i]);
7484 for(;i<17;i++)
7485 printk(" ");
7486 for(i=0;i<linecount;i++) {
7487 if (data[i]>=040 && data[i]<=0176)
7488 printk("%c",data[i]);
7489 else
7490 printk(".");
7491 }
7492 printk("\n");
7493
7494 data += linecount;
7495 count -= linecount;
7496 }
7497 } /* end of mgsl_trace_block() */
7498
7499 /* mgsl_tx_timeout()
7500 *
7501 * called when HDLC frame times out
7502 * update stats and do tx completion processing
7503 *
7504 * Arguments: context pointer to device instance data
7505 * Return Value: None
7506 */
7507 static void mgsl_tx_timeout(struct timer_list *t)
7508 {
7509 struct mgsl_struct *info = from_timer(info, t, tx_timer);
7510 unsigned long flags;
7511
7512 if ( debug_level >= DEBUG_LEVEL_INFO )
7513 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7514 __FILE__,__LINE__,info->device_name);
7515 if(info->tx_active &&
7516 (info->params.mode == MGSL_MODE_HDLC ||
7517 info->params.mode == MGSL_MODE_RAW) ) {
7518 info->icount.txtimeout++;
7519 }
7520 spin_lock_irqsave(&info->irq_spinlock,flags);
7521 info->tx_active = false;
7522 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7523
7524 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7525 usc_loopmode_cancel_transmit( info );
7526
7527 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7528
7529 #if SYNCLINK_GENERIC_HDLC
7530 if (info->netcount)
7531 hdlcdev_tx_done(info);
7532 else
7533 #endif
7534 mgsl_bh_transmit(info);
7535
7536 } /* end of mgsl_tx_timeout() */
7537
7538 /* signal that there are no more frames to send, so that
7539 * the line is 'released' by echoing RxD to TxD when the current
7540 * transmission is complete (or immediately if no tx is in progress).
7541 */
7542 static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7543 {
7544 unsigned long flags;
7545
7546 spin_lock_irqsave(&info->irq_spinlock,flags);
7547 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7548 if (info->tx_active)
7549 info->loopmode_send_done_requested = true;
7550 else
7551 usc_loopmode_send_done(info);
7552 }
7553 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7554
7555 return 0;
7556 }
7557
7558 /* release the line by echoing RxD to TxD
7559 * upon completion of a transmit frame
7560 */
7561 static void usc_loopmode_send_done( struct mgsl_struct * info )
7562 {
7563 info->loopmode_send_done_requested = false;
7564 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7565 info->cmr_value &= ~BIT13;
7566 usc_OutReg(info, CMR, info->cmr_value);
7567 }
7568
7569 /* abort a transmit in progress while in HDLC LoopMode
7570 */
7571 static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7572 {
7573 /* reset tx dma channel and purge TxFifo */
7574 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7575 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7576 usc_loopmode_send_done( info );
7577 }
7578
7579 /* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7580 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7581 * we must clear CMR:13 to begin repeating TxData to RxData
7582 */
7583 static void usc_loopmode_insert_request( struct mgsl_struct * info )
7584 {
7585 info->loopmode_insert_requested = true;
7586
7587 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7588 * begin repeating TxData on RxData (complete insertion)
7589 */
7590 usc_OutReg( info, RICR,
7591 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7592
7593 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7594 info->cmr_value |= BIT13;
7595 usc_OutReg(info, CMR, info->cmr_value);
7596 }
7597
7598 /* return 1 if station is inserted into the loop, otherwise 0
7599 */
7600 static int usc_loopmode_active( struct mgsl_struct * info)
7601 {
7602 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7603 }
7604
7605 #if SYNCLINK_GENERIC_HDLC
7606
7607 /**
7608 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7609 * set encoding and frame check sequence (FCS) options
7610 *
7611 * dev pointer to network device structure
7612 * encoding serial encoding setting
7613 * parity FCS setting
7614 *
7615 * returns 0 if success, otherwise error code
7616 */
7617 static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7618 unsigned short parity)
7619 {
7620 struct mgsl_struct *info = dev_to_port(dev);
7621 unsigned char new_encoding;
7622 unsigned short new_crctype;
7623
7624 /* return error if TTY interface open */
7625 if (info->port.count)
7626 return -EBUSY;
7627
7628 switch (encoding)
7629 {
7630 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7631 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7632 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7633 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7634 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7635 default: return -EINVAL;
7636 }
7637
7638 switch (parity)
7639 {
7640 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7641 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7642 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7643 default: return -EINVAL;
7644 }
7645
7646 info->params.encoding = new_encoding;
7647 info->params.crc_type = new_crctype;
7648
7649 /* if network interface up, reprogram hardware */
7650 if (info->netcount)
7651 mgsl_program_hw(info);
7652
7653 return 0;
7654 }
7655
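/* Illustrative example (not part of the driver): given the mapping
 * tables above, a call such as
 *
 *	hdlcdev_attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
 *
 * leaves info->params.encoding == HDLC_ENCODING_NRZ and
 * info->params.crc_type == HDLC_CRC_16_CCITT, and reprograms the
 * hardware only if the network interface is already up.
 */
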
7656 /**
7657 * called by generic HDLC layer to send frame
7658 *
7659 * skb socket buffer containing HDLC frame
7660 * dev pointer to network device structure
7661 */
7662 static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
7663 struct net_device *dev)
7664 {
7665 struct mgsl_struct *info = dev_to_port(dev);
7666 unsigned long flags;
7667
7668 if (debug_level >= DEBUG_LEVEL_INFO)
7669 printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
7670
7671 /* stop sending until this frame completes */
7672 netif_stop_queue(dev);
7673
7674 /* copy data to device buffers */
7675 info->xmit_cnt = skb->len;
7676 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7677
7678 /* update network statistics */
7679 dev->stats.tx_packets++;
7680 dev->stats.tx_bytes += skb->len;
7681
7682 /* done with socket buffer, so free it */
7683 dev_kfree_skb(skb);
7684
7685 /* save start time for transmit timeout detection */
7686 netif_trans_update(dev);
7687
7688 /* start hardware transmitter if necessary */
7689 spin_lock_irqsave(&info->irq_spinlock,flags);
7690 if (!info->tx_active)
7691 usc_start_transmitter(info);
7692 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7693
7694 return NETDEV_TX_OK;
7695 }
7696
7697 /**
7698 * called by network layer when interface enabled
7699 * claim resources and initialize hardware
7700 *
7701 * dev pointer to network device structure
7702 *
7703 * returns 0 if success, otherwise error code
7704 */
7705 static int hdlcdev_open(struct net_device *dev)
7706 {
7707 struct mgsl_struct *info = dev_to_port(dev);
7708 int rc;
7709 unsigned long flags;
7710
7711 if (debug_level >= DEBUG_LEVEL_INFO)
7712 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7713
7714 /* generic HDLC layer open processing */
7715 rc = hdlc_open(dev);
7716 if (rc)
7717 return rc;
7718
7719 /* arbitrate between network and tty opens */
7720 spin_lock_irqsave(&info->netlock, flags);
7721 if (info->port.count != 0 || info->netcount != 0) {
7722 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
7723 spin_unlock_irqrestore(&info->netlock, flags);
7724 return -EBUSY;
7725 }
7726 info->netcount=1;
7727 spin_unlock_irqrestore(&info->netlock, flags);
7728
7729 /* claim resources and init adapter */
7730 if ((rc = startup(info)) != 0) {
7731 spin_lock_irqsave(&info->netlock, flags);
7732 info->netcount=0;
7733 spin_unlock_irqrestore(&info->netlock, flags);
7734 return rc;
7735 }
7736
7737 /* assert RTS and DTR, apply hardware settings */
7738 info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
7739 mgsl_program_hw(info);
7740
7741 /* enable network layer transmit */
7742 netif_trans_update(dev);
7743 netif_start_queue(dev);
7744
7745 /* inform generic HDLC layer of current DCD status */
7746 spin_lock_irqsave(&info->irq_spinlock, flags);
7747 usc_get_serial_signals(info);
7748 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7749 if (info->serial_signals & SerialSignal_DCD)
7750 netif_carrier_on(dev);
7751 else
7752 netif_carrier_off(dev);
7753 return 0;
7754 }
7755
7756 /**
7757 * called by network layer when interface is disabled
7758 * shutdown hardware and release resources
7759 *
7760 * dev pointer to network device structure
7761 *
7762 * returns 0 if success, otherwise error code
7763 */
7764 static int hdlcdev_close(struct net_device *dev)
7765 {
7766 struct mgsl_struct *info = dev_to_port(dev);
7767 unsigned long flags;
7768
7769 if (debug_level >= DEBUG_LEVEL_INFO)
7770 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7771
7772 netif_stop_queue(dev);
7773
7774 /* shutdown adapter and release resources */
7775 shutdown(info);
7776
7777 hdlc_close(dev);
7778
7779 spin_lock_irqsave(&info->netlock, flags);
7780 info->netcount=0;
7781 spin_unlock_irqrestore(&info->netlock, flags);
7782
7783 return 0;
7784 }
7785
7786 /**
7787 * called by network layer to process IOCTL call to network device
7788 *
7789 * dev pointer to network device structure
7790 * ifr pointer to network interface request structure
7791 * cmd IOCTL command code
7792 *
7793 * returns 0 if success, otherwise error code
7794 */
7795 static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7796 {
7797 const size_t size = sizeof(sync_serial_settings);
7798 sync_serial_settings new_line;
7799 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7800 struct mgsl_struct *info = dev_to_port(dev);
7801 unsigned int flags;
7802
7803 if (debug_level >= DEBUG_LEVEL_INFO)
7804 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7805
7806 /* return error if TTY interface open */
7807 if (info->port.count)
7808 return -EBUSY;
7809
7810 if (cmd != SIOCWANDEV)
7811 return hdlc_ioctl(dev, ifr, cmd);
7812
7813 switch(ifr->ifr_settings.type) {
7814 case IF_GET_IFACE: /* return current sync_serial_settings */
7815
7816 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7817 if (ifr->ifr_settings.size < size) {
7818 ifr->ifr_settings.size = size; /* data size wanted */
7819 return -ENOBUFS;
7820 }
7821
7822 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7823 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7824 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7825 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7826
7827 memset(&new_line, 0, sizeof(new_line));
7828 switch (flags){
7829 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7830 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7831 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7832 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7833 default: new_line.clock_type = CLOCK_DEFAULT;
7834 }
7835
7836 new_line.clock_rate = info->params.clock_speed;
7837 new_line.loopback = info->params.loopback ? 1:0;
7838
7839 if (copy_to_user(line, &new_line, size))
7840 return -EFAULT;
7841 return 0;
7842
7843 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7844
7845 if(!capable(CAP_NET_ADMIN))
7846 return -EPERM;
7847 if (copy_from_user(&new_line, line, size))
7848 return -EFAULT;
7849
7850 switch (new_line.clock_type)
7851 {
7852 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7853 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7854 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7855 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7856 case CLOCK_DEFAULT: flags = info->params.flags &
7857 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7858 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7859 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7860 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7861 default: return -EINVAL;
7862 }
7863
7864 if (new_line.loopback != 0 && new_line.loopback != 1)
7865 return -EINVAL;
7866
7867 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7868 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7869 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7870 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7871 info->params.flags |= flags;
7872
7873 info->params.loopback = new_line.loopback;
7874
7875 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
7876 info->params.clock_speed = new_line.clock_rate;
7877 else
7878 info->params.clock_speed = 0;
7879
7880 /* if network interface up, reprogram hardware */
7881 if (info->netcount)
7882 mgsl_program_hw(info);
7883 return 0;
7884
7885 default:
7886 return hdlc_ioctl(dev, ifr, cmd);
7887 }
7888 }
7889
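/* Illustrative sketch (not part of the driver): a user space program
 * configuring internal clocking at 64 kbps through the SIOCWANDEV path
 * handled above. The interface name "hdlc0" is an assumption; 'sock'
 * can be any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0). As checked
 * above, this requires CAP_NET_ADMIN and the TTY device closed.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/sockios.h>
 *	#include <linux/hdlc/ioctl.h>
 *
 *	static int set_internal_clock(int sock)
 *	{
 *		struct ifreq ifr;
 *		sync_serial_settings settings;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		memset(&settings, 0, sizeof(settings));
 *		strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
 *
 *		settings.clock_type = CLOCK_INT;
 *		settings.clock_rate = 64000;
 *		settings.loopback   = 0;
 *
 *		ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
 *		ifr.ifr_settings.size = sizeof(settings);
 *		ifr.ifr_settings.ifs_ifsu.sync = &settings;
 *
 *		return ioctl(sock, SIOCWANDEV, &ifr);
 *	}
 */
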
7890 /**
7891 * called by network layer when transmit timeout is detected
7892 *
7893 * dev pointer to network device structure
7894 */
7895 static void hdlcdev_tx_timeout(struct net_device *dev)
7896 {
7897 struct mgsl_struct *info = dev_to_port(dev);
7898 unsigned long flags;
7899
7900 if (debug_level >= DEBUG_LEVEL_INFO)
7901 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
7902
7903 dev->stats.tx_errors++;
7904 dev->stats.tx_aborted_errors++;
7905
7906 spin_lock_irqsave(&info->irq_spinlock,flags);
7907 usc_stop_transmitter(info);
7908 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7909
7910 netif_wake_queue(dev);
7911 }
7912
7913 /**
7914 * called by device driver when transmit completes
7915 * reenable network layer transmit if stopped
7916 *
7917 * info pointer to device instance information
7918 */
7919 static void hdlcdev_tx_done(struct mgsl_struct *info)
7920 {
7921 if (netif_queue_stopped(info->netdev))
7922 netif_wake_queue(info->netdev);
7923 }
7924
7925 /**
7926 * called by device driver when frame received
7927 * pass frame to network layer
7928 *
7929 * info pointer to device instance information
7930 * buf pointer to buffer containing frame data
7931 * size count of data bytes in buf
7932 */
7933 static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
7934 {
7935 struct sk_buff *skb = dev_alloc_skb(size);
7936 struct net_device *dev = info->netdev;
7937
7938 if (debug_level >= DEBUG_LEVEL_INFO)
7939 printk("hdlcdev_rx(%s)\n", dev->name);
7940
7941 if (skb == NULL) {
7942 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
7943 dev->name);
7944 dev->stats.rx_dropped++;
7945 return;
7946 }
7947
7948 skb_put_data(skb, buf, size);
7949
7950 skb->protocol = hdlc_type_trans(skb, dev);
7951
7952 dev->stats.rx_packets++;
7953 dev->stats.rx_bytes += size;
7954
7955 netif_rx(skb);
7956 }
7957
7958 static const struct net_device_ops hdlcdev_ops = {
7959 .ndo_open = hdlcdev_open,
7960 .ndo_stop = hdlcdev_close,
7961 .ndo_start_xmit = hdlc_start_xmit,
7962 .ndo_do_ioctl = hdlcdev_ioctl,
7963 .ndo_tx_timeout = hdlcdev_tx_timeout,
7964 };
7965
7966 /**
7967 * called by device driver when adding device instance
7968 * do generic HDLC initialization
7969 *
7970 * info pointer to device instance information
7971 *
7972 * returns 0 if success, otherwise error code
7973 */
7974 static int hdlcdev_init(struct mgsl_struct *info)
7975 {
7976 int rc;
7977 struct net_device *dev;
7978 hdlc_device *hdlc;
7979
7980 /* allocate and initialize network and HDLC layer objects */
7981
7982 dev = alloc_hdlcdev(info);
7983 if (!dev) {
7984 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
7985 return -ENOMEM;
7986 }
7987
7988 /* for network layer reporting purposes only */
7989 dev->base_addr = info->io_base;
7990 dev->irq = info->irq_level;
7991 dev->dma = info->dma_level;
7992
7993 /* network layer callbacks and settings */
7994 dev->netdev_ops = &hdlcdev_ops;
7995 dev->watchdog_timeo = 10 * HZ;
7996 dev->tx_queue_len = 50;
7997
7998 /* generic HDLC layer callbacks and settings */
7999 hdlc = dev_to_hdlc(dev);
8000 hdlc->attach = hdlcdev_attach;
8001 hdlc->xmit = hdlcdev_xmit;
8002
8003 /* register objects with HDLC layer */
8004 rc = register_hdlc_device(dev);
8005 if (rc) {
8006 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8007 free_netdev(dev);
8008 return rc;
8009 }
8010
8011 info->netdev = dev;
8012 return 0;
8013 }
8014
8015 /**
8016 * called by device driver when removing device instance
8017 * do generic HDLC cleanup
8018 *
8019 * info pointer to device instance information
8020 */
8021 static void hdlcdev_exit(struct mgsl_struct *info)
8022 {
8023 unregister_hdlc_device(info->netdev);
8024 free_netdev(info->netdev);
8025 info->netdev = NULL;
8026 }
8027
8028 #endif /* SYNCLINK_GENERIC_HDLC */
8029
8030
8031 static int synclink_init_one (struct pci_dev *dev,
8032 const struct pci_device_id *ent)
8033 {
8034 struct mgsl_struct *info;
8035
8036 if (pci_enable_device(dev)) {
8037 printk("error enabling pci device %p\n", dev);
8038 return -EIO;
8039 }
8040
8041 info = mgsl_allocate_device();
8042 if (!info) {
8043 printk("can't allocate device instance data.\n");
8044 return -EIO;
8045 }
8046
8047 /* Copy user configuration info to device instance data */
8048
8049 info->io_base = pci_resource_start(dev, 2);
8050 info->irq_level = dev->irq;
8051 info->phys_memory_base = pci_resource_start(dev, 3);
8052
8053 /* Because ioremap only works on page boundaries we must map
8054 * a larger area than is actually implemented for the LCR
8055 * memory range. We map a full page starting at the page boundary.
8056 */
8057 info->phys_lcr_base = pci_resource_start(dev, 0);
8058 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8059 info->phys_lcr_base &= ~(PAGE_SIZE-1);
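	/* Worked example (illustrative, assuming 4K pages): a BAR 0 value of
	 * 0xe0001080 yields lcr_offset = 0x080 and a page-aligned
	 * phys_lcr_base of 0xe0001000, which is what gets mapped.
	 */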
8060
8061 info->bus_type = MGSL_BUS_TYPE_PCI;
8062 info->io_addr_size = 8;
8063 info->irq_flags = IRQF_SHARED;
8064
8065 if (dev->device == 0x0210) {
8066 /* Version 1 PCI9030 based universal PCI adapter */
8067 info->misc_ctrl_value = 0x007c4080;
8068 info->hw_version = 1;
8069 } else {
8070 /* Version 0 PCI9050 based 5V PCI adapter
8071 * A PCI9050 bug prevents reading LCR registers if
8072 * LCR base address bit 7 is set. Maintain shadow
8073 * value so we can write to LCR misc control reg.
8074 */
8075 info->misc_ctrl_value = 0x087e4546;
8076 info->hw_version = 0;
8077 }
8078
8079 mgsl_add_device(info);
8080
8081 return 0;
8082 }
8083
8084 static void synclink_remove_one (struct pci_dev *dev)
8085 {
8086 }
8087
8088