1 /* sbni.c: Granch SBNI12 leased line adapters driver for linux
2 *
3 * Written 2001 by Denis I.Timofeev (timofeev@granch.ru)
4 *
5 * Previous versions were written by Yaroslav Polyakov,
6 * Alexey Zverev and Max Khon.
7 *
8 * Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and
9 * double-channel, PCI and ISA modifications.
* More info and useful utilities for working with SBNI12 cards can be found
* at http://www.granch.com (English) or http://www.granch.ru (Russian)
12 *
13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License.
15 *
16 *
17 * 5.0.1 Jun 22 2001
18 * - Fixed bug in probe
19 * 5.0.0 Jun 06 2001
20 * - Driver was completely redesigned by Denis I.Timofeev,
21 * - now PCI/Dual, ISA/Dual (with single interrupt line) models are
22 * - supported
23 * 3.3.0 Thu Feb 24 21:30:28 NOVT 2000
24 * - PCI cards support
25 * 3.2.0 Mon Dec 13 22:26:53 NOVT 1999
26 * - Completely rebuilt all the packet storage system
27 * - to work in Ethernet-like style.
28 * 3.1.1 just fixed some bugs (5 aug 1999)
29 * 3.1.0 added balancing feature (26 apr 1999)
30 * 3.0.1 just fixed some bugs (14 apr 1999).
31 * 3.0.0 Initial Revision, Yaroslav Polyakov (24 Feb 1999)
32 * - added pre-calculation for CRC, fixed bug with "len-2" frames,
33 * - removed outbound fragmentation (MTU=1000), written CRC-calculation
34 * - on asm, added work with hard_headers and now we have our own cache
35 * - for them, optionally supported word-interchange on some chipsets,
36 *
* Known problem: this driver hasn't been tested on multiprocessor machines.
38 */
39
40 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
41
42 #include <linux/module.h>
43 #include <linux/kernel.h>
44 #include <linux/ptrace.h>
45 #include <linux/fcntl.h>
46 #include <linux/ioport.h>
47 #include <linux/interrupt.h>
48 #include <linux/string.h>
49 #include <linux/errno.h>
50 #include <linux/netdevice.h>
51 #include <linux/etherdevice.h>
52 #include <linux/pci.h>
53 #include <linux/skbuff.h>
54 #include <linux/timer.h>
55 #include <linux/init.h>
56 #include <linux/delay.h>
57
58 #include <net/net_namespace.h>
59 #include <net/arp.h>
60 #include <net/Space.h>
61
62 #include <asm/io.h>
63 #include <asm/types.h>
64 #include <asm/byteorder.h>
65 #include <asm/irq.h>
66 #include <linux/uaccess.h>
67
68 #include "sbni.h"
69
70 /* device private data */
71
72 struct net_local {
73 struct timer_list watchdog;
74 struct net_device *watchdog_dev;
75
76 spinlock_t lock;
77 struct sk_buff *rx_buf_p; /* receive buffer ptr */
78 struct sk_buff *tx_buf_p; /* transmit buffer ptr */
79
80 unsigned int framelen; /* current frame length */
81 unsigned int maxframe; /* maximum valid frame length */
82 unsigned int state;
83 unsigned int inppos, outpos; /* positions in rx/tx buffers */
84
/* number of the frame being transmitted - counts down from the frame count to 1 */
86 unsigned int tx_frameno;
87
/* expected number of the next frame to be received */
89 unsigned int wait_frameno;
90
/* count of failed attempts to send a frame - up to 32 attempts are made
before reporting an error, while the receiver on the opposite end of the wire tunes in */
93 unsigned int trans_errors;
94
95 /* idle time; send pong when limit exceeded */
96 unsigned int timer_ticks;
97
98 /* fields used for receive level autoselection */
99 int delta_rxl;
100 unsigned int cur_rxl_index, timeout_rxl;
101 unsigned long cur_rxl_rcvd, prev_rxl_rcvd;
102
103 struct sbni_csr1 csr1; /* current value of CSR1 */
104 struct sbni_in_stats in_stats; /* internal statistics */
105
106 struct net_device *second; /* for ISA/dual cards */
107
108 #ifdef CONFIG_SBNI_MULTILINE
109 struct net_device *master;
110 struct net_device *link;
111 #endif
112 };
113
114
115 static int sbni_card_probe( unsigned long );
116 static int sbni_pci_probe( struct net_device * );
117 static struct net_device *sbni_probe1(struct net_device *, unsigned long, int);
118 static int sbni_open( struct net_device * );
119 static int sbni_close( struct net_device * );
120 static netdev_tx_t sbni_start_xmit(struct sk_buff *,
121 struct net_device * );
122 static int sbni_ioctl( struct net_device *, struct ifreq *, int );
123 static void set_multicast_list( struct net_device * );
124
125 static irqreturn_t sbni_interrupt( int, void * );
126 static void handle_channel( struct net_device * );
127 static int recv_frame( struct net_device * );
128 static void send_frame( struct net_device * );
129 static int upload_data( struct net_device *,
130 unsigned, unsigned, unsigned, u32 );
131 static void download_data( struct net_device *, u32 * );
132 static void sbni_watchdog(struct timer_list *);
133 static void interpret_ack( struct net_device *, unsigned );
134 static int append_frame_to_pkt( struct net_device *, unsigned, u32 );
135 static void indicate_pkt( struct net_device * );
136 static void card_start( struct net_device * );
137 static void prepare_to_send( struct sk_buff *, struct net_device * );
138 static void drop_xmit_queue( struct net_device * );
139 static void send_frame_header( struct net_device *, u32 * );
140 static int skip_tail( unsigned int, unsigned int, u32 );
141 static int check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * );
142 static void change_level( struct net_device * );
143 static void timeout_change_level( struct net_device * );
144 static u32 calc_crc32( u32, u8 *, u32 );
145 static struct sk_buff * get_rx_buf( struct net_device * );
146 static int sbni_init( struct net_device * );
147
148 #ifdef CONFIG_SBNI_MULTILINE
149 static int enslave( struct net_device *, struct net_device * );
150 static int emancipate( struct net_device * );
151 #endif
152
153 static const char version[] =
154 "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
155
156 static bool skip_pci_probe __initdata = false;
157 static int scandone __initdata = 0;
158 static int num __initdata = 0;
159
160 static unsigned char rxl_tab[];
161 static u32 crc32tab[];
162
163 /* A list of all installed devices, for removing the driver module. */
164 static struct net_device *sbni_cards[ SBNI_MAX_NUM_CARDS ];
165
/* Lists of device parameters */
167 static u32 io[ SBNI_MAX_NUM_CARDS ] __initdata =
168 { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
169 static u32 irq[ SBNI_MAX_NUM_CARDS ] __initdata;
170 static u32 baud[ SBNI_MAX_NUM_CARDS ] __initdata;
171 static u32 rxl[ SBNI_MAX_NUM_CARDS ] __initdata =
172 { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
173 static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata;
174
175 #ifndef MODULE
176 typedef u32 iarr[];
177 static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac };
178 #endif
179
180 /* A zero-terminated list of I/O addresses to be probed on ISA bus */
181 static unsigned int netcard_portlist[ ] __initdata = {
182 0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254,
183 0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4,
184 0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
185 0 };
186
187 #define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock)
188
189 /*
* Look for an SBNI card at the address stored in dev->base_addr, if nonzero.
* Otherwise, scan the PCI bus; if no PCI card is found, scan the ISA bus.
192 */
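/*
 * Concretely, sbni_probe() -> sbni_init() tries sbni_isa_probe() when a
 * base address is already known, otherwise sbni_pci_probe(), and finally
 * walks netcard_portlist[]; every path ends in sbni_probe1(), which does
 * the actual register-level check and device setup.
 */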
193
194 static inline int __init
sbni_isa_probe( struct net_device *dev )
196 {
197 if( dev->base_addr > 0x1ff &&
198 request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name ) &&
199 sbni_probe1( dev, dev->base_addr, dev->irq ) )
200
201 return 0;
202 else {
203 pr_err("base address 0x%lx is busy, or adapter is malfunctional!\n",
204 dev->base_addr);
205 return -ENODEV;
206 }
207 }
208
209 static const struct net_device_ops sbni_netdev_ops = {
210 .ndo_open = sbni_open,
211 .ndo_stop = sbni_close,
212 .ndo_start_xmit = sbni_start_xmit,
213 .ndo_set_rx_mode = set_multicast_list,
214 .ndo_do_ioctl = sbni_ioctl,
215 .ndo_set_mac_address = eth_mac_addr,
216 .ndo_validate_addr = eth_validate_addr,
217 };
218
static void __init sbni_devsetup(struct net_device *dev)
220 {
221 ether_setup( dev );
222 dev->netdev_ops = &sbni_netdev_ops;
223 }
224
int __init sbni_probe(int unit)
226 {
227 struct net_device *dev;
228 int err;
229
230 dev = alloc_netdev(sizeof(struct net_local), "sbni",
231 NET_NAME_UNKNOWN, sbni_devsetup);
232 if (!dev)
233 return -ENOMEM;
234
235 dev->netdev_ops = &sbni_netdev_ops;
236
237 sprintf(dev->name, "sbni%d", unit);
238 netdev_boot_setup_check(dev);
239
240 err = sbni_init(dev);
241 if (err) {
242 free_netdev(dev);
243 return err;
244 }
245
246 err = register_netdev(dev);
247 if (err) {
248 release_region( dev->base_addr, SBNI_IO_EXTENT );
249 free_netdev(dev);
250 return err;
251 }
252 pr_info_once("%s", version);
253 return 0;
254 }
255
static int __init sbni_init(struct net_device *dev)
257 {
258 int i;
259 if( dev->base_addr )
260 return sbni_isa_probe( dev );
/* otherwise we have to search for our adapter */
262
263 if( io[ num ] != -1 )
264 dev->base_addr = io[ num ],
265 dev->irq = irq[ num ];
266 else if( scandone || io[ 0 ] != -1 )
267 return -ENODEV;
268
/* if io[ num ] contains a non-zero address, it refers to the ISA bus */
270 if( dev->base_addr )
271 return sbni_isa_probe( dev );
272
273 /* ...otherwise - scan PCI first */
274 if( !skip_pci_probe && !sbni_pci_probe( dev ) )
275 return 0;
276
277 if( io[ num ] == -1 ) {
/* Auto-scan will be stopped when the first ISA card is found */
279 scandone = 1;
280 if( num > 0 )
281 return -ENODEV;
282 }
283
284 for( i = 0; netcard_portlist[ i ]; ++i ) {
285 int ioaddr = netcard_portlist[ i ];
286 if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name ) &&
287 sbni_probe1( dev, ioaddr, 0 ))
288 return 0;
289 }
290
291 return -ENODEV;
292 }
293
294
295 static int __init
sbni_pci_probe( struct net_device *dev )
297 {
298 struct pci_dev *pdev = NULL;
299
300 while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev ))
301 != NULL ) {
302 int pci_irq_line;
303 unsigned long pci_ioaddr;
304
305 if( pdev->vendor != SBNI_PCI_VENDOR &&
306 pdev->device != SBNI_PCI_DEVICE )
307 continue;
308
309 pci_ioaddr = pci_resource_start( pdev, 0 );
310 pci_irq_line = pdev->irq;
311
/* Skip cards already found in previous calls */
313 if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
314 if (pdev->subsystem_device != 2)
315 continue;
316
317 /* Dual adapter is present */
318 if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT,
319 dev->name ) )
320 continue;
321 }
322
323 if (pci_irq_line <= 0 || pci_irq_line >= nr_irqs)
324 pr_warn(
325 "WARNING: The PCI BIOS assigned this PCI card to IRQ %d, which is unlikely to work!.\n"
326 "You should use the PCI BIOS setup to assign a valid IRQ line.\n",
327 pci_irq_line );
328
/* avoid re-enabling dual adapters */
330 if( (pci_ioaddr & 7) == 0 && pci_enable_device( pdev ) ) {
331 release_region( pci_ioaddr, SBNI_IO_EXTENT );
332 pci_dev_put( pdev );
333 return -EIO;
334 }
335 if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) {
336 SET_NETDEV_DEV(dev, &pdev->dev);
337 /* not the best thing to do, but this is all messed up
338 for hotplug systems anyway... */
339 pci_dev_put( pdev );
340 return 0;
341 }
342 }
343 return -ENODEV;
344 }
345
346
347 static struct net_device * __init
sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
349 {
350 struct net_local *nl;
351
352 if( sbni_card_probe( ioaddr ) ) {
353 release_region( ioaddr, SBNI_IO_EXTENT );
354 return NULL;
355 }
356
357 outb( 0, ioaddr + CSR0 );
358
359 if( irq < 2 ) {
360 unsigned long irq_mask;
361
362 irq_mask = probe_irq_on();
363 outb( EN_INT | TR_REQ, ioaddr + CSR0 );
364 outb( PR_RES, ioaddr + CSR1 );
365 mdelay(50);
366 irq = probe_irq_off(irq_mask);
367 outb( 0, ioaddr + CSR0 );
368
369 if( !irq ) {
370 pr_err("%s: can't detect device irq!\n", dev->name);
371 release_region( ioaddr, SBNI_IO_EXTENT );
372 return NULL;
373 }
374 } else if( irq == 2 )
375 irq = 9;
376
377 dev->irq = irq;
378 dev->base_addr = ioaddr;
379
380 /* Fill in sbni-specific dev fields. */
381 nl = netdev_priv(dev);
382 if( !nl ) {
383 pr_err("%s: unable to get memory!\n", dev->name);
384 release_region( ioaddr, SBNI_IO_EXTENT );
385 return NULL;
386 }
387
388 memset( nl, 0, sizeof(struct net_local) );
389 spin_lock_init( &nl->lock );
390
/* store MAC address (generate one if it isn't known) */
392 *(__be16 *)dev->dev_addr = htons( 0x00ff );
393 *(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
394 ((mac[num] ?
395 mac[num] :
396 (u32)((long)netdev_priv(dev))) & 0x00ffffff));
397
398 /* store link settings (speed, receive level ) */
399 nl->maxframe = DEFAULT_FRAME_LEN;
400 nl->csr1.rate = baud[ num ];
401
402 if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
403 /* autotune rxl */
404 nl->cur_rxl_index = DEF_RXL,
405 nl->delta_rxl = DEF_RXL_DELTA;
406 else
407 nl->delta_rxl = 0;
408 nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
409 if( inb( ioaddr + CSR0 ) & 0x01 )
410 nl->state |= FL_SLOW_MODE;
411
412 pr_notice("%s: ioaddr %#lx, irq %d, MAC: 00:ff:01:%02x:%02x:%02x\n",
413 dev->name, dev->base_addr, dev->irq,
414 ((u8 *)dev->dev_addr)[3],
415 ((u8 *)dev->dev_addr)[4],
416 ((u8 *)dev->dev_addr)[5]);
417
418 pr_notice("%s: speed %d",
419 dev->name,
420 ((nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
421 / (1 << nl->csr1.rate));
422
423 if( nl->delta_rxl == 0 )
424 pr_cont(", receive level 0x%x (fixed)\n", nl->cur_rxl_index);
425 else
426 pr_cont(", receive level (auto)\n");
427
428 #ifdef CONFIG_SBNI_MULTILINE
429 nl->master = dev;
430 nl->link = NULL;
431 #endif
432
433 sbni_cards[ num++ ] = dev;
434 return dev;
435 }
436
437 /* -------------------------------------------------------------------------- */
438
439 #ifdef CONFIG_SBNI_MULTILINE
440
441 static netdev_tx_t
sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
443 {
444 struct net_device *p;
445
446 netif_stop_queue( dev );
447
/* Look for an idle device in the list */
449 for( p = dev; p; ) {
450 struct net_local *nl = netdev_priv(p);
451 spin_lock( &nl->lock );
452 if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
453 p = nl->link;
454 spin_unlock( &nl->lock );
455 } else {
456 /* Idle dev is found */
457 prepare_to_send( skb, p );
458 spin_unlock( &nl->lock );
459 netif_start_queue( dev );
460 return NETDEV_TX_OK;
461 }
462 }
463
464 return NETDEV_TX_BUSY;
465 }
466
467 #else /* CONFIG_SBNI_MULTILINE */
468
469 static netdev_tx_t
sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
471 {
472 struct net_local *nl = netdev_priv(dev);
473
474 netif_stop_queue( dev );
475 spin_lock( &nl->lock );
476
477 prepare_to_send( skb, dev );
478
479 spin_unlock( &nl->lock );
480 return NETDEV_TX_OK;
481 }
482
483 #endif /* CONFIG_SBNI_MULTILINE */
484
485 /* -------------------------------------------------------------------------- */
486
487 /* interrupt handler */
488
/*
 * SBNI12D-10, -11/ISA boards in "common interrupt" mode cannot be treated
 * as two independent single-channel devices. Each channel appears as an
 * Ethernet interface, but the interrupt handler must be shared. In fact,
 * only the first ("master") channel's driver registers the handler; its
 * struct net_local holds a pointer to the "slave" channel's device, and
 * it services that channel's interrupts as well.
 * The dev of every successfully attached ISA SBNI board is linked into a
 * list. When the next board is initialized, it scans this list; if it
 * finds a dev with the same irq and an ioaddr differing by 4, it assumes
 * that board to be the "master".
 */
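/*
 * A hypothetical example: a dual ISA card decoding 0x210 (first channel)
 * and 0x214 (second channel) with one IRQ line. Whichever channel is
 * opened first requests the IRQ; when the other channel is opened later,
 * sbni_open() finds the already-running partner in sbni_cards[], links
 * itself into that partner's ->second pointer and marks itself
 * FL_SECONDARY, so the partner's interrupt handler below polls both
 * channels.
 */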
501
502 static irqreturn_t
sbni_interrupt( int irq, void *dev_id )
504 {
505 struct net_device *dev = dev_id;
506 struct net_local *nl = netdev_priv(dev);
507 int repeat;
508
509 spin_lock( &nl->lock );
510 if( nl->second )
511 spin_lock(&NET_LOCAL_LOCK(nl->second));
512
513 do {
514 repeat = 0;
515 if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
516 handle_channel( dev ),
517 repeat = 1;
518 if( nl->second && /* second channel present */
519 (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
520 handle_channel( nl->second ),
521 repeat = 1;
522 } while( repeat );
523
524 if( nl->second )
525 spin_unlock(&NET_LOCAL_LOCK(nl->second));
526 spin_unlock( &nl->lock );
527 return IRQ_HANDLED;
528 }
529
530
531 static void
handle_channel( struct net_device *dev )
533 {
534 struct net_local *nl = netdev_priv(dev);
535 unsigned long ioaddr = dev->base_addr;
536
537 int req_ans;
538 unsigned char csr0;
539
540 #ifdef CONFIG_SBNI_MULTILINE
/* Lock the master device because we are going to change its local data */
542 if( nl->state & FL_SLAVE )
543 spin_lock(&NET_LOCAL_LOCK(nl->master));
544 #endif
545
546 outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );
547
548 nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
549 for(;;) {
550 csr0 = inb( ioaddr + CSR0 );
551 if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
552 break;
553
554 req_ans = !(nl->state & FL_PREV_OK);
555
556 if( csr0 & RC_RDY )
557 req_ans = recv_frame( dev );
558
559 /*
* TR_RDY always equals 1 here because we own the marker,
* and we set TR_REQ while interrupts were disabled
562 */
563 csr0 = inb( ioaddr + CSR0 );
564 if( !(csr0 & TR_RDY) || (csr0 & RC_RDY) )
565 netdev_err(dev, "internal error!\n");
566
567 /* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
568 if( req_ans || nl->tx_frameno != 0 )
569 send_frame( dev );
570 else
571 /* send marker without any data */
572 outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
573 }
574
575 outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );
576
577 #ifdef CONFIG_SBNI_MULTILINE
578 if( nl->state & FL_SLAVE )
579 spin_unlock(&NET_LOCAL_LOCK(nl->master));
580 #endif
581 }
582
583
584 /*
* The routine returns 1 if the received frame needs to be acknowledged.
* An empty frame received without errors is not acknowledged.
587 */
588
589 static int
recv_frame( struct net_device *dev )
591 {
592 struct net_local *nl = netdev_priv(dev);
593 unsigned long ioaddr = dev->base_addr;
594
595 u32 crc = CRC32_INITIAL;
596
597 unsigned framelen = 0, frameno, ack;
598 unsigned is_first, frame_ok = 0;
599
600 if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
601 frame_ok = framelen > 4
602 ? upload_data( dev, framelen, frameno, is_first, crc )
603 : skip_tail( ioaddr, framelen, crc );
604 if( frame_ok )
605 interpret_ack( dev, ack );
606 }
607
608 outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
609 if( frame_ok ) {
610 nl->state |= FL_PREV_OK;
611 if( framelen > 4 )
612 nl->in_stats.all_rx_number++;
613 } else
614 nl->state &= ~FL_PREV_OK,
615 change_level( dev ),
616 nl->in_stats.all_rx_number++,
617 nl->in_stats.bad_rx_number++;
618
619 return !frame_ok || framelen > 4;
620 }
621
622
623 static void
send_frame( struct net_device *dev )
625 {
626 struct net_local *nl = netdev_priv(dev);
627
628 u32 crc = CRC32_INITIAL;
629
630 if( nl->state & FL_NEED_RESEND ) {
631
/* if the frame was sent but not ACK'ed - resend it */
633 if( nl->trans_errors ) {
634 --nl->trans_errors;
635 if( nl->framelen != 0 )
636 nl->in_stats.resend_tx_number++;
637 } else {
/* could not xmit after many attempts */
639 #ifdef CONFIG_SBNI_MULTILINE
640 if( (nl->state & FL_SLAVE) || nl->link )
641 #endif
642 nl->state |= FL_LINE_DOWN;
643 drop_xmit_queue( dev );
644 goto do_send;
645 }
646 } else
647 nl->trans_errors = TR_ERROR_COUNT;
648
649 send_frame_header( dev, &crc );
650 nl->state |= FL_NEED_RESEND;
651 /*
* FL_NEED_RESEND will be cleared after ACK; if an empty frame
* was sent, it is cleared in prepare_to_send() for the next frame
654 */
655
656
657 if( nl->framelen ) {
658 download_data( dev, &crc );
659 nl->in_stats.all_tx_number++;
660 nl->state |= FL_WAIT_ACK;
661 }
662
663 outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );
664
665 do_send:
666 outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );
667
668 if( nl->tx_frameno )
/* next frame exists - request the card to send it */
670 outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
671 dev->base_addr + CSR0 );
672 }
673
674
675 /*
* Write the frame data into the adapter's buffer memory and calculate the CRC.
677 * Do padding if necessary.
678 */
679
680 static void
download_data( struct net_device *dev, u32 *crc_p )
682 {
683 struct net_local *nl = netdev_priv(dev);
684 struct sk_buff *skb = nl->tx_buf_p;
685
686 unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
687
688 outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
689 *crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
690
/* if the packet is too short, write some more bytes to pad it out */
692 for( len = nl->framelen - len; len--; )
693 outb( 0, dev->base_addr + DAT ),
694 *crc_p = CRC32( 0, *crc_p );
695 }
696
697
698 static int
upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
700 unsigned is_first, u32 crc )
701 {
702 struct net_local *nl = netdev_priv(dev);
703
704 int frame_ok;
705
706 if( is_first )
707 nl->wait_frameno = frameno,
708 nl->inppos = 0;
709
710 if( nl->wait_frameno == frameno ) {
711
712 if( nl->inppos + framelen <= ETHER_MAX_LEN )
713 frame_ok = append_frame_to_pkt( dev, framelen, crc );
714
715 /*
* if the CRC is right but framelen is incorrect, then a transmitter
* error occurred... drop the entire packet
718 */
719 else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
720 != 0 )
721 nl->wait_frameno = 0,
722 nl->inppos = 0,
723 #ifdef CONFIG_SBNI_MULTILINE
724 nl->master->stats.rx_errors++,
725 nl->master->stats.rx_missed_errors++;
726 #else
727 dev->stats.rx_errors++,
728 dev->stats.rx_missed_errors++;
729 #endif
730 /* now skip all frames until is_first != 0 */
731 } else
732 frame_ok = skip_tail( dev->base_addr, framelen, crc );
733
734 if( is_first && !frame_ok )
735 /*
* The frame was corrupted, but we have already stored
* is_first... Drop the entire packet.
738 */
739 nl->wait_frameno = 0,
740 #ifdef CONFIG_SBNI_MULTILINE
741 nl->master->stats.rx_errors++,
742 nl->master->stats.rx_crc_errors++;
743 #else
744 dev->stats.rx_errors++,
745 dev->stats.rx_crc_errors++;
746 #endif
747
748 return frame_ok;
749 }
750
751
752 static inline void
send_complete( struct net_device *dev )
754 {
755 struct net_local *nl = netdev_priv(dev);
756
757 #ifdef CONFIG_SBNI_MULTILINE
758 nl->master->stats.tx_packets++;
759 nl->master->stats.tx_bytes += nl->tx_buf_p->len;
760 #else
761 dev->stats.tx_packets++;
762 dev->stats.tx_bytes += nl->tx_buf_p->len;
763 #endif
764 dev_consume_skb_irq(nl->tx_buf_p);
765
766 nl->tx_buf_p = NULL;
767
768 nl->outpos = 0;
769 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
770 nl->framelen = 0;
771 }
772
773
774 static void
interpret_ack( struct net_device *dev, unsigned ack )
776 {
777 struct net_local *nl = netdev_priv(dev);
778
779 if( ack == FRAME_SENT_OK ) {
780 nl->state &= ~FL_NEED_RESEND;
781
782 if( nl->state & FL_WAIT_ACK ) {
783 nl->outpos += nl->framelen;
784
785 if( --nl->tx_frameno )
786 nl->framelen = min_t(unsigned int,
787 nl->maxframe,
788 nl->tx_buf_p->len - nl->outpos);
789 else
790 send_complete( dev ),
791 #ifdef CONFIG_SBNI_MULTILINE
792 netif_wake_queue( nl->master );
793 #else
794 netif_wake_queue( dev );
795 #endif
796 }
797 }
798
799 nl->state &= ~FL_WAIT_ACK;
800 }
801
802
803 /*
* Glue the received frame onto the previous fragments of the packet.
* Indicate the packet once the last frame has been accepted.
806 */
807
808 static int
append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
810 {
811 struct net_local *nl = netdev_priv(dev);
812
813 u8 *p;
814
815 if( nl->inppos + framelen > ETHER_MAX_LEN )
816 return 0;
817
818 if( !nl->rx_buf_p && !(nl->rx_buf_p = get_rx_buf( dev )) )
819 return 0;
820
821 p = nl->rx_buf_p->data + nl->inppos;
822 insb( dev->base_addr + DAT, p, framelen );
823 if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
824 return 0;
825
826 nl->inppos += framelen - 4;
827 if( --nl->wait_frameno == 0 ) /* last frame received */
828 indicate_pkt( dev );
829
830 return 1;
831 }
832
833
834 /*
* Prepare to start output on the adapter.
* The transmitter will actually be activated when the marker is accepted.
837 */
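/*
 * A packet longer than maxframe is split into DIV_ROUND_UP(len, maxframe)
 * frames; tx_frameno counts the remaining frames down to 1 and outpos
 * tracks how much of the skb has already gone out (advanced by
 * interpret_ack() on every acknowledged frame).
 */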
838
839 static void
prepare_to_send( struct sk_buff *skb, struct net_device *dev )
841 {
842 struct net_local *nl = netdev_priv(dev);
843
844 unsigned int len;
845
846 /* nl->tx_buf_p == NULL here! */
847 if( nl->tx_buf_p )
848 netdev_err(dev, "memory leak!\n");
849
850 nl->outpos = 0;
851 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
852
853 len = skb->len;
854 if( len < SBNI_MIN_LEN )
855 len = SBNI_MIN_LEN;
856
857 nl->tx_buf_p = skb;
858 nl->tx_frameno = DIV_ROUND_UP(len, nl->maxframe);
859 nl->framelen = len < nl->maxframe ? len : nl->maxframe;
860
861 outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
862 #ifdef CONFIG_SBNI_MULTILINE
863 netif_trans_update(nl->master);
864 #else
865 netif_trans_update(dev);
866 #endif
867 }
868
869
870 static void
drop_xmit_queue( struct net_device *dev )
872 {
873 struct net_local *nl = netdev_priv(dev);
874
875 if( nl->tx_buf_p )
876 dev_kfree_skb_any( nl->tx_buf_p ),
877 nl->tx_buf_p = NULL,
878 #ifdef CONFIG_SBNI_MULTILINE
879 nl->master->stats.tx_errors++,
880 nl->master->stats.tx_carrier_errors++;
881 #else
882 dev->stats.tx_errors++,
883 dev->stats.tx_carrier_errors++;
884 #endif
885
886 nl->tx_frameno = 0;
887 nl->framelen = 0;
888 nl->outpos = 0;
889 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
890 #ifdef CONFIG_SBNI_MULTILINE
891 netif_start_queue( nl->master );
892 netif_trans_update(nl->master);
893 #else
894 netif_start_queue( dev );
895 netif_trans_update(dev);
896 #endif
897 }
898
899
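/*
 * On-wire frame layout, as written below and parsed by check_fhdr():
 * an SBNI_SIG signature byte, a 16-bit length field that also carries the
 * FRAME_FIRST/FRAME_RETRY flags and the ack code, a frame number byte, a
 * reserved byte, then the data and a trailing 32-bit CRC (appended in
 * send_frame()). The length field counts the CRC, frameno and reserved
 * bytes as well, hence the "+ 6" below.
 */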
900 static void
send_frame_header( struct net_device *dev, u32 *crc_p )
902 {
903 struct net_local *nl = netdev_priv(dev);
904
905 u32 crc = *crc_p;
906 u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */
907 u8 value;
908
909 if( nl->state & FL_NEED_RESEND )
910 len_field |= FRAME_RETRY; /* non-first attempt... */
911
912 if( nl->outpos == 0 )
913 len_field |= FRAME_FIRST;
914
915 len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
916 outb( SBNI_SIG, dev->base_addr + DAT );
917
918 value = (u8) len_field;
919 outb( value, dev->base_addr + DAT );
920 crc = CRC32( value, crc );
921 value = (u8) (len_field >> 8);
922 outb( value, dev->base_addr + DAT );
923 crc = CRC32( value, crc );
924
925 outb( nl->tx_frameno, dev->base_addr + DAT );
926 crc = CRC32( nl->tx_frameno, crc );
927 outb( 0, dev->base_addr + DAT );
928 crc = CRC32( 0, crc );
929 *crc_p = crc;
930 }
931
932
933 /*
* if the frame tail is not needed (incorrect number, or received twice),
* it is not stored, but the CRC is still calculated
936 */
937
938 static int
skip_tail( unsigned int ioaddr, unsigned int tail_len, u32 crc )
940 {
941 while( tail_len-- )
942 crc = CRC32( inb( ioaddr + DAT ), crc );
943
944 return crc == CRC32_REMAINDER;
945 }
946
947
948 /*
* Preliminarily checks whether the frame header is correct, calculates
* its CRC and splits it into separate fields
951 */
952
953 static int
check_fhdr( u32 ioaddr, u32 *framelen, u32 *frameno, u32 *ack,
955 u32 *is_first, u32 *crc_p )
956 {
957 u32 crc = *crc_p;
958 u8 value;
959
960 if( inb( ioaddr + DAT ) != SBNI_SIG )
961 return 0;
962
963 value = inb( ioaddr + DAT );
964 *framelen = (u32)value;
965 crc = CRC32( value, crc );
966 value = inb( ioaddr + DAT );
967 *framelen |= ((u32)value) << 8;
968 crc = CRC32( value, crc );
969
970 *ack = *framelen & FRAME_ACK_MASK;
971 *is_first = (*framelen & FRAME_FIRST) != 0;
972
973 if( (*framelen &= FRAME_LEN_MASK) < 6 ||
974 *framelen > SBNI_MAX_FRAME - 3 )
975 return 0;
976
977 value = inb( ioaddr + DAT );
978 *frameno = (u32)value;
979 crc = CRC32( value, crc );
980
981 crc = CRC32( inb( ioaddr + DAT ), crc ); /* reserved byte */
982 *framelen -= 2;
983
984 *crc_p = crc;
985 return 1;
986 }
987
988
989 static struct sk_buff *
get_rx_buf( struct net_device *dev )
991 {
992 /* +2 is to compensate for the alignment fixup below */
993 struct sk_buff *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
994 if( !skb )
995 return NULL;
996
997 skb_reserve( skb, 2 ); /* Align IP on longword boundaries */
998 return skb;
999 }
1000
1001
1002 static void
indicate_pkt( struct net_device *dev )
1004 {
1005 struct net_local *nl = netdev_priv(dev);
1006 struct sk_buff *skb = nl->rx_buf_p;
1007
1008 skb_put( skb, nl->inppos );
1009
1010 #ifdef CONFIG_SBNI_MULTILINE
1011 skb->protocol = eth_type_trans( skb, nl->master );
1012 netif_rx( skb );
1013 ++nl->master->stats.rx_packets;
1014 nl->master->stats.rx_bytes += nl->inppos;
1015 #else
1016 skb->protocol = eth_type_trans( skb, dev );
1017 netif_rx( skb );
1018 ++dev->stats.rx_packets;
1019 dev->stats.rx_bytes += nl->inppos;
1020 #endif
1021 nl->rx_buf_p = NULL; /* protocol driver will clear this sk_buff */
1022 }
1023
1024
1025 /* -------------------------------------------------------------------------- */
1026
1027 /*
* The routine periodically checks wire activity and regenerates the marker
* if the connection has been inactive for a long time.
1030 */
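/*
 * While the receiver stays idle, timer_ticks is counted down; once it
 * reaches zero the timeout is recorded, the receive level may be bumped
 * via timeout_change_level(), and PR_RES is written to CSR1 to regenerate
 * the marker.
 */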
1031
1032 static void
sbni_watchdog(struct timer_list *t)
1034 {
1035 struct net_local *nl = from_timer(nl, t, watchdog);
1036 struct net_device *dev = nl->watchdog_dev;
1037 unsigned long flags;
1038 unsigned char csr0;
1039
1040 spin_lock_irqsave( &nl->lock, flags );
1041
1042 csr0 = inb( dev->base_addr + CSR0 );
1043 if( csr0 & RC_CHK ) {
1044
1045 if( nl->timer_ticks ) {
1046 if( csr0 & (RC_RDY | BU_EMP) )
1047 /* receiving not active */
1048 nl->timer_ticks--;
1049 } else {
1050 nl->in_stats.timeout_number++;
1051 if( nl->delta_rxl )
1052 timeout_change_level( dev );
1053
1054 outb( *(u_char *)&nl->csr1 | PR_RES,
1055 dev->base_addr + CSR1 );
1056 csr0 = inb( dev->base_addr + CSR0 );
1057 }
1058 } else
1059 nl->state &= ~FL_LINE_DOWN;
1060
1061 outb( csr0 | RC_CHK, dev->base_addr + CSR0 );
1062
1063 mod_timer(t, jiffies + SBNI_TIMEOUT);
1064
1065 spin_unlock_irqrestore( &nl->lock, flags );
1066 }
1067
1068
1069 static unsigned char rxl_tab[] = {
1070 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
1071 0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
1072 };
1073
1074 #define SIZE_OF_TIMEOUT_RXL_TAB 4
1075 static unsigned char timeout_rxl_tab[] = {
1076 0x03, 0x05, 0x08, 0x0b
1077 };
1078
1079 /* -------------------------------------------------------------------------- */
1080
1081 static void
card_start( struct net_device *dev )
1083 {
1084 struct net_local *nl = netdev_priv(dev);
1085
1086 nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
1087 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
1088 nl->state |= FL_PREV_OK;
1089
1090 nl->inppos = nl->outpos = 0;
1091 nl->wait_frameno = 0;
1092 nl->tx_frameno = 0;
1093 nl->framelen = 0;
1094
1095 outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
1096 outb( EN_INT, dev->base_addr + CSR0 );
1097 }
1098
1099 /* -------------------------------------------------------------------------- */
1100
1101 /* Receive level auto-selection */
1102
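/*
 * change_level() does a simple hill climb on each bad frame: cur_rxl_index
 * is moved by delta_rxl, the direction is reversed when cur_rxl_rcvd has
 * fallen below prev_rxl_rcvd, and the walk turns around at the ends of
 * rxl_tab[] (indices 0 and 15).
 */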
1103 static void
change_level( struct net_device *dev )
1105 {
1106 struct net_local *nl = netdev_priv(dev);
1107
1108 if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */
1109 return;
1110
1111 if( nl->cur_rxl_index == 0 )
1112 nl->delta_rxl = 1;
1113 else if( nl->cur_rxl_index == 15 )
1114 nl->delta_rxl = -1;
1115 else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
1116 nl->delta_rxl = -nl->delta_rxl;
1117
1118 nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
inb( dev->base_addr + CSR0 ); /* needed for PCI cards */
1120 outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );
1121
1122 nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
1123 nl->cur_rxl_rcvd = 0;
1124 }
1125
1126
1127 static void
timeout_change_level( struct net_device *dev )
1129 {
1130 struct net_local *nl = netdev_priv(dev);
1131
1132 nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
1133 if( ++nl->timeout_rxl >= 4 )
1134 nl->timeout_rxl = 0;
1135
1136 nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
1137 inb( dev->base_addr + CSR0 );
1138 outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );
1139
1140 nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
1141 nl->cur_rxl_rcvd = 0;
1142 }
1143
1144 /* -------------------------------------------------------------------------- */
1145
1146 /*
1147 * Open/initialize the board.
1148 */
1149
1150 static int
sbni_open( struct net_device *dev )
1152 {
1153 struct net_local *nl = netdev_priv(dev);
1154 struct timer_list *w = &nl->watchdog;
1155
1156 /*
* For dual ISA adapters in "common irq" mode, we have to determine
* whether the primary or the secondary channel is being initialized,
* and install the irq handler only in the first case.
1160 */
1161 if( dev->base_addr < 0x400 ) { /* ISA only */
1162 struct net_device **p = sbni_cards;
1163 for( ; *p && p < sbni_cards + SBNI_MAX_NUM_CARDS; ++p )
1164 if( (*p)->irq == dev->irq &&
1165 ((*p)->base_addr == dev->base_addr + 4 ||
1166 (*p)->base_addr == dev->base_addr - 4) &&
1167 (*p)->flags & IFF_UP ) {
1168
1169 ((struct net_local *) (netdev_priv(*p)))
1170 ->second = dev;
1171 netdev_notice(dev, "using shared irq with %s\n",
1172 (*p)->name);
1173 nl->state |= FL_SECONDARY;
1174 goto handler_attached;
1175 }
1176 }
1177
1178 if( request_irq(dev->irq, sbni_interrupt, IRQF_SHARED, dev->name, dev) ) {
1179 netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
1180 return -EAGAIN;
1181 }
1182
1183 handler_attached:
1184
1185 spin_lock( &nl->lock );
1186 memset( &dev->stats, 0, sizeof(struct net_device_stats) );
1187 memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
1188
1189 card_start( dev );
1190
1191 netif_start_queue( dev );
1192
/* set up the watchdog timer */
1194 nl->watchdog_dev = dev;
1195 timer_setup(w, sbni_watchdog, 0);
1196 w->expires = jiffies + SBNI_TIMEOUT;
1197 add_timer( w );
1198
1199 spin_unlock( &nl->lock );
1200 return 0;
1201 }
1202
1203
1204 static int
sbni_close( struct net_device *dev )
1206 {
1207 struct net_local *nl = netdev_priv(dev);
1208
1209 if( nl->second && nl->second->flags & IFF_UP ) {
1210 netdev_notice(dev, "Secondary channel (%s) is active!\n",
1211 nl->second->name);
1212 return -EBUSY;
1213 }
1214
1215 #ifdef CONFIG_SBNI_MULTILINE
1216 if( nl->state & FL_SLAVE )
1217 emancipate( dev );
1218 else
1219 while( nl->link ) /* it's master device! */
1220 emancipate( nl->link );
1221 #endif
1222
1223 spin_lock( &nl->lock );
1224
1225 nl->second = NULL;
1226 drop_xmit_queue( dev );
1227 netif_stop_queue( dev );
1228
1229 del_timer( &nl->watchdog );
1230
1231 outb( 0, dev->base_addr + CSR0 );
1232
1233 if( !(nl->state & FL_SECONDARY) )
1234 free_irq( dev->irq, dev );
1235 nl->state &= FL_SECONDARY;
1236
1237 spin_unlock( &nl->lock );
1238 return 0;
1239 }
1240
1241
1242 /*
1243 Valid combinations in CSR0 (for probing):
1244
1245 VALID_DECODER 0000,0011,1011,1010
1246
1247 ; 0 ; -
1248 TR_REQ ; 1 ; +
1249 TR_RDY ; 2 ; -
1250 TR_RDY TR_REQ ; 3 ; +
1251 BU_EMP ; 4 ; +
1252 BU_EMP TR_REQ ; 5 ; +
1253 BU_EMP TR_RDY ; 6 ; -
1254 BU_EMP TR_RDY TR_REQ ; 7 ; +
1255 RC_RDY ; 8 ; +
1256 RC_RDY TR_REQ ; 9 ; +
1257 RC_RDY TR_RDY ; 10 ; -
1258 RC_RDY TR_RDY TR_REQ ; 11 ; -
1259 RC_RDY BU_EMP ; 12 ; -
1260 RC_RDY BU_EMP TR_REQ ; 13 ; -
1261 RC_RDY BU_EMP TR_RDY ; 14 ; -
1262 RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; -
1263 */
1264
1265 #define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
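/*
 * Each '+' row in the table above contributes one bit, indexed by the
 * upper nibble of CSR0: rows 1, 3, 4, 5, 7, 8 and 9 give
 * 2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200 = 0x3ba, which is the mask
 * sbni_card_probe() tests with (1 << (csr0 >> 4)).
 */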
1266
1267
1268 static int
sbni_card_probe( unsigned long ioaddr )
1270 {
1271 unsigned char csr0;
1272
1273 csr0 = inb( ioaddr + CSR0 );
1274 if( csr0 != 0xff && csr0 != 0x00 ) {
1275 csr0 &= ~EN_INT;
1276 if( csr0 & BU_EMP )
1277 csr0 |= EN_INT;
1278
1279 if( VALID_DECODER & (1 << (csr0 >> 4)) )
1280 return 0;
1281 }
1282
1283 return -ENODEV;
1284 }
1285
1286 /* -------------------------------------------------------------------------- */
1287
1288 static int
sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
1290 {
1291 struct net_local *nl = netdev_priv(dev);
1292 struct sbni_flags flags;
1293 int error = 0;
1294
1295 #ifdef CONFIG_SBNI_MULTILINE
1296 struct net_device *slave_dev;
1297 char slave_name[ 8 ];
1298 #endif
1299
1300 switch( cmd ) {
1301 case SIOCDEVGETINSTATS :
1302 if (copy_to_user( ifr->ifr_data, &nl->in_stats,
1303 sizeof(struct sbni_in_stats) ))
1304 error = -EFAULT;
1305 break;
1306
1307 case SIOCDEVRESINSTATS :
1308 if (!capable(CAP_NET_ADMIN))
1309 return -EPERM;
1310 memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
1311 break;
1312
1313 case SIOCDEVGHWSTATE :
1314 flags.mac_addr = *(u32 *)(dev->dev_addr + 3);
1315 flags.rate = nl->csr1.rate;
1316 flags.slow_mode = (nl->state & FL_SLOW_MODE) != 0;
1317 flags.rxl = nl->cur_rxl_index;
1318 flags.fixed_rxl = nl->delta_rxl == 0;
1319
1320 if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
1321 error = -EFAULT;
1322 break;
1323
1324 case SIOCDEVSHWSTATE :
1325 if (!capable(CAP_NET_ADMIN))
1326 return -EPERM;
1327
1328 spin_lock( &nl->lock );
1329 flags = *(struct sbni_flags*) &ifr->ifr_ifru;
1330 if( flags.fixed_rxl )
1331 nl->delta_rxl = 0,
1332 nl->cur_rxl_index = flags.rxl;
1333 else
1334 nl->delta_rxl = DEF_RXL_DELTA,
1335 nl->cur_rxl_index = DEF_RXL;
1336
1337 nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
1338 nl->csr1.rate = flags.rate;
1339 outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
1340 spin_unlock( &nl->lock );
1341 break;
1342
1343 #ifdef CONFIG_SBNI_MULTILINE
1344
1345 case SIOCDEVENSLAVE :
1346 if (!capable(CAP_NET_ADMIN))
1347 return -EPERM;
1348
1349 if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
1350 return -EFAULT;
1351 slave_dev = dev_get_by_name(&init_net, slave_name );
1352 if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
1353 netdev_err(dev, "trying to enslave non-active device %s\n",
1354 slave_name);
1355 if (slave_dev)
1356 dev_put(slave_dev);
1357 return -EPERM;
1358 }
1359
1360 return enslave( dev, slave_dev );
1361
1362 case SIOCDEVEMANSIPATE :
1363 if (!capable(CAP_NET_ADMIN))
1364 return -EPERM;
1365
1366 return emancipate( dev );
1367
1368 #endif /* CONFIG_SBNI_MULTILINE */
1369
1370 default :
1371 return -EOPNOTSUPP;
1372 }
1373
1374 return error;
1375 }
1376
1377
1378 #ifdef CONFIG_SBNI_MULTILINE
1379
1380 static int
enslave( struct net_device *dev, struct net_device *slave_dev )
1382 {
1383 struct net_local *nl = netdev_priv(dev);
1384 struct net_local *snl = netdev_priv(slave_dev);
1385
if( nl->state & FL_SLAVE ) /* This device is neither a master nor free */
1387 return -EBUSY;
1388
if( snl->state & FL_SLAVE ) /* That device is already enslaved */
1390 return -EBUSY;
1391
1392 spin_lock( &nl->lock );
1393 spin_lock( &snl->lock );
1394
1395 /* append to list */
1396 snl->link = nl->link;
1397 nl->link = slave_dev;
1398 snl->master = dev;
1399 snl->state |= FL_SLAVE;
1400
1401 /* Summary statistics of MultiLine operation will be stored
in the master's counters */
1403 memset( &slave_dev->stats, 0, sizeof(struct net_device_stats) );
1404 netif_stop_queue( slave_dev );
1405 netif_wake_queue( dev ); /* Now we are able to transmit */
1406
1407 spin_unlock( &snl->lock );
1408 spin_unlock( &nl->lock );
1409 netdev_notice(dev, "slave device (%s) attached\n", slave_dev->name);
1410 return 0;
1411 }
1412
1413
1414 static int
emancipate( struct net_device *dev )
1416 {
1417 struct net_local *snl = netdev_priv(dev);
1418 struct net_device *p = snl->master;
1419 struct net_local *nl = netdev_priv(p);
1420
1421 if( !(snl->state & FL_SLAVE) )
1422 return -EINVAL;
1423
1424 spin_lock( &nl->lock );
1425 spin_lock( &snl->lock );
1426 drop_xmit_queue( dev );
1427
1428 /* exclude from list */
1429 for(;;) { /* must be in list */
1430 struct net_local *t = netdev_priv(p);
1431 if( t->link == dev ) {
1432 t->link = snl->link;
1433 break;
1434 }
1435 p = t->link;
1436 }
1437
1438 snl->link = NULL;
1439 snl->master = dev;
1440 snl->state &= ~FL_SLAVE;
1441
1442 netif_start_queue( dev );
1443
1444 spin_unlock( &snl->lock );
1445 spin_unlock( &nl->lock );
1446
1447 dev_put( dev );
1448 return 0;
1449 }
1450
1451 #endif
1452
1453 static void
set_multicast_list( struct net_device *dev )
1455 {
return; /* sbni always operates in promiscuous mode */
1457 }
1458
1459
1460 #ifdef MODULE
1461 module_param_hw_array(io, int, ioport, NULL, 0);
1462 module_param_hw_array(irq, int, irq, NULL, 0);
1463 module_param_array(baud, int, NULL, 0);
1464 module_param_array(rxl, int, NULL, 0);
1465 module_param_array(mac, int, NULL, 0);
1466 module_param(skip_pci_probe, bool, 0);
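/*
 * A hypothetical example of loading the module with two ISA cards and
 * fixed receive levels (the values are illustrative only):
 *
 *	modprobe sbni io=0x210,0x214 irq=5,5 rxl=4,4
 */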
1467
1468 MODULE_LICENSE("GPL");
1469
1470
int __init init_module( void )
1472 {
1473 struct net_device *dev;
1474 int err;
1475
1476 while( num < SBNI_MAX_NUM_CARDS ) {
1477 dev = alloc_netdev(sizeof(struct net_local), "sbni%d",
1478 NET_NAME_UNKNOWN, sbni_devsetup);
1479 if( !dev)
1480 break;
1481
1482 sprintf( dev->name, "sbni%d", num );
1483
1484 err = sbni_init(dev);
1485 if (err) {
1486 free_netdev(dev);
1487 break;
1488 }
1489
1490 if( register_netdev( dev ) ) {
1491 release_region( dev->base_addr, SBNI_IO_EXTENT );
1492 free_netdev( dev );
1493 break;
1494 }
1495 }
1496
1497 return *sbni_cards ? 0 : -ENODEV;
1498 }
1499
1500 void
cleanup_module(void)
1502 {
1503 int i;
1504
1505 for (i = 0; i < SBNI_MAX_NUM_CARDS; ++i) {
1506 struct net_device *dev = sbni_cards[i];
1507 if (dev != NULL) {
1508 unregister_netdev(dev);
1509 release_region(dev->base_addr, SBNI_IO_EXTENT);
1510 free_netdev(dev);
1511 }
1512 }
1513 }
1514
1515 #else /* MODULE */
1516
1517 static int __init
sbni_setup( char *p )
1519 {
1520 int n, parm;
1521
1522 if( *p++ != '(' )
1523 goto bad_param;
1524
1525 for( n = 0, parm = 0; *p && n < 8; ) {
1526 (*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
1527 if( !*p || *p == ')' )
1528 return 1;
1529 if( *p == ';' )
1530 ++p, ++n, parm = 0;
1531 else if( *p++ != ',' )
1532 break;
1533 else
1534 if( ++parm >= 5 )
1535 break;
1536 }
1537 bad_param:
1538 pr_err("Error in sbni kernel parameter!\n");
1539 return 0;
1540 }
1541
1542 __setup( "sbni=", sbni_setup );
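/*
 * sbni_setup() above accepts per-card groups of up to five comma-separated
 * values (io, irq, baud, rxl, mac), with cards separated by semicolons,
 * e.g. a hypothetical boot parameter: sbni=(0x210,5,0,4;0x214,5)
 */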
1543
1544 #endif /* MODULE */
1545
1546 /* -------------------------------------------------------------------------- */
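/*
 * Byte-at-a-time, table-driven CRC-32 over the frame contents; the CRC32()
 * macro (presumably defined in sbni.h) steps through crc32tab[] below, and
 * calc_crc32() just folds a whole buffer through it.
 */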
1547
1548 static u32
calc_crc32( u32 crc, u8 *p, u32 len )
1550 {
1551 while( len-- )
1552 crc = CRC32( *p++, crc );
1553
1554 return crc;
1555 }
1556
1557 static u32 crc32tab[] __attribute__ ((aligned(8))) = {
1558 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
1559 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
1560 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605,
1561 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C,
1562 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53,
1563 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A,
1564 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661,
1565 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278,
1566 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF,
1567 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6,
1568 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD,
1569 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4,
1570 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B,
1571 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82,
1572 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9,
1573 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0,
1574 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7,
1575 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE,
1576 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795,
1577 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C,
1578 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3,
1579 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA,
1580 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1,
1581 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8,
1582 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F,
1583 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76,
1584 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D,
1585 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344,
1586 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B,
1587 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12,
1588 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739,
1589 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320,
1590 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17,
1591 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E,
1592 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525,
1593 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C,
1594 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73,
1595 0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A,
1596 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541,
1597 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158,
1598 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF,
1599 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6,
1600 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED,
1601 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4,
1602 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB,
1603 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2,
1604 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589,
1605 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190,
1606 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87,
1607 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E,
1608 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5,
1609 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC,
1610 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3,
1611 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA,
1612 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1,
1613 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8,
1614 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F,
1615 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856,
1616 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D,
1617 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064,
1618 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B,
1619 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832,
1620 0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419,
1621 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000
1622 };
1623
1624