Lines Matching +full:broken +full:- +full:prefetch +full:- +full:cmd

1 /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
8 3 primary sources of the mess --
26 03/02/2000 changes for better/correct(?) cache-flushing (deller)
33 This driver is for the Apricot 82596 bus-master interface
45 non-cached page, so we can run on 68060 in copyback mode.
52 Most of my modifications relate to the braindead big-endian
54 'big-endian' mode, it thinks a 32 bit value of 0x12345678
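
The header-comment fragments above describe the 82596's byte-order quirk that every SWAP16()/SWAP32() call in the matched lines below works around. The macro definitions themselves are not among the matched lines; as a minimal sketch (assuming the driver keeps the descriptor fields shared with the chip in the chip's little-endian order, which is what the usage pattern suggests), they would look roughly like:

#include <asm/byteorder.h>	/* cpu_to_le16() / cpu_to_le32() */

/* Illustrative only -- the real definitions live elsewhere in the file. */
#define SWAP32(x)	cpu_to_le32((u32)(x))	/* 32-bit field in the chip's byte order */
#define SWAP16(x)	cpu_to_le16((u16)(x))	/* 16-bit field in the chip's byte order */
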
83 #include <linux/dma-mapping.h>
115 * the following commands are available (p5-18). The 32-bit port command
116 * must be word-swapped with the most significant word written first.
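
The PORT-command comment above is the constraint that the platform's mpu_port() routine has to honour; that routine is not among the matched lines. A minimal sketch of such a word-swapped write, assuming a hypothetical memory-mapped 16-bit command register 'port_reg':

#include <linux/io.h>	/* writew() */

/* Hypothetical helper: issue a 32-bit PORT command as two 16-bit writes,
 * most significant word first, as the comment above requires. */
static void i596_port_write(void __iomem *port_reg, u32 cmd)
{
	writew(cmd >> 16, port_reg);	/* most significant word first */
	writew(cmd & 0xffff, port_reg);	/* then the least significant word */
}
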
139 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
140 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
201 struct i596_cmd cmd; member
215 struct i596_cmd cmd; member
221 struct i596_cmd cmd; member
227 struct i596_cmd cmd; member
232 struct i596_cmd cmd; member
238 unsigned short cmd; member
277 u32 cmd; member
334 0x8E, /* length, prefetch on */
353 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
362 static int max_cmd_backlog = TX_RING_SIZE-1;
370 return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma); in virt_to_dma()
377 dma_sync_single_for_device(ndev->dev.parent, in dma_sync_dev()
385 dma_sync_single_for_cpu(ndev->dev.parent, in dma_sync_cpu()
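
dma_sync_dev() and dma_sync_cpu() above are the wrappers behind the "cache-flushing" changelog entry near the top of the listing: descriptors shared with the chip are synced toward the device after the CPU writes them, and back toward the CPU before the CPU reads them. A sketch of that handshake for the SCB, using only field names and call signatures visible in the matched lines (the helper itself is hypothetical, and the channel-attention step between the two syncs is omitted):

/* Hypothetical illustration of the sync ordering, not driver code. */
static u16 scb_issue_and_poll(struct net_device *dev, struct i596_dma *dma,
			      u16 cmd)
{
	dma->scb.command = SWAP16(cmd);				/* CPU fills the descriptor */
	dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));	/* flush it toward the chip */
	/* ... chip acts on the command and updates the status word ... */
	dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));	/* invalidate before reading */
	return SWAP16(dma->scb.status);				/* now safe to read */
}
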
402 dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp)); in wait_istat()
403 while (--delcnt && dma->iscp.stat) { in wait_istat()
405 dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp)); in wait_istat()
409 dev->name, str, SWAP16(dma->iscp.stat)); in wait_istat()
410 return -1; in wait_istat()
418 dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb)); in wait_cmd()
419 while (--delcnt && dma->scb.command) { in wait_cmd()
421 dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb)); in wait_cmd()
424 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n", in wait_cmd()
425 dev->name, str, in wait_cmd()
426 SWAP16(dma->scb.status), in wait_cmd()
427 SWAP16(dma->scb.command)); in wait_cmd()
428 return -1; in wait_cmd()
437 struct i596_dma *dma = lp->dma; in i596_display_data()
438 struct i596_cmd *cmd; in i596_display_data() local
443 &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp)); in i596_display_data()
445 &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb)); in i596_display_data()
447 " .cmd = %08x, .rfd = %08x\n", in i596_display_data()
448 &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command), in i596_display_data()
449 SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd)); in i596_display_data()
452 SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err), in i596_display_data()
453 SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err), in i596_display_data()
454 SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err)); in i596_display_data()
455 cmd = lp->cmd_head; in i596_display_data()
456 while (cmd != NULL) { in i596_display_data()
458 "cmd at %p, .status = %04x, .command = %04x," in i596_display_data()
460 cmd, SWAP16(cmd->status), SWAP16(cmd->command), in i596_display_data()
461 SWAP32(cmd->b_next)); in i596_display_data()
462 cmd = cmd->v_next; in i596_display_data()
464 rfd = lp->rfd_head; in i596_display_data()
468 " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x," in i596_display_data()
470 rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd), in i596_display_data()
471 SWAP32(rfd->b_next), SWAP32(rfd->rbd), in i596_display_data()
472 SWAP16(rfd->count)); in i596_display_data()
473 rfd = rfd->v_next; in i596_display_data()
474 } while (rfd != lp->rfd_head); in i596_display_data()
475 rbd = lp->rbd_head; in i596_display_data()
481 rbd, SWAP16(rbd->count), SWAP32(rbd->b_next), in i596_display_data()
482 SWAP32(rbd->b_data), SWAP16(rbd->size)); in i596_display_data()
483 rbd = rbd->v_next; in i596_display_data()
484 } while (rbd != lp->rbd_head); in i596_display_data()
491 struct i596_dma *dma = lp->dma; in init_rx_bufs()
498 for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) { in init_rx_bufs()
504 return -1; in init_rx_bufs()
505 dma_addr = dma_map_single(dev->dev.parent, skb->data, in init_rx_bufs()
507 rbd->v_next = rbd+1; in init_rx_bufs()
508 rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1)); in init_rx_bufs()
509 rbd->b_addr = SWAP32(virt_to_dma(lp, rbd)); in init_rx_bufs()
510 rbd->skb = skb; in init_rx_bufs()
511 rbd->v_data = skb->data; in init_rx_bufs()
512 rbd->b_data = SWAP32(dma_addr); in init_rx_bufs()
513 rbd->size = SWAP16(PKT_BUF_SZ); in init_rx_bufs()
515 lp->rbd_head = dma->rbds; in init_rx_bufs()
516 rbd = dma->rbds + rx_ring_size - 1; in init_rx_bufs()
517 rbd->v_next = dma->rbds; in init_rx_bufs()
518 rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds)); in init_rx_bufs()
522 for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) { in init_rx_bufs()
523 rfd->rbd = I596_NULL; in init_rx_bufs()
524 rfd->v_next = rfd+1; in init_rx_bufs()
525 rfd->v_prev = rfd-1; in init_rx_bufs()
526 rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1)); in init_rx_bufs()
527 rfd->cmd = SWAP16(CMD_FLEX); in init_rx_bufs()
529 lp->rfd_head = dma->rfds; in init_rx_bufs()
530 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); in init_rx_bufs()
531 rfd = dma->rfds; in init_rx_bufs()
532 rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head)); in init_rx_bufs()
533 rfd->v_prev = dma->rfds + rx_ring_size - 1; in init_rx_bufs()
534 rfd = dma->rfds + rx_ring_size - 1; in init_rx_bufs()
535 rfd->v_next = dma->rfds; in init_rx_bufs()
536 rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds)); in init_rx_bufs()
537 rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX); in init_rx_bufs()
549 for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) { in remove_rx_bufs()
550 if (rbd->skb == NULL) in remove_rx_bufs()
552 dma_unmap_single(dev->dev.parent, in remove_rx_bufs()
553 (dma_addr_t)SWAP32(rbd->b_data), in remove_rx_bufs()
555 dev_kfree_skb(rbd->skb); in remove_rx_bufs()
563 struct i596_dma *dma = lp->dma; in rebuild_rx_bufs()
569 dma->rfds[i].rbd = I596_NULL; in rebuild_rx_bufs()
570 dma->rfds[i].cmd = SWAP16(CMD_FLEX); in rebuild_rx_bufs()
572 dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX); in rebuild_rx_bufs()
573 lp->rfd_head = dma->rfds; in rebuild_rx_bufs()
574 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); in rebuild_rx_bufs()
575 lp->rbd_head = dma->rbds; in rebuild_rx_bufs()
576 dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds)); in rebuild_rx_bufs()
585 struct i596_dma *dma = lp->dma; in init_i596_mem()
589 udelay(100); /* Wait 100us - seems to help */ in init_i596_mem()
593 lp->last_cmd = jiffies; in init_i596_mem()
595 dma->scp.sysbus = SYSBUS; in init_i596_mem()
596 dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp))); in init_i596_mem()
597 dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb))); in init_i596_mem()
598 dma->iscp.stat = SWAP32(ISCP_BUSY); in init_i596_mem()
599 lp->cmd_backlog = 0; in init_i596_mem()
601 lp->cmd_head = NULL; in init_i596_mem()
602 dma->scb.cmd = I596_NULL; in init_i596_mem()
604 DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); in init_i596_mem()
606 dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp)); in init_i596_mem()
607 dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp)); in init_i596_mem()
608 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb)); in init_i596_mem()
610 mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp)); in init_i596_mem()
616 dev->name)); in init_i596_mem()
618 if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { in init_i596_mem()
619 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); in init_i596_mem()
626 dma->scb.command = 0; in init_i596_mem()
627 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb)); in init_i596_mem()
630 "%s: queuing CmdConfigure\n", dev->name)); in init_i596_mem()
631 memcpy(dma->cf_cmd.i596_config, init_setup, 14); in init_i596_mem()
632 dma->cf_cmd.cmd.command = SWAP16(CmdConfigure); in init_i596_mem()
633 dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd)); in init_i596_mem()
634 i596_add_cmd(dev, &dma->cf_cmd.cmd); in init_i596_mem()
636 DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); in init_i596_mem()
637 memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN); in init_i596_mem()
638 dma->sa_cmd.cmd.command = SWAP16(CmdSASetup); in init_i596_mem()
639 dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd)); in init_i596_mem()
640 i596_add_cmd(dev, &dma->sa_cmd.cmd); in init_i596_mem()
642 DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name)); in init_i596_mem()
643 dma->tdr_cmd.cmd.command = SWAP16(CmdTDR); in init_i596_mem()
644 dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd)); in init_i596_mem()
645 i596_add_cmd(dev, &dma->tdr_cmd.cmd); in init_i596_mem()
647 spin_lock_irqsave (&lp->lock, flags); in init_i596_mem()
650 spin_unlock_irqrestore (&lp->lock, flags); in init_i596_mem()
653 DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name)); in init_i596_mem()
654 dma->scb.command = SWAP16(RX_START); in init_i596_mem()
655 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds)); in init_i596_mem()
656 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb)); in init_i596_mem()
660 spin_unlock_irqrestore (&lp->lock, flags); in init_i596_mem()
664 "%s: Receive unit started OK\n", dev->name)); in init_i596_mem()
668 free_irq(dev->irq, dev); in init_i596_mem()
670 printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name); in init_i596_mem()
672 return -1; in init_i596_mem()
685 lp->rfd_head, lp->rbd_head)); in i596_rx()
688 rfd = lp->rfd_head; /* Ref next frame to check */ in i596_rx()
691 while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */ in i596_rx()
692 if (rfd->rbd == I596_NULL) in i596_rx()
694 else if (rfd->rbd == lp->rbd_head->b_addr) { in i596_rx()
695 rbd = lp->rbd_head; in i596_rx()
698 printk(KERN_ERR "%s: rbd chain broken!\n", dev->name); in i596_rx()
704 rfd, rfd->rbd, rfd->stat)); in i596_rx()
706 if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) { in i596_rx()
708 int pkt_len = SWAP16(rbd->count) & 0x3fff; in i596_rx()
709 struct sk_buff *skb = rbd->skb; in i596_rx()
712 DEB(DEB_RXADDR, print_eth(rbd->v_data, "received")); in i596_rx()
723 dma_unmap_single(dev->dev.parent, in i596_rx()
724 (dma_addr_t)SWAP32(rbd->b_data), in i596_rx()
737 rbd->skb = newskb; in i596_rx()
738 dma_addr = dma_map_single(dev->dev.parent, in i596_rx()
739 newskb->data, in i596_rx()
742 rbd->v_data = newskb->data; in i596_rx()
743 rbd->b_data = SWAP32(dma_addr); in i596_rx()
751 dev->stats.rx_dropped++; in i596_rx()
755 dma_sync_single_for_cpu(dev->dev.parent, in i596_rx()
756 (dma_addr_t)SWAP32(rbd->b_data), in i596_rx()
758 skb_put_data(skb, rbd->v_data, in i596_rx()
760 dma_sync_single_for_device(dev->dev.parent, in i596_rx()
761 (dma_addr_t)SWAP32(rbd->b_data), in i596_rx()
764 skb->len = pkt_len; in i596_rx()
765 skb->protocol = eth_type_trans(skb, dev); in i596_rx()
767 dev->stats.rx_packets++; in i596_rx()
768 dev->stats.rx_bytes += pkt_len; in i596_rx()
773 dev->name, rfd->stat)); in i596_rx()
774 dev->stats.rx_errors++; in i596_rx()
775 if (rfd->stat & SWAP16(0x0100)) in i596_rx()
776 dev->stats.collisions++; in i596_rx()
777 if (rfd->stat & SWAP16(0x8000)) in i596_rx()
778 dev->stats.rx_length_errors++; in i596_rx()
779 if (rfd->stat & SWAP16(0x0001)) in i596_rx()
780 dev->stats.rx_over_errors++; in i596_rx()
781 if (rfd->stat & SWAP16(0x0002)) in i596_rx()
782 dev->stats.rx_fifo_errors++; in i596_rx()
783 if (rfd->stat & SWAP16(0x0004)) in i596_rx()
784 dev->stats.rx_frame_errors++; in i596_rx()
785 if (rfd->stat & SWAP16(0x0008)) in i596_rx()
786 dev->stats.rx_crc_errors++; in i596_rx()
787 if (rfd->stat & SWAP16(0x0010)) in i596_rx()
788 dev->stats.rx_length_errors++; in i596_rx()
793 if (rbd != NULL && (rbd->count & SWAP16(0x4000))) { in i596_rx()
794 rbd->count = 0; in i596_rx()
795 lp->rbd_head = rbd->v_next; in i596_rx()
801 rfd->rbd = I596_NULL; in i596_rx()
802 rfd->stat = 0; in i596_rx()
803 rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX); in i596_rx()
804 rfd->count = 0; in i596_rx()
808 lp->dma->scb.rfd = rfd->b_next; in i596_rx()
809 lp->rfd_head = rfd->v_next; in i596_rx()
812 /* Remove end-of-list from old end descriptor */ in i596_rx()
814 rfd->v_prev->cmd = SWAP16(CMD_FLEX); in i596_rx()
815 dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd)); in i596_rx()
816 rfd = lp->rfd_head; in i596_rx()
830 while (lp->cmd_head != NULL) { in i596_cleanup_cmd()
831 ptr = lp->cmd_head; in i596_cleanup_cmd()
832 lp->cmd_head = ptr->v_next; in i596_cleanup_cmd()
833 lp->cmd_backlog--; in i596_cleanup_cmd()
835 switch (SWAP16(ptr->command) & 0x7) { in i596_cleanup_cmd()
839 struct sk_buff *skb = tx_cmd->skb; in i596_cleanup_cmd()
840 dma_unmap_single(dev->dev.parent, in i596_cleanup_cmd()
841 tx_cmd->dma_addr, in i596_cleanup_cmd()
842 skb->len, DMA_TO_DEVICE); in i596_cleanup_cmd()
846 dev->stats.tx_errors++; in i596_cleanup_cmd()
847 dev->stats.tx_aborted_errors++; in i596_cleanup_cmd()
849 ptr->v_next = NULL; in i596_cleanup_cmd()
850 ptr->b_next = I596_NULL; in i596_cleanup_cmd()
851 tx_cmd->cmd.command = 0; /* Mark as free */ in i596_cleanup_cmd()
855 ptr->v_next = NULL; in i596_cleanup_cmd()
856 ptr->b_next = I596_NULL; in i596_cleanup_cmd()
861 wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out"); in i596_cleanup_cmd()
862 lp->dma->scb.cmd = I596_NULL; in i596_cleanup_cmd()
863 dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb)); in i596_cleanup_cmd()
873 spin_lock_irqsave (&lp->lock, flags); in i596_reset()
875 wait_cmd(dev, lp->dma, 100, "i596_reset timed out"); in i596_reset()
880 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); in i596_reset()
881 dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb)); in i596_reset()
885 wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out"); in i596_reset()
886 spin_unlock_irqrestore (&lp->lock, flags); in i596_reset()
896 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) in i596_add_cmd() argument
899 struct i596_dma *dma = lp->dma; in i596_add_cmd()
903 lp->cmd_head)); in i596_add_cmd()
905 cmd->status = 0; in i596_add_cmd()
906 cmd->command |= SWAP16(CMD_EOL | CMD_INTR); in i596_add_cmd()
907 cmd->v_next = NULL; in i596_add_cmd()
908 cmd->b_next = I596_NULL; in i596_add_cmd()
909 dma_sync_dev(dev, cmd, sizeof(struct i596_cmd)); in i596_add_cmd()
911 spin_lock_irqsave (&lp->lock, flags); in i596_add_cmd()
913 if (lp->cmd_head != NULL) { in i596_add_cmd()
914 lp->cmd_tail->v_next = cmd; in i596_add_cmd()
915 lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status)); in i596_add_cmd()
916 dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd)); in i596_add_cmd()
918 lp->cmd_head = cmd; in i596_add_cmd()
920 dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status)); in i596_add_cmd()
921 dma->scb.command = SWAP16(CUC_START); in i596_add_cmd()
922 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb)); in i596_add_cmd()
925 lp->cmd_tail = cmd; in i596_add_cmd()
926 lp->cmd_backlog++; in i596_add_cmd()
928 spin_unlock_irqrestore (&lp->lock, flags); in i596_add_cmd()
930 if (lp->cmd_backlog > max_cmd_backlog) { in i596_add_cmd()
931 unsigned long tickssofar = jiffies - lp->last_cmd; in i596_add_cmd()
938 dev->name); in i596_add_cmd()
948 "%s: i596_open() irq %d.\n", dev->name, dev->irq)); in i596_open()
951 printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name); in i596_open()
952 return -EAGAIN; in i596_open()
955 printk(KERN_ERR "%s: Failed to init memory\n", dev->name); in i596_open()
964 return -EAGAIN; in i596_open()
974 dev->name)); in i596_tx_timeout()
976 dev->stats.tx_errors++; in i596_tx_timeout()
979 if (lp->last_restart == dev->stats.tx_packets) { in i596_tx_timeout()
986 lp->dma->scb.command = SWAP16(CUC_START | RX_START); in i596_tx_timeout()
987 dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb)); in i596_tx_timeout()
989 lp->last_restart = dev->stats.tx_packets; in i596_tx_timeout()
1002 short length = skb->len; in i596_start_xmit()
1006 dev->name, skb->len, skb->data)); in i596_start_xmit()
1016 tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd; in i596_start_xmit()
1017 tbd = lp->dma->tbds + lp->next_tx_cmd; in i596_start_xmit()
1019 if (tx_cmd->cmd.command) { in i596_start_xmit()
1022 dev->name)); in i596_start_xmit()
1023 dev->stats.tx_dropped++; in i596_start_xmit()
1027 if (++lp->next_tx_cmd == TX_RING_SIZE) in i596_start_xmit()
1028 lp->next_tx_cmd = 0; in i596_start_xmit()
1029 tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd)); in i596_start_xmit()
1030 tbd->next = I596_NULL; in i596_start_xmit()
1032 tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx); in i596_start_xmit()
1033 tx_cmd->skb = skb; in i596_start_xmit()
1035 tx_cmd->pad = 0; in i596_start_xmit()
1036 tx_cmd->size = 0; in i596_start_xmit()
1037 tbd->pad = 0; in i596_start_xmit()
1038 tbd->size = SWAP16(EOF | length); in i596_start_xmit()
1040 tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data, in i596_start_xmit()
1041 skb->len, DMA_TO_DEVICE); in i596_start_xmit()
1042 tbd->data = SWAP32(tx_cmd->dma_addr); in i596_start_xmit()
1044 DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued")); in i596_start_xmit()
1047 i596_add_cmd(dev, &tx_cmd->cmd); in i596_start_xmit()
1049 dev->stats.tx_packets++; in i596_start_xmit()
1050 dev->stats.tx_bytes += length; in i596_start_xmit()
1060 printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n", in print_eth()
1090 if (!dev->base_addr || !dev->irq) in i82596_probe()
1091 return -ENODEV; in i82596_probe()
1093 dev->netdev_ops = &i596_netdev_ops; in i82596_probe()
1094 dev->watchdog_timeo = TX_TIMEOUT; in i82596_probe()
1096 memset(lp->dma, 0, sizeof(struct i596_dma)); in i82596_probe()
1097 lp->dma->scb.command = 0; in i82596_probe()
1098 lp->dma->scb.cmd = I596_NULL; in i82596_probe()
1099 lp->dma->scb.rfd = I596_NULL; in i82596_probe()
1100 spin_lock_init(&lp->lock); in i82596_probe()
1102 dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma)); in i82596_probe()
1109 dev->name, dev->base_addr, dev->dev_addr, in i82596_probe()
1110 dev->irq)); in i82596_probe()
1112 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n", in i82596_probe()
1113 dev->name, lp->dma, (int)sizeof(struct i596_dma), in i82596_probe()
1114 &lp->dma->scb)); in i82596_probe()
1122 disable_irq(dev->irq); in i596_poll_controller()
1123 i596_interrupt(dev->irq, dev); in i596_poll_controller()
1124 enable_irq(dev->irq); in i596_poll_controller()
1136 dma = lp->dma; in i596_interrupt()
1138 spin_lock (&lp->lock); in i596_interrupt()
1141 status = SWAP16(dma->scb.status); in i596_interrupt()
1145 dev->name, dev->irq, status)); in i596_interrupt()
1152 dev->name)); in i596_interrupt()
1153 spin_unlock (&lp->lock); in i596_interrupt()
1164 dev->name)); in i596_interrupt()
1169 dev->name, status & 0x0700)); in i596_interrupt()
1171 while (lp->cmd_head != NULL) { in i596_interrupt()
1172 dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd)); in i596_interrupt()
1173 if (!(lp->cmd_head->status & SWAP16(STAT_C))) in i596_interrupt()
1176 ptr = lp->cmd_head; in i596_interrupt()
1180 "cmd_head->status = %04x, ->command = %04x\n", in i596_interrupt()
1181 SWAP16(lp->cmd_head->status), in i596_interrupt()
1182 SWAP16(lp->cmd_head->command))); in i596_interrupt()
1183 lp->cmd_head = ptr->v_next; in i596_interrupt()
1184 lp->cmd_backlog--; in i596_interrupt()
1186 switch (SWAP16(ptr->command) & 0x7) { in i596_interrupt()
1190 struct sk_buff *skb = tx_cmd->skb; in i596_interrupt()
1192 if (ptr->status & SWAP16(STAT_OK)) { in i596_interrupt()
1194 print_eth(skb->data, "tx-done")); in i596_interrupt()
1196 dev->stats.tx_errors++; in i596_interrupt()
1197 if (ptr->status & SWAP16(0x0020)) in i596_interrupt()
1198 dev->stats.collisions++; in i596_interrupt()
1199 if (!(ptr->status & SWAP16(0x0040))) in i596_interrupt()
1200 dev->stats.tx_heartbeat_errors++; in i596_interrupt()
1201 if (ptr->status & SWAP16(0x0400)) in i596_interrupt()
1202 dev->stats.tx_carrier_errors++; in i596_interrupt()
1203 if (ptr->status & SWAP16(0x0800)) in i596_interrupt()
1204 dev->stats.collisions++; in i596_interrupt()
1205 if (ptr->status & SWAP16(0x1000)) in i596_interrupt()
1206 dev->stats.tx_aborted_errors++; in i596_interrupt()
1208 dma_unmap_single(dev->dev.parent, in i596_interrupt()
1209 tx_cmd->dma_addr, in i596_interrupt()
1210 skb->len, DMA_TO_DEVICE); in i596_interrupt()
1213 tx_cmd->cmd.command = 0; /* Mark free */ in i596_interrupt()
1218 unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status); in i596_interrupt()
1223 dev->name)); in i596_interrupt()
1228 dev->name); in i596_interrupt()
1232 dev->name); in i596_interrupt()
1236 dev->name); in i596_interrupt()
1240 dev->name, status & 0x07ff)); in i596_interrupt()
1249 ptr->command = 0; in i596_interrupt()
1252 ptr->v_next = NULL; in i596_interrupt()
1253 ptr->b_next = I596_NULL; in i596_interrupt()
1255 lp->last_cmd = jiffies; in i596_interrupt()
1260 * only add to the cmd queue when the CU is stopped. in i596_interrupt()
1262 ptr = lp->cmd_head; in i596_interrupt()
1263 while ((ptr != NULL) && (ptr != lp->cmd_tail)) { in i596_interrupt()
1266 ptr->command &= SWAP16(0x1fff); in i596_interrupt()
1267 ptr = ptr->v_next; in i596_interrupt()
1271 if (lp->cmd_head != NULL) in i596_interrupt()
1273 dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status)); in i596_interrupt()
1274 dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb)); in i596_interrupt()
1281 dev->name)); in i596_interrupt()
1283 /* Only RX_START if stopped - RGH 07-07-96 */ in i596_interrupt()
1289 dev->name, status)); in i596_interrupt()
1291 dev->stats.rx_errors++; in i596_interrupt()
1292 dev->stats.rx_fifo_errors++; in i596_interrupt()
1298 dma->scb.command = SWAP16(ack_cmd); in i596_interrupt()
1299 dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb)); in i596_interrupt()
1308 DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); in i596_interrupt()
1310 spin_unlock (&lp->lock); in i596_interrupt()
1324 dev->name, SWAP16(lp->dma->scb.status))); in i596_close()
1326 spin_lock_irqsave(&lp->lock, flags); in i596_close()
1328 wait_cmd(dev, lp->dma, 100, "close1 timed out"); in i596_close()
1329 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT); in i596_close()
1330 dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb)); in i596_close()
1334 wait_cmd(dev, lp->dma, 100, "close2 timed out"); in i596_close()
1335 spin_unlock_irqrestore(&lp->lock, flags); in i596_close()
1339 free_irq(dev->irq, dev); in i596_close()
1352 struct i596_dma *dma = lp->dma; in set_multicast_list()
1358 dev->name, netdev_mc_count(dev), in set_multicast_list()
1359 dev->flags & IFF_PROMISC ? "ON" : "OFF", in set_multicast_list()
1360 dev->flags & IFF_ALLMULTI ? "ON" : "OFF")); in set_multicast_list()
1362 if ((dev->flags & IFF_PROMISC) && in set_multicast_list()
1363 !(dma->cf_cmd.i596_config[8] & 0x01)) { in set_multicast_list()
1364 dma->cf_cmd.i596_config[8] |= 0x01; in set_multicast_list()
1367 if (!(dev->flags & IFF_PROMISC) && in set_multicast_list()
1368 (dma->cf_cmd.i596_config[8] & 0x01)) { in set_multicast_list()
1369 dma->cf_cmd.i596_config[8] &= ~0x01; in set_multicast_list()
1372 if ((dev->flags & IFF_ALLMULTI) && in set_multicast_list()
1373 (dma->cf_cmd.i596_config[11] & 0x20)) { in set_multicast_list()
1374 dma->cf_cmd.i596_config[11] &= ~0x20; in set_multicast_list()
1377 if (!(dev->flags & IFF_ALLMULTI) && in set_multicast_list()
1378 !(dma->cf_cmd.i596_config[11] & 0x20)) { in set_multicast_list()
1379 dma->cf_cmd.i596_config[11] |= 0x20; in set_multicast_list()
1383 if (dma->cf_cmd.cmd.command) in set_multicast_list()
1386 dev->name); in set_multicast_list()
1388 dma->cf_cmd.cmd.command = SWAP16(CmdConfigure); in set_multicast_list()
1389 dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd)); in set_multicast_list()
1390 i596_add_cmd(dev, &dma->cf_cmd.cmd); in set_multicast_list()
1398 dev->name, cnt); in set_multicast_list()
1404 struct mc_cmd *cmd; in set_multicast_list() local
1406 cmd = &dma->mc_cmd; in set_multicast_list()
1407 cmd->cmd.command = SWAP16(CmdMulticastList); in set_multicast_list()
1408 cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6); in set_multicast_list()
1409 cp = cmd->mc_addrs; in set_multicast_list()
1411 if (!cnt--) in set_multicast_list()
1413 memcpy(cp, ha->addr, ETH_ALEN); in set_multicast_list()
1418 dev->name, cp)); in set_multicast_list()
1421 dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); in set_multicast_list()
1422 i596_add_cmd(dev, &cmd->cmd); in set_multicast_list()