Lines matching full:mp in the Apple MACE Ethernet driver (drivers/net/ethernet/apple/mace.c)

91 static inline void mace_clean_rings(struct mace_data *mp);
112 struct mace_data *mp; in mace_probe() local
155 mp = netdev_priv(dev); in mace_probe()
156 mp->mdev = mdev; in mace_probe()
160 mp->mace = ioremap(dev->base_addr, 0x1000); in mace_probe()
161 if (mp->mace == NULL) { in mace_probe()
173 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | in mace_probe()
174 in_8(&mp->mace->chipid_lo); in mace_probe()
177 mp = netdev_priv(dev); in mace_probe()
178 mp->maccc = ENXMT | ENRCV; in mace_probe()
180 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000); in mace_probe()
181 if (mp->tx_dma == NULL) { in mace_probe()
186 mp->tx_dma_intr = macio_irq(mdev, 1); in mace_probe()
188 mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000); in mace_probe()
189 if (mp->rx_dma == NULL) { in mace_probe()
194 mp->rx_dma_intr = macio_irq(mdev, 2); in mace_probe()
196 mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); in mace_probe()
197 mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; in mace_probe()
199 memset((char *) mp->tx_cmds, 0, in mace_probe()
201 timer_setup(&mp->tx_timeout, mace_tx_timeout, 0); in mace_probe()
202 spin_lock_init(&mp->lock); in mace_probe()
203 mp->timeout_active = 0; in mace_probe()
206 mp->port_aaui = port_aaui; in mace_probe()
210 mp->port_aaui = 1; in mace_probe()
213 mp->port_aaui = 1; in mace_probe()
215 mp->port_aaui = 0; in mace_probe()
232 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev); in mace_probe()
234 printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr); in mace_probe()
237 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev); in mace_probe()
239 printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr); in mace_probe()
251 mp->chipid >> 8, mp->chipid & 0xff); in mace_probe()
262 iounmap(mp->rx_dma); in mace_probe()
264 iounmap(mp->tx_dma); in mace_probe()
266 iounmap(mp->mace); in mace_probe()
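
The mace_probe() matches above show the usual probe-time discipline: every ioremap() result is NULL-checked, and a request_irq() failure unwinds the mappings made so far. A minimal sketch of that unwind ladder, assuming made-up names (my_probe, my_intr, the err_unmap label are illustrative, not from mace.c):

```c
/*
 * Sketch of the probe-time pattern above: map a register block,
 * check for NULL, and unwind on request_irq() failure.
 * my_probe, my_intr and the label names are illustrative.
 */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

static irqreturn_t my_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_probe(resource_size_t regs_phys, int irq, void *dev)
{
	void __iomem *regs;
	int rc;

	regs = ioremap(regs_phys, 0x1000);	/* same size mace.c maps */
	if (regs == NULL)
		return -ENOMEM;			/* nothing mapped yet */

	rc = request_irq(irq, my_intr, 0, "my-dev", dev);
	if (rc) {
		pr_err("my-dev: can't get irq %d\n", irq);
		goto err_unmap;			/* undo the mapping */
	}
	return 0;

err_unmap:
	iounmap(regs);
	return rc;
}
```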
278 struct mace_data *mp; in mace_remove() local
284 mp = netdev_priv(dev); in mace_remove()
289 free_irq(mp->tx_dma_intr, dev); in mace_remove()
290 free_irq(mp->rx_dma_intr, dev); in mace_remove()
292 iounmap(mp->rx_dma); in mace_remove()
293 iounmap(mp->tx_dma); in mace_remove()
294 iounmap(mp->mace); in mace_remove()
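
mace_remove() releases resources in the reverse order of probe: both DMA interrupts are freed before the three register mappings they might dereference. A sketch of that teardown order, with illustrative names:

```c
/*
 * Sketch of the teardown order in mace_remove(): interrupts are
 * released first, so no handler can touch a mapping after it is
 * gone. my_remove and its parameters are illustrative.
 */
#include <linux/interrupt.h>
#include <linux/io.h>

static void my_remove(int tx_irq, int rx_irq, void *dev,
		      void __iomem *rx_regs, void __iomem *tx_regs,
		      void __iomem *mace_regs)
{
	free_irq(tx_irq, dev);		/* no more handlers can run */
	free_irq(rx_irq, dev);
	iounmap(rx_regs);		/* now safe to drop the mappings */
	iounmap(tx_regs);
	iounmap(mace_regs);
}
```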
320 struct mace_data *mp = netdev_priv(dev); in mace_reset() local
321 volatile struct mace __iomem *mb = mp->mace; in mace_reset()
353 if (mp->chipid == BROKEN_ADDRCHG_REV) in mace_reset()
364 if (mp->chipid != BROKEN_ADDRCHG_REV) in mace_reset()
367 if (mp->port_aaui) in mace_reset()
375 struct mace_data *mp = netdev_priv(dev); in __mace_set_address() local
376 volatile struct mace __iomem *mb = mp->mace; in __mace_set_address()
382 if (mp->chipid == BROKEN_ADDRCHG_REV) in __mace_set_address()
394 if (mp->chipid != BROKEN_ADDRCHG_REV) in __mace_set_address()
400 struct mace_data *mp = netdev_priv(dev); in mace_set_address() local
401 volatile struct mace __iomem *mb = mp->mace; in mace_set_address()
404 spin_lock_irqsave(&mp->lock, flags); in mace_set_address()
409 out_8(&mb->maccc, mp->maccc); in mace_set_address()
411 spin_unlock_irqrestore(&mp->lock, flags); in mace_set_address()
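
The mace_set_address() matches show the driver's locking convention: take mp->lock with IRQs disabled, reprogram the hardware, write back the cached mode byte (maccc), then unlock. A minimal sketch of that shape, assuming hypothetical names (struct my_priv, my_set_address):

```c
/*
 * Sketch of the locking in mace_set_address(). The struct and
 * function are illustrative; only the pattern mirrors the driver.
 */
#include <linux/spinlock.h>

struct my_priv {
	spinlock_t lock;
	unsigned char maccc;	/* software copy of the mode register */
};

static void my_set_address(struct my_priv *p, void (*reprogram)(void))
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);	/* exclude the IRQ handlers */
	reprogram();				/* update address registers */
	/* the real driver then does out_8(&mb->maccc, mp->maccc)
	 * to re-enable RX/TX before unlocking */
	spin_unlock_irqrestore(&p->lock, flags);
}
```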
415 static inline void mace_clean_rings(struct mace_data *mp) in mace_clean_rings() argument
421 if (mp->rx_bufs[i] != NULL) { in mace_clean_rings()
422 dev_kfree_skb(mp->rx_bufs[i]); in mace_clean_rings()
423 mp->rx_bufs[i] = NULL; in mace_clean_rings()
426 for (i = mp->tx_empty; i != mp->tx_fill; ) { in mace_clean_rings()
427 dev_kfree_skb(mp->tx_bufs[i]); in mace_clean_rings()
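
The loop at line 426 walks the TX ring from tx_empty to tx_fill, a classic circular-buffer drain. A standalone, runnable sketch of that index arithmetic (N_TX_RING and the index values are placeholders):

```c
#include <stdio.h>

#define N_TX_RING 4	/* placeholder ring size */

int main(void)
{
	int tx_empty = 2, tx_fill = 1;	/* consumer and producer indices */
	int i;

	/* Drain every occupied slot: advance i with wraparound until it
	 * catches up with the producer index, as mace_clean_rings() does
	 * before calling dev_kfree_skb() on each buffer. */
	for (i = tx_empty; i != tx_fill; ) {
		printf("free slot %d\n", i);
		if (++i >= N_TX_RING)
			i = 0;		/* wrap to the start of the ring */
	}
	return 0;
}
```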
435 struct mace_data *mp = netdev_priv(dev); in mace_open() local
436 volatile struct mace __iomem *mb = mp->mace; in mace_open()
437 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_open()
438 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_open()
448 mace_clean_rings(mp); in mace_open()
449 memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd)); in mace_open()
450 cp = mp->rx_cmds; in mace_open()
459 mp->rx_bufs[i] = skb; in mace_open()
466 mp->rx_bufs[i] = NULL; in mace_open()
468 mp->rx_fill = i; in mace_open()
469 mp->rx_empty = 0; in mace_open()
474 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds)); in mace_open()
478 out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds)); in mace_open()
482 cp = mp->tx_cmds + NCMDS_TX * N_TX_RING; in mace_open()
484 cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds)); in mace_open()
488 out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds)); in mace_open()
489 mp->tx_fill = 0; in mace_open()
490 mp->tx_empty = 0; in mace_open()
491 mp->tx_fullup = 0; in mace_open()
492 mp->tx_active = 0; in mace_open()
493 mp->tx_bad_runt = 0; in mace_open()
496 out_8(&mb->maccc, mp->maccc); in mace_open()
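
mace_open() builds the DBDMA command lists as closed loops: the command after the last ring slot (cmd_dep at lines 474 and 484) branches back to the head, so the DMA engine cycles over the ring indefinitely. A plain-C model of that shape, assuming stand-in names (struct cmd, N_RING); in the real driver cmd_dep holds cpu_to_le32(virt_to_bus(first_cmd)), here it is just a pointer:

```c
#include <stdio.h>

#define N_RING 4

struct cmd {
	struct cmd *cmd_dep;	/* branch target: next command list */
};

int main(void)
{
	static struct cmd ring[N_RING + 1];	/* +1 for the loop-back slot */

	ring[N_RING].cmd_dep = &ring[0];	/* tail branches to head */

	printf("tail %p branches to head %p\n",
	       (void *)&ring[N_RING], (void *)ring[N_RING].cmd_dep);
	return 0;
}
```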
505 struct mace_data *mp = netdev_priv(dev); in mace_close() local
506 volatile struct mace __iomem *mb = mp->mace; in mace_close()
507 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_close()
508 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_close()
518 mace_clean_rings(mp); in mace_close()
525 struct mace_data *mp = netdev_priv(dev); in mace_set_timeout() local
527 if (mp->timeout_active) in mace_set_timeout()
528 del_timer(&mp->tx_timeout); in mace_set_timeout()
529 mp->tx_timeout.expires = jiffies + TX_TIMEOUT; in mace_set_timeout()
530 add_timer(&mp->tx_timeout); in mace_set_timeout()
531 mp->timeout_active = 1; in mace_set_timeout()
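
mace_set_timeout() re-arms the watchdog by hand: cancel any pending timer, set a fresh expiry, add it back, and record that it is active. A sketch of that pattern with field names mirroring the driver (the struct itself is illustrative); note that mod_timer() is the more common idiom for this today:

```c
/*
 * Sketch of the re-arm pattern in mace_set_timeout(). Assumes the
 * caller already holds the driver lock, as mace.c does.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

#define MY_TX_TIMEOUT (HZ / 4)	/* placeholder timeout */

struct my_priv {
	struct timer_list tx_timeout;
	int timeout_active;
};

static void my_set_timeout(struct my_priv *p)
{
	if (p->timeout_active)
		del_timer(&p->tx_timeout);	/* drop the stale timer */
	p->tx_timeout.expires = jiffies + MY_TX_TIMEOUT;
	add_timer(&p->tx_timeout);
	p->timeout_active = 1;
}
```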
536 struct mace_data *mp = netdev_priv(dev); in mace_xmit_start() local
537 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_xmit_start()
543 spin_lock_irqsave(&mp->lock, flags); in mace_xmit_start()
544 fill = mp->tx_fill; in mace_xmit_start()
548 if (next == mp->tx_empty) { in mace_xmit_start()
550 mp->tx_fullup = 1; in mace_xmit_start()
551 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
554 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
562 mp->tx_bufs[fill] = skb; in mace_xmit_start()
563 cp = mp->tx_cmds + NCMDS_TX * fill; in mace_xmit_start()
567 np = mp->tx_cmds + NCMDS_TX * next; in mace_xmit_start()
571 spin_lock_irqsave(&mp->lock, flags); in mace_xmit_start()
572 mp->tx_fill = next; in mace_xmit_start()
573 if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) { in mace_xmit_start()
577 ++mp->tx_active; in mace_xmit_start()
582 if (next == mp->tx_empty) in mace_xmit_start()
584 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
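
The full-ring test at line 548 (next == mp->tx_empty) is the standard one-slot-reserved scheme: if advancing the producer index would land on the consumer index, the ring is full and the driver sets tx_fullup and stops the queue. Keeping one slot unused is what distinguishes "full" from "empty". A runnable sketch:

```c
#include <stdio.h>

#define N_TX_RING 4

/* Ring-full test as in mace_xmit_start(): the slot after tx_fill
 * colliding with tx_empty means every usable slot is in flight. */
static int ring_full(int tx_fill, int tx_empty)
{
	int next = tx_fill + 1;

	if (next >= N_TX_RING)
		next = 0;		/* wrap */
	return next == tx_empty;	/* full: would overrun consumer */
}

int main(void)
{
	printf("fill=3 empty=0 -> full=%d\n", ring_full(3, 0));
	printf("fill=1 empty=0 -> full=%d\n", ring_full(1, 0));
	return 0;
}
```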
591 struct mace_data *mp = netdev_priv(dev); in mace_set_multicast() local
592 volatile struct mace __iomem *mb = mp->mace; in mace_set_multicast()
597 spin_lock_irqsave(&mp->lock, flags); in mace_set_multicast()
598 mp->maccc &= ~PROM; in mace_set_multicast()
600 mp->maccc |= PROM; in mace_set_multicast()
624 if (mp->chipid == BROKEN_ADDRCHG_REV) in mace_set_multicast()
633 if (mp->chipid != BROKEN_ADDRCHG_REV) in mace_set_multicast()
637 out_8(&mb->maccc, mp->maccc); in mace_set_multicast()
638 spin_unlock_irqrestore(&mp->lock, flags); in mace_set_multicast()
641 static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev) in mace_handle_misc_intrs() argument
643 volatile struct mace __iomem *mb = mp->mace; in mace_handle_misc_intrs()
665 struct mace_data *mp = netdev_priv(dev); in mace_interrupt() local
666 volatile struct mace __iomem *mb = mp->mace; in mace_interrupt()
667 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_interrupt()
674 spin_lock_irqsave(&mp->lock, flags); in mace_interrupt()
677 mace_handle_misc_intrs(mp, intr, dev); in mace_interrupt()
679 i = mp->tx_empty; in mace_interrupt()
681 del_timer(&mp->tx_timeout); in mace_interrupt()
682 mp->timeout_active = 0; in mace_interrupt()
690 mace_handle_misc_intrs(mp, intr, dev); in mace_interrupt()
691 if (mp->tx_bad_runt) { in mace_interrupt()
693 mp->tx_bad_runt = 0; in mace_interrupt()
729 cp = mp->tx_cmds + NCMDS_TX * i; in mace_interrupt()
740 mp->tx_bad_runt = 1; in mace_interrupt()
757 if (i == mp->tx_fill) { in mace_interrupt()
770 dev->stats.tx_bytes += mp->tx_bufs[i]->len; in mace_interrupt()
773 dev_consume_skb_irq(mp->tx_bufs[i]); in mace_interrupt()
774 --mp->tx_active; in mace_interrupt()
783 if (i != mp->tx_empty) { in mace_interrupt()
784 mp->tx_fullup = 0; in mace_interrupt()
787 mp->tx_empty = i; in mace_interrupt()
788 i += mp->tx_active; in mace_interrupt()
791 if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) { in mace_interrupt()
794 cp = mp->tx_cmds + NCMDS_TX * i; in mace_interrupt()
797 ++mp->tx_active; in mace_interrupt()
800 } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE); in mace_interrupt()
804 spin_unlock_irqrestore(&mp->lock, flags); in mace_interrupt()
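
The mace_interrupt() matches around lines 770-788 show TX completion reaping: each finished slot's skb is released with dev_consume_skb_irq() (a successful transmit, unlike the dev_kfree_skb() drop in the timeout path), tx_active is decremented, and if any slot was reclaimed, tx_fullup is cleared so the queue can wake. A plain-C model of that loop, with placeholder indices:

```c
#include <stdio.h>

#define N_TX_RING 4

int main(void)
{
	int tx_empty = 0, tx_fill = 3, tx_active = 3;
	int i, reaped = 0;

	/* Reap completed transmit slots: free each skb, drop tx_active,
	 * advance the consumer index with wraparound. */
	for (i = tx_empty; i != tx_fill && tx_active > 0; ) {
		printf("reap slot %d\n", i);	/* dev_consume_skb_irq() */
		--tx_active;
		if (++i >= N_TX_RING)
			i = 0;
		reaped = 1;
	}
	if (reaped)	/* progress made: clear tx_fullup, wake the queue */
		printf("netif_wake_queue()\n");
	tx_empty = i;
	printf("tx_empty=%d tx_active=%d\n", tx_empty, tx_active);
	return 0;
}
```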
810 struct mace_data *mp = from_timer(mp, t, tx_timeout); in mace_tx_timeout() local
811 struct net_device *dev = macio_get_drvdata(mp->mdev); in mace_tx_timeout()
812 volatile struct mace __iomem *mb = mp->mace; in mace_tx_timeout()
813 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_tx_timeout()
814 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_tx_timeout()
819 spin_lock_irqsave(&mp->lock, flags); in mace_tx_timeout()
820 mp->timeout_active = 0; in mace_tx_timeout()
821 if (mp->tx_active == 0 && !mp->tx_bad_runt) in mace_tx_timeout()
825 mace_handle_misc_intrs(mp, in_8(&mb->ir), dev); in mace_tx_timeout()
827 cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; in mace_tx_timeout()
843 i = mp->tx_empty; in mace_tx_timeout()
844 mp->tx_active = 0; in mace_tx_timeout()
846 if (mp->tx_bad_runt) { in mace_tx_timeout()
847 mp->tx_bad_runt = 0; in mace_tx_timeout()
848 } else if (i != mp->tx_fill) { in mace_tx_timeout()
849 dev_kfree_skb(mp->tx_bufs[i]); in mace_tx_timeout()
852 mp->tx_empty = i; in mace_tx_timeout()
854 mp->tx_fullup = 0; in mace_tx_timeout()
856 if (i != mp->tx_fill) { in mace_tx_timeout()
857 cp = mp->tx_cmds + NCMDS_TX * i; in mace_tx_timeout()
862 ++mp->tx_active; in mace_tx_timeout()
868 out_8(&mb->maccc, mp->maccc); in mace_tx_timeout()
871 spin_unlock_irqrestore(&mp->lock, flags); in mace_tx_timeout()
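
Line 810 shows the from_timer() idiom: the timer_list is embedded in the private struct, and the expiry callback recovers the containing struct from the timer pointer (container_of() in disguise). A sketch with illustrative names (my_priv, my_tx_timeout, my_init):

```c
#include <linux/timer.h>

struct my_priv {
	struct timer_list tx_timeout;
	int timeout_active;
};

static void my_tx_timeout(struct timer_list *t)
{
	/* map &priv->tx_timeout back to priv, as mace_tx_timeout() does */
	struct my_priv *p = from_timer(p, t, tx_timeout);

	p->timeout_active = 0;
}

static void my_init(struct my_priv *p)
{
	/* registers my_tx_timeout as the expiry handler, matching the
	 * timer_setup() call at line 201 of the driver */
	timer_setup(&p->tx_timeout, my_tx_timeout, 0);
}
```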
882 struct mace_data *mp = netdev_priv(dev); in mace_rxdma_intr() local
883 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_rxdma_intr()
892 spin_lock_irqsave(&mp->lock, flags); in mace_rxdma_intr()
893 for (i = mp->rx_empty; i != mp->rx_fill; ) { in mace_rxdma_intr()
894 cp = mp->rx_cmds + i; in mace_rxdma_intr()
900 np = mp->rx_cmds + next; in mace_rxdma_intr()
901 if (next != mp->rx_fill && in mace_rxdma_intr()
911 skb = mp->rx_bufs[i]; in mace_rxdma_intr()
938 mp->rx_bufs[i] = NULL; in mace_rxdma_intr()
950 mp->rx_empty = i; in mace_rxdma_intr()
952 i = mp->rx_fill; in mace_rxdma_intr()
957 if (next == mp->rx_empty) in mace_rxdma_intr()
959 cp = mp->rx_cmds + i; in mace_rxdma_intr()
960 skb = mp->rx_bufs[i]; in mace_rxdma_intr()
965 mp->rx_bufs[i] = skb; in mace_rxdma_intr()
982 if (i != mp->rx_fill) { in mace_rxdma_intr()
984 mp->rx_fill = i; in mace_rxdma_intr()
986 spin_unlock_irqrestore(&mp->lock, flags); in mace_rxdma_intr()
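
mace_rxdma_intr() runs the RX ring in two phases: first it consumes completed slots from rx_empty up to rx_fill and hands their skbs to the stack, then it refills empty slots from rx_fill until only one free slot remains before rx_empty. A runnable plain-C model of that index bookkeeping (no DMA, placeholder values):

```c
#include <stdio.h>

#define N_RX_RING 4

int main(void)
{
	int rx_empty = 0, rx_fill = 2;
	int i, next;

	/* Phase 1: hand completed buffers to the stack. */
	for (i = rx_empty; i != rx_fill; ) {
		printf("consume slot %d\n", i);
		if (++i >= N_RX_RING)
			i = 0;
	}
	rx_empty = i;

	/* Phase 2: allocate fresh buffers behind the consumer,
	 * stopping one slot short so full and empty stay distinct. */
	for (i = rx_fill; ; i = next) {
		next = i + 1;
		if (next >= N_RX_RING)
			next = 0;
		if (next == rx_empty)
			break;
		printf("refill slot %d\n", i);
	}
	rx_fill = i;

	printf("rx_empty=%d rx_fill=%d\n", rx_empty, rx_fill);
	return 0;
}
```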