Lines Matching +full:x +full:- +full:rp

1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
27 http://www.scyld.com/network/via-rhine.html
28 [link no longer provides useful info -jgarzik]
34 #define DRV_NAME "via-rhine"
38 /* A few user-configurable values.
44 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
54 /* Work-around for broken BIOSes: they are unable to get the chip back out of
55 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
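
    Usage sketch (the parameter names match the MODULE_PARM_DESC lines at
    source 126-127; the values shown are only illustrative):

        # kernel command line, driver built in:
        via-rhine.avoid_D3=1
        # loadable module (the rx_copybreak value is just an example):
        modprobe via-rhine avoid_D3=1 rx_copybreak=200
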
63 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
64 The Rhine has a 64 element 8390-like hash table. */
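
    A minimal sketch of that 8390-like scheme, using the same idiom that
    appears later in rhine_set_rx_mode() (source line 2261); ha here is a
    multicast list entry as walked by netdev_for_each_mc_addr():

        u32 mc_filter[2] = { 0, 0 };  /* 64-bit hash table as two 32-bit words */
        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;  /* top 6 CRC bits */

        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);      /* set 1 bit of 64 */
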
75 * There are no ill effects from too-large receive rings.
78 #define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */
100 #include <linux/dma-mapping.h>
126 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
127 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
137 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
140 II. Board-specific settings
142 Boards with this chip are functional only in a bus-master PCI slot.
154 This driver uses two statically allocated fixed-size descriptor lists
160 This driver attempts to use a zero-copy receive and transmit scheme.
166 open() time and passes the skb->data field to the chip as receive data
173 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
174 using a full-sized skbuff for small frames vs. the copying costs of larger
185 has the beneficial effect of 16-byte aligning the IP header.
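
    A sketch of the copybreak path described here, with names borrowed from
    rhine_rx() further down (an outline under those assumptions, not the
    verbatim body): frames below rx_copybreak are copied into a minimally
    sized skb so the full-sized ring buffer goes straight back to the chip,
    and the 2-byte reserve leaves the IP header 16-byte aligned:

        if (pkt_len < rx_copybreak) {
            /* netdev_alloc_skb_ip_align() reserves NET_IP_ALIGN (2) bytes */
            skb = netdev_alloc_skb_ip_align(dev, pkt_len);
            if (skb) {
                dma_sync_single_for_cpu(hwdev, rp->rx_skbuff_dma[entry],
                                        rp->rx_buf_sz, DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb, rp->rx_skbuff[entry]->data,
                                        pkt_len);
                dma_sync_single_for_device(hwdev, rp->rx_skbuff_dma[entry],
                                           rp->rx_buf_sz, DMA_FROM_DEVICE);
            }
        }
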
189 The driver runs as two independent, single-threaded flows of control. One
190 is the send-packet routine, which enforces single-threaded use by the
191 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
195 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
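
    In code terms the convention reads roughly as follows (a sketch; the
    lock lives in struct rhine_private, reached via netdev_priv(dev)):

        /* transmit path, process/BH context */
        spin_lock_bh(&rp->lock);
        /* ... fill the next Tx descriptor, hand it to the chip ... */
        spin_unlock_bh(&rp->lock);

        /* the NAPI poll side already runs in softirq context, so it
         * takes the same lock without _bh (cf. rhine_napipoll() below) */
        spin_lock(&rp->lock);
        /* ... */
        spin_unlock(&rp->lock);
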
228 Note the matching code -- the first table entry matches all 56** cards but
250 rqWOL = 0x0001, /* Wake-On-LAN support */
263 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
280 /* OpenFirmware identifiers for platform-bus devices
285 { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
400 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
441 /* The addresses of receive-in-place skbuffs. */
445 /* The saved address of a sent-in-place packet/buffer, for later free(). */
449 /* Tx bounce buffers (Rhine-I only) */
481 #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0) argument
482 #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0) argument
483 #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0) argument
485 #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x)) argument
486 #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x)) argument
487 #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x)) argument
489 #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0) argument
490 #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0) argument
491 #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0) argument
493 #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0) argument
494 #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0) argument
495 #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0) argument
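
    Usage example, lifted from rhine_kick_tx_threshold() below: the _SET
    variants implement read-modify-write as (read & ~mask) | value, so the
    caller clears the field named by the mask and ORs in the replacement
    in one statement:

        rp->tx_thresh += 0x20;
        BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
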
521 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) in rhine_wait_bit() argument
523 void __iomem *ioaddr = rp->base; in rhine_wait_bit()
534 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle " in rhine_wait_bit()
539 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask) in rhine_wait_bit_high() argument
541 rhine_wait_bit(rp, reg, mask, false); in rhine_wait_bit_high()
544 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask) in rhine_wait_bit_low() argument
546 rhine_wait_bit(rp, reg, mask, true); in rhine_wait_bit_low()
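
    The polling loop inside rhine_wait_bit() is elided by the search; a
    reconstruction consistent with the debug message at source line 534
    (a bounded busy-wait that logs only when the wait ran long) could look
    like this -- the 1024/64 iteration bounds and the 10 us delay are
    assumptions:

        int i;

        for (i = 0; i < 1024; i++) {
            bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

            if (low ^ has_mask_bits)    /* bit reached the requested level */
                break;
            udelay(10);
        }
        if (i > 64)
            netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
                      "count: %04d\n", low ? "low" : "high", reg, mask, i);
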
549 static u32 rhine_get_events(struct rhine_private *rp) in rhine_get_events() argument
551 void __iomem *ioaddr = rp->base; in rhine_get_events()
555 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */ in rhine_get_events()
556 if (rp->quirks & rqStatusWBRace) in rhine_get_events()
561 static void rhine_ack_events(struct rhine_private *rp, u32 mask) in rhine_ack_events() argument
563 void __iomem *ioaddr = rp->base; in rhine_ack_events()
565 if (rp->quirks & rqStatusWBRace) in rhine_ack_events()
576 struct rhine_private *rp = netdev_priv(dev); in rhine_power_init() local
577 void __iomem *ioaddr = rp->base; in rhine_power_init()
580 if (rp->quirks & rqWOL) { in rhine_power_init()
584 /* Disable "force PME-enable" */ in rhine_power_init()
587 /* Clear power-event config bits (WOL) */ in rhine_power_init()
590 if (rp->quirks & rq6patterns) in rhine_power_init()
593 /* Save power-event status bits */ in rhine_power_init()
595 if (rp->quirks & rq6patterns) in rhine_power_init()
598 /* Clear power-event status bits */ in rhine_power_init()
600 if (rp->quirks & rq6patterns) in rhine_power_init()
632 struct rhine_private *rp = netdev_priv(dev); in rhine_chip_reset() local
633 void __iomem *ioaddr = rp->base; in rhine_chip_reset()
643 if (rp->quirks & rqForceReset) in rhine_chip_reset()
647 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); in rhine_chip_reset()
651 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? in rhine_chip_reset()
687 "MMIO do not match PIO [%02x] (%02x != %02x)\n", in verify_mmio()
689 return -EIO; in verify_mmio()
697 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
698 * (plus 0x6C for Rhine-I/II)
702 struct rhine_private *rp = netdev_priv(dev); in rhine_reload_eeprom() local
703 void __iomem *ioaddr = rp->base; in rhine_reload_eeprom()
715 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable in rhine_reload_eeprom()
717 * it is not known if that still works with the "win98-reboot" problem. in rhine_reload_eeprom()
719 enable_mmio(pioaddr, rp->quirks); in rhine_reload_eeprom()
721 /* Turn off EEPROM-controlled wake-up (magic packet) */ in rhine_reload_eeprom()
722 if (rp->quirks & rqWOL) in rhine_reload_eeprom()
730 struct rhine_private *rp = netdev_priv(dev); in rhine_poll() local
731 const int irq = rp->irq; in rhine_poll()
739 static void rhine_kick_tx_threshold(struct rhine_private *rp) in rhine_kick_tx_threshold() argument
741 if (rp->tx_thresh < 0xe0) { in rhine_kick_tx_threshold()
742 void __iomem *ioaddr = rp->base; in rhine_kick_tx_threshold()
744 rp->tx_thresh += 0x20; in rhine_kick_tx_threshold()
745 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig); in rhine_kick_tx_threshold()
749 static void rhine_tx_err(struct rhine_private *rp, u32 status) in rhine_tx_err() argument
751 struct net_device *dev = rp->dev; in rhine_tx_err()
754 netif_info(rp, tx_err, dev, in rhine_tx_err()
755 "Abort %08x, frame dropped\n", status); in rhine_tx_err()
759 rhine_kick_tx_threshold(rp); in rhine_tx_err()
760 netif_info(rp, tx_err, dev, "Transmitter underrun, " in rhine_tx_err()
761 "Tx threshold now %02x\n", rp->tx_thresh); in rhine_tx_err()
765 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n"); in rhine_tx_err()
769 rhine_kick_tx_threshold(rp); in rhine_tx_err()
770 netif_info(rp, tx_err, dev, "Unspecified error. " in rhine_tx_err()
771 "Tx threshold now %02x\n", rp->tx_thresh); in rhine_tx_err()
777 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) in rhine_update_rx_crc_and_missed_errord() argument
779 void __iomem *ioaddr = rp->base; in rhine_update_rx_crc_and_missed_errord()
780 struct net_device_stats *stats = &rp->dev->stats; in rhine_update_rx_crc_and_missed_errord()
782 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs); in rhine_update_rx_crc_and_missed_errord()
783 stats->rx_missed_errors += ioread16(ioaddr + RxMissed); in rhine_update_rx_crc_and_missed_errord()
818 struct rhine_private *rp = container_of(napi, struct rhine_private, napi); in rhine_napipoll() local
819 struct net_device *dev = rp->dev; in rhine_napipoll()
820 void __iomem *ioaddr = rp->base; in rhine_napipoll()
825 status = rhine_get_events(rp); in rhine_napipoll()
826 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW); in rhine_napipoll()
834 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn); in rhine_napipoll()
836 netif_warn(rp, tx_err, dev, "Tx still on\n"); in rhine_napipoll()
842 rhine_tx_err(rp, status); in rhine_napipoll()
846 spin_lock(&rp->lock); in rhine_napipoll()
847 rhine_update_rx_crc_and_missed_errord(rp); in rhine_napipoll()
848 spin_unlock(&rp->lock); in rhine_napipoll()
853 schedule_work(&rp->slow_event_task); in rhine_napipoll()
865 struct rhine_private *rp = netdev_priv(dev); in rhine_hw_init() local
870 /* Rhine-I needs extra time to recuperate before EEPROM reload */ in rhine_hw_init()
871 if (rp->quirks & rqRhineI) in rhine_hw_init()
875 if (dev_is_pci(dev->dev.parent)) in rhine_hw_init()
900 struct rhine_private *rp; in rhine_init_one_common() local
907 dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n"); in rhine_init_one_common()
913 rc = -ENOMEM; in rhine_init_one_common()
918 rp = netdev_priv(dev); in rhine_init_one_common()
919 rp->dev = dev; in rhine_init_one_common()
920 rp->quirks = quirks; in rhine_init_one_common()
921 rp->pioaddr = pioaddr; in rhine_init_one_common()
922 rp->base = ioaddr; in rhine_init_one_common()
923 rp->irq = irq; in rhine_init_one_common()
924 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); in rhine_init_one_common()
926 phy_id = rp->quirks & rqIntPHY ? 1 : 0; in rhine_init_one_common()
928 u64_stats_init(&rp->tx_stats.syncp); in rhine_init_one_common()
929 u64_stats_init(&rp->rx_stats.syncp); in rhine_init_one_common()
936 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i); in rhine_init_one_common()
938 if (!is_valid_ether_addr(dev->dev_addr)) { in rhine_init_one_common()
940 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr); in rhine_init_one_common()
943 dev->dev_addr); in rhine_init_one_common()
946 /* For Rhine-I/II, phy_id is loaded from EEPROM */ in rhine_init_one_common()
950 spin_lock_init(&rp->lock); in rhine_init_one_common()
951 mutex_init(&rp->task_lock); in rhine_init_one_common()
952 INIT_WORK(&rp->reset_task, rhine_reset_task); in rhine_init_one_common()
953 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); in rhine_init_one_common()
955 rp->mii_if.dev = dev; in rhine_init_one_common()
956 rp->mii_if.mdio_read = mdio_read; in rhine_init_one_common()
957 rp->mii_if.mdio_write = mdio_write; in rhine_init_one_common()
958 rp->mii_if.phy_id_mask = 0x1f; in rhine_init_one_common()
959 rp->mii_if.reg_num_mask = 0x1f; in rhine_init_one_common()
961 /* The chip-specific entries in the device structure. */ in rhine_init_one_common()
962 dev->netdev_ops = &rhine_netdev_ops; in rhine_init_one_common()
963 dev->ethtool_ops = &netdev_ethtool_ops; in rhine_init_one_common()
964 dev->watchdog_timeo = TX_TIMEOUT; in rhine_init_one_common()
966 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); in rhine_init_one_common()
968 if (rp->quirks & rqRhineI) in rhine_init_one_common()
969 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; in rhine_init_one_common()
971 if (rp->quirks & rqMgmt) in rhine_init_one_common()
972 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | in rhine_init_one_common()
976 /* dev->name not defined before register_netdev()! */ in rhine_init_one_common()
981 if (rp->quirks & rqRhineI) in rhine_init_one_common()
983 else if (rp->quirks & rqStatusWBRace) in rhine_init_one_common()
985 else if (rp->quirks & rqMgmt) in rhine_init_one_common()
991 name, ioaddr, dev->dev_addr, rp->irq); in rhine_init_one_common()
1001 rp->mii_if.advertising = mdio_read(dev, phy_id, 4); in rhine_init_one_common()
1003 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n", in rhine_init_one_common()
1005 mii_status, rp->mii_if.advertising, in rhine_init_one_common()
1016 rp->mii_if.phy_id = phy_id; in rhine_init_one_common()
1018 netif_info(rp, probe, dev, "No D3 power state at shutdown\n"); in rhine_init_one_common()
1031 struct device *hwdev = &pdev->dev; in rhine_init_one_pci()
1035 int io_size = pdev->revision < VTunknown0 ? 128 : 256; in rhine_init_one_pci()
1052 if (pdev->revision < VTunknown0) { in rhine_init_one_pci()
1054 } else if (pdev->revision >= VT6102) { in rhine_init_one_pci()
1056 if (pdev->revision < VT6105) { in rhine_init_one_pci()
1060 if (pdev->revision >= VT6105_B0) in rhine_init_one_pci()
1062 if (pdev->revision >= VT6105M) in rhine_init_one_pci()
1070 rc = -EIO; in rhine_init_one_pci()
1086 rc = -EIO; in rhine_init_one_pci()
1088 "ioremap failed for device %s, region 0x%X @ 0x%lX\n", in rhine_init_one_pci()
1099 rc = rhine_init_one_common(&pdev->dev, quirks, in rhine_init_one_pci()
1100 pioaddr, ioaddr, pdev->irq); in rhine_init_one_pci()
1120 quirks = of_device_get_match_data(&pdev->dev); in rhine_init_one_platform()
1122 return -EINVAL; in rhine_init_one_platform()
1128 irq = irq_of_parse_and_map(pdev->dev.of_node, 0); in rhine_init_one_platform()
1130 return -EINVAL; in rhine_init_one_platform()
1132 return rhine_init_one_common(&pdev->dev, *quirks, in rhine_init_one_platform()
1138 struct rhine_private *rp = netdev_priv(dev); in alloc_ring() local
1139 struct device *hwdev = dev->dev.parent; in alloc_ring()
1150 return -ENOMEM; in alloc_ring()
1152 if (rp->quirks & rqRhineI) { in alloc_ring()
1153 rp->tx_bufs = dma_alloc_coherent(hwdev, in alloc_ring()
1155 &rp->tx_bufs_dma, in alloc_ring()
1157 if (rp->tx_bufs == NULL) { in alloc_ring()
1162 return -ENOMEM; in alloc_ring()
1166 rp->rx_ring = ring; in alloc_ring()
1167 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
1168 rp->rx_ring_dma = ring_dma; in alloc_ring()
1169 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
1176 struct rhine_private *rp = netdev_priv(dev); in free_ring() local
1177 struct device *hwdev = dev->dev.parent; in free_ring()
1182 rp->rx_ring, rp->rx_ring_dma); in free_ring()
1183 rp->tx_ring = NULL; in free_ring()
1185 if (rp->tx_bufs) in free_ring()
1187 rp->tx_bufs, rp->tx_bufs_dma); in free_ring()
1189 rp->tx_bufs = NULL; in free_ring()
1201 struct rhine_private *rp = netdev_priv(dev); in rhine_skb_dma_init() local
1202 struct device *hwdev = dev->dev.parent; in rhine_skb_dma_init()
1203 const int size = rp->rx_buf_sz; in rhine_skb_dma_init()
1205 sd->skb = netdev_alloc_skb(dev, size); in rhine_skb_dma_init()
1206 if (!sd->skb) in rhine_skb_dma_init()
1207 return -ENOMEM; in rhine_skb_dma_init()
1209 sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE); in rhine_skb_dma_init()
1210 if (unlikely(dma_mapping_error(hwdev, sd->dma))) { in rhine_skb_dma_init()
1211 netif_err(rp, drv, dev, "Rx DMA mapping failure\n"); in rhine_skb_dma_init()
1212 dev_kfree_skb_any(sd->skb); in rhine_skb_dma_init()
1213 return -EIO; in rhine_skb_dma_init()
1219 static void rhine_reset_rbufs(struct rhine_private *rp) in rhine_reset_rbufs() argument
1223 rp->cur_rx = 0; in rhine_reset_rbufs()
1226 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); in rhine_reset_rbufs()
1229 static inline void rhine_skb_dma_nic_store(struct rhine_private *rp, in rhine_skb_dma_nic_store() argument
1232 rp->rx_skbuff_dma[entry] = sd->dma; in rhine_skb_dma_nic_store()
1233 rp->rx_skbuff[entry] = sd->skb; in rhine_skb_dma_nic_store()
1235 rp->rx_ring[entry].addr = cpu_to_le32(sd->dma); in rhine_skb_dma_nic_store()
1243 struct rhine_private *rp = netdev_priv(dev); in alloc_rbufs() local
1247 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in alloc_rbufs()
1248 next = rp->rx_ring_dma; in alloc_rbufs()
1252 rp->rx_ring[i].rx_status = 0; in alloc_rbufs()
1253 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); in alloc_rbufs()
1255 rp->rx_ring[i].next_desc = cpu_to_le32(next); in alloc_rbufs()
1256 rp->rx_skbuff[i] = NULL; in alloc_rbufs()
1259 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); in alloc_rbufs()
1271 rhine_skb_dma_nic_store(rp, &sd, i); in alloc_rbufs()
1274 rhine_reset_rbufs(rp); in alloc_rbufs()
1281 struct rhine_private *rp = netdev_priv(dev); in free_rbufs() local
1282 struct device *hwdev = dev->dev.parent; in free_rbufs()
1287 rp->rx_ring[i].rx_status = 0; in free_rbufs()
1288 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in free_rbufs()
1289 if (rp->rx_skbuff[i]) { in free_rbufs()
1291 rp->rx_skbuff_dma[i], in free_rbufs()
1292 rp->rx_buf_sz, DMA_FROM_DEVICE); in free_rbufs()
1293 dev_kfree_skb(rp->rx_skbuff[i]); in free_rbufs()
1295 rp->rx_skbuff[i] = NULL; in free_rbufs()
1301 struct rhine_private *rp = netdev_priv(dev); in alloc_tbufs() local
1305 rp->dirty_tx = rp->cur_tx = 0; in alloc_tbufs()
1306 next = rp->tx_ring_dma; in alloc_tbufs()
1308 rp->tx_skbuff[i] = NULL; in alloc_tbufs()
1309 rp->tx_ring[i].tx_status = 0; in alloc_tbufs()
1310 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); in alloc_tbufs()
1312 rp->tx_ring[i].next_desc = cpu_to_le32(next); in alloc_tbufs()
1313 if (rp->quirks & rqRhineI) in alloc_tbufs()
1314 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; in alloc_tbufs()
1316 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); in alloc_tbufs()
1323 struct rhine_private *rp = netdev_priv(dev); in free_tbufs() local
1324 struct device *hwdev = dev->dev.parent; in free_tbufs()
1328 rp->tx_ring[i].tx_status = 0; in free_tbufs()
1329 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); in free_tbufs()
1330 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in free_tbufs()
1331 if (rp->tx_skbuff[i]) { in free_tbufs()
1332 if (rp->tx_skbuff_dma[i]) { in free_tbufs()
1334 rp->tx_skbuff_dma[i], in free_tbufs()
1335 rp->tx_skbuff[i]->len, in free_tbufs()
1338 dev_kfree_skb(rp->tx_skbuff[i]); in free_tbufs()
1340 rp->tx_skbuff[i] = NULL; in free_tbufs()
1341 rp->tx_buf[i] = NULL; in free_tbufs()
1347 struct rhine_private *rp = netdev_priv(dev); in rhine_check_media() local
1348 void __iomem *ioaddr = rp->base; in rhine_check_media()
1350 if (!rp->mii_if.force_media) in rhine_check_media()
1351 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); in rhine_check_media()
1353 if (rp->mii_if.full_duplex) in rhine_check_media()
1360 netif_info(rp, link, dev, "force_media %d, carrier %d\n", in rhine_check_media()
1361 rp->mii_if.force_media, netif_carrier_ok(dev)); in rhine_check_media()
1367 struct net_device *dev = mii->dev; in rhine_set_carrier()
1368 struct rhine_private *rp = netdev_priv(dev); in rhine_set_carrier() local
1370 if (mii->force_media) { in rhine_set_carrier()
1378 netif_info(rp, link, dev, "force_media %d, carrier %d\n", in rhine_set_carrier()
1379 mii->force_media, netif_carrier_ok(dev)); in rhine_set_carrier()
1383 * rhine_set_cam - set CAM multicast filters
1385 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1397 /* Paranoid -- idx out of range should never happen */ in rhine_set_cam()
1398 idx &= (MCAM_SIZE - 1); in rhine_set_cam()
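
    Only the clamp survives in the listing; the rest of rhine_set_cam()
    presumably selects the CAM slot and writes the six address bytes before
    committing. A hedged outline (CamCon, CamAddr, the CAMC_* bits and
    MulticastFilter0 are register/bit names defined elsewhere in the driver;
    i and addr are the function's loop index and MAC byte pointer):

        iowrite8(CAMC_CAMEN, ioaddr + CamCon);          /* enable CAM access */
        iowrite8((u8)idx, ioaddr + CamAddr);            /* select the slot */
        for (i = 0; i < 6; i++, addr++)                 /* load the MAC bytes */
            iowrite8(*addr, ioaddr + MulticastFilter0 + i);
        iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon); /* commit entry */
        iowrite8(0, ioaddr + CamCon);                   /* leave CAM mode */
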
1414 * rhine_set_vlan_cam - set CAM VLAN filters
1416 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1426 /* Paranoid -- idx out of range should never happen */ in rhine_set_vlan_cam()
1427 idx &= (VCAM_SIZE - 1); in rhine_set_vlan_cam()
1442 * rhine_set_cam_mask - set multicast CAM mask
1461 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1480 * rhine_init_cam_filter - initialize CAM filters
1488 struct rhine_private *rp = netdev_priv(dev); in rhine_init_cam_filter() local
1489 void __iomem *ioaddr = rp->base; in rhine_init_cam_filter()
1501 * rhine_update_vcam - update VLAN CAM filters
1508 struct rhine_private *rp = netdev_priv(dev); in rhine_update_vcam() local
1509 void __iomem *ioaddr = rp->base; in rhine_update_vcam()
1514 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { in rhine_update_vcam()
1525 struct rhine_private *rp = netdev_priv(dev); in rhine_vlan_rx_add_vid() local
1527 spin_lock_bh(&rp->lock); in rhine_vlan_rx_add_vid()
1528 set_bit(vid, rp->active_vlans); in rhine_vlan_rx_add_vid()
1530 spin_unlock_bh(&rp->lock); in rhine_vlan_rx_add_vid()
1536 struct rhine_private *rp = netdev_priv(dev); in rhine_vlan_rx_kill_vid() local
1538 spin_lock_bh(&rp->lock); in rhine_vlan_rx_kill_vid()
1539 clear_bit(vid, rp->active_vlans); in rhine_vlan_rx_kill_vid()
1541 spin_unlock_bh(&rp->lock); in rhine_vlan_rx_kill_vid()
1547 struct rhine_private *rp = netdev_priv(dev); in init_registers() local
1548 void __iomem *ioaddr = rp->base; in init_registers()
1552 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); in init_registers()
1558 rp->tx_thresh = 0x20; in init_registers()
1559 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */ in init_registers()
1561 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); in init_registers()
1562 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); in init_registers()
1566 if (rp->quirks & rqMgmt) in init_registers()
1569 napi_enable(&rp->napi); in init_registers()
1578 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1579 static void rhine_enable_linkmon(struct rhine_private *rp) in rhine_enable_linkmon() argument
1581 void __iomem *ioaddr = rp->base; in rhine_enable_linkmon()
1587 rhine_wait_bit_high(rp, MIIRegAddr, 0x20); in rhine_enable_linkmon()
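
    The iowrite8 sequence in rhine_enable_linkmon() is elided here; the
    gist, hedged (the chip is told to auto-poll BMSR, and the code waits
    for the poll engine to signal readiness via MIIRegAddr bit 0x20 --
    exact register values are assumptions):

        iowrite8(0, ioaddr + MIICmd);                 /* stop any MII command */
        iowrite8(MII_BMSR, ioaddr + MIIRegAddr);      /* poll the status reg */
        iowrite8(0x80, ioaddr + MIICmd);              /* start auto-polling */
        rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
        iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr); /* latch poll target */
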
1592 /* Disable MII link status auto-polling (required for MDIO access) */
1593 static void rhine_disable_linkmon(struct rhine_private *rp) in rhine_disable_linkmon() argument
1595 void __iomem *ioaddr = rp->base; in rhine_disable_linkmon()
1599 if (rp->quirks & rqRhineI) { in rhine_disable_linkmon()
1608 rhine_wait_bit_high(rp, MIIRegAddr, 0x20); in rhine_disable_linkmon()
1614 rhine_wait_bit_high(rp, MIIRegAddr, 0x80); in rhine_disable_linkmon()
1621 struct rhine_private *rp = netdev_priv(dev); in mdio_read() local
1622 void __iomem *ioaddr = rp->base; in mdio_read()
1625 rhine_disable_linkmon(rp); in mdio_read()
1631 rhine_wait_bit_low(rp, MIICmd, 0x40); in mdio_read()
1634 rhine_enable_linkmon(rp); in mdio_read()
1640 struct rhine_private *rp = netdev_priv(dev); in mdio_write() local
1641 void __iomem *ioaddr = rp->base; in mdio_write()
1643 rhine_disable_linkmon(rp); in mdio_write()
1650 rhine_wait_bit_low(rp, MIICmd, 0x20); in mdio_write()
1652 rhine_enable_linkmon(rp); in mdio_write()
1655 static void rhine_task_disable(struct rhine_private *rp) in rhine_task_disable() argument
1657 mutex_lock(&rp->task_lock); in rhine_task_disable()
1658 rp->task_enable = false; in rhine_task_disable()
1659 mutex_unlock(&rp->task_lock); in rhine_task_disable()
1661 cancel_work_sync(&rp->slow_event_task); in rhine_task_disable()
1662 cancel_work_sync(&rp->reset_task); in rhine_task_disable()
1665 static void rhine_task_enable(struct rhine_private *rp) in rhine_task_enable() argument
1667 mutex_lock(&rp->task_lock); in rhine_task_enable()
1668 rp->task_enable = true; in rhine_task_enable()
1669 mutex_unlock(&rp->task_lock); in rhine_task_enable()
1674 struct rhine_private *rp = netdev_priv(dev); in rhine_open() local
1675 void __iomem *ioaddr = rp->base; in rhine_open()
1678 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev); in rhine_open()
1682 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq); in rhine_open()
1693 enable_mmio(rp->pioaddr, rp->quirks); in rhine_open()
1696 rhine_task_enable(rp); in rhine_open()
1699 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", in rhine_open()
1701 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); in rhine_open()
1711 free_irq(rp->irq, dev); in rhine_open()
1717 struct rhine_private *rp = container_of(work, struct rhine_private, in rhine_reset_task() local
1719 struct net_device *dev = rp->dev; in rhine_reset_task()
1721 mutex_lock(&rp->task_lock); in rhine_reset_task()
1723 if (!rp->task_enable) in rhine_reset_task()
1726 napi_disable(&rp->napi); in rhine_reset_task()
1728 spin_lock_bh(&rp->lock); in rhine_reset_task()
1734 rhine_reset_rbufs(rp); in rhine_reset_task()
1740 spin_unlock_bh(&rp->lock); in rhine_reset_task()
1743 dev->stats.tx_errors++; in rhine_reset_task()
1747 mutex_unlock(&rp->task_lock); in rhine_reset_task()
1752 struct rhine_private *rp = netdev_priv(dev); in rhine_tx_timeout() local
1753 void __iomem *ioaddr = rp->base; in rhine_tx_timeout()
1755 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n", in rhine_tx_timeout()
1757 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); in rhine_tx_timeout()
1759 schedule_work(&rp->reset_task); in rhine_tx_timeout()
1762 static inline bool rhine_tx_queue_full(struct rhine_private *rp) in rhine_tx_queue_full() argument
1764 return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN; in rhine_tx_queue_full()
1770 struct rhine_private *rp = netdev_priv(dev); in rhine_start_tx() local
1771 struct device *hwdev = dev->dev.parent; in rhine_start_tx()
1772 void __iomem *ioaddr = rp->base; in rhine_start_tx()
1779 entry = rp->cur_tx % TX_RING_SIZE; in rhine_start_tx()
1784 rp->tx_skbuff[entry] = skb; in rhine_start_tx()
1786 if ((rp->quirks & rqRhineI) && in rhine_start_tx()
1787 …(((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PA… in rhine_start_tx()
1789 if (skb->len > PKT_BUF_SZ) { in rhine_start_tx()
1792 rp->tx_skbuff[entry] = NULL; in rhine_start_tx()
1793 dev->stats.tx_dropped++; in rhine_start_tx()
1798 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); in rhine_start_tx()
1799 if (skb->len < ETH_ZLEN) in rhine_start_tx()
1800 memset(rp->tx_buf[entry] + skb->len, 0, in rhine_start_tx()
1801 ETH_ZLEN - skb->len); in rhine_start_tx()
1802 rp->tx_skbuff_dma[entry] = 0; in rhine_start_tx()
1803 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + in rhine_start_tx()
1804 (rp->tx_buf[entry] - in rhine_start_tx()
1805 rp->tx_bufs)); in rhine_start_tx()
1807 rp->tx_skbuff_dma[entry] = in rhine_start_tx()
1808 dma_map_single(hwdev, skb->data, skb->len, in rhine_start_tx()
1810 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) { in rhine_start_tx()
1812 rp->tx_skbuff_dma[entry] = 0; in rhine_start_tx()
1813 dev->stats.tx_dropped++; in rhine_start_tx()
1816 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); in rhine_start_tx()
1819 rp->tx_ring[entry].desc_length = in rhine_start_tx()
1820 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); in rhine_start_tx()
1828 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); in rhine_start_tx()
1830 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); in rhine_start_tx()
1833 rp->tx_ring[entry].tx_status = 0; in rhine_start_tx()
1835 netdev_sent_queue(dev, skb->len); in rhine_start_tx()
1838 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); in rhine_start_tx()
1841 rp->cur_tx++; in rhine_start_tx()
1849 /* Non-x86 Todo: explicitly flush cache lines here. */ in rhine_start_tx()
1852 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ in rhine_start_tx()
1855 /* Wake the potentially-idle transmit channel */ in rhine_start_tx()
1860 /* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */ in rhine_start_tx()
1861 if (rhine_tx_queue_full(rp)) { in rhine_start_tx()
1865 if (!rhine_tx_queue_full(rp)) in rhine_start_tx()
1869 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n", in rhine_start_tx()
1870 rp->cur_tx - 1, entry); in rhine_start_tx()
1875 static void rhine_irq_disable(struct rhine_private *rp) in rhine_irq_disable() argument
1877 iowrite16(0x0000, rp->base + IntrEnable); in rhine_irq_disable()
1885 struct rhine_private *rp = netdev_priv(dev); in rhine_interrupt() local
1889 status = rhine_get_events(rp); in rhine_interrupt()
1891 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); in rhine_interrupt()
1896 rhine_irq_disable(rp); in rhine_interrupt()
1897 napi_schedule(&rp->napi); in rhine_interrupt()
1901 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", in rhine_interrupt()
1912 struct rhine_private *rp = netdev_priv(dev); in rhine_tx() local
1913 struct device *hwdev = dev->dev.parent; in rhine_tx()
1915 unsigned int dirty_tx = rp->dirty_tx; in rhine_tx()
1926 cur_tx = rp->cur_tx; in rhine_tx()
1930 u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); in rhine_tx()
1932 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", in rhine_tx()
1936 skb = rp->tx_skbuff[entry]; in rhine_tx()
1938 netif_dbg(rp, tx_done, dev, in rhine_tx()
1939 "Transmit error, Tx status %08x\n", txstatus); in rhine_tx()
1940 dev->stats.tx_errors++; in rhine_tx()
1942 dev->stats.tx_carrier_errors++; in rhine_tx()
1944 dev->stats.tx_window_errors++; in rhine_tx()
1946 dev->stats.tx_aborted_errors++; in rhine_tx()
1948 dev->stats.tx_heartbeat_errors++; in rhine_tx()
1949 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || in rhine_tx()
1951 dev->stats.tx_fifo_errors++; in rhine_tx()
1952 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); in rhine_tx()
1953 break; /* Keep the skb - we try again */ in rhine_tx()
1957 if (rp->quirks & rqRhineI) in rhine_tx()
1958 dev->stats.collisions += (txstatus >> 3) & 0x0F; in rhine_tx()
1960 dev->stats.collisions += txstatus & 0x0F; in rhine_tx()
1961 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", in rhine_tx()
1964 u64_stats_update_begin(&rp->tx_stats.syncp); in rhine_tx()
1965 rp->tx_stats.bytes += skb->len; in rhine_tx()
1966 rp->tx_stats.packets++; in rhine_tx()
1967 u64_stats_update_end(&rp->tx_stats.syncp); in rhine_tx()
1970 if (rp->tx_skbuff_dma[entry]) { in rhine_tx()
1972 rp->tx_skbuff_dma[entry], in rhine_tx()
1973 skb->len, in rhine_tx()
1976 bytes_compl += skb->len; in rhine_tx()
1979 rp->tx_skbuff[entry] = NULL; in rhine_tx()
1983 rp->dirty_tx = dirty_tx; in rhine_tx()
1989 /* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */ in rhine_tx()
1990 if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) { in rhine_tx()
1994 if (rhine_tx_queue_full(rp)) in rhine_tx()
2000 * rhine_get_vlan_tci - extract TCI from Rx data buffer
2005 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
2010 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2; in rhine_get_vlan_tci()
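
    Only the trailer computation is visible; rhine_get_vlan_tci() plausibly
    finishes by reading the big-endian TCI out of those two bytes, e.g.:

        /* trailer points past the 2-byte TPID; the TCI follows in
         * network byte order */
        return be16_to_cpup((__be16 *)trailer);
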
2018 if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) { in rhine_rx_vlan_tag()
2029 struct rhine_private *rp = netdev_priv(dev); in rhine_rx() local
2030 struct device *hwdev = dev->dev.parent; in rhine_rx()
2031 int entry = rp->cur_rx % RX_RING_SIZE; in rhine_rx()
2034 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__, in rhine_rx()
2035 entry, le32_to_cpu(rp->rx_ring[entry].rx_status)); in rhine_rx()
2039 struct rx_desc *desc = rp->rx_ring + entry; in rhine_rx()
2040 u32 desc_status = le32_to_cpu(desc->rx_status); in rhine_rx()
2046 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__, in rhine_rx()
2053 "entry %#x length %d status %08x!\n", in rhine_rx()
2056 dev->stats.rx_length_errors++; in rhine_rx()
2059 netif_dbg(rp, rx_err, dev, in rhine_rx()
2060 "%s() Rx error %08x\n", __func__, in rhine_rx()
2062 dev->stats.rx_errors++; in rhine_rx()
2064 dev->stats.rx_length_errors++; in rhine_rx()
2066 dev->stats.rx_fifo_errors++; in rhine_rx()
2068 dev->stats.rx_frame_errors++; in rhine_rx()
2071 spin_lock(&rp->lock); in rhine_rx()
2072 dev->stats.rx_crc_errors++; in rhine_rx()
2073 spin_unlock(&rp->lock); in rhine_rx()
2078 int pkt_len = data_size - 4; in rhine_rx()
2082 copying to a minimally-sized skbuff. */ in rhine_rx()
2089 rp->rx_skbuff_dma[entry], in rhine_rx()
2090 rp->rx_buf_sz, in rhine_rx()
2094 rp->rx_skbuff[entry]->data, in rhine_rx()
2098 rp->rx_skbuff_dma[entry], in rhine_rx()
2099 rp->rx_buf_sz, in rhine_rx()
2107 skb = rp->rx_skbuff[entry]; in rhine_rx()
2110 rp->rx_skbuff_dma[entry], in rhine_rx()
2111 rp->rx_buf_sz, in rhine_rx()
2113 rhine_skb_dma_nic_store(rp, &sd, entry); in rhine_rx()
2120 skb->protocol = eth_type_trans(skb, dev); in rhine_rx()
2124 u64_stats_update_begin(&rp->rx_stats.syncp); in rhine_rx()
2125 rp->rx_stats.bytes += pkt_len; in rhine_rx()
2126 rp->rx_stats.packets++; in rhine_rx()
2127 u64_stats_update_end(&rp->rx_stats.syncp); in rhine_rx()
2130 desc->rx_status = cpu_to_le32(DescOwn); in rhine_rx()
2131 entry = (++rp->cur_rx) % RX_RING_SIZE; in rhine_rx()
2137 dev->stats.rx_dropped++; in rhine_rx()
2142 struct rhine_private *rp = netdev_priv(dev); in rhine_restart_tx() local
2143 void __iomem *ioaddr = rp->base; in rhine_restart_tx()
2144 int entry = rp->dirty_tx % TX_RING_SIZE; in rhine_restart_tx()
2151 intr_status = rhine_get_events(rp); in rhine_restart_tx()
2156 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), in rhine_restart_tx()
2162 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) in rhine_restart_tx()
2163 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */ in rhine_restart_tx()
2172 netif_warn(rp, tx_err, dev, "another error occurred %08x\n", in rhine_restart_tx()
2180 struct rhine_private *rp = in rhine_slow_event_task() local
2182 struct net_device *dev = rp->dev; in rhine_slow_event_task()
2185 mutex_lock(&rp->task_lock); in rhine_slow_event_task()
2187 if (!rp->task_enable) in rhine_slow_event_task()
2190 intr_status = rhine_get_events(rp); in rhine_slow_event_task()
2191 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); in rhine_slow_event_task()
2197 netif_warn(rp, hw, dev, "PCI error\n"); in rhine_slow_event_task()
2199 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); in rhine_slow_event_task()
2202 mutex_unlock(&rp->task_lock); in rhine_slow_event_task()
2208 struct rhine_private *rp = netdev_priv(dev); in rhine_get_stats64() local
2211 spin_lock_bh(&rp->lock); in rhine_get_stats64()
2212 rhine_update_rx_crc_and_missed_errord(rp); in rhine_get_stats64()
2213 spin_unlock_bh(&rp->lock); in rhine_get_stats64()
2215 netdev_stats_to_stats64(stats, &dev->stats); in rhine_get_stats64()
2218 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); in rhine_get_stats64()
2219 stats->rx_packets = rp->rx_stats.packets; in rhine_get_stats64()
2220 stats->rx_bytes = rp->rx_stats.bytes; in rhine_get_stats64()
2221 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); in rhine_get_stats64()
2224 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); in rhine_get_stats64()
2225 stats->tx_packets = rp->tx_stats.packets; in rhine_get_stats64()
2226 stats->tx_bytes = rp->tx_stats.bytes; in rhine_get_stats64()
2227 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); in rhine_get_stats64()
2232 struct rhine_private *rp = netdev_priv(dev); in rhine_set_rx_mode() local
2233 void __iomem *ioaddr = rp->base; in rhine_set_rx_mode()
2238 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ in rhine_set_rx_mode()
2243 (dev->flags & IFF_ALLMULTI)) { in rhine_set_rx_mode()
2247 } else if (rp->quirks & rqMgmt) { in rhine_set_rx_mode()
2253 rhine_set_cam(ioaddr, i, ha->addr); in rhine_set_rx_mode()
2261 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; in rhine_set_rx_mode()
2269 if (rp->quirks & rqMgmt) { in rhine_set_rx_mode()
2270 if (dev->flags & IFF_PROMISC) in rhine_set_rx_mode()
2280 struct device *hwdev = dev->dev.parent; in netdev_get_drvinfo()
2282 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); in netdev_get_drvinfo()
2283 strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info)); in netdev_get_drvinfo()
2289 struct rhine_private *rp = netdev_priv(dev); in netdev_get_link_ksettings() local
2291 mutex_lock(&rp->task_lock); in netdev_get_link_ksettings()
2292 mii_ethtool_get_link_ksettings(&rp->mii_if, cmd); in netdev_get_link_ksettings()
2293 mutex_unlock(&rp->task_lock); in netdev_get_link_ksettings()
2301 struct rhine_private *rp = netdev_priv(dev); in netdev_set_link_ksettings() local
2304 mutex_lock(&rp->task_lock); in netdev_set_link_ksettings()
2305 rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd); in netdev_set_link_ksettings()
2306 rhine_set_carrier(&rp->mii_if); in netdev_set_link_ksettings()
2307 mutex_unlock(&rp->task_lock); in netdev_set_link_ksettings()
2314 struct rhine_private *rp = netdev_priv(dev); in netdev_nway_reset() local
2316 return mii_nway_restart(&rp->mii_if); in netdev_nway_reset()
2321 struct rhine_private *rp = netdev_priv(dev); in netdev_get_link() local
2323 return mii_link_ok(&rp->mii_if); in netdev_get_link()
2328 struct rhine_private *rp = netdev_priv(dev); in netdev_get_msglevel() local
2330 return rp->msg_enable; in netdev_get_msglevel()
2335 struct rhine_private *rp = netdev_priv(dev); in netdev_set_msglevel() local
2337 rp->msg_enable = value; in netdev_set_msglevel()
2342 struct rhine_private *rp = netdev_priv(dev); in rhine_get_wol() local
2344 if (!(rp->quirks & rqWOL)) in rhine_get_wol()
2347 spin_lock_irq(&rp->lock); in rhine_get_wol()
2348 wol->supported = WAKE_PHY | WAKE_MAGIC | in rhine_get_wol()
2350 wol->wolopts = rp->wolopts; in rhine_get_wol()
2351 spin_unlock_irq(&rp->lock); in rhine_get_wol()
2356 struct rhine_private *rp = netdev_priv(dev); in rhine_set_wol() local
2360 if (!(rp->quirks & rqWOL)) in rhine_set_wol()
2361 return -EINVAL; in rhine_set_wol()
2363 if (wol->wolopts & ~support) in rhine_set_wol()
2364 return -EINVAL; in rhine_set_wol()
2366 spin_lock_irq(&rp->lock); in rhine_set_wol()
2367 rp->wolopts = wol->wolopts; in rhine_set_wol()
2368 spin_unlock_irq(&rp->lock); in rhine_set_wol()
2387 struct rhine_private *rp = netdev_priv(dev); in netdev_ioctl() local
2391 return -EINVAL; in netdev_ioctl()
2393 mutex_lock(&rp->task_lock); in netdev_ioctl()
2394 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); in netdev_ioctl()
2395 rhine_set_carrier(&rp->mii_if); in netdev_ioctl()
2396 mutex_unlock(&rp->task_lock); in netdev_ioctl()
2403 struct rhine_private *rp = netdev_priv(dev); in rhine_close() local
2404 void __iomem *ioaddr = rp->base; in rhine_close()
2406 rhine_task_disable(rp); in rhine_close()
2407 napi_disable(&rp->napi); in rhine_close()
2410 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n", in rhine_close()
2414 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); in rhine_close()
2416 rhine_irq_disable(rp); in rhine_close()
2421 free_irq(rp->irq, dev); in rhine_close()
2433 struct rhine_private *rp = netdev_priv(dev); in rhine_remove_one_pci() local
2437 pci_iounmap(pdev, rp->base); in rhine_remove_one_pci()
2447 struct rhine_private *rp = netdev_priv(dev); in rhine_remove_one_platform() local
2451 iounmap(rp->base); in rhine_remove_one_platform()
2461 struct rhine_private *rp = netdev_priv(dev); in rhine_shutdown_pci() local
2462 void __iomem *ioaddr = rp->base; in rhine_shutdown_pci()
2464 if (!(rp->quirks & rqWOL)) in rhine_shutdown_pci()
2465 return; /* Nothing to do for non-WOL adapters */ in rhine_shutdown_pci()
2470 if (rp->quirks & rq6patterns) in rhine_shutdown_pci()
2473 spin_lock(&rp->lock); in rhine_shutdown_pci()
2475 if (rp->wolopts & WAKE_MAGIC) { in rhine_shutdown_pci()
2478 * Turn EEPROM-controlled wake-up back on -- some hardware may in rhine_shutdown_pci()
2484 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) in rhine_shutdown_pci()
2487 if (rp->wolopts & WAKE_PHY) in rhine_shutdown_pci()
2490 if (rp->wolopts & WAKE_UCAST) in rhine_shutdown_pci()
2493 if (rp->wolopts) { in rhine_shutdown_pci()
2499 spin_unlock(&rp->lock); in rhine_shutdown_pci()
2513 struct rhine_private *rp = netdev_priv(dev); in rhine_suspend() local
2518 rhine_task_disable(rp); in rhine_suspend()
2519 rhine_irq_disable(rp); in rhine_suspend()
2520 napi_disable(&rp->napi); in rhine_suspend()
2533 struct rhine_private *rp = netdev_priv(dev); in rhine_resume() local
2538 enable_mmio(rp->pioaddr, rp->quirks); in rhine_resume()
2542 rhine_reset_rbufs(rp); in rhine_resume()
2543 rhine_task_enable(rp); in rhine_resume()
2544 spin_lock_bh(&rp->lock); in rhine_resume()
2546 spin_unlock_bh(&rp->lock); in rhine_resume()
2583 .ident = "EPIA-M",