Lines matching +full:mii +full:- +full:rt in the RTL-8139C+ 10/100 PCI Ethernet driver (8139cp.c). Each hit below shows the source line number, the matching line and, where applicable, the enclosing function.

3 	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
33 * Investigate using skb->priority with h/w VLAN priority
34 * Investigate using High Priority Tx Queue with skb->priority
41 * Support external MII transceivers (patch available)
65 #include <linux/dma-mapping.h>
69 #include <linux/mii.h>
86 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
90 static int debug = -1;
94 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
113 #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
114 #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
116 (((CP)->tx_tail <= (CP)->tx_head) ? \
117 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
118 (CP)->tx_tail - (CP)->tx_head - 1)
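
The NEXT_TX()/NEXT_RX() macros (file lines 113-114) rely on the ring sizes being powers of two, so advancing an index wraps with a single AND; the TX_BUFFS_AVAIL() expression (file lines 116-118) counts free descriptors between head and tail while always leaving one slot unused, so a full ring stays distinguishable from an empty one. A minimal sketch of the same arithmetic, assuming an illustrative ring of 64 entries (the actual CP_*_RING_SIZE values are not shown in these hits):

    /* Illustrative sketch, not driver code: power-of-two ring index math. */
    #define RING_SIZE 64                        /* assumed size, must be 2^n */
    #define NEXT(idx) (((idx) + 1) & (RING_SIZE - 1))

    static unsigned int bufs_avail(unsigned int head, unsigned int tail)
    {
        /* head == tail means empty, so at most RING_SIZE - 1 slots are usable */
        return (tail <= head) ? tail + (RING_SIZE - 1) - head
                              : tail - head - 1;
    }
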
123 /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
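
The comment at file line 123 describes the log2 encoding used by the Tx threshold and DMA burst fields (see TxDMAShift and TxThreshMask at file lines 260 and 263): a field value n stands for 2^(n+4) bytes, so 0 is 16 bytes, 5 is 512 bytes, 6 is 1024 bytes, and 7 is the special "end of packet" setting. A one-line decode of that encoding, for illustration only:

    /* Illustrative decode of the log_2(bytes)-4 encoding described above;
     * not part of the driver. */
    static unsigned int thresh_bytes(unsigned int field)
    {
        return (field < 7) ? (16u << field) : 0;   /* 7 means "whole packet" */
    }
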
140 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
141 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
142 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
147 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
155 BasicModeCtrl = 0x62, /* MII BMCR */
156 BasicModeStatus = 0x64, /* MII BMSR */
157 NWayAdvert = 0x66, /* MII ADVERTISE */
158 NWayLPAR = 0x68, /* MII LPA */
159 NWayExpansion = 0x6A, /* MII Expansion */
166 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
190 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
196 TxOWC = (1 << 22), /* Tx Out-of-window collision */
199 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
200 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
225 SWInt = (1 << 8), /* Software-requested interrupt */
242 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
247 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
249 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
255 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
256 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
260 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
263 TxThreshMask = 0x3f, /* Mask bits 5-0 */
272 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
274 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
348 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
355 #define cpr8(reg) readb(cp->regs + (reg))
356 #define cpr16(reg) readw(cp->regs + (reg))
357 #define cpr32(reg) readl(cp->regs + (reg))
358 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
359 #define cpw16(reg,val) writew((val), cp->regs + (reg))
360 #define cpw32(reg,val) writel((val), cp->regs + (reg))
362 writeb((val), cp->regs + (reg)); \
363 readb(cp->regs + (reg)); \
366 writew((val), cp->regs + (reg)); \
367 readw(cp->regs + (reg)); \
370 writel((val), cp->regs + (reg)); \
371 readl(cp->regs + (reg)); \
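
The cpr8/16/32 and cpw8/16/32 macros (file lines 355-360) are thin wrappers around the readb/readw/readl and writeb/writew/writel MMIO accessors; the flush variants whose bodies appear at file lines 362-371 read the register back immediately after writing it, forcing the posted PCI write to reach the chip before the driver continues. A hedged sketch of the same idiom with a hypothetical helper name:

    /* Sketch of the write-then-read-back ("flush") idiom used by cpw*_f;
     * the helper name is illustrative, not the driver's. */
    #include <linux/io.h>

    static inline void mmio_write32_flush(void __iomem *reg, u32 val)
    {
        writel(val, reg);
        (void)readl(reg);   /* read back to flush the posted write */
    }
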
409 unsigned int mtu = cp->dev->mtu; in cp_set_rxbufsize()
413 cp->rx_buf_sz = mtu + ETH_HLEN + 8; in cp_set_rxbufsize()
415 cp->rx_buf_sz = PKT_BUF_SZ; in cp_set_rxbufsize()
421 u32 opts2 = le32_to_cpu(desc->opts2); in cp_rx_skb()
423 skb->protocol = eth_type_trans (skb, cp->dev); in cp_rx_skb()
425 cp->dev->stats.rx_packets++; in cp_rx_skb()
426 cp->dev->stats.rx_bytes += skb->len; in cp_rx_skb()
431 napi_gro_receive(&cp->napi, skb); in cp_rx_skb()
437 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n", in cp_rx_err_acct()
439 cp->dev->stats.rx_errors++; in cp_rx_err_acct()
441 cp->dev->stats.rx_frame_errors++; in cp_rx_err_acct()
443 cp->dev->stats.rx_crc_errors++; in cp_rx_err_acct()
445 cp->dev->stats.rx_length_errors++; in cp_rx_err_acct()
447 cp->dev->stats.rx_length_errors++; in cp_rx_err_acct()
449 cp->dev->stats.rx_fifo_errors++; in cp_rx_err_acct()
466 struct net_device *dev = cp->dev; in cp_rx_poll()
467 unsigned int rx_tail = cp->rx_tail; in cp_rx_poll()
477 const unsigned buflen = cp->rx_buf_sz; in cp_rx_poll()
479 skb = cp->rx_skb[rx_tail]; in cp_rx_poll()
482 desc = &cp->rx_ring[rx_tail]; in cp_rx_poll()
483 status = le32_to_cpu(desc->opts1); in cp_rx_poll()
487 len = (status & 0x1fff) - 4; in cp_rx_poll()
488 mapping = le64_to_cpu(desc->addr); in cp_rx_poll()
493 * pre-allocated RX skbs are properly sized such in cp_rx_poll()
497 dev->stats.rx_dropped++; in cp_rx_poll()
498 cp->cp_stats.rx_frags++; in cp_rx_poll()
512 dev->stats.rx_dropped++; in cp_rx_poll()
516 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, in cp_rx_poll()
518 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { in cp_rx_poll()
519 dev->stats.rx_dropped++; in cp_rx_poll()
524 dma_unmap_single(&cp->pdev->dev, mapping, in cp_rx_poll()
529 skb->ip_summed = CHECKSUM_UNNECESSARY; in cp_rx_poll()
535 cp->rx_skb[rx_tail] = new_skb; in cp_rx_poll()
542 cp->rx_ring[rx_tail].opts2 = 0; in cp_rx_poll()
543 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); in cp_rx_poll()
544 if (rx_tail == (CP_RX_RING_SIZE - 1)) in cp_rx_poll()
545 desc->opts1 = cpu_to_le32(DescOwn | RingEnd | in cp_rx_poll()
546 cp->rx_buf_sz); in cp_rx_poll()
548 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_rx_poll()
552 cp->rx_tail = rx_tail; in cp_rx_poll()
560 spin_lock_irqsave(&cp->lock, flags); in cp_rx_poll()
562 spin_unlock_irqrestore(&cp->lock, flags); in cp_rx_poll()
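
Within cp_rx_poll() (file lines 466-562 above), each completed buffer is replaced by a freshly mapped skb and its descriptor is handed back to the NIC: opts2 is cleared, the new DMA address is written, and opts1 is written last with DescOwn (plus RingEnd on the final slot) so ownership only transfers once the descriptor is consistent (file lines 542-549). A minimal restatement of that re-arm step using the field and flag names visible above; the helper itself is not part of the driver:

    /* Illustrative re-arm of one Rx descriptor (mirrors file lines 542-549). */
    static void rearm_rx_desc(struct cp_desc *desc, dma_addr_t mapping,
                              unsigned int buf_sz, bool last_in_ring)
    {
        desc->opts2 = 0;
        desc->addr  = cpu_to_le64(mapping);
        /* opts1 goes last: setting DescOwn gives the buffer back to the NIC */
        desc->opts1 = cpu_to_le32(DescOwn | (last_in_ring ? RingEnd : 0) |
                                  buf_sz);
    }
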
580 spin_lock(&cp->lock); in cp_interrupt()
604 if (napi_schedule_prep(&cp->napi)) { in cp_interrupt()
606 __napi_schedule(&cp->napi); in cp_interrupt()
612 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); in cp_interrupt()
618 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status); in cp_interrupt()
619 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status); in cp_interrupt()
627 spin_unlock(&cp->lock); in cp_interrupt()
634 * Polling receive - used by netconsole and other diagnostic tools
640 const int irq = cp->pdev->irq; in cp_poll_controller()
650 unsigned tx_head = cp->tx_head; in cp_tx()
651 unsigned tx_tail = cp->tx_tail; in cp_tx()
655 struct cp_desc *txd = cp->tx_ring + tx_tail; in cp_tx()
660 status = le32_to_cpu(txd->opts1); in cp_tx()
664 skb = cp->tx_skb[tx_tail]; in cp_tx()
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), in cp_tx()
668 cp->tx_opts[tx_tail] & 0xffff, in cp_tx()
673 netif_dbg(cp, tx_err, cp->dev, in cp_tx()
675 cp->dev->stats.tx_errors++; in cp_tx()
677 cp->dev->stats.tx_window_errors++; in cp_tx()
679 cp->dev->stats.tx_aborted_errors++; in cp_tx()
681 cp->dev->stats.tx_carrier_errors++; in cp_tx()
683 cp->dev->stats.tx_fifo_errors++; in cp_tx()
685 cp->dev->stats.collisions += in cp_tx()
687 cp->dev->stats.tx_packets++; in cp_tx()
688 cp->dev->stats.tx_bytes += skb->len; in cp_tx()
689 netif_dbg(cp, tx_done, cp->dev, in cp_tx()
692 bytes_compl += skb->len; in cp_tx()
697 cp->tx_skb[tx_tail] = NULL; in cp_tx()
702 cp->tx_tail = tx_tail; in cp_tx()
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl); in cp_tx()
706 netif_wake_queue(cp->dev); in cp_tx()
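
cp_tx() (file lines 650-706) is the transmit completion path: it walks descriptors the NIC has released, unmaps the buffers, accounts errors and collisions, and reports the reclaimed packets and bytes through netdev_completed_queue() (file line 704). That call pairs with netdev_sent_queue() in cp_start_xmit() (file line 869) to drive byte queue limits (BQL). A condensed sketch of the pairing, with all driver-specific details omitted:

    /* BQL pairing sketch, illustrative only: every byte reported at transmit
     * time must later be reported as completed, or the queue stalls. */
    static netdev_tx_t xmit_one(struct net_device *dev, struct sk_buff *skb)
    {
        /* ... map skb and fill a Tx descriptor here ... */
        netdev_sent_queue(dev, skb->len);          /* bytes handed to the HW */
        return NETDEV_TX_OK;
    }

    static void reap_tx(struct net_device *dev, unsigned int pkts,
                        unsigned int bytes)
    {
        /* called from the completion path; may re-wake the Tx queue */
        netdev_completed_queue(dev, pkts, bytes);
    }
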
723 cp->tx_skb[index] = NULL; in unwind_tx_frag_mapping()
724 txd = &cp->tx_ring[index]; in unwind_tx_frag_mapping()
725 this_frag = &skb_shinfo(skb)->frags[frag]; in unwind_tx_frag_mapping()
726 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), in unwind_tx_frag_mapping()
741 spin_lock_irqsave(&cp->lock, intr_flags); in cp_start_xmit()
744 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { in cp_start_xmit()
746 spin_unlock_irqrestore(&cp->lock, intr_flags); in cp_start_xmit()
751 entry = cp->tx_head; in cp_start_xmit()
752 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; in cp_start_xmit()
753 mss = skb_shinfo(skb)->gso_size; in cp_start_xmit()
765 else if (skb->ip_summed == CHECKSUM_PARTIAL) { in cp_start_xmit()
767 if (ip->protocol == IPPROTO_TCP) in cp_start_xmit()
769 else if (ip->protocol == IPPROTO_UDP) in cp_start_xmit()
778 if (skb_shinfo(skb)->nr_frags == 0) { in cp_start_xmit()
779 struct cp_desc *txd = &cp->tx_ring[entry]; in cp_start_xmit()
783 len = skb->len; in cp_start_xmit()
784 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); in cp_start_xmit()
785 if (dma_mapping_error(&cp->pdev->dev, mapping)) in cp_start_xmit()
788 txd->opts2 = opts2; in cp_start_xmit()
789 txd->addr = cpu_to_le64(mapping); in cp_start_xmit()
794 txd->opts1 = cpu_to_le32(opts1); in cp_start_xmit()
797 cp->tx_skb[entry] = skb; in cp_start_xmit()
798 cp->tx_opts[entry] = opts1; in cp_start_xmit()
799 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", in cp_start_xmit()
800 entry, skb->len); in cp_start_xmit()
812 first_mapping = dma_map_single(&cp->pdev->dev, skb->data, in cp_start_xmit()
814 if (dma_mapping_error(&cp->pdev->dev, first_mapping)) in cp_start_xmit()
817 cp->tx_skb[entry] = skb; in cp_start_xmit()
819 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { in cp_start_xmit()
820 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; in cp_start_xmit()
827 mapping = dma_map_single(&cp->pdev->dev, in cp_start_xmit()
830 if (dma_mapping_error(&cp->pdev->dev, mapping)) { in cp_start_xmit()
835 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0; in cp_start_xmit()
839 if (frag == skb_shinfo(skb)->nr_frags - 1) in cp_start_xmit()
842 txd = &cp->tx_ring[entry]; in cp_start_xmit()
843 txd->opts2 = opts2; in cp_start_xmit()
844 txd->addr = cpu_to_le64(mapping); in cp_start_xmit()
847 txd->opts1 = cpu_to_le32(ctrl); in cp_start_xmit()
850 cp->tx_opts[entry] = ctrl; in cp_start_xmit()
851 cp->tx_skb[entry] = skb; in cp_start_xmit()
854 txd = &cp->tx_ring[first_entry]; in cp_start_xmit()
855 txd->opts2 = opts2; in cp_start_xmit()
856 txd->addr = cpu_to_le64(first_mapping); in cp_start_xmit()
860 txd->opts1 = cpu_to_le32(ctrl); in cp_start_xmit()
863 cp->tx_opts[first_entry] = ctrl; in cp_start_xmit()
864 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n", in cp_start_xmit()
865 first_entry, entry, skb->len); in cp_start_xmit()
867 cp->tx_head = NEXT_TX(entry); in cp_start_xmit()
869 netdev_sent_queue(dev, skb->len); in cp_start_xmit()
874 spin_unlock_irqrestore(&cp->lock, intr_flags); in cp_start_xmit()
881 cp->dev->stats.tx_dropped++; in cp_start_xmit()
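
For a fragmented skb, cp_start_xmit() maps and publishes every fragment descriptor inside the loop (file lines 819-851) and only afterwards fills in the first descriptor and writes its opts1 (file lines 854-863), so the NIC never sees DescOwn on the head of a chain whose tail is still being built; on failure, unwind_tx_frag_mapping() (file lines 723-726) undoes the partial mappings. A sketch of that publish-the-head-last ordering, with a hypothetical helper name; the driver also places wmb() barriers around these stores, which do not appear in the matched lines:

    /* Ordering sketch, not driver code: hand the whole chain to the NIC by
     * setting DescOwn on the head descriptor last. */
    static void publish_tx_chain(struct cp_desc *ring, unsigned int head,
                                 u32 head_ctrl)
    {
        /* fragment descriptors after 'head' are assumed already filled and
         * already marked DescOwn at this point */
        wmb();                                        /* order earlier stores */
        ring[head].opts1 = cpu_to_le32(head_ctrl);    /* head_ctrl has DescOwn */
    }
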
895 if (dev->flags & IFF_PROMISC) { in __cp_set_rx_mode()
902 (dev->flags & IFF_ALLMULTI)) { in __cp_set_rx_mode()
903 /* Too many to filter perfectly -- accept all multicasts. */ in __cp_set_rx_mode()
911 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; in __cp_set_rx_mode()
919 cp->rx_config = cp_rx_config | rx_mode; in __cp_set_rx_mode()
920 cpw32_f(RxConfig, cp->rx_config); in __cp_set_rx_mode()
931 spin_lock_irqsave (&cp->lock, flags); in cp_set_rx_mode()
933 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_rx_mode()
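
__cp_set_rx_mode() (file lines 895-920) programs the receive filter: promiscuous mode and the "too many multicast groups" case both fall back to accept-all, otherwise each address contributes one bit to a 64-bit hash filter selected by the top six bits of its Ethernet CRC (file line 911), and the resulting mode is folded into RxConfig. A hedged sketch of the bit selection, assuming the usual split of the filter across two 32-bit multicast address registers (that layout is not shown in the hits above):

    #include <linux/crc32.h>      /* ether_crc() */
    #include <linux/if_ether.h>   /* ETH_ALEN */

    /* Illustrative only: set the hash-filter bit for one multicast address.
     * mc_filter[2] stands in for the chip's two 32-bit MAR registers. */
    static void hash_set_bit(u32 mc_filter[2], const u8 *addr)
    {
        int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;    /* top 6 CRC bits */
        mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);   /* word, then bit */
    }
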
939 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff); in __cp_get_stats()
949 spin_lock_irqsave(&cp->lock, flags); in cp_get_stats()
952 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_stats()
954 return &dev->stats; in cp_get_stats()
965 cp->rx_tail = 0; in cp_stop_hw()
966 cp->tx_head = cp->tx_tail = 0; in cp_stop_hw()
968 netdev_reset_queue(cp->dev); in cp_stop_hw()
977 while (work--) { in cp_reset_hw()
984 netdev_err(cp->dev, "hardware reset timeout\n"); in cp_reset_hw()
991 cpw16(CpCmd, cp->cpcmd); in cp_start_hw()
997 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware in cp_start_hw()
1004 ring_dma = cp->ring_dma; in cp_start_hw()
1020 netdev_reset_queue(cp->dev); in cp_start_hw()
1030 struct net_device *dev = cp->dev; in cp_init_hw()
1037 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); in cp_init_hw()
1038 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); in cp_init_hw()
1047 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */ in cp_init_hw()
1049 cp->wol_enabled = 0; in cp_init_hw()
1060 struct net_device *dev = cp->dev; in cp_refill_rx()
1067 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz); in cp_refill_rx()
1071 mapping = dma_map_single(&cp->pdev->dev, skb->data, in cp_refill_rx()
1072 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_refill_rx()
1073 if (dma_mapping_error(&cp->pdev->dev, mapping)) { in cp_refill_rx()
1077 cp->rx_skb[i] = skb; in cp_refill_rx()
1079 cp->rx_ring[i].opts2 = 0; in cp_refill_rx()
1080 cp->rx_ring[i].addr = cpu_to_le64(mapping); in cp_refill_rx()
1081 if (i == (CP_RX_RING_SIZE - 1)) in cp_refill_rx()
1082 cp->rx_ring[i].opts1 = in cp_refill_rx()
1083 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); in cp_refill_rx()
1085 cp->rx_ring[i].opts1 = in cp_refill_rx()
1086 cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_refill_rx()
1093 return -ENOMEM; in cp_refill_rx()
1098 cp->rx_tail = 0; in cp_init_rings_index()
1099 cp->tx_head = cp->tx_tail = 0; in cp_init_rings_index()
1104 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_init_rings()
1105 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); in cp_init_rings()
1106 memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); in cp_init_rings()
1115 struct device *d = &cp->pdev->dev; in cp_alloc_rings()
1119 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL); in cp_alloc_rings()
1121 return -ENOMEM; in cp_alloc_rings()
1123 cp->rx_ring = mem; in cp_alloc_rings()
1124 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; in cp_alloc_rings()
1128 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); in cp_alloc_rings()
1139 if (cp->rx_skb[i]) { in cp_clean_rings()
1140 desc = cp->rx_ring + i; in cp_clean_rings()
1141 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), in cp_clean_rings()
1142 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_clean_rings()
1143 dev_kfree_skb_any(cp->rx_skb[i]); in cp_clean_rings()
1148 if (cp->tx_skb[i]) { in cp_clean_rings()
1149 struct sk_buff *skb = cp->tx_skb[i]; in cp_clean_rings()
1151 desc = cp->tx_ring + i; in cp_clean_rings()
1152 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), in cp_clean_rings()
1153 le32_to_cpu(desc->opts1) & 0xffff, in cp_clean_rings()
1155 if (le32_to_cpu(desc->opts1) & LastFrag) in cp_clean_rings()
1157 cp->dev->stats.tx_dropped++; in cp_clean_rings()
1160 netdev_reset_queue(cp->dev); in cp_clean_rings()
1162 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); in cp_clean_rings()
1163 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_clean_rings()
1164 memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); in cp_clean_rings()
1166 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); in cp_clean_rings()
1167 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); in cp_clean_rings()
1173 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring, in cp_free_rings()
1174 cp->ring_dma); in cp_free_rings()
1175 cp->rx_ring = NULL; in cp_free_rings()
1176 cp->tx_ring = NULL; in cp_free_rings()
1182 const int irq = cp->pdev->irq; in cp_open()
1191 napi_enable(&cp->napi); in cp_open()
1195 rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev); in cp_open()
1202 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); in cp_open()
1208 napi_disable(&cp->napi); in cp_open()
1219 napi_disable(&cp->napi); in cp_close()
1223 spin_lock_irqsave(&cp->lock, flags); in cp_close()
1230 spin_unlock_irqrestore(&cp->lock, flags); in cp_close()
1232 free_irq(cp->pdev->irq, dev); in cp_close()
1248 spin_lock_irqsave(&cp->lock, flags); in cp_tx_timeout()
1250 netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n", in cp_tx_timeout()
1251 cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc)); in cp_tx_timeout()
1253 netif_dbg(cp, tx_err, cp->dev, in cp_tx_timeout()
1255 i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1), in cp_tx_timeout()
1256 cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2), in cp_tx_timeout()
1257 le64_to_cpu(cp->tx_ring[i].addr), in cp_tx_timeout()
1258 cp->tx_skb[i]); in cp_tx_timeout()
1269 napi_schedule_irqoff(&cp->napi); in cp_tx_timeout()
1271 spin_unlock_irqrestore(&cp->lock, flags); in cp_tx_timeout()
1280 dev->mtu = new_mtu; in cp_change_mtu()
1287 dev->mtu = new_mtu; in cp_change_mtu()
1308 readw(cp->regs + mii_2_8139_map[location]) : 0; in mdio_read()
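
The chip exposes its internal PHY through ordinary chip registers rather than a real MDIO bus: BasicModeCtrl through NWayExpansion (file lines 155-159) shadow the standard MII BMCR, BMSR, ADVERTISE, LPA and EXPANSION registers, and mdio_read() (file line 1308) simply translates the MII register number through mii_2_8139_map and does a readw. Below is an illustrative reconstruction of that mapping from the register enum above; the driver's actual mii_2_8139_map contents are not part of the matched lines:

    #include <linux/mii.h>   /* MII_BMCR, MII_BMSR, ... */

    /* Illustrative mapping of MII register numbers to 8139C+ register
     * offsets, taken from the enum above; not the driver's table. */
    static const u16 mii_reg_to_chip[] = {
        [MII_BMCR]      = 0x62,   /* BasicModeCtrl   */
        [MII_BMSR]      = 0x64,   /* BasicModeStatus */
        [MII_ADVERTISE] = 0x66,   /* NWayAdvert      */
        [MII_LPA]       = 0x68,   /* NWayLPAR        */
        [MII_EXPANSION] = 0x6A,   /* NWayExpansion   */
    };
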
1325 /* Set the ethtool Wake-on-LAN settings */
1333 if (wol->wolopts) { in netdev_set_wol()
1334 if (wol->wolopts & WAKE_PHY) options |= LinkUp; in netdev_set_wol()
1335 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket; in netdev_set_wol()
1345 if (wol->wolopts) { in netdev_set_wol()
1346 if (wol->wolopts & WAKE_UCAST) options |= UWF; in netdev_set_wol()
1347 if (wol->wolopts & WAKE_BCAST) options |= BWF; in netdev_set_wol()
1348 if (wol->wolopts & WAKE_MCAST) options |= MWF; in netdev_set_wol()
1353 cp->wol_enabled = (wol->wolopts) ? 1 : 0; in netdev_set_wol()
1358 /* Get the ethtool Wake-on-LAN settings */
1364 wol->wolopts = 0; /* Start from scratch */ in netdev_get_wol()
1365 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC | in netdev_get_wol()
1368 if (!cp->wol_enabled) return; in netdev_get_wol()
1371 if (options & LinkUp) wol->wolopts |= WAKE_PHY; in netdev_get_wol()
1372 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC; in netdev_get_wol()
1376 if (options & UWF) wol->wolopts |= WAKE_UCAST; in netdev_get_wol()
1377 if (options & BWF) wol->wolopts |= WAKE_BCAST; in netdev_get_wol()
1378 if (options & MWF) wol->wolopts |= WAKE_MCAST; in netdev_get_wol()
1385 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); in cp_get_drvinfo()
1386 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); in cp_get_drvinfo()
1387 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); in cp_get_drvinfo()
1393 ring->rx_max_pending = CP_RX_RING_SIZE; in cp_get_ringparam()
1394 ring->tx_max_pending = CP_TX_RING_SIZE; in cp_get_ringparam()
1395 ring->rx_pending = CP_RX_RING_SIZE; in cp_get_ringparam()
1396 ring->tx_pending = CP_TX_RING_SIZE; in cp_get_ringparam()
1410 return -EOPNOTSUPP; in cp_get_sset_count()
1420 spin_lock_irqsave(&cp->lock, flags); in cp_get_link_ksettings()
1421 mii_ethtool_get_link_ksettings(&cp->mii_if, cmd); in cp_get_link_ksettings()
1422 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_link_ksettings()
1434 spin_lock_irqsave(&cp->lock, flags); in cp_set_link_ksettings()
1435 rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd); in cp_set_link_ksettings()
1436 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_link_ksettings()
1444 return mii_nway_restart(&cp->mii_if); in cp_nway_reset()
1450 return cp->msg_enable; in cp_get_msglevel()
1456 cp->msg_enable = value; in cp_set_msglevel()
1464 if (!((dev->features ^ features) & NETIF_F_RXCSUM)) in cp_set_features()
1467 spin_lock_irqsave(&cp->lock, flags); in cp_set_features()
1470 cp->cpcmd |= RxChkSum; in cp_set_features()
1472 cp->cpcmd &= ~RxChkSum; in cp_set_features()
1475 cp->cpcmd |= RxVlanOn; in cp_set_features()
1477 cp->cpcmd &= ~RxVlanOn; in cp_set_features()
1479 cpw16_f(CpCmd, cp->cpcmd); in cp_set_features()
1480 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_features()
1491 if (regs->len < CP_REGS_SIZE) in cp_get_regs()
1492 return /* -EINVAL */; in cp_get_regs()
1494 regs->version = CP_REGS_VER; in cp_get_regs()
1496 spin_lock_irqsave(&cp->lock, flags); in cp_get_regs()
1497 memcpy_fromio(p, cp->regs, CP_REGS_SIZE); in cp_get_regs()
1498 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_regs()
1506 spin_lock_irqsave (&cp->lock, flags); in cp_get_wol()
1508 spin_unlock_irqrestore (&cp->lock, flags); in cp_get_wol()
1517 spin_lock_irqsave (&cp->lock, flags); in cp_set_wol()
1519 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_wol()
1544 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats), in cp_get_ethtool_stats()
1564 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok); in cp_get_ethtool_stats()
1565 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok); in cp_get_ethtool_stats()
1566 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err); in cp_get_ethtool_stats()
1567 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err); in cp_get_ethtool_stats()
1568 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo); in cp_get_ethtool_stats()
1569 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align); in cp_get_ethtool_stats()
1570 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col); in cp_get_ethtool_stats()
1571 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol); in cp_get_ethtool_stats()
1572 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys); in cp_get_ethtool_stats()
1573 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast); in cp_get_ethtool_stats()
1574 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast); in cp_get_ethtool_stats()
1575 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort); in cp_get_ethtool_stats()
1576 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun); in cp_get_ethtool_stats()
1577 tmp_stats[i++] = cp->cp_stats.rx_frags; in cp_get_ethtool_stats()
1580 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma); in cp_get_ethtool_stats()
1611 return -EINVAL; in cp_ioctl()
1613 spin_lock_irqsave(&cp->lock, flags); in cp_ioctl()
1614 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL); in cp_ioctl()
1615 spin_unlock_irqrestore(&cp->lock, flags); in cp_ioctl()
1624 if (!is_valid_ether_addr(addr->sa_data)) in cp_set_mac_address()
1625 return -EADDRNOTAVAIL; in cp_set_mac_address()
1627 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); in cp_set_mac_address()
1629 spin_lock_irq(&cp->lock); in cp_set_mac_address()
1632 cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); in cp_set_mac_address()
1633 cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); in cp_set_mac_address()
1636 spin_unlock_irq(&cp->lock); in cp_set_mac_address()
1658 /* The EEPROM commands include the always-set leading bit. */
1683 for (i = cmd_len - 1; i >= 0; i--) { in eeprom_cmd()
1703 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2)); in eeprom_extend_cmd()
1720 for (i = 16; i > 0; i--) { in read_eeprom()
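
The EEPROM helpers (file lines 1658-1720) bit-bang a 93Cxx-style serial EEPROM: eeprom_cmd() clocks the command word out most-significant bit first, including the always-set start bit, and read_eeprom() then clocks 16 data bits back in. A condensed, self-contained sketch of that framing; the pin helpers are hypothetical stand-ins for the EE_* register manipulation in the driver:

    /* Illustrative bit-bang framing, not driver code. */
    struct ee_pins {
        void (*set_di)(int bit);   /* drive the EEPROM data-in line */
        int  (*get_do)(void);      /* sample the data-out line (0 or 1) */
        void (*clk)(void);         /* issue one clock pulse */
    };

    static u16 eeprom_read16(const struct ee_pins *p, int cmd, int cmd_len)
    {
        u16 val = 0;
        int i;

        for (i = cmd_len - 1; i >= 0; i--) {   /* command bits, MSB first */
            p->set_di((cmd >> i) & 1);
            p->clk();
        }
        for (i = 16; i > 0; i--) {             /* then 16 data bits back */
            p->clk();
            val = (u16)((val << 1) | p->get_do());
        }
        return val;
    }
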
1763 spin_lock_irq(&cp->lock); in cp_get_eeprom_len()
1764 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128; in cp_get_eeprom_len()
1765 spin_unlock_irq(&cp->lock); in cp_get_eeprom_len()
1776 u32 offset = eeprom->offset >> 1; in cp_get_eeprom()
1777 u32 len = eeprom->len; in cp_get_eeprom()
1780 eeprom->magic = CP_EEPROM_MAGIC; in cp_get_eeprom()
1782 spin_lock_irq(&cp->lock); in cp_get_eeprom()
1784 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; in cp_get_eeprom()
1786 if (eeprom->offset & 1) { in cp_get_eeprom()
1787 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1792 while (i < len - 1) { in cp_get_eeprom()
1793 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1800 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1804 spin_unlock_irq(&cp->lock); in cp_get_eeprom()
1814 u32 offset = eeprom->offset >> 1; in cp_set_eeprom()
1815 u32 len = eeprom->len; in cp_set_eeprom()
1818 if (eeprom->magic != CP_EEPROM_MAGIC) in cp_set_eeprom()
1819 return -EINVAL; in cp_set_eeprom()
1821 spin_lock_irq(&cp->lock); in cp_set_eeprom()
1823 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; in cp_set_eeprom()
1825 if (eeprom->offset & 1) { in cp_set_eeprom()
1826 val = read_eeprom(cp->regs, offset, addr_len) & 0xff; in cp_set_eeprom()
1828 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1832 while (i < len - 1) { in cp_set_eeprom()
1835 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1840 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00; in cp_set_eeprom()
1842 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1845 spin_unlock_irq(&cp->lock); in cp_set_eeprom()
1852 pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */ in cp_set_d3_state()
1853 pci_set_power_state (cp->pdev, PCI_D3hot); in cp_set_d3_state()
1860 if (skb_shinfo(skb)->gso_size > MSSMask) in cp_features_check()
1895 if (pdev->vendor == PCI_VENDOR_ID_REALTEK && in cp_init_one()
1896 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) { in cp_init_one()
1897 dev_info(&pdev->dev, in cp_init_one()
1899 pdev->vendor, pdev->device, pdev->revision); in cp_init_one()
1900 return -ENODEV; in cp_init_one()
1905 return -ENOMEM; in cp_init_one()
1906 SET_NETDEV_DEV(dev, &pdev->dev); in cp_init_one()
1909 cp->pdev = pdev; in cp_init_one()
1910 cp->dev = dev; in cp_init_one()
1911 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug); in cp_init_one()
1912 spin_lock_init (&cp->lock); in cp_init_one()
1913 cp->mii_if.dev = dev; in cp_init_one()
1914 cp->mii_if.mdio_read = mdio_read; in cp_init_one()
1915 cp->mii_if.mdio_write = mdio_write; in cp_init_one()
1916 cp->mii_if.phy_id = CP_INTERNAL_PHY; in cp_init_one()
1917 cp->mii_if.phy_id_mask = 0x1f; in cp_init_one()
1918 cp->mii_if.reg_num_mask = 0x1f; in cp_init_one()
1935 rc = -EIO; in cp_init_one()
1936 dev_err(&pdev->dev, "no MMIO resource\n"); in cp_init_one()
1940 rc = -EIO; in cp_init_one()
1941 dev_err(&pdev->dev, "MMIO resource (%llx) too small\n", in cp_init_one()
1956 dev_err(&pdev->dev, in cp_init_one()
1962 dev_err(&pdev->dev, in cp_init_one()
1968 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | in cp_init_one()
1971 dev->features |= NETIF_F_RXCSUM; in cp_init_one()
1972 dev->hw_features |= NETIF_F_RXCSUM; in cp_init_one()
1976 rc = -EIO; in cp_init_one()
1977 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n", in cp_init_one()
1982 cp->regs = regs; in cp_init_one()
1989 ((__le16 *) (dev->dev_addr))[i] = in cp_init_one()
1992 dev->netdev_ops = &cp_netdev_ops; in cp_init_one()
1993 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); in cp_init_one()
1994 dev->ethtool_ops = &cp_ethtool_ops; in cp_init_one()
1995 dev->watchdog_timeo = TX_TIMEOUT; in cp_init_one()
1997 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | in cp_init_one()
2001 dev->features |= NETIF_F_HIGHDMA; in cp_init_one()
2003 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | in cp_init_one()
2005 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | in cp_init_one()
2008 /* MTU range: 60 - 4096 */ in cp_init_one()
2009 dev->min_mtu = CP_MIN_MTU; in cp_init_one()
2010 dev->max_mtu = CP_MAX_MTU; in cp_init_one()
2016 netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n", in cp_init_one()
2017 regs, dev->dev_addr, pdev->irq); in cp_init_one()
2021 /* enable busmastering and memory-write-invalidate */ in cp_init_one()
2024 if (cp->wol_enabled) in cp_init_one()
2048 iounmap(cp->regs); in cp_remove_one()
2049 if (cp->wol_enabled) in cp_remove_one()
2069 spin_lock_irqsave (&cp->lock, flags); in cp_suspend()
2075 spin_unlock_irqrestore (&cp->lock, flags); in cp_suspend()
2077 device_set_wakeup_enable(device, cp->wol_enabled); in cp_suspend()
2099 spin_lock_irqsave (&cp->lock, flags); in cp_resume()
2101 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); in cp_resume()
2103 spin_unlock_irqrestore (&cp->lock, flags); in cp_resume()