Lines Matching full:cp (8139cp.c, the RealTek 8139C+ PCI Ethernet driver)
1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
51 #define DRV_NAME "8139cp"
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
115 #define TX_BUFFS_AVAIL(CP) \ argument
116 (((CP)->tx_tail <= (CP)->tx_head) ? \
117 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
118 (CP)->tx_tail - (CP)->tx_head - 1)
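The TX_BUFFS_AVAIL() macro at lines 115-118 computes how many transmit descriptors are still free from the head/tail indices, keeping one slot permanently unused so that head == tail unambiguously means "empty". A minimal stand-alone sketch of the same arithmetic; the struct and RING_SIZE constant below are invented stand-ins for struct cp_private and CP_TX_RING_SIZE:

/* Stand-alone model of the TX_BUFFS_AVAIL() arithmetic above.
 * RING_SIZE and struct cp_model are illustrative stand-ins for
 * CP_TX_RING_SIZE and struct cp_private; they are not the driver's names. */
#include <assert.h>
#include <stdio.h>

#define RING_SIZE 64

struct cp_model {
	unsigned tx_head;	/* next slot the driver will fill */
	unsigned tx_tail;	/* next slot completion will reclaim */
};

/* Free descriptors, reserving one slot so head == tail means "empty". */
static unsigned tx_buffs_avail(const struct cp_model *cp)
{
	if (cp->tx_tail <= cp->tx_head)
		return cp->tx_tail + (RING_SIZE - 1) - cp->tx_head;
	return cp->tx_tail - cp->tx_head - 1;
}

int main(void)
{
	struct cp_model cp = { .tx_head = 0, .tx_tail = 0 };

	assert(tx_buffs_avail(&cp) == RING_SIZE - 1);	/* empty ring */

	cp.tx_head = 10; cp.tx_tail = 3;		/* 7 slots in flight */
	assert(tx_buffs_avail(&cp) == RING_SIZE - 1 - 7);

	cp.tx_head = 2; cp.tx_tail = 3;			/* wrapped, completely full */
	assert(tx_buffs_avail(&cp) == 0);

	printf("ring arithmetic checks passed\n");
	return 0;
}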
355 #define cpr8(reg) readb(cp->regs + (reg))
356 #define cpr16(reg) readw(cp->regs + (reg))
357 #define cpr32(reg) readl(cp->regs + (reg))
358 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
359 #define cpw16(reg,val) writew((val), cp->regs + (reg))
360 #define cpw32(reg,val) writel((val), cp->regs + (reg))
362 writeb((val), cp->regs + (reg)); \
363 readb(cp->regs + (reg)); \
366 writew((val), cp->regs + (reg)); \
367 readw(cp->regs + (reg)); \
370 writel((val), cp->regs + (reg)); \
371 readl(cp->regs + (reg)); \
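The accessor macros at lines 355-360 wrap readb/readw/readl and writeb/writew/writel around cp->regs; the bodies at lines 362-371 belong to the driver's flushing write variants (cpw32_f appears at line 920 and cpw16_f at line 1479 in this listing), which read the register back immediately after writing it so the posted MMIO write is known to have reached the chip. A user-space sketch of that pattern, with a volatile array standing in for the ioremapped register window:

/* User-space model of the cpw*/cpr* accessors and the "_f"
 * (write-then-read-back flush) variant whose bodies appear at lines 362-371.
 * regs[] is a plain volatile buffer standing in for the ioremapped MMIO
 * window cp->regs; real kernel code must use writeb()/readb(). */
#include <stdint.h>
#include <stdio.h>

static volatile uint8_t regs[256];	/* fake register window */

#define cpw8(reg, val)	(regs[(reg)] = (uint8_t)(val))
#define cpr8(reg)	(regs[(reg)])

/* Write, then read back: on real hardware the read cannot complete until
 * the posted write has reached the device, so it acts as a flush. */
#define cpw8_f(reg, val)		\
	do {				\
		cpw8((reg), (val));	\
		(void)cpr8(reg);	\
	} while (0)

int main(void)
{
	cpw8(0x37, 0x10);	/* ordinary posted write */
	cpw8_f(0x37, 0x10);	/* write known to have reached the device */
	printf("reg 0x37 = 0x%02x\n", (unsigned)cpr8(0x37));
	return 0;
}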
376 static void cp_tx (struct cp_private *cp);
377 static void cp_clean_rings (struct cp_private *cp);
407 static inline void cp_set_rxbufsize (struct cp_private *cp) in cp_set_rxbufsize() argument
409 unsigned int mtu = cp->dev->mtu; in cp_set_rxbufsize()
413 cp->rx_buf_sz = mtu + ETH_HLEN + 8; in cp_set_rxbufsize()
415 cp->rx_buf_sz = PKT_BUF_SZ; in cp_set_rxbufsize()
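Lines 407-415 size the receive buffer from the device MTU. The guarding condition itself is not among the matched lines, so the comparison and the constants in this sketch are assumptions: a plausible reconstruction tests the MTU against the standard 1500-byte Ethernet payload and otherwise falls back to the default buffer size.

/* Illustrative reconstruction of cp_set_rxbufsize(): the if-condition does
 * not appear in the match list above, so the MTU comparison and the
 * 1500/1536 constants here are assumptions, not quoted from the driver. */
#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header */
#define ETH_DATA_LEN	1500	/* standard MTU */
#define PKT_BUF_SZ	1536	/* default receive buffer (assumed value) */

static unsigned rx_buf_size_for_mtu(unsigned mtu)
{
	if (mtu > ETH_DATA_LEN)
		/* MTU + Ethernet header + FCS + optional VLAN tag */
		return mtu + ETH_HLEN + 8;
	return PKT_BUF_SZ;
}

int main(void)
{
	printf("mtu 1500 -> %u bytes\n", rx_buf_size_for_mtu(1500));
	printf("mtu 4096 -> %u bytes\n", rx_buf_size_for_mtu(4096));
	return 0;
}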
418 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, in cp_rx_skb() argument
423 skb->protocol = eth_type_trans (skb, cp->dev); in cp_rx_skb()
425 cp->dev->stats.rx_packets++; in cp_rx_skb()
426 cp->dev->stats.rx_bytes += skb->len; in cp_rx_skb()
431 napi_gro_receive(&cp->napi, skb); in cp_rx_skb()
434 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail, in cp_rx_err_acct() argument
437 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n", in cp_rx_err_acct()
439 cp->dev->stats.rx_errors++; in cp_rx_err_acct()
441 cp->dev->stats.rx_frame_errors++; in cp_rx_err_acct()
443 cp->dev->stats.rx_crc_errors++; in cp_rx_err_acct()
445 cp->dev->stats.rx_length_errors++; in cp_rx_err_acct()
447 cp->dev->stats.rx_length_errors++; in cp_rx_err_acct()
449 cp->dev->stats.rx_fifo_errors++; in cp_rx_err_acct()
465 struct cp_private *cp = container_of(napi, struct cp_private, napi); in cp_rx_poll() local
466 struct net_device *dev = cp->dev; in cp_rx_poll()
467 unsigned int rx_tail = cp->rx_tail; in cp_rx_poll()
477 const unsigned buflen = cp->rx_buf_sz; in cp_rx_poll()
479 skb = cp->rx_skb[rx_tail]; in cp_rx_poll()
482 desc = &cp->rx_ring[rx_tail]; in cp_rx_poll()
496 cp_rx_err_acct(cp, rx_tail, status, len); in cp_rx_poll()
498 cp->cp_stats.rx_frags++; in cp_rx_poll()
503 cp_rx_err_acct(cp, rx_tail, status, len); in cp_rx_poll()
507 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n", in cp_rx_poll()
516 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, in cp_rx_poll()
518 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { in cp_rx_poll()
524 dma_unmap_single(&cp->pdev->dev, mapping, in cp_rx_poll()
535 cp->rx_skb[rx_tail] = new_skb; in cp_rx_poll()
537 cp_rx_skb(cp, skb, desc); in cp_rx_poll()
542 cp->rx_ring[rx_tail].opts2 = 0; in cp_rx_poll()
543 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); in cp_rx_poll()
546 cp->rx_buf_sz); in cp_rx_poll()
548 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_rx_poll()
552 cp->rx_tail = rx_tail; in cp_rx_poll()
560 spin_lock_irqsave(&cp->lock, flags); in cp_rx_poll()
562 spin_unlock_irqrestore(&cp->lock, flags); in cp_rx_poll()
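The receive path at lines 516-535 maps the replacement buffer first and only then unmaps and hands the filled one up the stack, so a DMA-mapping failure can simply drop the frame while the old buffer stays armed in the ring. A user-space model of that ordering; alloc_buf()/free_buf()/deliver_to_stack() are invented stand-ins for the skb allocation, dma_map_single()/dma_unmap_single() and napi_gro_receive() steps:

/* Model of the "allocate and map the replacement before giving up the old
 * buffer" ordering used in cp_rx_poll().  All names here are invented. */
#include <stdlib.h>
#include <stdio.h>

#define RX_RING_SIZE 64
#define BUF_LEN      1536

static void *rx_ring[RX_RING_SIZE];	/* buffer currently owned by slot i */
static unsigned long rx_dropped, rx_packets;

static void *alloc_buf(void) { return malloc(BUF_LEN); }
static void free_buf(void *b) { free(b); }
static void deliver_to_stack(void *b) { free_buf(b); rx_packets++; }

/* Complete one receive slot: only swap buffers if the replacement is ready. */
static void rx_slot_complete(unsigned slot)
{
	void *new_buf = alloc_buf();

	if (!new_buf) {
		/* Allocation/mapping failed: recycle the old buffer in place,
		 * count a drop, and leave the ring fully populated. */
		rx_dropped++;
		return;
	}
	deliver_to_stack(rx_ring[slot]);	/* old buffer goes up the stack */
	rx_ring[slot] = new_buf;		/* slot re-armed with the new one */
}

int main(void)
{
	for (unsigned i = 0; i < RX_RING_SIZE; i++)
		rx_ring[i] = alloc_buf();

	for (unsigned i = 0; i < RX_RING_SIZE; i++)
		rx_slot_complete(i);

	printf("delivered %lu, dropped %lu\n", rx_packets, rx_dropped);
	for (unsigned i = 0; i < RX_RING_SIZE; i++)
		free_buf(rx_ring[i]);
	return 0;
}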
571 struct cp_private *cp; in cp_interrupt() local
578 cp = netdev_priv(dev); in cp_interrupt()
580 spin_lock(&cp->lock); in cp_interrupt()
592 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n", in cp_interrupt()
604 if (napi_schedule_prep(&cp->napi)) { in cp_interrupt()
606 __napi_schedule(&cp->napi); in cp_interrupt()
610 cp_tx(cp); in cp_interrupt()
612 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); in cp_interrupt()
618 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status); in cp_interrupt()
619 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status); in cp_interrupt()
627 spin_unlock(&cp->lock); in cp_interrupt()
639 struct cp_private *cp = netdev_priv(dev); in cp_poll_controller() local
640 const int irq = cp->pdev->irq; in cp_poll_controller()
648 static void cp_tx (struct cp_private *cp) in cp_tx() argument
650 unsigned tx_head = cp->tx_head; in cp_tx()
651 unsigned tx_tail = cp->tx_tail; in cp_tx()
655 struct cp_desc *txd = cp->tx_ring + tx_tail; in cp_tx()
664 skb = cp->tx_skb[tx_tail]; in cp_tx()
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), in cp_tx()
668 cp->tx_opts[tx_tail] & 0xffff, in cp_tx()
673 netif_dbg(cp, tx_err, cp->dev, in cp_tx()
675 cp->dev->stats.tx_errors++; in cp_tx()
677 cp->dev->stats.tx_window_errors++; in cp_tx()
679 cp->dev->stats.tx_aborted_errors++; in cp_tx()
681 cp->dev->stats.tx_carrier_errors++; in cp_tx()
683 cp->dev->stats.tx_fifo_errors++; in cp_tx()
685 cp->dev->stats.collisions += in cp_tx()
687 cp->dev->stats.tx_packets++; in cp_tx()
688 cp->dev->stats.tx_bytes += skb->len; in cp_tx()
689 netif_dbg(cp, tx_done, cp->dev, in cp_tx()
697 cp->tx_skb[tx_tail] = NULL; in cp_tx()
702 cp->tx_tail = tx_tail; in cp_tx()
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl); in cp_tx()
705 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) in cp_tx()
706 netif_wake_queue(cp->dev); in cp_tx()
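cp_tx() (lines 650-706) reclaims from tx_tail toward tx_head, stopping at the first descriptor the NIC still owns, unmapping each completed buffer, reporting the packet/byte totals to netdev_completed_queue(), and waking the queue once TX_BUFFS_AVAIL() climbs above MAX_SKB_FRAGS + 1 (line 705). A self-contained model of that reclaim loop; the own flag and report_completed() are illustrative stand-ins for DescOwn and netdev_completed_queue():

/* Model of the cp_tx() completion walk: advance tail until a slot the
 * hardware still owns is found, accounting packets and bytes as we go.
 * Struct names and the report_completed() hook are invented. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 64

struct tx_slot {
	bool owned_by_nic;	/* models DescOwn in opts1 */
	unsigned bytes;		/* models skb->len */
};

static struct tx_slot ring[RING_SIZE];

static void report_completed(unsigned pkts, unsigned bytes)
{
	/* Stand-in for netdev_completed_queue(dev, pkts, bytes). */
	printf("completed %u packets, %u bytes\n", pkts, bytes);
}

static unsigned reclaim(unsigned tail, unsigned head)
{
	unsigned pkts = 0, bytes = 0;

	while (tail != head && !ring[tail].owned_by_nic) {
		pkts++;
		bytes += ring[tail].bytes;
		ring[tail].bytes = 0;
		tail = (tail + 1) % RING_SIZE;
	}
	if (pkts)
		report_completed(pkts, bytes);
	return tail;	/* new tx_tail */
}

int main(void)
{
	/* Three packets queued; the NIC has finished the first two. */
	ring[0] = (struct tx_slot){ .owned_by_nic = false, .bytes = 1514 };
	ring[1] = (struct tx_slot){ .owned_by_nic = false, .bytes = 60 };
	ring[2] = (struct tx_slot){ .owned_by_nic = true,  .bytes = 900 };

	unsigned new_tail = reclaim(0, 3);
	printf("new tx_tail = %u\n", new_tail);
	return 0;
}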
715 static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, in unwind_tx_frag_mapping() argument
723 cp->tx_skb[index] = NULL; in unwind_tx_frag_mapping()
724 txd = &cp->tx_ring[index]; in unwind_tx_frag_mapping()
726 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), in unwind_tx_frag_mapping()
734 struct cp_private *cp = netdev_priv(dev); in cp_start_xmit() local
741 spin_lock_irqsave(&cp->lock, intr_flags); in cp_start_xmit()
744 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { in cp_start_xmit()
746 spin_unlock_irqrestore(&cp->lock, intr_flags); in cp_start_xmit()
751 entry = cp->tx_head; in cp_start_xmit()
756 netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n", in cp_start_xmit()
779 struct cp_desc *txd = &cp->tx_ring[entry]; in cp_start_xmit()
784 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); in cp_start_xmit()
785 if (dma_mapping_error(&cp->pdev->dev, mapping)) in cp_start_xmit()
797 cp->tx_skb[entry] = skb; in cp_start_xmit()
798 cp->tx_opts[entry] = opts1; in cp_start_xmit()
799 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", in cp_start_xmit()
812 first_mapping = dma_map_single(&cp->pdev->dev, skb->data, in cp_start_xmit()
814 if (dma_mapping_error(&cp->pdev->dev, first_mapping)) in cp_start_xmit()
817 cp->tx_skb[entry] = skb; in cp_start_xmit()
827 mapping = dma_map_single(&cp->pdev->dev, in cp_start_xmit()
830 if (dma_mapping_error(&cp->pdev->dev, mapping)) { in cp_start_xmit()
831 unwind_tx_frag_mapping(cp, skb, first_entry, entry); in cp_start_xmit()
842 txd = &cp->tx_ring[entry]; in cp_start_xmit()
850 cp->tx_opts[entry] = ctrl; in cp_start_xmit()
851 cp->tx_skb[entry] = skb; in cp_start_xmit()
854 txd = &cp->tx_ring[first_entry]; in cp_start_xmit()
863 cp->tx_opts[first_entry] = ctrl; in cp_start_xmit()
864 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n", in cp_start_xmit()
867 cp->tx_head = NEXT_TX(entry); in cp_start_xmit()
870 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) in cp_start_xmit()
874 spin_unlock_irqrestore(&cp->lock, intr_flags); in cp_start_xmit()
881 cp->dev->stats.tx_dropped++; in cp_start_xmit()
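In the multi-fragment transmit path (lines 812-864) the fragment descriptors are filled and published first, and the first descriptor's opts1 is written last, after everything else is in place, so the NIC never sees ownership of a chain that is still being built; if mapping a fragment fails, unwind_tx_frag_mapping() (lines 715-726) releases what was mapped and the packet is counted as dropped (line 881). A small model of that "fill everything, publish ownership last" ordering; the struct and helper names are invented:

/* Model of the "fill every descriptor, then hand the first one to the NIC
 * last" ordering used by cp_start_xmit() for fragmented skbs.  The real
 * driver uses DescOwn in opts1 plus a write barrier before setting it. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 64

struct desc {
	bool own;	/* true once the NIC may process this slot */
	unsigned len;
};

static struct desc ring[RING_SIZE];

static void queue_fragmented_packet(unsigned first, unsigned head_len,
				    unsigned nfrags, const unsigned *frag_len)
{
	/* 1. Fill and publish the fragment descriptors (slots after `first`). */
	for (unsigned i = 0; i < nfrags; i++) {
		unsigned slot = (first + 1 + i) % RING_SIZE;

		ring[slot].len = frag_len[i];
		ring[slot].own = true;
	}
	/* 2. Fill the head descriptor's other fields... */
	ring[first].len = head_len;
	/* 3. ...and only now hand the whole chain to the NIC.  In the driver
	 *    a wmb() precedes this store so DescOwn cannot become visible
	 *    before the rest of the descriptor. */
	ring[first].own = true;
}

int main(void)
{
	const unsigned lens[2] = { 1024, 512 };

	queue_fragmented_packet(0, 66, 2, lens);
	for (unsigned i = 0; i < 3; i++)
		printf("slot %u: own=%d len=%u\n", i, ring[i].own, ring[i].len);
	return 0;
}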
890 struct cp_private *cp = netdev_priv(dev); in __cp_set_rx_mode() local
919 cp->rx_config = cp_rx_config | rx_mode; in __cp_set_rx_mode()
920 cpw32_f(RxConfig, cp->rx_config); in __cp_set_rx_mode()
929 struct cp_private *cp = netdev_priv(dev); in cp_set_rx_mode() local
931 spin_lock_irqsave (&cp->lock, flags); in cp_set_rx_mode()
933 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_rx_mode()
936 static void __cp_get_stats(struct cp_private *cp) in __cp_get_stats() argument
939 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff); in __cp_get_stats()
945 struct cp_private *cp = netdev_priv(dev); in cp_get_stats() local
949 spin_lock_irqsave(&cp->lock, flags); in cp_get_stats()
951 __cp_get_stats(cp); in cp_get_stats()
952 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_stats()
957 static void cp_stop_hw (struct cp_private *cp) in cp_stop_hw() argument
965 cp->rx_tail = 0; in cp_stop_hw()
966 cp->tx_head = cp->tx_tail = 0; in cp_stop_hw()
968 netdev_reset_queue(cp->dev); in cp_stop_hw()
971 static void cp_reset_hw (struct cp_private *cp) in cp_reset_hw() argument
984 netdev_err(cp->dev, "hardware reset timeout\n"); in cp_reset_hw()
987 static inline void cp_start_hw (struct cp_private *cp) in cp_start_hw() argument
991 cpw16(CpCmd, cp->cpcmd); in cp_start_hw()
1004 ring_dma = cp->ring_dma; in cp_start_hw()
1020 netdev_reset_queue(cp->dev); in cp_start_hw()
1023 static void cp_enable_irq(struct cp_private *cp) in cp_enable_irq() argument
1028 static void cp_init_hw (struct cp_private *cp) in cp_init_hw() argument
1030 struct net_device *dev = cp->dev; in cp_init_hw()
1032 cp_reset_hw(cp); in cp_init_hw()
1040 cp_start_hw(cp); in cp_init_hw()
1049 cp->wol_enabled = 0; in cp_init_hw()
1058 static int cp_refill_rx(struct cp_private *cp) in cp_refill_rx() argument
1060 struct net_device *dev = cp->dev; in cp_refill_rx()
1067 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz); in cp_refill_rx()
1071 mapping = dma_map_single(&cp->pdev->dev, skb->data, in cp_refill_rx()
1072 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_refill_rx()
1073 if (dma_mapping_error(&cp->pdev->dev, mapping)) { in cp_refill_rx()
1077 cp->rx_skb[i] = skb; in cp_refill_rx()
1079 cp->rx_ring[i].opts2 = 0; in cp_refill_rx()
1080 cp->rx_ring[i].addr = cpu_to_le64(mapping); in cp_refill_rx()
1082 cp->rx_ring[i].opts1 = in cp_refill_rx()
1083 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); in cp_refill_rx()
1085 cp->rx_ring[i].opts1 = in cp_refill_rx()
1086 cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_refill_rx()
1092 cp_clean_rings(cp); in cp_refill_rx()
1096 static void cp_init_rings_index (struct cp_private *cp) in cp_init_rings_index() argument
1098 cp->rx_tail = 0; in cp_init_rings_index()
1099 cp->tx_head = cp->tx_tail = 0; in cp_init_rings_index()
1102 static int cp_init_rings (struct cp_private *cp) in cp_init_rings() argument
1104 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_init_rings()
1105 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); in cp_init_rings()
1106 memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); in cp_init_rings()
1108 cp_init_rings_index(cp); in cp_init_rings()
1110 return cp_refill_rx (cp); in cp_init_rings()
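Lines 1079-1086 show each RX descriptor being armed with DescOwn plus the buffer size, with the last slot additionally carrying RingEnd so the hardware wraps back to slot 0. A sketch of that initialization pattern; the bit positions and names below are placeholders, not quoted from the chip documentation:

/* Sketch of the RX descriptor arming pattern in cp_refill_rx(): every slot
 * gets DescOwn | rx_buf_sz, and the final slot also gets RingEnd so the
 * hardware wraps.  DESC_OWN/DESC_RING_END are placeholder bit values. */
#include <stdint.h>
#include <stdio.h>

#define RX_RING_SIZE	64
#define DESC_OWN	(1u << 31)	/* placeholder for DescOwn */
#define DESC_RING_END	(1u << 30)	/* placeholder for RingEnd */

struct rx_desc { uint32_t opts1; };

static struct rx_desc rx_ring[RX_RING_SIZE];

static void arm_rx_ring(uint32_t rx_buf_sz)
{
	for (unsigned i = 0; i < RX_RING_SIZE; i++) {
		uint32_t opts1 = DESC_OWN | rx_buf_sz;

		if (i == RX_RING_SIZE - 1)
			opts1 |= DESC_RING_END;	/* hardware wraps here */
		rx_ring[i].opts1 = opts1;	/* cpu_to_le32() omitted */
	}
}

int main(void)
{
	arm_rx_ring(1536);
	printf("slot 0  opts1 = 0x%08x\n", (unsigned)rx_ring[0].opts1);
	printf("slot %u opts1 = 0x%08x\n", RX_RING_SIZE - 1,
	       (unsigned)rx_ring[RX_RING_SIZE - 1].opts1);
	return 0;
}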
1113 static int cp_alloc_rings (struct cp_private *cp) in cp_alloc_rings() argument
1115 struct device *d = &cp->pdev->dev; in cp_alloc_rings()
1119 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL); in cp_alloc_rings()
1123 cp->rx_ring = mem; in cp_alloc_rings()
1124 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; in cp_alloc_rings()
1126 rc = cp_init_rings(cp); in cp_alloc_rings()
1128 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); in cp_alloc_rings()
1133 static void cp_clean_rings (struct cp_private *cp) in cp_clean_rings() argument
1139 if (cp->rx_skb[i]) { in cp_clean_rings()
1140 desc = cp->rx_ring + i; in cp_clean_rings()
1141 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), in cp_clean_rings()
1142 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_clean_rings()
1143 dev_kfree_skb_any(cp->rx_skb[i]); in cp_clean_rings()
1148 if (cp->tx_skb[i]) { in cp_clean_rings()
1149 struct sk_buff *skb = cp->tx_skb[i]; in cp_clean_rings()
1151 desc = cp->tx_ring + i; in cp_clean_rings()
1152 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), in cp_clean_rings()
1157 cp->dev->stats.tx_dropped++; in cp_clean_rings()
1160 netdev_reset_queue(cp->dev); in cp_clean_rings()
1162 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); in cp_clean_rings()
1163 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_clean_rings()
1164 memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); in cp_clean_rings()
1166 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); in cp_clean_rings()
1167 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); in cp_clean_rings()
1170 static void cp_free_rings (struct cp_private *cp) in cp_free_rings() argument
1172 cp_clean_rings(cp); in cp_free_rings()
1173 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring, in cp_free_rings()
1174 cp->ring_dma); in cp_free_rings()
1175 cp->rx_ring = NULL; in cp_free_rings()
1176 cp->tx_ring = NULL; in cp_free_rings()
1181 struct cp_private *cp = netdev_priv(dev); in cp_open() local
1182 const int irq = cp->pdev->irq; in cp_open()
1185 netif_dbg(cp, ifup, dev, "enabling interface\n"); in cp_open()
1187 rc = cp_alloc_rings(cp); in cp_open()
1191 napi_enable(&cp->napi); in cp_open()
1193 cp_init_hw(cp); in cp_open()
1199 cp_enable_irq(cp); in cp_open()
1202 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); in cp_open()
1208 napi_disable(&cp->napi); in cp_open()
1209 cp_stop_hw(cp); in cp_open()
1210 cp_free_rings(cp); in cp_open()
1216 struct cp_private *cp = netdev_priv(dev); in cp_close() local
1219 napi_disable(&cp->napi); in cp_close()
1221 netif_dbg(cp, ifdown, dev, "disabling interface\n"); in cp_close()
1223 spin_lock_irqsave(&cp->lock, flags); in cp_close()
1228 cp_stop_hw(cp); in cp_close()
1230 spin_unlock_irqrestore(&cp->lock, flags); in cp_close()
1232 free_irq(cp->pdev->irq, dev); in cp_close()
1234 cp_free_rings(cp); in cp_close()
1240 struct cp_private *cp = netdev_priv(dev); in cp_tx_timeout() local
1248 spin_lock_irqsave(&cp->lock, flags); in cp_tx_timeout()
1250 netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n", in cp_tx_timeout()
1251 cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc)); in cp_tx_timeout()
1253 netif_dbg(cp, tx_err, cp->dev, in cp_tx_timeout()
1255 i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1), in cp_tx_timeout()
1256 cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2), in cp_tx_timeout()
1257 le64_to_cpu(cp->tx_ring[i].addr), in cp_tx_timeout()
1258 cp->tx_skb[i]); in cp_tx_timeout()
1261 cp_stop_hw(cp); in cp_tx_timeout()
1262 cp_clean_rings(cp); in cp_tx_timeout()
1263 cp_init_rings(cp); in cp_tx_timeout()
1264 cp_start_hw(cp); in cp_tx_timeout()
1269 napi_schedule_irqoff(&cp->napi); in cp_tx_timeout()
1271 spin_unlock_irqrestore(&cp->lock, flags); in cp_tx_timeout()
1276 struct cp_private *cp = netdev_priv(dev); in cp_change_mtu() local
1281 cp_set_rxbufsize(cp); /* set new rx buf size */ in cp_change_mtu()
1288 cp_set_rxbufsize(cp); in cp_change_mtu()
1305 struct cp_private *cp = netdev_priv(dev); in mdio_read() local
1308 readw(cp->regs + mii_2_8139_map[location]) : 0; in mdio_read()
1315 struct cp_private *cp = netdev_priv(dev); in mdio_write() local
1326 static int netdev_set_wol (struct cp_private *cp, in netdev_set_wol() argument
1353 cp->wol_enabled = (wol->wolopts) ? 1 : 0; in netdev_set_wol()
1359 static void netdev_get_wol (struct cp_private *cp, in netdev_get_wol() argument
1368 if (!cp->wol_enabled) return; in netdev_get_wol()
1383 struct cp_private *cp = netdev_priv(dev); in cp_get_drvinfo() local
1387 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); in cp_get_drvinfo()
1417 struct cp_private *cp = netdev_priv(dev); in cp_get_link_ksettings() local
1420 spin_lock_irqsave(&cp->lock, flags); in cp_get_link_ksettings()
1421 mii_ethtool_get_link_ksettings(&cp->mii_if, cmd); in cp_get_link_ksettings()
1422 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_link_ksettings()
1430 struct cp_private *cp = netdev_priv(dev); in cp_set_link_ksettings() local
1434 spin_lock_irqsave(&cp->lock, flags); in cp_set_link_ksettings()
1435 rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd); in cp_set_link_ksettings()
1436 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_link_ksettings()
1443 struct cp_private *cp = netdev_priv(dev); in cp_nway_reset() local
1444 return mii_nway_restart(&cp->mii_if); in cp_nway_reset()
1449 struct cp_private *cp = netdev_priv(dev); in cp_get_msglevel() local
1450 return cp->msg_enable; in cp_get_msglevel()
1455 struct cp_private *cp = netdev_priv(dev); in cp_set_msglevel() local
1456 cp->msg_enable = value; in cp_set_msglevel()
1461 struct cp_private *cp = netdev_priv(dev); in cp_set_features() local
1467 spin_lock_irqsave(&cp->lock, flags); in cp_set_features()
1470 cp->cpcmd |= RxChkSum; in cp_set_features()
1472 cp->cpcmd &= ~RxChkSum; in cp_set_features()
1475 cp->cpcmd |= RxVlanOn; in cp_set_features()
1477 cp->cpcmd &= ~RxVlanOn; in cp_set_features()
1479 cpw16_f(CpCmd, cp->cpcmd); in cp_set_features()
1480 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_features()
1488 struct cp_private *cp = netdev_priv(dev); in cp_get_regs() local
1496 spin_lock_irqsave(&cp->lock, flags); in cp_get_regs()
1497 memcpy_fromio(p, cp->regs, CP_REGS_SIZE); in cp_get_regs()
1498 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_regs()
1503 struct cp_private *cp = netdev_priv(dev); in cp_get_wol() local
1506 spin_lock_irqsave (&cp->lock, flags); in cp_get_wol()
1507 netdev_get_wol (cp, wol); in cp_get_wol()
1508 spin_unlock_irqrestore (&cp->lock, flags); in cp_get_wol()
1513 struct cp_private *cp = netdev_priv(dev); in cp_set_wol() local
1517 spin_lock_irqsave (&cp->lock, flags); in cp_set_wol()
1518 rc = netdev_set_wol (cp, wol); in cp_set_wol()
1519 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_wol()
1539 struct cp_private *cp = netdev_priv(dev); in cp_get_ethtool_stats() local
1544 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats), in cp_get_ethtool_stats()
1577 tmp_stats[i++] = cp->cp_stats.rx_frags; in cp_get_ethtool_stats()
1580 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma); in cp_get_ethtool_stats()
1606 struct cp_private *cp = netdev_priv(dev); in cp_ioctl() local
1613 spin_lock_irqsave(&cp->lock, flags); in cp_ioctl()
1614 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL); in cp_ioctl()
1615 spin_unlock_irqrestore(&cp->lock, flags); in cp_ioctl()
1621 struct cp_private *cp = netdev_priv(dev); in cp_set_mac_address() local
1629 spin_lock_irq(&cp->lock); in cp_set_mac_address()
1636 spin_unlock_irq(&cp->lock); in cp_set_mac_address()
1760 struct cp_private *cp = netdev_priv(dev); in cp_get_eeprom_len() local
1763 spin_lock_irq(&cp->lock); in cp_get_eeprom_len()
1764 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128; in cp_get_eeprom_len()
1765 spin_unlock_irq(&cp->lock); in cp_get_eeprom_len()
1773 struct cp_private *cp = netdev_priv(dev); in cp_get_eeprom() local
1782 spin_lock_irq(&cp->lock); in cp_get_eeprom()
1784 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; in cp_get_eeprom()
1787 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1793 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1800 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1804 spin_unlock_irq(&cp->lock); in cp_get_eeprom()
1811 struct cp_private *cp = netdev_priv(dev); in cp_set_eeprom() local
1821 spin_lock_irq(&cp->lock); in cp_set_eeprom()
1823 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; in cp_set_eeprom()
1826 val = read_eeprom(cp->regs, offset, addr_len) & 0xff; in cp_set_eeprom()
1828 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1835 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1840 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00; in cp_set_eeprom()
1842 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1845 spin_unlock_irq(&cp->lock); in cp_set_eeprom()
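cp_set_eeprom() (lines 1811-1845) works on 16-bit EEPROM words, so a byte-granular write must read-modify-write the first and last words when the requested offset or length is odd; lines 1826 and 1840 show the existing word being read and masked before the merged value is written back. A small model of that merge, with a plain array standing in for the read_eeprom()/write_eeprom() accessors:

/* Model of the partial-word handling in cp_set_eeprom(): when the byte
 * range starts or ends in the middle of a 16-bit word, the untouched half
 * is preserved by a read-modify-write.  eeprom[] stands in for the real
 * read_eeprom()/write_eeprom() accessors. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t eeprom[128];	/* 256-byte part, 16-bit words */

static void eeprom_write_bytes(const uint8_t *data, unsigned offset,
			       unsigned len)
{
	/* Leading odd byte: keep the low byte of the existing word. */
	if (offset & 1) {
		eeprom[offset >> 1] = (eeprom[offset >> 1] & 0x00ff) |
				      (*data++ << 8);
		offset++;
		len--;
	}
	/* Aligned middle: whole words, little-endian byte order. */
	while (len >= 2) {
		eeprom[offset >> 1] = data[0] | (data[1] << 8);
		data += 2;
		offset += 2;
		len -= 2;
	}
	/* Trailing odd byte: keep the high byte of the existing word. */
	if (len)
		eeprom[offset >> 1] = (eeprom[offset >> 1] & 0xff00) | *data;
}

int main(void)
{
	const uint8_t patch[4] = { 0xaa, 0xbb, 0xcc, 0xdd };

	memset(eeprom, 0x11, sizeof(eeprom));
	eeprom_write_bytes(patch, 1, 4);	/* starts and ends mid-word */
	printf("word0=0x%04x word1=0x%04x word2=0x%04x\n",
	       (unsigned)eeprom[0], (unsigned)eeprom[1], (unsigned)eeprom[2]);
	return 0;
}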
1850 static void cp_set_d3_state (struct cp_private *cp) in cp_set_d3_state() argument
1852 pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */ in cp_set_d3_state()
1853 pci_set_power_state (cp->pdev, PCI_D3hot); in cp_set_d3_state()
1887 struct cp_private *cp; in cp_init_one() local
1908 cp = netdev_priv(dev); in cp_init_one()
1909 cp->pdev = pdev; in cp_init_one()
1910 cp->dev = dev; in cp_init_one()
1911 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug); in cp_init_one()
1912 spin_lock_init (&cp->lock); in cp_init_one()
1913 cp->mii_if.dev = dev; in cp_init_one()
1914 cp->mii_if.mdio_read = mdio_read; in cp_init_one()
1915 cp->mii_if.mdio_write = mdio_write; in cp_init_one()
1916 cp->mii_if.phy_id = CP_INTERNAL_PHY; in cp_init_one()
1917 cp->mii_if.phy_id_mask = 0x1f; in cp_init_one()
1918 cp->mii_if.reg_num_mask = 0x1f; in cp_init_one()
1919 cp_set_rxbufsize(cp); in cp_init_one()
1968 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | in cp_init_one()
1982 cp->regs = regs; in cp_init_one()
1984 cp_stop_hw(cp); in cp_init_one()
1993 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); in cp_init_one()
2024 if (cp->wol_enabled) in cp_init_one()
2025 cp_set_d3_state (cp); in cp_init_one()
2045 struct cp_private *cp = netdev_priv(dev); in cp_remove_one() local
2048 iounmap(cp->regs); in cp_remove_one()
2049 if (cp->wol_enabled) in cp_remove_one()
2060 struct cp_private *cp = netdev_priv(dev); in cp_suspend() local
2069 spin_lock_irqsave (&cp->lock, flags); in cp_suspend()
2075 spin_unlock_irqrestore (&cp->lock, flags); in cp_suspend()
2077 device_set_wakeup_enable(device, cp->wol_enabled); in cp_suspend()
2085 struct cp_private *cp = netdev_priv(dev); in cp_resume() local
2094 cp_init_rings_index (cp); in cp_resume()
2095 cp_init_hw (cp); in cp_resume()
2096 cp_enable_irq(cp); in cp_resume()
2099 spin_lock_irqsave (&cp->lock, flags); in cp_resume()
2101 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); in cp_resume()
2103 spin_unlock_irqrestore (&cp->lock, flags); in cp_resume()