Lines matching refs:np in drivers/net/ethernet/nvidia/forcedeth.c
964 static bool nv_optimized(struct fe_priv *np) in nv_optimized() argument
966 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) in nv_optimized()
1001 struct fe_priv *np = get_nvpriv(dev); in setup_hw_rings() local
1004 if (!nv_optimized(np)) { in setup_hw_rings()
1006 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); in setup_hw_rings()
1008 	writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); in setup_hw_rings()
1011 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); in setup_hw_rings()
1012 writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh); in setup_hw_rings()
1015 	writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr); in setup_hw_rings()
1016 	writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh); in setup_hw_rings()
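The setup_hw_rings() lines above program both ring base addresses from one coherent DMA allocation: the rx ring sits at ring_addr, the tx ring follows it rx_ring_size descriptors later, and on the extended-descriptor path the 64-bit bus address is split across low/high register writes. A minimal userspace sketch of that arithmetic follows; dma_low()/dma_high() and the descriptor layout are modeled on the calls above, not the kernel helpers, and the address is a made-up value.

    #include <stdint.h>
    #include <stdio.h>

    /* Modeled after the dma_low()/dma_high() helpers used above. */
    static uint32_t dma_low(uint64_t addr)  { return (uint32_t)addr; }
    static uint32_t dma_high(uint64_t addr) { return (uint32_t)(addr >> 32); }

    struct ring_desc_ex { uint32_t bufhigh, buflow, txvlan, flaglen; }; /* 16 bytes */

    int main(void)
    {
        uint64_t ring_addr = 0x1234567890ULL;  /* hypothetical bus address */
        unsigned rx_ring_size = 128;

        /* rx ring starts at ring_addr; the tx ring follows it immediately */
        uint64_t tx_addr = ring_addr + rx_ring_size * sizeof(struct ring_desc_ex);

        printf("RxRingPhysAddr     = 0x%08x\n", (unsigned)dma_low(ring_addr));
        printf("RxRingPhysAddrHigh = 0x%08x\n", (unsigned)dma_high(ring_addr));
        printf("TxRingPhysAddr     = 0x%08x\n", (unsigned)dma_low(tx_addr));
        printf("TxRingPhysAddrHigh = 0x%08x\n", (unsigned)dma_high(tx_addr));
        return 0;
    }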
1023 struct fe_priv *np = get_nvpriv(dev); in free_rings() local
1025 if (!nv_optimized(np)) { in free_rings()
1026 if (np->rx_ring.orig) in free_rings()
1027 dma_free_coherent(&np->pci_dev->dev, in free_rings()
1029 (np->rx_ring_size + in free_rings()
1030 np->tx_ring_size), in free_rings()
1031 np->rx_ring.orig, np->ring_addr); in free_rings()
1033 if (np->rx_ring.ex) in free_rings()
1034 dma_free_coherent(&np->pci_dev->dev, in free_rings()
1036 (np->rx_ring_size + in free_rings()
1037 np->tx_ring_size), in free_rings()
1038 np->rx_ring.ex, np->ring_addr); in free_rings()
1040 kfree(np->rx_skb); in free_rings()
1041 kfree(np->tx_skb); in free_rings()
1046 struct fe_priv *np = get_nvpriv(dev); in using_multi_irqs() local
1048 if (!(np->msi_flags & NV_MSI_X_ENABLED) || in using_multi_irqs()
1049 ((np->msi_flags & NV_MSI_X_ENABLED) && in using_multi_irqs()
1050 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) in using_multi_irqs()
1058 struct fe_priv *np = get_nvpriv(dev); in nv_txrx_gate() local
1062 if (!np->mac_in_use && in nv_txrx_gate()
1063 (np->driver_data & DEV_HAS_POWER_CNTRL)) { in nv_txrx_gate()
1075 struct fe_priv *np = get_nvpriv(dev); in nv_enable_irq() local
1078 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_enable_irq()
1079 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); in nv_enable_irq()
1081 enable_irq(np->pci_dev->irq); in nv_enable_irq()
1083 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); in nv_enable_irq()
1084 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); in nv_enable_irq()
1085 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); in nv_enable_irq()
1091 struct fe_priv *np = get_nvpriv(dev); in nv_disable_irq() local
1094 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_disable_irq()
1095 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); in nv_disable_irq()
1097 disable_irq(np->pci_dev->irq); in nv_disable_irq()
1099 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); in nv_disable_irq()
1100 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); in nv_disable_irq()
1101 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); in nv_disable_irq()
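nv_enable_irq() and nv_disable_irq() are symmetric: with no MSI-X, or MSI-X with a single vector, they toggle one interrupt source (the ALL vector or the legacy PCI irq); only with multiple vectors do they touch RX, TX and OTHER individually. A sketch of that dispatch, reproducing the using_multi_irqs() test from the lines above; the flag values here are illustrative, not the driver's actual constants.

    #include <stdbool.h>
    #include <stdio.h>

    #define NV_MSI_X_ENABLED      0x0200  /* illustrative values only */
    #define NV_MSI_X_VECTORS_MASK 0x000f

    /* Mirrors using_multi_irqs(): no MSI-X, or single-vector MSI-X,
     * means a single interrupt source. */
    static bool using_multi_irqs(unsigned msi_flags)
    {
        if (!(msi_flags & NV_MSI_X_ENABLED) ||
            ((msi_flags & NV_MSI_X_ENABLED) &&
             ((msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
            return false;
        return true;
    }

    int main(void)
    {
        unsigned flags = NV_MSI_X_ENABLED | 0x3; /* hypothetical: 3 vectors */
        if (!using_multi_irqs(flags))
            puts("toggle the single ALL vector (or the legacy PCI irq)");
        else
            puts("toggle RX, TX and OTHER vectors individually");
        return 0;
    }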
1115 struct fe_priv *np = get_nvpriv(dev); in nv_disable_hw_interrupts() local
1118 if (np->msi_flags & NV_MSI_X_ENABLED) { in nv_disable_hw_interrupts()
1121 if (np->msi_flags & NV_MSI_ENABLED) in nv_disable_hw_interrupts()
1129 struct fe_priv *np = get_nvpriv(dev); in nv_napi_enable() local
1131 napi_enable(&np->napi); in nv_napi_enable()
1136 struct fe_priv *np = get_nvpriv(dev); in nv_napi_disable() local
1138 napi_disable(&np->napi); in nv_napi_disable()
1184 struct fe_priv *np = netdev_priv(dev); in phy_reset() local
1189 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) in phy_reset()
1198 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_reset()
1206 static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np) in init_realtek_8211b() argument
1223 if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init)) in init_realtek_8211b()
1230 static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np) in init_realtek_8211c() argument
1245 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); in init_realtek_8211c()
1247 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) in init_realtek_8211c()
1249 if (mii_rw(dev, np->phyaddr, in init_realtek_8211c()
1252 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); in init_realtek_8211c()
1255 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) in init_realtek_8211c()
1258 if (mii_rw(dev, np->phyaddr, in init_realtek_8211c()
1265 static int init_realtek_8201(struct net_device *dev, struct fe_priv *np) in init_realtek_8201() argument
1269 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { in init_realtek_8201()
1270 phy_reserved = mii_rw(dev, np->phyaddr, in init_realtek_8201()
1273 if (mii_rw(dev, np->phyaddr, in init_realtek_8201()
1281 static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np) in init_realtek_8201_cross() argument
1286 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1289 phy_reserved = mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1293 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1296 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1304 static int init_cicada(struct net_device *dev, struct fe_priv *np, in init_cicada() argument
1310 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); in init_cicada()
1313 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) in init_cicada()
1315 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); in init_cicada()
1317 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) in init_cicada()
1320 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); in init_cicada()
1322 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) in init_cicada()
1328 static int init_vitesse(struct net_device *dev, struct fe_priv *np) in init_vitesse() argument
1332 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1335 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1338 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1340 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1342 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1346 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1348 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1351 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1354 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1358 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1360 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1362 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1364 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1367 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1370 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1372 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1374 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1378 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1380 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1383 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1392 struct fe_priv *np = get_nvpriv(dev); in phy_init() local
1398 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in phy_init()
1399 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); in phy_init()
1401 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { in phy_init()
1403 pci_name(np->pci_dev)); in phy_init()
1407 if (np->phy_oui == PHY_OUI_REALTEK) { in phy_init()
1408 if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1409 np->phy_rev == PHY_REV_REALTEK_8211B) { in phy_init()
1410 if (init_realtek_8211b(dev, np)) { in phy_init()
1412 pci_name(np->pci_dev)); in phy_init()
1415 } else if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1416 np->phy_rev == PHY_REV_REALTEK_8211C) { in phy_init()
1417 if (init_realtek_8211c(dev, np)) { in phy_init()
1419 pci_name(np->pci_dev)); in phy_init()
1422 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { in phy_init()
1423 if (init_realtek_8201(dev, np)) { in phy_init()
1425 pci_name(np->pci_dev)); in phy_init()
1432 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in phy_init()
1436 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { in phy_init()
1438 pci_name(np->pci_dev)); in phy_init()
1446 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in phy_init()
1448 np->gigabit = PHY_GIGABIT; in phy_init()
1449 mii_control_1000 = mii_rw(dev, np->phyaddr, in phy_init()
1457 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { in phy_init()
1459 pci_name(np->pci_dev)); in phy_init()
1463 np->gigabit = 0; in phy_init()
1465 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_init()
1468 if (np->phy_oui == PHY_OUI_REALTEK && in phy_init()
1469 np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1470 np->phy_rev == PHY_REV_REALTEK_8211C) { in phy_init()
1473 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { in phy_init()
1475 pci_name(np->pci_dev)); in phy_init()
1484 pci_name(np->pci_dev)); in phy_init()
1490 if (np->phy_oui == PHY_OUI_CICADA) { in phy_init()
1491 if (init_cicada(dev, np, phyinterface)) { in phy_init()
1493 pci_name(np->pci_dev)); in phy_init()
1496 } else if (np->phy_oui == PHY_OUI_VITESSE) { in phy_init()
1497 if (init_vitesse(dev, np)) { in phy_init()
1499 pci_name(np->pci_dev)); in phy_init()
1502 } else if (np->phy_oui == PHY_OUI_REALTEK) { in phy_init()
1503 if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1504 np->phy_rev == PHY_REV_REALTEK_8211B) { in phy_init()
1506 if (init_realtek_8211b(dev, np)) { in phy_init()
1508 pci_name(np->pci_dev)); in phy_init()
1511 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { in phy_init()
1512 if (init_realtek_8201(dev, np) || in phy_init()
1513 init_realtek_8201_cross(dev, np)) { in phy_init()
1515 pci_name(np->pci_dev)); in phy_init()
1522 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); in phy_init()
1525 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_init()
1529 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) in phy_init()
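Every PHY helper above follows the same MDIO read-modify-write shape: mii_rw(dev, np->phyaddr, reg, MII_READ) fetches a register, bits are masked in, and mii_rw(..., reg, value) writes it back, with a nonzero return treated as failure. A self-contained sketch of that convention; mii_rw() here is a stand-in modeled on the call sites above, backed by a fake register file.

    #include <stdint.h>
    #include <stdio.h>

    #define MII_READ (-1)  /* passed as "value" to request a read, per the calls above */

    static uint16_t phy_regs[32];  /* fake PHY register file */

    /* Stand-in for mii_rw(): value == MII_READ returns the register
     * contents; otherwise writes it and returns 0 on success. */
    static int mii_rw(int phyaddr, int reg, int value)
    {
        (void)phyaddr;
        if (value == MII_READ)
            return phy_regs[reg];
        phy_regs[reg] = (uint16_t)value;
        return 0;  /* nonzero would mean the MDIO transaction failed */
    }

    int main(void)
    {
        int reg = 0x10, set_bits = 0x0004;

        int val = mii_rw(1, reg, MII_READ);    /* read */
        val |= set_bits;                        /* modify */
        if (mii_rw(1, reg, val))                /* write back */
            fprintf(stderr, "PHY write failed\n");
        printf("reg %#x = %#x\n", (unsigned)reg, (unsigned)mii_rw(1, reg, MII_READ));
        return 0;
    }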
1537 struct fe_priv *np = netdev_priv(dev); in nv_start_rx() local
1542 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { in nv_start_rx()
1547 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_start_rx()
1550 if (np->mac_in_use) in nv_start_rx()
1558 struct fe_priv *np = netdev_priv(dev); in nv_stop_rx() local
1562 if (!np->mac_in_use) in nv_stop_rx()
1573 if (!np->mac_in_use) in nv_stop_rx()
1579 struct fe_priv *np = netdev_priv(dev); in nv_start_tx() local
1584 if (np->mac_in_use) in nv_start_tx()
1592 struct fe_priv *np = netdev_priv(dev); in nv_stop_tx() local
1596 if (!np->mac_in_use) in nv_stop_tx()
1607 if (!np->mac_in_use) in nv_stop_tx()
1626 struct fe_priv *np = netdev_priv(dev); in nv_txrx_reset() local
1629 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); in nv_txrx_reset()
1632 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); in nv_txrx_reset()
1638 struct fe_priv *np = netdev_priv(dev); in nv_mac_reset() local
1642 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); in nv_mac_reset()
1662 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); in nv_mac_reset()
1669 struct fe_priv *np = netdev_priv(dev); in nv_update_stats() local
1676 assert_spin_locked(&np->hwstats_lock); in nv_update_stats()
1679 np->estats.tx_bytes += readl(base + NvRegTxCnt); in nv_update_stats()
1680 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); in nv_update_stats()
1681 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); in nv_update_stats()
1682 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); in nv_update_stats()
1683 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); in nv_update_stats()
1684 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); in nv_update_stats()
1685 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); in nv_update_stats()
1686 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); in nv_update_stats()
1687 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); in nv_update_stats()
1688 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); in nv_update_stats()
1689 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); in nv_update_stats()
1690 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); in nv_update_stats()
1691 np->estats.rx_runt += readl(base + NvRegRxRunt); in nv_update_stats()
1692 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); in nv_update_stats()
1693 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); in nv_update_stats()
1694 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); in nv_update_stats()
1695 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); in nv_update_stats()
1696 np->estats.rx_length_error += readl(base + NvRegRxLenErr); in nv_update_stats()
1697 np->estats.rx_unicast += readl(base + NvRegRxUnicast); in nv_update_stats()
1698 np->estats.rx_multicast += readl(base + NvRegRxMulticast); in nv_update_stats()
1699 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); in nv_update_stats()
1700 np->estats.rx_packets = in nv_update_stats()
1701 np->estats.rx_unicast + in nv_update_stats()
1702 np->estats.rx_multicast + in nv_update_stats()
1703 np->estats.rx_broadcast; in nv_update_stats()
1704 np->estats.rx_errors_total = in nv_update_stats()
1705 np->estats.rx_crc_errors + in nv_update_stats()
1706 np->estats.rx_over_errors + in nv_update_stats()
1707 np->estats.rx_frame_error + in nv_update_stats()
1708 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + in nv_update_stats()
1709 np->estats.rx_late_collision + in nv_update_stats()
1710 np->estats.rx_runt + in nv_update_stats()
1711 np->estats.rx_frame_too_long; in nv_update_stats()
1712 np->estats.tx_errors_total = in nv_update_stats()
1713 np->estats.tx_late_collision + in nv_update_stats()
1714 np->estats.tx_fifo_errors + in nv_update_stats()
1715 np->estats.tx_carrier_errors + in nv_update_stats()
1716 np->estats.tx_excess_deferral + in nv_update_stats()
1717 np->estats.tx_retry_error; in nv_update_stats()
1719 if (np->driver_data & DEV_HAS_STATISTICS_V2) { in nv_update_stats()
1720 np->estats.tx_deferral += readl(base + NvRegTxDef); in nv_update_stats()
1721 np->estats.tx_packets += readl(base + NvRegTxFrame); in nv_update_stats()
1722 np->estats.rx_bytes += readl(base + NvRegRxCnt); in nv_update_stats()
1723 np->estats.tx_pause += readl(base + NvRegTxPause); in nv_update_stats()
1724 np->estats.rx_pause += readl(base + NvRegRxPause); in nv_update_stats()
1725 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); in nv_update_stats()
1726 np->estats.rx_errors_total += np->estats.rx_drop_frame; in nv_update_stats()
1729 if (np->driver_data & DEV_HAS_STATISTICS_V3) { in nv_update_stats()
1730 np->estats.tx_unicast += readl(base + NvRegTxUnicast); in nv_update_stats()
1731 np->estats.tx_multicast += readl(base + NvRegTxMulticast); in nv_update_stats()
1732 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); in nv_update_stats()
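nv_update_stats() drains the hardware's clear-on-read counters into running software totals with +=, then recomputes the derived fields (rx_packets from unicast+multicast+broadcast, the rx/tx error totals from their components) with plain assignment. A small sketch of that accumulate-then-derive pattern; read_counter() stands in for readl() on a clear-on-read register.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for readl() on a clear-on-read counter: returns the count
     * since the previous read and resets the register to zero. */
    static uint32_t read_counter(uint32_t *reg)
    {
        uint32_t v = *reg;
        *reg = 0;
        return v;
    }

    struct estats { uint64_t rx_unicast, rx_multicast, rx_broadcast, rx_packets; };

    int main(void)
    {
        uint32_t hw_uni = 5, hw_multi = 2, hw_bcast = 1;  /* fake registers */
        struct estats es = {0};

        /* Accumulate: each read drains the hardware counter into the total. */
        es.rx_unicast   += read_counter(&hw_uni);
        es.rx_multicast += read_counter(&hw_multi);
        es.rx_broadcast += read_counter(&hw_bcast);

        /* Derive: rx_packets is recomputed from the totals, not accumulated. */
        es.rx_packets = es.rx_unicast + es.rx_multicast + es.rx_broadcast;
        printf("rx_packets = %llu\n", (unsigned long long)es.rx_packets);
        return 0;
    }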
1747 struct fe_priv *np = netdev_priv(dev); in nv_get_stats64() local
1762 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); in nv_get_stats64()
1763 storage->rx_packets = np->stat_rx_packets; in nv_get_stats64()
1764 storage->rx_bytes = np->stat_rx_bytes; in nv_get_stats64()
1765 storage->rx_dropped = np->stat_rx_dropped; in nv_get_stats64()
1766 storage->rx_missed_errors = np->stat_rx_missed_errors; in nv_get_stats64()
1767 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); in nv_get_stats64()
1770 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); in nv_get_stats64()
1771 storage->tx_packets = np->stat_tx_packets; in nv_get_stats64()
1772 storage->tx_bytes = np->stat_tx_bytes; in nv_get_stats64()
1773 storage->tx_dropped = np->stat_tx_dropped; in nv_get_stats64()
1774 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); in nv_get_stats64()
1777 if (np->driver_data & DEV_HAS_STATISTICS_V123) { in nv_get_stats64()
1778 spin_lock_bh(&np->hwstats_lock); in nv_get_stats64()
1783 storage->rx_errors = np->estats.rx_errors_total; in nv_get_stats64()
1784 storage->tx_errors = np->estats.tx_errors_total; in nv_get_stats64()
1787 storage->multicast = np->estats.rx_multicast; in nv_get_stats64()
1790 storage->rx_length_errors = np->estats.rx_length_error; in nv_get_stats64()
1791 storage->rx_over_errors = np->estats.rx_over_errors; in nv_get_stats64()
1792 storage->rx_crc_errors = np->estats.rx_crc_errors; in nv_get_stats64()
1793 storage->rx_frame_errors = np->estats.rx_frame_align_error; in nv_get_stats64()
1794 storage->rx_fifo_errors = np->estats.rx_drop_frame; in nv_get_stats64()
1797 storage->tx_carrier_errors = np->estats.tx_carrier_errors; in nv_get_stats64()
1798 storage->tx_fifo_errors = np->estats.tx_fifo_errors; in nv_get_stats64()
1800 spin_unlock_bh(&np->hwstats_lock); in nv_get_stats64()
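The rx/tx software counters above are snapshotted under u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq(), which is a seqcount read loop: the reader copies the counters, then retries if a writer ran (sequence changed) or was mid-update (sequence odd) during the copy. A single-threaded sketch of just the retry logic, with a hand-rolled syncp in place of the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    struct syncp { volatile uint32_t seq; };

    static uint32_t fetch_begin(const struct syncp *s) { return s->seq; }

    /* Retry if a writer ran (sequence changed) or is running (sequence odd). */
    static int fetch_retry(const struct syncp *s, uint32_t start)
    {
        return (start & 1) || s->seq != start;
    }

    int main(void)
    {
        struct syncp sp = { .seq = 0 };
        uint64_t stat_rx_packets = 42, snapshot;
        uint32_t start;

        do {
            start = fetch_begin(&sp);
            snapshot = stat_rx_packets;   /* copy out the 64-bit counters */
        } while (fetch_retry(&sp, start));

        printf("rx_packets snapshot = %llu\n", (unsigned long long)snapshot);
        return 0;
    }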
1811 struct fe_priv *np = netdev_priv(dev); in nv_alloc_rx() local
1814 less_rx = np->get_rx.orig; in nv_alloc_rx()
1815 if (less_rx-- == np->rx_ring.orig) in nv_alloc_rx()
1816 less_rx = np->last_rx.orig; in nv_alloc_rx()
1818 while (np->put_rx.orig != less_rx) { in nv_alloc_rx()
1819 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); in nv_alloc_rx()
1821 np->put_rx_ctx->skb = skb; in nv_alloc_rx()
1822 np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_alloc_rx()
1826 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_alloc_rx()
1827 np->put_rx_ctx->dma))) { in nv_alloc_rx()
1831 np->put_rx_ctx->dma_len = skb_tailroom(skb); in nv_alloc_rx()
1832 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); in nv_alloc_rx()
1834 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); in nv_alloc_rx()
1835 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) in nv_alloc_rx()
1836 np->put_rx.orig = np->rx_ring.orig; in nv_alloc_rx()
1837 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) in nv_alloc_rx()
1838 np->put_rx_ctx = np->rx_skb; in nv_alloc_rx()
1841 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_alloc_rx()
1842 np->stat_rx_dropped++; in nv_alloc_rx()
1843 u64_stats_update_end(&np->swstats_rx_syncp); in nv_alloc_rx()
1852 struct fe_priv *np = netdev_priv(dev); in nv_alloc_rx_optimized() local
1855 less_rx = np->get_rx.ex; in nv_alloc_rx_optimized()
1856 if (less_rx-- == np->rx_ring.ex) in nv_alloc_rx_optimized()
1857 less_rx = np->last_rx.ex; in nv_alloc_rx_optimized()
1859 while (np->put_rx.ex != less_rx) { in nv_alloc_rx_optimized()
1860 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); in nv_alloc_rx_optimized()
1862 np->put_rx_ctx->skb = skb; in nv_alloc_rx_optimized()
1863 np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_alloc_rx_optimized()
1867 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_alloc_rx_optimized()
1868 np->put_rx_ctx->dma))) { in nv_alloc_rx_optimized()
1872 np->put_rx_ctx->dma_len = skb_tailroom(skb); in nv_alloc_rx_optimized()
1873 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); in nv_alloc_rx_optimized()
1874 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); in nv_alloc_rx_optimized()
1876 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); in nv_alloc_rx_optimized()
1877 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) in nv_alloc_rx_optimized()
1878 np->put_rx.ex = np->rx_ring.ex; in nv_alloc_rx_optimized()
1879 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) in nv_alloc_rx_optimized()
1880 np->put_rx_ctx = np->rx_skb; in nv_alloc_rx_optimized()
1883 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_alloc_rx_optimized()
1884 np->stat_rx_dropped++; in nv_alloc_rx_optimized()
1885 u64_stats_update_end(&np->swstats_rx_syncp); in nv_alloc_rx_optimized()
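Both refill loops compute less_rx as one descriptor behind get_rx and fill put_rx up to, but never onto, that slot, so one descriptor always stays empty; that gap is what distinguishes a full ring from an empty one when the put and get positions coincide. A sketch of the invariant using plain indices instead of the driver's descriptor pointers.

    #include <stdio.h>

    #define RING_SIZE 8

    int main(void)
    {
        unsigned put = 0, get = 0, filled = 0;

        /* "less_rx" is one slot behind get, with wraparound. */
        unsigned less = (get + RING_SIZE - 1) % RING_SIZE;

        /* Fill until put would land on less: one slot always stays empty. */
        while (put != less) {
            put = (put + 1) % RING_SIZE;
            filled++;
        }
        printf("filled %u of %u slots\n", filled, RING_SIZE); /* RING_SIZE - 1 */
        return 0;
    }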
1895 struct fe_priv *np = from_timer(np, t, oom_kick); in nv_do_rx_refill() local
1898 napi_schedule(&np->napi); in nv_do_rx_refill()
1903 struct fe_priv *np = netdev_priv(dev); in nv_init_rx() local
1906 np->get_rx = np->rx_ring; in nv_init_rx()
1907 np->put_rx = np->rx_ring; in nv_init_rx()
1909 if (!nv_optimized(np)) in nv_init_rx()
1910 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; in nv_init_rx()
1912 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; in nv_init_rx()
1913 np->get_rx_ctx = np->rx_skb; in nv_init_rx()
1914 np->put_rx_ctx = np->rx_skb; in nv_init_rx()
1915 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; in nv_init_rx()
1917 for (i = 0; i < np->rx_ring_size; i++) { in nv_init_rx()
1918 if (!nv_optimized(np)) { in nv_init_rx()
1919 np->rx_ring.orig[i].flaglen = 0; in nv_init_rx()
1920 np->rx_ring.orig[i].buf = 0; in nv_init_rx()
1922 np->rx_ring.ex[i].flaglen = 0; in nv_init_rx()
1923 np->rx_ring.ex[i].txvlan = 0; in nv_init_rx()
1924 np->rx_ring.ex[i].bufhigh = 0; in nv_init_rx()
1925 np->rx_ring.ex[i].buflow = 0; in nv_init_rx()
1927 np->rx_skb[i].skb = NULL; in nv_init_rx()
1928 np->rx_skb[i].dma = 0; in nv_init_rx()
1934 struct fe_priv *np = netdev_priv(dev); in nv_init_tx() local
1937 np->get_tx = np->tx_ring; in nv_init_tx()
1938 np->put_tx = np->tx_ring; in nv_init_tx()
1940 if (!nv_optimized(np)) in nv_init_tx()
1941 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; in nv_init_tx()
1943 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; in nv_init_tx()
1944 np->get_tx_ctx = np->tx_skb; in nv_init_tx()
1945 np->put_tx_ctx = np->tx_skb; in nv_init_tx()
1946 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; in nv_init_tx()
1947 netdev_reset_queue(np->dev); in nv_init_tx()
1948 np->tx_pkts_in_progress = 0; in nv_init_tx()
1949 np->tx_change_owner = NULL; in nv_init_tx()
1950 np->tx_end_flip = NULL; in nv_init_tx()
1951 np->tx_stop = 0; in nv_init_tx()
1953 for (i = 0; i < np->tx_ring_size; i++) { in nv_init_tx()
1954 if (!nv_optimized(np)) { in nv_init_tx()
1955 np->tx_ring.orig[i].flaglen = 0; in nv_init_tx()
1956 np->tx_ring.orig[i].buf = 0; in nv_init_tx()
1958 np->tx_ring.ex[i].flaglen = 0; in nv_init_tx()
1959 np->tx_ring.ex[i].txvlan = 0; in nv_init_tx()
1960 np->tx_ring.ex[i].bufhigh = 0; in nv_init_tx()
1961 np->tx_ring.ex[i].buflow = 0; in nv_init_tx()
1963 np->tx_skb[i].skb = NULL; in nv_init_tx()
1964 np->tx_skb[i].dma = 0; in nv_init_tx()
1965 np->tx_skb[i].dma_len = 0; in nv_init_tx()
1966 np->tx_skb[i].dma_single = 0; in nv_init_tx()
1967 np->tx_skb[i].first_tx_desc = NULL; in nv_init_tx()
1968 np->tx_skb[i].next_tx_ctx = NULL; in nv_init_tx()
1974 struct fe_priv *np = netdev_priv(dev); in nv_init_ring() local
1979 if (!nv_optimized(np)) in nv_init_ring()
1985 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) in nv_unmap_txskb() argument
1989 dma_unmap_single(&np->pci_dev->dev, tx_skb->dma, in nv_unmap_txskb()
1993 dma_unmap_page(&np->pci_dev->dev, tx_skb->dma, in nv_unmap_txskb()
2000 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) in nv_release_txskb() argument
2002 nv_unmap_txskb(np, tx_skb); in nv_release_txskb()
2013 struct fe_priv *np = netdev_priv(dev); in nv_drain_tx() local
2016 for (i = 0; i < np->tx_ring_size; i++) { in nv_drain_tx()
2017 if (!nv_optimized(np)) { in nv_drain_tx()
2018 np->tx_ring.orig[i].flaglen = 0; in nv_drain_tx()
2019 np->tx_ring.orig[i].buf = 0; in nv_drain_tx()
2021 np->tx_ring.ex[i].flaglen = 0; in nv_drain_tx()
2022 np->tx_ring.ex[i].txvlan = 0; in nv_drain_tx()
2023 np->tx_ring.ex[i].bufhigh = 0; in nv_drain_tx()
2024 np->tx_ring.ex[i].buflow = 0; in nv_drain_tx()
2026 if (nv_release_txskb(np, &np->tx_skb[i])) { in nv_drain_tx()
2027 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_drain_tx()
2028 np->stat_tx_dropped++; in nv_drain_tx()
2029 u64_stats_update_end(&np->swstats_tx_syncp); in nv_drain_tx()
2031 np->tx_skb[i].dma = 0; in nv_drain_tx()
2032 np->tx_skb[i].dma_len = 0; in nv_drain_tx()
2033 np->tx_skb[i].dma_single = 0; in nv_drain_tx()
2034 np->tx_skb[i].first_tx_desc = NULL; in nv_drain_tx()
2035 np->tx_skb[i].next_tx_ctx = NULL; in nv_drain_tx()
2037 np->tx_pkts_in_progress = 0; in nv_drain_tx()
2038 np->tx_change_owner = NULL; in nv_drain_tx()
2039 np->tx_end_flip = NULL; in nv_drain_tx()
2044 struct fe_priv *np = netdev_priv(dev); in nv_drain_rx() local
2047 for (i = 0; i < np->rx_ring_size; i++) { in nv_drain_rx()
2048 if (!nv_optimized(np)) { in nv_drain_rx()
2049 np->rx_ring.orig[i].flaglen = 0; in nv_drain_rx()
2050 np->rx_ring.orig[i].buf = 0; in nv_drain_rx()
2052 np->rx_ring.ex[i].flaglen = 0; in nv_drain_rx()
2053 np->rx_ring.ex[i].txvlan = 0; in nv_drain_rx()
2054 np->rx_ring.ex[i].bufhigh = 0; in nv_drain_rx()
2055 np->rx_ring.ex[i].buflow = 0; in nv_drain_rx()
2058 if (np->rx_skb[i].skb) { in nv_drain_rx()
2059 dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma, in nv_drain_rx()
2060 (skb_end_pointer(np->rx_skb[i].skb) - in nv_drain_rx()
2061 np->rx_skb[i].skb->data), in nv_drain_rx()
2063 dev_kfree_skb(np->rx_skb[i].skb); in nv_drain_rx()
2064 np->rx_skb[i].skb = NULL; in nv_drain_rx()
2075 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) in nv_get_empty_tx_slots() argument
2077 	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size)); in nv_get_empty_tx_slots()
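The pointer difference put_tx_ctx - get_tx_ctx counts in-flight descriptors but can be negative once put wraps past the end of the ring; adding tx_ring_size before the modulo normalizes it into [0, tx_ring_size), and subtracting from the ring size yields the free slots. A worked sketch of the same arithmetic with indices in place of the nv_skb_map pointers.

    #include <stdio.h>

    /* Same arithmetic as nv_get_empty_tx_slots(), with indices standing
     * in for the put_tx_ctx/get_tx_ctx pointers. */
    static unsigned empty_tx_slots(unsigned ring_size, unsigned put, unsigned get)
    {
        /* (put - get) may be "negative" after put wraps; adding ring_size
         * before the modulo keeps the in-flight count in [0, ring_size). */
        return ring_size - ((ring_size + put - get) % ring_size);
    }

    int main(void)
    {
        printf("%u\n", empty_tx_slots(256, 10, 10));  /* empty ring: 256 */
        printf("%u\n", empty_tx_slots(256, 13, 10));  /* 3 in flight: 253 */
        printf("%u\n", empty_tx_slots(256, 2, 250));  /* wrapped: 248 */
        return 0;
    }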
2194 struct fe_priv *np = netdev_priv(dev); in nv_start_xmit() local
2196 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); in nv_start_xmit()
2219 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit()
2220 empty_slots = nv_get_empty_tx_slots(np); in nv_start_xmit()
2223 np->tx_stop = 1; in nv_start_xmit()
2224 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2227 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2229 start_tx = put_tx = np->put_tx.orig; in nv_start_xmit()
2234 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_start_xmit()
2237 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit()
2238 np->put_tx_ctx->dma))) { in nv_start_xmit()
2241 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit()
2242 np->stat_tx_dropped++; in nv_start_xmit()
2243 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit()
2246 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit()
2247 np->put_tx_ctx->dma_single = 1; in nv_start_xmit()
2248 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); in nv_start_xmit()
2251 tx_flags = np->tx_flags; in nv_start_xmit()
2254 if (unlikely(put_tx++ == np->last_tx.orig)) in nv_start_xmit()
2255 put_tx = np->tx_ring.orig; in nv_start_xmit()
2256 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2257 np->put_tx_ctx = np->tx_skb; in nv_start_xmit()
2268 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; in nv_start_xmit()
2271 np->put_tx_ctx->dma = skb_frag_dma_map( in nv_start_xmit()
2272 &np->pci_dev->dev, in nv_start_xmit()
2276 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit()
2277 np->put_tx_ctx->dma))) { in nv_start_xmit()
2281 nv_unmap_txskb(np, start_tx_ctx); in nv_start_xmit()
2282 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2283 tmp_tx_ctx = np->tx_skb; in nv_start_xmit()
2284 } while (tmp_tx_ctx != np->put_tx_ctx); in nv_start_xmit()
2286 np->put_tx_ctx = start_tx_ctx; in nv_start_xmit()
2287 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit()
2288 np->stat_tx_dropped++; in nv_start_xmit()
2289 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit()
2293 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit()
2294 np->put_tx_ctx->dma_single = 0; in nv_start_xmit()
2295 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); in nv_start_xmit()
2300 if (unlikely(put_tx++ == np->last_tx.orig)) in nv_start_xmit()
2301 put_tx = np->tx_ring.orig; in nv_start_xmit()
2302 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2303 np->put_tx_ctx = np->tx_skb; in nv_start_xmit()
2307 if (unlikely(put_tx == np->tx_ring.orig)) in nv_start_xmit()
2308 prev_tx = np->last_tx.orig; in nv_start_xmit()
2312 if (unlikely(np->put_tx_ctx == np->tx_skb)) in nv_start_xmit()
2313 prev_tx_ctx = np->last_tx_ctx; in nv_start_xmit()
2315 prev_tx_ctx = np->put_tx_ctx - 1; in nv_start_xmit()
2329 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit()
2334 netdev_sent_queue(np->dev, skb->len); in nv_start_xmit()
2338 np->put_tx.orig = put_tx; in nv_start_xmit()
2340 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2342 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_start_xmit()
2349 struct fe_priv *np = netdev_priv(dev); in nv_start_xmit_optimized() local
2375 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit_optimized()
2376 empty_slots = nv_get_empty_tx_slots(np); in nv_start_xmit_optimized()
2379 np->tx_stop = 1; in nv_start_xmit_optimized()
2380 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2383 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2385 start_tx = put_tx = np->put_tx.ex; in nv_start_xmit_optimized()
2386 start_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2391 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, in nv_start_xmit_optimized()
2394 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit_optimized()
2395 np->put_tx_ctx->dma))) { in nv_start_xmit_optimized()
2398 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2399 np->stat_tx_dropped++; in nv_start_xmit_optimized()
2400 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2403 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit_optimized()
2404 np->put_tx_ctx->dma_single = 1; in nv_start_xmit_optimized()
2405 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2406 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2412 if (unlikely(put_tx++ == np->last_tx.ex)) in nv_start_xmit_optimized()
2413 put_tx = np->tx_ring.ex; in nv_start_xmit_optimized()
2414 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2415 np->put_tx_ctx = np->tx_skb; in nv_start_xmit_optimized()
2427 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2428 np->put_tx_ctx->dma = skb_frag_dma_map( in nv_start_xmit_optimized()
2429 &np->pci_dev->dev, in nv_start_xmit_optimized()
2434 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_start_xmit_optimized()
2435 np->put_tx_ctx->dma))) { in nv_start_xmit_optimized()
2439 nv_unmap_txskb(np, start_tx_ctx); in nv_start_xmit_optimized()
2440 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2441 tmp_tx_ctx = np->tx_skb; in nv_start_xmit_optimized()
2442 } while (tmp_tx_ctx != np->put_tx_ctx); in nv_start_xmit_optimized()
2444 np->put_tx_ctx = start_tx_ctx; in nv_start_xmit_optimized()
2445 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2446 np->stat_tx_dropped++; in nv_start_xmit_optimized()
2447 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2450 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit_optimized()
2451 np->put_tx_ctx->dma_single = 0; in nv_start_xmit_optimized()
2452 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2453 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2458 if (unlikely(put_tx++ == np->last_tx.ex)) in nv_start_xmit_optimized()
2459 put_tx = np->tx_ring.ex; in nv_start_xmit_optimized()
2460 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2461 np->put_tx_ctx = np->tx_skb; in nv_start_xmit_optimized()
2465 if (unlikely(put_tx == np->tx_ring.ex)) in nv_start_xmit_optimized()
2466 prev_tx = np->last_tx.ex; in nv_start_xmit_optimized()
2470 if (unlikely(np->put_tx_ctx == np->tx_skb)) in nv_start_xmit_optimized()
2471 prev_tx_ctx = np->last_tx_ctx; in nv_start_xmit_optimized()
2473 prev_tx_ctx = np->put_tx_ctx - 1; in nv_start_xmit_optimized()
2494 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit_optimized()
2496 if (np->tx_limit) { in nv_start_xmit_optimized()
2502 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { in nv_start_xmit_optimized()
2503 if (!np->tx_change_owner) in nv_start_xmit_optimized()
2504 np->tx_change_owner = start_tx_ctx; in nv_start_xmit_optimized()
2509 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2510 np->tx_end_flip = np->put_tx_ctx; in nv_start_xmit_optimized()
2512 np->tx_pkts_in_progress++; in nv_start_xmit_optimized()
2519 netdev_sent_queue(np->dev, skb->len); in nv_start_xmit_optimized()
2523 np->put_tx.ex = put_tx; in nv_start_xmit_optimized()
2525 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2527 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_start_xmit_optimized()
2533 struct fe_priv *np = netdev_priv(dev); in nv_tx_flip_ownership() local
2535 np->tx_pkts_in_progress--; in nv_tx_flip_ownership()
2536 if (np->tx_change_owner) { in nv_tx_flip_ownership()
2537 np->tx_change_owner->first_tx_desc->flaglen |= in nv_tx_flip_ownership()
2539 np->tx_pkts_in_progress++; in nv_tx_flip_ownership()
2541 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; in nv_tx_flip_ownership()
2542 if (np->tx_change_owner == np->tx_end_flip) in nv_tx_flip_ownership()
2543 np->tx_change_owner = NULL; in nv_tx_flip_ownership()
2545 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_tx_flip_ownership()
2556 struct fe_priv *np = netdev_priv(dev); in nv_tx_done() local
2559 struct ring_desc *orig_get_tx = np->get_tx.orig; in nv_tx_done()
2562 while ((np->get_tx.orig != np->put_tx.orig) && in nv_tx_done()
2563 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && in nv_tx_done()
2566 nv_unmap_txskb(np, np->get_tx_ctx); in nv_tx_done()
2568 if (np->desc_ver == DESC_VER_1) { in nv_tx_done()
2575 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done()
2576 np->stat_tx_packets++; in nv_tx_done()
2577 np->stat_tx_bytes += np->get_tx_ctx->skb->len; in nv_tx_done()
2578 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done()
2580 bytes_compl += np->get_tx_ctx->skb->len; in nv_tx_done()
2581 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done()
2582 np->get_tx_ctx->skb = NULL; in nv_tx_done()
2592 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done()
2593 np->stat_tx_packets++; in nv_tx_done()
2594 np->stat_tx_bytes += np->get_tx_ctx->skb->len; in nv_tx_done()
2595 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done()
2597 bytes_compl += np->get_tx_ctx->skb->len; in nv_tx_done()
2598 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done()
2599 np->get_tx_ctx->skb = NULL; in nv_tx_done()
2603 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) in nv_tx_done()
2604 np->get_tx.orig = np->tx_ring.orig; in nv_tx_done()
2605 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) in nv_tx_done()
2606 np->get_tx_ctx = np->tx_skb; in nv_tx_done()
2609 netdev_completed_queue(np->dev, tx_work, bytes_compl); in nv_tx_done()
2611 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { in nv_tx_done()
2612 np->tx_stop = 0; in nv_tx_done()
2620 struct fe_priv *np = netdev_priv(dev); in nv_tx_done_optimized() local
2623 struct ring_desc_ex *orig_get_tx = np->get_tx.ex; in nv_tx_done_optimized()
2626 while ((np->get_tx.ex != np->put_tx.ex) && in nv_tx_done_optimized()
2627 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && in nv_tx_done_optimized()
2630 nv_unmap_txskb(np, np->get_tx_ctx); in nv_tx_done_optimized()
2636 if (np->driver_data & DEV_HAS_GEAR_MODE) in nv_tx_done_optimized()
2642 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done_optimized()
2643 np->stat_tx_packets++; in nv_tx_done_optimized()
2644 np->stat_tx_bytes += np->get_tx_ctx->skb->len; in nv_tx_done_optimized()
2645 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done_optimized()
2648 bytes_cleaned += np->get_tx_ctx->skb->len; in nv_tx_done_optimized()
2649 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done_optimized()
2650 np->get_tx_ctx->skb = NULL; in nv_tx_done_optimized()
2653 if (np->tx_limit) in nv_tx_done_optimized()
2657 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) in nv_tx_done_optimized()
2658 np->get_tx.ex = np->tx_ring.ex; in nv_tx_done_optimized()
2659 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) in nv_tx_done_optimized()
2660 np->get_tx_ctx = np->tx_skb; in nv_tx_done_optimized()
2663 netdev_completed_queue(np->dev, tx_work, bytes_cleaned); in nv_tx_done_optimized()
2665 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { in nv_tx_done_optimized()
2666 np->tx_stop = 0; in nv_tx_done_optimized()
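Both completion paths walk from get_tx toward put_tx, stopping as soon as a descriptor still has its VALID bit set (the NIC still owns it) or the caller's work limit is reached; each completed slot is unmapped, its skb freed, and both the descriptor and context pointers wrap back to the ring start when they pass the last entry. A sketch of that bounded walk on a flag array, with made-up flag values.

    #include <stdio.h>

    #define RING_SIZE  8
    #define DESC_VALID 0x1  /* set while the NIC still owns the descriptor */

    int main(void)
    {
        /* Slots 0-2 completed by the NIC, slot 3 still owned by it. */
        unsigned flaglen[RING_SIZE] = { 0, 0, 0, DESC_VALID, 0, 0, 0, 0 };
        unsigned get = 0, put = 5, limit = 4, tx_work = 0;

        while (get != put &&                      /* ring not drained */
               !(flaglen[get] & DESC_VALID) &&    /* NIC is done with slot */
               tx_work < limit) {                 /* respect the work budget */
            /* here the driver unmaps the buffer and frees the skb */
            tx_work++;
            if (++get == RING_SIZE)               /* wrap, like the last_tx checks */
                get = 0;
        }
        printf("completed %u descriptors, get now %u\n", tx_work, get);
        return 0;
    }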
2678 struct fe_priv *np = netdev_priv(dev); in nv_tx_timeout() local
2684 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_tx_timeout()
2694 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); in nv_tx_timeout()
2696 for (i = 0; i <= np->register_size; i += 32) { in nv_tx_timeout()
2707 for (i = 0; i < np->tx_ring_size; i += 4) { in nv_tx_timeout()
2708 if (!nv_optimized(np)) { in nv_tx_timeout()
2713 le32_to_cpu(np->tx_ring.orig[i].buf), in nv_tx_timeout()
2714 le32_to_cpu(np->tx_ring.orig[i].flaglen), in nv_tx_timeout()
2715 le32_to_cpu(np->tx_ring.orig[i+1].buf), in nv_tx_timeout()
2716 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), in nv_tx_timeout()
2717 le32_to_cpu(np->tx_ring.orig[i+2].buf), in nv_tx_timeout()
2718 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), in nv_tx_timeout()
2719 le32_to_cpu(np->tx_ring.orig[i+3].buf), in nv_tx_timeout()
2720 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); in nv_tx_timeout()
2728 le32_to_cpu(np->tx_ring.ex[i].bufhigh), in nv_tx_timeout()
2729 le32_to_cpu(np->tx_ring.ex[i].buflow), in nv_tx_timeout()
2730 le32_to_cpu(np->tx_ring.ex[i].flaglen), in nv_tx_timeout()
2731 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), in nv_tx_timeout()
2732 le32_to_cpu(np->tx_ring.ex[i+1].buflow), in nv_tx_timeout()
2733 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), in nv_tx_timeout()
2734 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), in nv_tx_timeout()
2735 le32_to_cpu(np->tx_ring.ex[i+2].buflow), in nv_tx_timeout()
2736 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), in nv_tx_timeout()
2737 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), in nv_tx_timeout()
2738 le32_to_cpu(np->tx_ring.ex[i+3].buflow), in nv_tx_timeout()
2739 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); in nv_tx_timeout()
2744 spin_lock_irq(&np->lock); in nv_tx_timeout()
2750 saved_tx_limit = np->tx_limit; in nv_tx_timeout()
2751 np->tx_limit = 0; /* prevent giving HW any limited pkts */ in nv_tx_timeout()
2752 np->tx_stop = 0; /* prevent waking tx queue */ in nv_tx_timeout()
2753 if (!nv_optimized(np)) in nv_tx_timeout()
2754 nv_tx_done(dev, np->tx_ring_size); in nv_tx_timeout()
2756 nv_tx_done_optimized(dev, np->tx_ring_size); in nv_tx_timeout()
2759 if (np->tx_change_owner) in nv_tx_timeout()
2760 put_tx.ex = np->tx_change_owner->first_tx_desc; in nv_tx_timeout()
2762 put_tx = np->put_tx; in nv_tx_timeout()
2769 np->get_tx = np->put_tx = put_tx; in nv_tx_timeout()
2770 np->tx_limit = saved_tx_limit; in nv_tx_timeout()
2775 spin_unlock_irq(&np->lock); in nv_tx_timeout()
2823 struct fe_priv *np = netdev_priv(dev); in nv_rx_process() local
2829 while ((np->get_rx.orig != np->put_rx.orig) && in nv_rx_process()
2830 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && in nv_rx_process()
2838 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, in nv_rx_process()
2839 np->get_rx_ctx->dma_len, in nv_rx_process()
2841 skb = np->get_rx_ctx->skb; in nv_rx_process()
2842 np->get_rx_ctx->skb = NULL; in nv_rx_process()
2845 if (np->desc_ver == DESC_VER_1) { in nv_rx_process()
2864 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process()
2865 np->stat_rx_missed_errors++; in nv_rx_process()
2866 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process()
2909 napi_gro_receive(&np->napi, skb); in nv_rx_process()
2910 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process()
2911 np->stat_rx_packets++; in nv_rx_process()
2912 np->stat_rx_bytes += len; in nv_rx_process()
2913 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process()
2915 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) in nv_rx_process()
2916 np->get_rx.orig = np->rx_ring.orig; in nv_rx_process()
2917 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) in nv_rx_process()
2918 np->get_rx_ctx = np->rx_skb; in nv_rx_process()
2928 struct fe_priv *np = netdev_priv(dev); in nv_rx_process_optimized() local
2935 while ((np->get_rx.ex != np->put_rx.ex) && in nv_rx_process_optimized()
2936 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && in nv_rx_process_optimized()
2944 dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, in nv_rx_process_optimized()
2945 np->get_rx_ctx->dma_len, in nv_rx_process_optimized()
2947 skb = np->get_rx_ctx->skb; in nv_rx_process_optimized()
2948 np->get_rx_ctx->skb = NULL; in nv_rx_process_optimized()
2982 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); in nv_rx_process_optimized()
2995 napi_gro_receive(&np->napi, skb); in nv_rx_process_optimized()
2996 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process_optimized()
2997 np->stat_rx_packets++; in nv_rx_process_optimized()
2998 np->stat_rx_bytes += len; in nv_rx_process_optimized()
2999 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process_optimized()
3004 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) in nv_rx_process_optimized()
3005 np->get_rx.ex = np->rx_ring.ex; in nv_rx_process_optimized()
3006 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) in nv_rx_process_optimized()
3007 np->get_rx_ctx = np->rx_skb; in nv_rx_process_optimized()
3017 struct fe_priv *np = netdev_priv(dev); in set_bufsize() local
3020 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; in set_bufsize()
3022 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; in set_bufsize()
3031 struct fe_priv *np = netdev_priv(dev); in nv_change_mtu() local
3054 spin_lock(&np->lock); in nv_change_mtu()
3063 if (!np->in_shutdown) in nv_change_mtu()
3064 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_change_mtu()
3067 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_change_mtu()
3069 	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), base + NvRegRingSizes); in nv_change_mtu()
3072 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_change_mtu()
3077 spin_unlock(&np->lock); in nv_change_mtu()
3105 struct fe_priv *np = netdev_priv(dev); in nv_set_mac_address() local
3117 spin_lock_irq(&np->lock); in nv_set_mac_address()
3127 spin_unlock_irq(&np->lock); in nv_set_mac_address()
3142 struct fe_priv *np = netdev_priv(dev); in nv_set_multicast() local
3189 spin_lock_irq(&np->lock); in nv_set_multicast()
3197 spin_unlock_irq(&np->lock); in nv_set_multicast()
3202 struct fe_priv *np = netdev_priv(dev); in nv_update_pause() local
3205 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); in nv_update_pause()
3207 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { in nv_update_pause()
3211 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_update_pause()
3216 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { in nv_update_pause()
3220 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) in nv_update_pause()
3222 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { in nv_update_pause()
3229 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_update_pause()
3239 struct fe_priv *np = netdev_priv(dev); in nv_force_linkspeed() local
3244 np->linkspeed = NVREG_LINKSPEED_FORCE|speed; in nv_force_linkspeed()
3245 np->duplex = duplex; in nv_force_linkspeed()
3248 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_force_linkspeed()
3250 np->gigabit = PHY_GIGABIT; in nv_force_linkspeed()
3253 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) in nv_force_linkspeed()
3255 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) in nv_force_linkspeed()
3257 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) in nv_force_linkspeed()
3264 if (np->duplex == 0) in nv_force_linkspeed()
3266 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) in nv_force_linkspeed()
3268 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3274 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3284 if (np->desc_ver == DESC_VER_1) { in nv_force_linkspeed()
3287 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3295 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), in nv_force_linkspeed()
3298 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_force_linkspeed()
3315 struct fe_priv *np = netdev_priv(dev); in nv_update_linkspeed() local
3320 int newls = np->linkspeed; in nv_update_linkspeed()
3321 int newdup = np->duplex; in nv_update_linkspeed()
3332 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_update_linkspeed()
3345 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_update_linkspeed()
3346 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_update_linkspeed()
3355 if (np->autoneg == 0) { in nv_update_linkspeed()
3356 if (np->fixed_mode & LPA_100FULL) { in nv_update_linkspeed()
3359 } else if (np->fixed_mode & LPA_100HALF) { in nv_update_linkspeed()
3362 } else if (np->fixed_mode & LPA_10FULL) { in nv_update_linkspeed()
3381 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_update_linkspeed()
3382 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); in nv_update_linkspeed()
3385 if (np->gigabit == PHY_GIGABIT) { in nv_update_linkspeed()
3386 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_update_linkspeed()
3387 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); in nv_update_linkspeed()
3417 if (np->duplex == newdup && np->linkspeed == newls) in nv_update_linkspeed()
3420 np->duplex = newdup; in nv_update_linkspeed()
3421 np->linkspeed = newls; in nv_update_linkspeed()
3433 if (np->gigabit == PHY_GIGABIT) { in nv_update_linkspeed()
3436 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || in nv_update_linkspeed()
3437 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) in nv_update_linkspeed()
3439 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3446 if (np->duplex == 0) in nv_update_linkspeed()
3448 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) in nv_update_linkspeed()
3450 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3454 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ in nv_update_linkspeed()
3456 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { in nv_update_linkspeed()
3459 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { in nv_update_linkspeed()
3460 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) in nv_update_linkspeed()
3469 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) in nv_update_linkspeed()
3476 if (np->desc_ver == DESC_VER_1) { in nv_update_linkspeed()
3479 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3486 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), in nv_update_linkspeed()
3489 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_update_linkspeed()
3494 if (netif_running(dev) && (np->duplex != 0)) { in nv_update_linkspeed()
3495 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { in nv_update_linkspeed()
3503 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_update_linkspeed()
3514 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_update_linkspeed()
3522 pause_flags = np->pause_flags; in nv_update_linkspeed()
3566 static void nv_msi_workaround(struct fe_priv *np) in nv_msi_workaround() argument
3572 if (np->msi_flags & NV_MSI_ENABLED) { in nv_msi_workaround()
3573 u8 __iomem *base = np->base; in nv_msi_workaround()
3582 struct fe_priv *np = netdev_priv(dev); in nv_change_interrupt_mode() local
3587 np->quiet_count = 0; in nv_change_interrupt_mode()
3588 if (np->irqmask != NVREG_IRQMASK_CPU) { in nv_change_interrupt_mode()
3589 np->irqmask = NVREG_IRQMASK_CPU; in nv_change_interrupt_mode()
3593 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { in nv_change_interrupt_mode()
3594 np->quiet_count++; in nv_change_interrupt_mode()
3598 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { in nv_change_interrupt_mode()
3599 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_change_interrupt_mode()
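Taken together, the nv_change_interrupt_mode() lines above suggest simple hysteresis for the dynamic moderation mode: a busy pass resets quiet_count and selects NVREG_IRQMASK_CPU, while NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet passes fall back to NVREG_IRQMASK_THROUGHPUT. A sketch of that state machine under those assumptions; the threshold value and the meaning attached to each mask are inferred, not taken from the source.

    #include <stdio.h>

    enum irqmask { MASK_THROUGHPUT, MASK_CPU };

    #define MAX_QUIET 4  /* stand-in for NV_DYNAMIC_MAX_QUIET_COUNT */

    static enum irqmask mask = MASK_THROUGHPUT;
    static unsigned quiet_count;

    /* One pass: 'busy' means meaningful tx/rx work was done. */
    static void interrupt_mode_pass(int busy)
    {
        if (busy) {
            quiet_count = 0;         /* busy: stay in (or enter) the CPU mask */
            mask = MASK_CPU;
        } else if (quiet_count < MAX_QUIET) {
            quiet_count++;           /* not quiet for long enough yet */
        } else {
            mask = MASK_THROUGHPUT;  /* sustained quiet: fall back */
        }
    }

    int main(void)
    {
        int i;
        interrupt_mode_pass(1);
        for (i = 0; i <= MAX_QUIET; i++)
            interrupt_mode_pass(0);
        printf("mask = %s\n", mask == MASK_CPU ? "CPU" : "THROUGHPUT");
        return 0;
    }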
3611 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq() local
3614 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq()
3615 np->events = readl(base + NvRegIrqStatus); in nv_nic_irq()
3616 writel(np->events, base + NvRegIrqStatus); in nv_nic_irq()
3618 np->events = readl(base + NvRegMSIXIrqStatus); in nv_nic_irq()
3619 writel(np->events, base + NvRegMSIXIrqStatus); in nv_nic_irq()
3621 if (!(np->events & np->irqmask)) in nv_nic_irq()
3624 nv_msi_workaround(np); in nv_nic_irq()
3626 if (napi_schedule_prep(&np->napi)) { in nv_nic_irq()
3631 __napi_schedule(&np->napi); in nv_nic_irq()
3644 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_optimized() local
3647 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq_optimized()
3648 np->events = readl(base + NvRegIrqStatus); in nv_nic_irq_optimized()
3649 writel(np->events, base + NvRegIrqStatus); in nv_nic_irq_optimized()
3651 np->events = readl(base + NvRegMSIXIrqStatus); in nv_nic_irq_optimized()
3652 writel(np->events, base + NvRegMSIXIrqStatus); in nv_nic_irq_optimized()
3654 if (!(np->events & np->irqmask)) in nv_nic_irq_optimized()
3657 nv_msi_workaround(np); in nv_nic_irq_optimized()
3659 if (napi_schedule_prep(&np->napi)) { in nv_nic_irq_optimized()
3664 __napi_schedule(&np->napi); in nv_nic_irq_optimized()
3673 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_tx() local
3683 if (!(events & np->irqmask)) in nv_nic_irq_tx()
3686 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_tx()
3688 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_tx()
3691 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_tx()
3696 if (!np->in_shutdown) { in nv_nic_irq_tx()
3697 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; in nv_nic_irq_tx()
3698 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_tx()
3700 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_tx()
3713 struct fe_priv *np = container_of(napi, struct fe_priv, napi); in nv_napi_poll() local
3714 struct net_device *dev = np->dev; in nv_napi_poll()
3721 if (!nv_optimized(np)) { in nv_napi_poll()
3722 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3723 tx_work += nv_tx_done(dev, np->tx_ring_size); in nv_napi_poll()
3724 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3729 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3730 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); in nv_napi_poll()
3731 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3741 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3742 if (!np->in_shutdown) in nv_napi_poll()
3743 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_napi_poll()
3744 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3749 if (unlikely(np->events & NVREG_IRQ_LINK)) { in nv_napi_poll()
3750 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3752 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3754 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { in nv_napi_poll()
3755 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3757 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3758 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_napi_poll()
3760 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { in nv_napi_poll()
3761 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3762 if (!np->in_shutdown) { in nv_napi_poll()
3763 np->nic_poll_irq = np->irqmask; in nv_napi_poll()
3764 np->recover_error = 1; in nv_napi_poll()
3765 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_napi_poll()
3767 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3777 writel(np->irqmask, base + NvRegIrqMask); in nv_napi_poll()
3785 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_rx() local
3795 if (!(events & np->irqmask)) in nv_nic_irq_rx()
3800 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_rx()
3801 if (!np->in_shutdown) in nv_nic_irq_rx()
3802 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_nic_irq_rx()
3803 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_rx()
3808 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_rx()
3813 if (!np->in_shutdown) { in nv_nic_irq_rx()
3814 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; in nv_nic_irq_rx()
3815 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_rx()
3817 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_rx()
3830 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_other() local
3840 if (!(events & np->irqmask)) in nv_nic_irq_other()
3844 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3846 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3849 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3851 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3853 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { in nv_nic_irq_other()
3854 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3856 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3857 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_nic_irq_other()
3860 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3865 if (!np->in_shutdown) { in nv_nic_irq_other()
3866 np->nic_poll_irq |= NVREG_IRQ_OTHER; in nv_nic_irq_other()
3867 np->recover_error = 1; in nv_nic_irq_other()
3868 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_other()
3870 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3874 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3879 if (!np->in_shutdown) { in nv_nic_irq_other()
3880 np->nic_poll_irq |= NVREG_IRQ_OTHER; in nv_nic_irq_other()
3881 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_other()
3883 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3897 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_test() local
3901 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq_test()
3912 nv_msi_workaround(np); in nv_nic_irq_test()
3914 spin_lock(&np->lock); in nv_nic_irq_test()
3915 np->intr_test = 1; in nv_nic_irq_test()
3916 spin_unlock(&np->lock); in nv_nic_irq_test()
3947 struct fe_priv *np = get_nvpriv(dev); in nv_request_irq() local
3956 if (nv_optimized(np)) in nv_request_irq()
3962 if (np->msi_flags & NV_MSI_X_CAPABLE) { in nv_request_irq()
3963 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) in nv_request_irq()
3964 np->msi_x_entry[i].entry = i; in nv_request_irq()
3965 ret = pci_enable_msix_range(np->pci_dev, in nv_request_irq()
3966 np->msi_x_entry, in nv_request_irq()
3967 np->msi_flags & NV_MSI_X_VECTORS_MASK, in nv_request_irq()
3968 np->msi_flags & NV_MSI_X_VECTORS_MASK); in nv_request_irq()
3970 np->msi_flags |= NV_MSI_X_ENABLED; in nv_request_irq()
3973 sprintf(np->name_rx, "%s-rx", dev->name); in nv_request_irq()
3974 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, in nv_request_irq()
3975 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev); in nv_request_irq()
3980 pci_disable_msix(np->pci_dev); in nv_request_irq()
3981 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
3985 sprintf(np->name_tx, "%s-tx", dev->name); in nv_request_irq()
3986 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, in nv_request_irq()
3987 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev); in nv_request_irq()
3992 pci_disable_msix(np->pci_dev); in nv_request_irq()
3993 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
3997 sprintf(np->name_other, "%s-other", dev->name); in nv_request_irq()
3998 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, in nv_request_irq()
3999 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev); in nv_request_irq()
4004 pci_disable_msix(np->pci_dev); in nv_request_irq()
4005 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4016 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, in nv_request_irq()
4022 pci_disable_msix(np->pci_dev); in nv_request_irq()
4023 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4035 if (np->msi_flags & NV_MSI_CAPABLE) { in nv_request_irq()
4036 ret = pci_enable_msi(np->pci_dev); in nv_request_irq()
4038 np->msi_flags |= NV_MSI_ENABLED; in nv_request_irq()
4039 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev); in nv_request_irq()
4043 pci_disable_msi(np->pci_dev); in nv_request_irq()
4044 np->msi_flags &= ~NV_MSI_ENABLED; in nv_request_irq()
4058 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) in nv_request_irq()
4063 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); in nv_request_irq()
4065 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); in nv_request_irq()
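
A minimal sketch of the MSI-X bring-up pattern listed above, assuming one vector each for rx, tx and link events. pci_enable_msix_range() with min == max makes the allocation all-or-nothing, and a failed request_irq() unwinds by disabling MSI-X so the caller can fall back to MSI or INTx. Names prefixed my_ are illustrative, not from the driver:

    #include <linux/netdevice.h>
    #include <linux/pci.h>
    #include <linux/interrupt.h>

    enum { VEC_RX, VEC_TX, VEC_OTHER, VEC_MAX };

    struct my_dev {                          /* fe_priv keeps the same data */
            struct pci_dev *pdev;
            struct msix_entry entries[VEC_MAX];
            char name_rx[IFNAMSIZ + 3];      /* "-rx" suffix, as in the driver */
    };

    static int my_setup_msix(struct my_dev *d, struct net_device *ndev,
                             irq_handler_t rx_handler)
    {
            int i, ret;

            for (i = 0; i < VEC_MAX; i++)
                    d->entries[i].entry = i;        /* ask for vectors 0..2 */

            /* min == max: grant exactly VEC_MAX vectors or fail */
            ret = pci_enable_msix_range(d->pdev, d->entries, VEC_MAX, VEC_MAX);
            if (ret < 0)
                    return ret;                     /* fall back to MSI/INTx */

            snprintf(d->name_rx, sizeof(d->name_rx), "%s-rx", ndev->name);
            ret = request_irq(d->entries[VEC_RX].vector, rx_handler,
                              IRQF_SHARED, d->name_rx, ndev);
            if (ret) {
                    pci_disable_msix(d->pdev);      /* unwind on failure */
                    return ret;
            }
            /* the tx and other vectors follow the same pattern */
            return 0;
    }
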
4072 struct fe_priv *np = get_nvpriv(dev); in nv_free_irq() local
4075 if (np->msi_flags & NV_MSI_X_ENABLED) { in nv_free_irq()
4076 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) in nv_free_irq()
4077 free_irq(np->msi_x_entry[i].vector, dev); in nv_free_irq()
4078 pci_disable_msix(np->pci_dev); in nv_free_irq()
4079 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_free_irq()
4081 free_irq(np->pci_dev->irq, dev); in nv_free_irq()
4082 if (np->msi_flags & NV_MSI_ENABLED) { in nv_free_irq()
4083 pci_disable_msi(np->pci_dev); in nv_free_irq()
4084 np->msi_flags &= ~NV_MSI_ENABLED; in nv_free_irq()
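
The matching teardown, sketched under the same assumptions: every vector handed to request_irq() is freed before pci_disable_msix(), and a plain-MSI setup frees pdev->irq first and then calls pci_disable_msi():

    static void my_free_irqs(struct my_dev *d, struct net_device *ndev,
                             bool msix, bool msi)
    {
            int i;

            if (msix) {
                    for (i = 0; i < VEC_MAX; i++)
                            free_irq(d->entries[i].vector, ndev);
                    pci_disable_msix(d->pdev);        /* after all free_irq() calls */
            } else {
                    free_irq(d->pdev->irq, ndev);
                    if (msi)
                            pci_disable_msi(d->pdev); /* pdev->irq reverts to INTx */
            }
    }
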
4091 struct fe_priv *np = from_timer(np, t, nic_poll); in nv_do_nic_poll() local
4092 struct net_device *dev = np->dev; in nv_do_nic_poll()
4105 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_do_nic_poll()
4106 irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector; in nv_do_nic_poll()
4108 irq = np->pci_dev->irq; in nv_do_nic_poll()
4109 mask = np->irqmask; in nv_do_nic_poll()
4111 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { in nv_do_nic_poll()
4112 irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector; in nv_do_nic_poll()
4115 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { in nv_do_nic_poll()
4116 irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector; in nv_do_nic_poll()
4119 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { in nv_do_nic_poll()
4120 irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector; in nv_do_nic_poll()
4128 if (np->recover_error) { in nv_do_nic_poll()
4129 np->recover_error = 0; in nv_do_nic_poll()
4134 spin_lock(&np->lock); in nv_do_nic_poll()
4137 if (np->driver_data & DEV_HAS_POWER_CNTRL) in nv_do_nic_poll()
4145 if (!np->in_shutdown) in nv_do_nic_poll()
4146 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_do_nic_poll()
4149 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_do_nic_poll()
4151 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_do_nic_poll()
4154 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_do_nic_poll()
4157 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_do_nic_poll()
4164 spin_unlock(&np->lock); in nv_do_nic_poll()
4174 np->nic_poll_irq = 0; in nv_do_nic_poll()
4175 if (nv_optimized(np)) in nv_do_nic_poll()
4180 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { in nv_do_nic_poll()
4181 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; in nv_do_nic_poll()
4184 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { in nv_do_nic_poll()
4185 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; in nv_do_nic_poll()
4188 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { in nv_do_nic_poll()
4189 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; in nv_do_nic_poll()
4200 struct fe_priv *np = netdev_priv(dev); in nv_poll_controller() local
4202 nv_do_nic_poll(&np->nic_poll); in nv_poll_controller()
4210 struct fe_priv *np = from_timer(np, t, stats_poll); in nv_do_stats_poll() local
4211 struct net_device *dev = np->dev; in nv_do_stats_poll()
4215 if (spin_trylock(&np->hwstats_lock)) { in nv_do_stats_poll()
4217 spin_unlock(&np->hwstats_lock); in nv_do_stats_poll()
4220 if (!np->in_shutdown) in nv_do_stats_poll()
4221 mod_timer(&np->stats_poll, in nv_do_stats_poll()
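
Both nv_do_nic_poll() and nv_do_stats_poll() use the timer_setup()/from_timer() idiom: the callback receives the timer_list pointer and recovers the enclosing private struct by field name. A condensed sketch of the stats variant, with my_ names and the interval as assumptions:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/spinlock.h>

    #define MY_STATS_INTERVAL (10 * HZ)     /* illustrative period */

    struct my_priv {
            struct timer_list stats_poll;
            spinlock_t hwstats_lock;
            bool in_shutdown;
    };

    static void my_stats_poll(struct timer_list *t)
    {
            struct my_priv *p = from_timer(p, t, stats_poll);

            /* best effort: skip this round if someone holds the lock */
            if (spin_trylock(&p->hwstats_lock)) {
                    /* ... read hardware counters ... */
                    spin_unlock(&p->hwstats_lock);
            }
            if (!p->in_shutdown)            /* re-arm until close */
                    mod_timer(&p->stats_poll, jiffies + MY_STATS_INTERVAL);
    }

    /* at probe: timer_setup(&p->stats_poll, my_stats_poll, TIMER_DEFERRABLE); */
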
4227 struct fe_priv *np = netdev_priv(dev); in nv_get_drvinfo() local
4230 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in nv_get_drvinfo()
4235 struct fe_priv *np = netdev_priv(dev); in nv_get_wol() local
4238 spin_lock_irq(&np->lock); in nv_get_wol()
4239 if (np->wolenabled) in nv_get_wol()
4241 spin_unlock_irq(&np->lock); in nv_get_wol()
4246 struct fe_priv *np = netdev_priv(dev); in nv_set_wol() local
4251 np->wolenabled = 0; in nv_set_wol()
4253 np->wolenabled = 1; in nv_set_wol()
4257 spin_lock_irq(&np->lock); in nv_set_wol()
4259 spin_unlock_irq(&np->lock); in nv_set_wol()
4261 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); in nv_set_wol()
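
A sketch of the ethtool WoL pair above, assuming the device only supports magic-packet wake (forcedeth's case): get reports the cached flag, set validates the request, updates the flag, programs the hardware, and mirrors the state into the PM core via device_set_wakeup_enable(). The two-field my_priv stands in for fe_priv:

    #include <linux/netdevice.h>
    #include <linux/ethtool.h>
    #include <linux/pci.h>
    #include <linux/pm_wakeup.h>

    struct my_priv {
            struct pci_dev *pci_dev;
            int wolenabled;
    };

    static void my_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
    {
            struct my_priv *p = netdev_priv(ndev);

            wol->supported = WAKE_MAGIC;
            wol->wolopts = p->wolenabled ? WAKE_MAGIC : 0;
    }

    static int my_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
    {
            struct my_priv *p = netdev_priv(ndev);

            if (wol->wolopts & ~WAKE_MAGIC)
                    return -EINVAL;         /* only magic packet supported */
            p->wolenabled = !!(wol->wolopts & WAKE_MAGIC);
            /* ... write the wakeup register under the device lock ... */
            device_set_wakeup_enable(&p->pci_dev->dev, p->wolenabled);
            return 0;
    }
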
4268 struct fe_priv *np = netdev_priv(dev); in nv_get_link_ksettings() local
4272 spin_lock_irq(&np->lock); in nv_get_link_ksettings()
4285 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { in nv_get_link_ksettings()
4300 if (np->duplex) in nv_get_link_ksettings()
4307 cmd->base.autoneg = np->autoneg; in nv_get_link_ksettings()
4310 if (np->autoneg) { in nv_get_link_ksettings()
4312 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_get_link_ksettings()
4321 if (np->gigabit == PHY_GIGABIT) { in nv_get_link_ksettings()
4322 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_get_link_ksettings()
4331 if (np->gigabit == PHY_GIGABIT) in nv_get_link_ksettings()
4334 cmd->base.phy_address = np->phyaddr; in nv_get_link_ksettings()
4342 spin_unlock_irq(&np->lock); in nv_get_link_ksettings()
4349 struct fe_priv *np = netdev_priv(dev); in nv_set_link_ksettings() local
4358 if (cmd->base.phy_address != np->phyaddr) { in nv_set_link_ksettings()
4368 if (np->gigabit == PHY_GIGABIT) in nv_set_link_ksettings()
4395 spin_lock_irqsave(&np->lock, flags); in nv_set_link_ksettings()
4406 spin_unlock_irqrestore(&np->lock, flags); in nv_set_link_ksettings()
4414 np->autoneg = 1; in nv_set_link_ksettings()
4417 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_link_ksettings()
4427 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx p… in nv_set_link_ksettings()
4429 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_set_link_ksettings()
4431 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_link_ksettings()
4433 if (np->gigabit == PHY_GIGABIT) { in nv_set_link_ksettings()
4434 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_set_link_ksettings()
4438 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); in nv_set_link_ksettings()
4443 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_link_ksettings()
4444 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in nv_set_link_ksettings()
4454 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_link_ksettings()
4459 np->autoneg = 0; in nv_set_link_ksettings()
4461 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_link_ksettings()
4471 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); in nv_set_link_ksettings()
4472 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx p… in nv_set_link_ksettings()
4474 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_set_link_ksettings()
4476 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { in nv_set_link_ksettings()
4478 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_set_link_ksettings()
4480 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_link_ksettings()
4481 np->fixed_mode = adv; in nv_set_link_ksettings()
4483 if (np->gigabit == PHY_GIGABIT) { in nv_set_link_ksettings()
4484 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_set_link_ksettings()
4486 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); in nv_set_link_ksettings()
4489 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_link_ksettings()
4491 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) in nv_set_link_ksettings()
4493 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) in nv_set_link_ksettings()
4495 if (np->phy_oui == PHY_OUI_MARVELL) { in nv_set_link_ksettings()
4502 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_link_ksettings()
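
The autoneg path above is a read-modify-write of the PHY advertisement register through the driver's mii_rw() accessor (MII_READ is its read sentinel). Isolating just the pause logic as a sketch: rx pause advertises both the symmetric and asymmetric bits, tx-only pause advertises the asymmetric bit alone:

    #include <linux/netdevice.h>
    #include <linux/mii.h>

    #define MII_READ (-1)   /* forcedeth's read sentinel for mii_rw() */
    int mii_rw(struct net_device *dev, int addr, int miireg, int value);

    static void my_advertise_pause(struct net_device *ndev, int phyaddr,
                                   bool rx_pause, bool tx_pause)
    {
            int adv = mii_rw(ndev, phyaddr, MII_ADVERTISE, MII_READ);

            adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
            if (rx_pause)   /* rx: set both advertisement bits */
                    adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
            if (tx_pause)   /* tx-only: asymmetric direction bit */
                    adv |= ADVERTISE_PAUSE_ASYM;
            mii_rw(ndev, phyaddr, MII_ADVERTISE, adv);
            /* then restart autoneg so the partner sees the new word */
    }
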
4523 struct fe_priv *np = netdev_priv(dev); in nv_get_regs_len() local
4524 return np->register_size; in nv_get_regs_len()
4529 struct fe_priv *np = netdev_priv(dev); in nv_get_regs() local
4535 spin_lock_irq(&np->lock); in nv_get_regs()
4536 for (i = 0; i < np->register_size/sizeof(u32); i++) in nv_get_regs()
4538 spin_unlock_irq(&np->lock); in nv_get_regs()
4543 struct fe_priv *np = netdev_priv(dev); in nv_nway_reset() local
4546 if (np->autoneg) { in nv_nway_reset()
4554 spin_lock(&np->lock); in nv_nway_reset()
4557 spin_unlock(&np->lock); in nv_nway_reset()
4563 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_nway_reset()
4564 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in nv_nway_reset()
4573 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_nway_reset()
4590 struct fe_priv *np = netdev_priv(dev); in nv_get_ringparam() local
4592 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; in nv_get_ringparam()
4593 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; in nv_get_ringparam()
4595 ring->rx_pending = np->rx_ring_size; in nv_get_ringparam()
4596 ring->tx_pending = np->tx_ring_size; in nv_get_ringparam()
4601 struct fe_priv *np = netdev_priv(dev); in nv_set_ringparam() local
4610 (np->desc_ver == DESC_VER_1 && in nv_set_ringparam()
4613 (np->desc_ver != DESC_VER_1 && in nv_set_ringparam()
4620 if (!nv_optimized(np)) { in nv_set_ringparam()
4621 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4627 rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4639 if (!nv_optimized(np)) { in nv_set_ringparam()
4641 dma_free_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4648 dma_free_coherent(&np->pci_dev->dev, in nv_set_ringparam()
4665 spin_lock(&np->lock); in nv_set_ringparam()
4676 np->rx_ring_size = ring->rx_pending; in nv_set_ringparam()
4677 np->tx_ring_size = ring->tx_pending; in nv_set_ringparam()
4679 if (!nv_optimized(np)) { in nv_set_ringparam()
4680 np->rx_ring.orig = (struct ring_desc *)rxtx_ring; in nv_set_ringparam()
4681 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; in nv_set_ringparam()
4683 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring; in nv_set_ringparam()
4684 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; in nv_set_ringparam()
4686 np->rx_skb = (struct nv_skb_map *)rx_skbuff; in nv_set_ringparam()
4687 np->tx_skb = (struct nv_skb_map *)tx_skbuff; in nv_set_ringparam()
4688 np->ring_addr = ring_addr; in nv_set_ringparam()
4690 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); in nv_set_ringparam()
4691 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); in nv_set_ringparam()
4697 if (!np->in_shutdown) in nv_set_ringparam()
4698 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_set_ringparam()
4702 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_set_ringparam()
4704 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_set_ringparam()
4707 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_set_ringparam()
4712 spin_unlock(&np->lock); in nv_set_ringparam()
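
nv_set_ringparam() allocates the rx and tx descriptors as one coherent DMA block and splits it, which is exactly the layout setup_hw_rings() later programs (tx base = ring base + rx_ring_size descriptors). A sketch of that allocation for the legacy descriptor format, error handling trimmed:

    #include <linux/dma-mapping.h>
    #include <linux/types.h>

    struct ring_desc {              /* legacy 32-bit layout, per the driver */
            __le32 buf;
            __le32 flaglen;
    };

    static int my_alloc_rings(struct device *dmadev, unsigned int rx_n,
                              unsigned int tx_n, struct ring_desc **rx,
                              struct ring_desc **tx, dma_addr_t *ring_addr)
    {
            /* one block: rx descriptors first, tx immediately behind */
            *rx = dma_alloc_coherent(dmadev,
                                     sizeof(struct ring_desc) * (rx_n + tx_n),
                                     ring_addr, GFP_KERNEL);
            if (!*rx)
                    return -ENOMEM;
            *tx = *rx + rx_n;       /* tx ring starts after the rx ring */
            /* hardware: rx base = *ring_addr,
               tx base = *ring_addr + rx_n * sizeof(struct ring_desc) */
            return 0;
    }
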
4725 struct fe_priv *np = netdev_priv(dev); in nv_get_pauseparam() local
4727 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; in nv_get_pauseparam()
4728 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; in nv_get_pauseparam()
4729 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; in nv_get_pauseparam()
4734 struct fe_priv *np = netdev_priv(dev); in nv_set_pauseparam() local
4737 if ((!np->autoneg && np->duplex == 0) || in nv_set_pauseparam()
4738 (np->autoneg && !pause->autoneg && np->duplex == 0)) { in nv_set_pauseparam()
4742 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { in nv_set_pauseparam()
4752 spin_lock(&np->lock); in nv_set_pauseparam()
4755 spin_unlock(&np->lock); in nv_set_pauseparam()
4760 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); in nv_set_pauseparam()
4762 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; in nv_set_pauseparam()
4764 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; in nv_set_pauseparam()
4766 if (np->autoneg && pause->autoneg) { in nv_set_pauseparam()
4767 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; in nv_set_pauseparam()
4769 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_pauseparam()
4771 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pa… in nv_set_pauseparam()
4773 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_set_pauseparam()
4775 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_pauseparam()
4779 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_pauseparam()
4781 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_pauseparam()
4783 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); in nv_set_pauseparam()
4785 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_set_pauseparam()
4787 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_set_pauseparam()
4792 nv_update_pause(dev, np->pause_flags); in nv_set_pauseparam()
4804 struct fe_priv *np = netdev_priv(dev); in nv_set_loopback() local
4809 spin_lock_irqsave(&np->lock, flags); in nv_set_loopback()
4810 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_loopback()
4813 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4820 err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol); in nv_set_loopback()
4823 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4833 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4839 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4845 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4850 spin_lock_irqsave(&np->lock, flags); in nv_set_loopback()
4852 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4869 struct fe_priv *np = get_nvpriv(dev); in nv_vlan_mode() local
4871 spin_lock_irq(&np->lock); in nv_vlan_mode()
4874 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; in nv_vlan_mode()
4876 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; in nv_vlan_mode()
4879 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; in nv_vlan_mode()
4881 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; in nv_vlan_mode()
4883 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_vlan_mode()
4885 spin_unlock_irq(&np->lock); in nv_vlan_mode()
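
nv_vlan_mode() and nv_set_features() both follow the same cached-control-word pattern: flip bits in the in-memory txrxctl_bits copy under the lock, then push the final word to NvRegTxRxControl with a single writel(). A generic sketch; the register handle and bit values here are placeholders:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    #define MY_CTL_VLANSTRIP 0x00040000     /* placeholder bit values */
    #define MY_CTL_VLANINS   0x00080000

    static void my_vlan_mode(u32 *cached_ctl, void __iomem *reg,
                             spinlock_t *lock, bool strip, bool insert)
    {
            spin_lock_irq(lock);            /* serialise with the irq path */
            if (strip)
                    *cached_ctl |= MY_CTL_VLANSTRIP;
            else
                    *cached_ctl &= ~MY_CTL_VLANSTRIP;
            if (insert)
                    *cached_ctl |= MY_CTL_VLANINS;
            else
                    *cached_ctl &= ~MY_CTL_VLANINS;
            writel(*cached_ctl, reg);       /* one write with the final state */
            spin_unlock_irq(lock);
    }
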
4890 struct fe_priv *np = netdev_priv(dev); in nv_set_features() local
4902 spin_lock_irq(&np->lock); in nv_set_features()
4905 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; in nv_set_features()
4907 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; in nv_set_features()
4910 writel(np->txrxctl_bits, base + NvRegTxRxControl); in nv_set_features()
4912 spin_unlock_irq(&np->lock); in nv_set_features()
4923 struct fe_priv *np = netdev_priv(dev); in nv_get_sset_count() local
4927 if (np->driver_data & DEV_HAS_TEST_EXTENDED) in nv_get_sset_count()
4932 if (np->driver_data & DEV_HAS_STATISTICS_V3) in nv_get_sset_count()
4934 else if (np->driver_data & DEV_HAS_STATISTICS_V2) in nv_get_sset_count()
4936 else if (np->driver_data & DEV_HAS_STATISTICS_V1) in nv_get_sset_count()
4950 struct fe_priv *np = netdev_priv(dev); in nv_get_ethtool_stats() local
4952 spin_lock_bh(&np->hwstats_lock); in nv_get_ethtool_stats()
4954 memcpy(buffer, &np->estats, in nv_get_ethtool_stats()
4956 spin_unlock_bh(&np->hwstats_lock); in nv_get_ethtool_stats()
4961 struct fe_priv *np = netdev_priv(dev); in nv_link_test() local
4964 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_link_test()
4965 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_link_test()
5004 struct fe_priv *np = netdev_priv(dev); in nv_interrupt_test() local
5017 np->intr_test = 0; in nv_interrupt_test()
5020 save_msi_flags = np->msi_flags; in nv_interrupt_test()
5021 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; in nv_interrupt_test()
5022 np->msi_flags |= 0x001; /* setup 1 vector */ in nv_interrupt_test()
5035 spin_lock_irq(&np->lock); in nv_interrupt_test()
5038 testcnt = np->intr_test; in nv_interrupt_test()
5043 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_interrupt_test()
5048 spin_unlock_irq(&np->lock); in nv_interrupt_test()
5052 np->msi_flags = save_msi_flags; in nv_interrupt_test()
5067 struct fe_priv *np = netdev_priv(dev); in nv_loopback_test() local
5071 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); in nv_loopback_test()
5096 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_loopback_test()
5098 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_loopback_test()
5112 test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data, in nv_loopback_test()
5115 if (unlikely(dma_mapping_error(&np->pci_dev->dev, in nv_loopback_test()
5124 if (!nv_optimized(np)) { in nv_loopback_test()
5125 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); in nv_loopback_test()
5126 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); in nv_loopback_test()
5128 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); in nv_loopback_test()
5129 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); in nv_loopback_test()
5130 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); in nv_loopback_test()
5132 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_loopback_test()
5138 if (!nv_optimized(np)) { in nv_loopback_test()
5139 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); in nv_loopback_test()
5140 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); in nv_loopback_test()
5143 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); in nv_loopback_test()
5144 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); in nv_loopback_test()
5149 } else if (np->desc_ver == DESC_VER_1) { in nv_loopback_test()
5161 rx_skb = np->rx_skb[0].skb; in nv_loopback_test()
5171 dma_unmap_single(&np->pci_dev->dev, test_dma_addr, in nv_loopback_test()
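
The loopback test hand-fills descriptor 0 directly: the mapped buffer address goes into the descriptor in whichever of the two formats the chip speaks, and the flaglen word carries (length - 1) plus the valid/lastpacket flags. A sketch of that fill, reusing the descriptor layouts from the listing (flags left as an opaque parameter):

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct ring_desc    { __le32 buf; __le32 flaglen; };
    struct ring_desc_ex { __le32 bufhigh; __le32 buflow;
                          __le32 txvlan;  __le32 flaglen; };

    static void my_fill_test_desc(void *ring, bool extended, dma_addr_t addr,
                                  unsigned int len, u32 flags)
    {
            if (!extended) {
                    struct ring_desc *d = ring;

                    /* legacy descriptors only carry a 32-bit address */
                    d->buf     = cpu_to_le32(lower_32_bits(addr));
                    d->flaglen = cpu_to_le32((len - 1) | flags); /* hw wants len-1 */
            } else {
                    struct ring_desc_ex *d = ring;

                    d->bufhigh = cpu_to_le32(upper_32_bits(addr));
                    d->buflow  = cpu_to_le32(lower_32_bits(addr));
                    d->flaglen = cpu_to_le32((len - 1) | flags);
            }
    }
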
5193 struct fe_priv *np = netdev_priv(dev); in nv_self_test() local
5211 spin_lock_irq(&np->lock); in nv_self_test()
5212 nv_disable_hw_interrupts(dev, np->irqmask); in nv_self_test()
5213 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_self_test()
5222 spin_unlock_irq(&np->lock); in nv_self_test()
5251 if (!np->in_shutdown) in nv_self_test()
5252 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_self_test()
5255 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_self_test()
5257 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_self_test()
5260 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_self_test()
5266 nv_enable_hw_interrupts(dev, np->irqmask); in nv_self_test()
5307 struct fe_priv *np = netdev_priv(dev); in nv_mgmt_acquire_sema() local
5331 np->mgmt_sema = 1; in nv_mgmt_acquire_sema()
5342 struct fe_priv *np = netdev_priv(dev); in nv_mgmt_release_sema() local
5346 if (np->driver_data & DEV_HAS_MGMT_UNIT) { in nv_mgmt_release_sema()
5347 if (np->mgmt_sema) { in nv_mgmt_release_sema()
5358 struct fe_priv *np = netdev_priv(dev); in nv_mgmt_get_version() local
5380 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; in nv_mgmt_get_version()
5387 struct fe_priv *np = netdev_priv(dev); in nv_open() local
5394 mii_rw(dev, np->phyaddr, MII_BMCR, in nv_open()
5395 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); in nv_open()
5399 if (np->driver_data & DEV_HAS_POWER_CNTRL) in nv_open()
5412 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) in nv_open()
5424 np->in_shutdown = 0; in nv_open()
5428 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_open()
5431 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_open()
5432 if (np->desc_ver == DESC_VER_1) in nv_open()
5436 writel(np->txrxctl_bits, base + NvRegTxRxControl); in nv_open()
5437 writel(np->vlanctl_bits, base + NvRegVlanControl); in nv_open()
5439 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); in nv_open()
5453 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_open()
5459 if (np->desc_ver == DESC_VER_1) { in nv_open()
5462 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { in nv_open()
5480 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, in nv_open()
5484 if (np->wolenabled) in nv_open()
5495 nv_disable_hw_interrupts(dev, np->irqmask); in nv_open()
5505 nv_enable_hw_interrupts(dev, np->irqmask); in nv_open()
5507 spin_lock_irq(&np->lock); in nv_open()
5521 np->linkspeed = 0; in nv_open()
5534 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_open()
5537 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) in nv_open()
5538 mod_timer(&np->stats_poll, in nv_open()
5541 spin_unlock_irq(&np->lock); in nv_open()
5557 struct fe_priv *np = netdev_priv(dev); in nv_close() local
5560 spin_lock_irq(&np->lock); in nv_close()
5561 np->in_shutdown = 1; in nv_close()
5562 spin_unlock_irq(&np->lock); in nv_close()
5564 synchronize_irq(np->pci_dev->irq); in nv_close()
5566 del_timer_sync(&np->oom_kick); in nv_close()
5567 del_timer_sync(&np->nic_poll); in nv_close()
5568 del_timer_sync(&np->stats_poll); in nv_close()
5571 spin_lock_irq(&np->lock); in nv_close()
5578 nv_disable_hw_interrupts(dev, np->irqmask); in nv_close()
5581 spin_unlock_irq(&np->lock); in nv_close()
5587 if (np->wolenabled || !phy_power_down) { in nv_close()
5593 mii_rw(dev, np->phyaddr, MII_BMCR, in nv_close()
5594 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN); in nv_close()
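
nv_open() and nv_close() bracket the PHY power state with the same nested read-modify-write of BMCR: read, clear or set BMCR_PDOWN, write back (close only powers the PHY down when WoL is off and phy_power_down allows it). Spelled out as a sketch:

    #include <linux/netdevice.h>
    #include <linux/mii.h>

    #define MII_READ (-1)   /* forcedeth's read sentinel for mii_rw() */
    int mii_rw(struct net_device *dev, int addr, int miireg, int value);

    static void my_phy_power(struct net_device *ndev, int phyaddr, bool up)
    {
            int bmcr = mii_rw(ndev, phyaddr, MII_BMCR, MII_READ);

            if (up)
                    bmcr &= ~BMCR_PDOWN;    /* wake the PHY for open */
            else
                    bmcr |= BMCR_PDOWN;     /* low-power state on close */
            mii_rw(ndev, phyaddr, MII_BMCR, bmcr);
    }
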
5640 struct fe_priv *np; in nv_probe() local
5658 np = netdev_priv(dev); in nv_probe()
5659 np->dev = dev; in nv_probe()
5660 np->pci_dev = pci_dev; in nv_probe()
5661 spin_lock_init(&np->lock); in nv_probe()
5662 spin_lock_init(&np->hwstats_lock); in nv_probe()
5664 u64_stats_init(&np->swstats_rx_syncp); in nv_probe()
5665 u64_stats_init(&np->swstats_tx_syncp); in nv_probe()
5667 timer_setup(&np->oom_kick, nv_do_rx_refill, 0); in nv_probe()
5668 timer_setup(&np->nic_poll, nv_do_nic_poll, 0); in nv_probe()
5669 timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE); in nv_probe()
5682 np->register_size = NV_PCI_REGSZ_VER3; in nv_probe()
5684 np->register_size = NV_PCI_REGSZ_VER2; in nv_probe()
5686 np->register_size = NV_PCI_REGSZ_VER1; in nv_probe()
5692 pci_resource_len(pci_dev, i) >= np->register_size) { in nv_probe()
5703 np->driver_data = id->driver_data; in nv_probe()
5705 np->device_id = id->device; in nv_probe()
5710 np->desc_ver = DESC_VER_3; in nv_probe()
5711 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; in nv_probe()
5725 np->desc_ver = DESC_VER_2; in nv_probe()
5726 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; in nv_probe()
5729 np->desc_ver = DESC_VER_1; in nv_probe()
5730 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; in nv_probe()
5733 np->pkt_limit = NV_PKTLIMIT_1; in nv_probe()
5735 np->pkt_limit = NV_PKTLIMIT_2; in nv_probe()
5738 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; in nv_probe()
5743 np->vlanctl_bits = 0; in nv_probe()
5745 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; in nv_probe()
5757 dev->max_mtu = np->pkt_limit; in nv_probe()
5759 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; in nv_probe()
5763 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; in nv_probe()
5767 np->base = ioremap(addr, np->register_size); in nv_probe()
5768 if (!np->base) in nv_probe()
5771 np->rx_ring_size = RX_RING_DEFAULT; in nv_probe()
5772 np->tx_ring_size = TX_RING_DEFAULT; in nv_probe()
5774 if (!nv_optimized(np)) { in nv_probe()
5775 np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev, in nv_probe()
5777 (np->rx_ring_size + in nv_probe()
5778 np->tx_ring_size), in nv_probe()
5779 &np->ring_addr, in nv_probe()
5781 if (!np->rx_ring.orig) in nv_probe()
5783 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; in nv_probe()
5785 np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev, in nv_probe()
5787 (np->rx_ring_size + in nv_probe()
5788 np->tx_ring_size), in nv_probe()
5789 &np->ring_addr, GFP_KERNEL); in nv_probe()
5790 if (!np->rx_ring.ex) in nv_probe()
5792 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; in nv_probe()
5794 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); in nv_probe()
5795 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); in nv_probe()
5796 if (!np->rx_skb || !np->tx_skb) in nv_probe()
5799 if (!nv_optimized(np)) in nv_probe()
5804 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); in nv_probe()
5812 np->orig_mac[0] = readl(base + NvRegMacAddrA); in nv_probe()
5813 np->orig_mac[1] = readl(base + NvRegMacAddrB); in nv_probe()
5819 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5820 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5821 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5822 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5823 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5824 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5827 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5828 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5829 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5830 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5831 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5832 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5838 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) + in nv_probe()
5840 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8); in nv_probe()
5843 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5844 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5845 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5846 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5847 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5848 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5873 np->wolenabled = 0; in nv_probe()
5887 if (np->desc_ver == DESC_VER_1) in nv_probe()
5888 np->tx_flags = NV_TX_VALID; in nv_probe()
5890 np->tx_flags = NV_TX2_VALID; in nv_probe()
5892 np->msi_flags = 0; in nv_probe()
5894 np->msi_flags |= NV_MSI_CAPABLE; in nv_probe()
5901 np->msi_flags |= NV_MSI_X_CAPABLE; in nv_probe()
5906 np->irqmask = NVREG_IRQMASK_CPU; in nv_probe()
5907 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ in nv_probe()
5908 np->msi_flags |= 0x0001; in nv_probe()
5912 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_probe()
5914 np->msi_flags &= ~NV_MSI_X_CAPABLE; in nv_probe()
5917 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_probe()
5918 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ in nv_probe()
5919 np->msi_flags |= 0x0003; in nv_probe()
5923 np->irqmask |= NVREG_IRQ_TIMER; in nv_probe()
5925 np->need_linktimer = 1; in nv_probe()
5926 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_probe()
5928 np->need_linktimer = 0; in nv_probe()
5933 np->tx_limit = 1; in nv_probe()
5936 np->tx_limit = 0; in nv_probe()
5955 np->mac_in_use = 1; in nv_probe()
5956 if (np->mgmt_version > 0) in nv_probe()
5957 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; in nv_probe()
5959 if (np->mac_in_use && in nv_probe()
5975 spin_lock_irq(&np->lock); in nv_probe()
5977 spin_unlock_irq(&np->lock); in nv_probe()
5980 spin_lock_irq(&np->lock); in nv_probe()
5982 spin_unlock_irq(&np->lock); in nv_probe()
5986 np->phy_model = id2 & PHYID2_MODEL_MASK; in nv_probe()
5989 np->phyaddr = phyaddr; in nv_probe()
5990 np->phy_oui = id1 | id2; in nv_probe()
5993 if (np->phy_oui == PHY_OUI_REALTEK2) in nv_probe()
5994 np->phy_oui = PHY_OUI_REALTEK; in nv_probe()
5996 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211) in nv_probe()
5997 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK; in nv_probe()
6011 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_probe()
6013 np->gigabit = PHY_GIGABIT; in nv_probe()
6017 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; in nv_probe()
6018 np->duplex = 0; in nv_probe()
6019 np->autoneg = 1; in nv_probe()
6041 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); in nv_probe()
6055 np->gigabit == PHY_GIGABIT ? "gbit " : "", in nv_probe()
6056 np->need_linktimer ? "lnktim " : "", in nv_probe()
6057 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", in nv_probe()
6058 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", in nv_probe()
6059 np->desc_ver); in nv_probe()
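
The probe path reads the factory MAC out of two 32-bit registers, and because some chip generations store the bytes reversed, three extraction variants appear above. A sketch of the straight (non-flipped) layout, where MacAddrA holds bytes 0-3 and the low half of MacAddrB holds bytes 4-5, least significant byte first:

    #include <linux/types.h>

    static void my_mac_from_regs(u8 mac[6], u32 mac_a, u32 mac_b)
    {
            mac[0] = (mac_a >>  0) & 0xff;  /* NvRegMacAddrA, low byte first */
            mac[1] = (mac_a >>  8) & 0xff;
            mac[2] = (mac_a >> 16) & 0xff;
            mac[3] = (mac_a >> 24) & 0xff;
            mac[4] = (mac_b >>  0) & 0xff;  /* NvRegMacAddrB, low 16 bits */
            mac[5] = (mac_b >>  8) & 0xff;
            /* reversed-order chips peel the same bytes from the other end */
    }
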
6082 struct fe_priv *np = netdev_priv(dev); in nv_restore_phy() local
6085 if (np->phy_oui == PHY_OUI_REALTEK && in nv_restore_phy()
6086 np->phy_model == PHY_MODEL_REALTEK_8201 && in nv_restore_phy()
6088 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); in nv_restore_phy()
6089 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); in nv_restore_phy()
6092 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); in nv_restore_phy()
6093 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); in nv_restore_phy()
6096 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_restore_phy()
6098 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); in nv_restore_phy()
6105 struct fe_priv *np = netdev_priv(dev); in nv_restore_mac_addr() local
6111 writel(np->orig_mac[0], base + NvRegMacAddrA); in nv_restore_mac_addr()
6112 writel(np->orig_mac[1], base + NvRegMacAddrB); in nv_restore_mac_addr()
6143 struct fe_priv *np = netdev_priv(dev); in nv_suspend() local
6154 for (i = 0; i <= np->register_size/sizeof(u32); i++) in nv_suspend()
6155 np->saved_config_space[i] = readl(base + i*sizeof(u32)); in nv_suspend()
6164 struct fe_priv *np = netdev_priv(dev); in nv_resume() local
6169 for (i = 0; i <= np->register_size/sizeof(u32); i++) in nv_resume()
6170 writel(np->saved_config_space[i], base+i*sizeof(u32)); in nv_resume()
6172 if (np->driver_data & DEV_NEED_MSI_FIX) in nv_resume()
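
nv_suspend() and nv_resume() snapshot the whole mapped register window word by word and replay it after power-up; note the loop bound is inclusive in the driver, so one u32 past register_size is copied as well. As a sketch, with saved[] sized accordingly by the caller:

    #include <linux/io.h>

    static void my_save_regs(void __iomem *base, u32 *saved, unsigned int size)
    {
            unsigned int i;

            for (i = 0; i <= size / sizeof(u32); i++)   /* inclusive, as in the driver */
                    saved[i] = readl(base + i * sizeof(u32));
    }

    static void my_restore_regs(void __iomem *base, const u32 *saved,
                                unsigned int size)
    {
            unsigned int i;

            for (i = 0; i <= size / sizeof(u32); i++)
                    writel(saved[i], base + i * sizeof(u32));
    }
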
6197 struct fe_priv *np = netdev_priv(dev); in nv_shutdown() local
6216 pci_wake_from_d3(pdev, np->wolenabled); in nv_shutdown()