Lines Matching +full:broken +full:- +full:prefetch +full:- +full:cmd
// SPDX-License-Identifier: GPL-2.0-only
 * of the original driver such as link fail-over and link management because
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
static int debug = -1;	/* defaults above */
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	/* D-Link DGE-530T (rev.B) */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) },	/* D-Link DGE-530T */
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) },	/* D-Link DGE-530T Rev C1 */
{ PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) },	/* CNet PowerG-2000 */
/* in is_genesis() */
return hw->chip_id == CHIP_ID_GENESIS;

/* in skge_get_regs() */
const void __iomem *io = skge->hw->regs;
regs->version = 1;
memset(p, 0, regs->len);
if (regs->len > B3_RI_WTO_R1) {
regs->len - B3_RI_WTO_R1);

/* in wol_supported() */
if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
/* in skge_wol_init() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
/* WA code for COMA mode -- clear PHY reset */
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
/* Force to 10/100; skge_reset will re-enable on resume */
memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
skge->netdev->dev_addr, ETH_ALEN);
if (skge->wol & WAKE_PHY)
if (skge->wol & WAKE_MAGIC)
/* in skge_get_wol() */
wol->supported = wol_supported(skge->hw);
wol->wolopts = skge->wol;

/* in skge_set_wol() */
struct skge_hw *hw = skge->hw;
if ((wol->wolopts & ~wol_supported(hw)) ||
!device_can_wakeup(&hw->pdev->dev))
return -EOPNOTSUPP;
skge->wol = wol->wolopts;
device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
/* in skge_supported_modes() */
if (hw->copper) {
else if (hw->chip_id == CHIP_ID_YUKON)

/* in skge_get_link_ksettings() */
struct ethtool_link_ksettings *cmd)
struct skge_hw *hw = skge->hw;
if (hw->copper) {
cmd->base.port = PORT_TP;
cmd->base.phy_address = hw->phy_addr;
cmd->base.port = PORT_FIBRE;
advertising = skge->advertising;
cmd->base.autoneg = skge->autoneg;
cmd->base.speed = skge->speed;
cmd->base.duplex = skge->duplex;
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
/* in skge_set_link_ksettings() */
const struct ethtool_link_ksettings *cmd)
const struct skge_hw *hw = skge->hw;
cmd->link_modes.advertising);
if (cmd->base.autoneg == AUTONEG_ENABLE) {
skge->duplex = -1;
skge->speed = -1;
u32 speed = cmd->base.speed;
if (cmd->base.duplex == DUPLEX_FULL)
else if (cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
if (cmd->base.duplex == DUPLEX_FULL)
else if (cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
if (cmd->base.duplex == DUPLEX_FULL)
else if (cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
return -EINVAL;
return -EINVAL;
skge->speed = speed;
skge->duplex = cmd->base.duplex;
skge->autoneg = cmd->base.autoneg;
skge->advertising = advertising;
/* in skge_get_drvinfo() */
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(skge->hw->pdev),
sizeof(info->bus_info));

/* in skge_get_sset_count() */
return -EOPNOTSUPP;
/* in skge_get_ethtool_stats() */
if (is_genesis(skge->hw))

/* in skge_get_stats() */
if (is_genesis(skge->hw))
dev->stats.tx_bytes = data[0];
dev->stats.rx_bytes = data[1];
dev->stats.tx_packets = data[2] + data[4] + data[6];
dev->stats.rx_packets = data[3] + data[5] + data[7];
dev->stats.multicast = data[3] + data[5];
dev->stats.collisions = data[10];
dev->stats.tx_aborted_errors = data[12];
return &dev->stats;
/* in skge_get_ring_param() */
p->rx_max_pending = MAX_RX_RING_SIZE;
p->tx_max_pending = MAX_TX_RING_SIZE;
p->rx_pending = skge->rx_ring.count;
p->tx_pending = skge->tx_ring.count;

/* in skge_set_ring_param() */
if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
return -EINVAL;
skge->rx_ring.count = p->rx_pending;
skge->tx_ring.count = p->tx_pending;
/* in skge_get_msglevel() */
return skge->msg_enable;

/* in skge_set_msglevel() */
skge->msg_enable = value;

/* in skge_nway_reset() */
if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
return -EINVAL;
/* in skge_get_pauseparam() */
ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) ||
(skge->flow_control == FLOW_MODE_SYM_OR_REM));
ecmd->tx_pause = (ecmd->rx_pause ||
(skge->flow_control == FLOW_MODE_LOC_SEND));
ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;

/* in skge_set_pauseparam() */
if (ecmd->autoneg != old.autoneg)
skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
if (ecmd->rx_pause && ecmd->tx_pause)
skge->flow_control = FLOW_MODE_SYMMETRIC;
else if (ecmd->rx_pause && !ecmd->tx_pause)
skge->flow_control = FLOW_MODE_SYM_OR_REM;
else if (!ecmd->rx_pause && ecmd->tx_pause)
skge->flow_control = FLOW_MODE_LOC_SEND;
skge->flow_control = FLOW_MODE_NONE;
/* in skge_get_coalesce() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
ecmd->rx_coalesce_usecs = 0;
ecmd->tx_coalesce_usecs = 0;
ecmd->rx_coalesce_usecs = delay;
ecmd->tx_coalesce_usecs = delay;
/* in skge_set_coalesce() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
if (ecmd->rx_coalesce_usecs == 0)
else if (ecmd->rx_coalesce_usecs < 25 ||
ecmd->rx_coalesce_usecs > 33333)
return -EINVAL;
delay = ecmd->rx_coalesce_usecs;
if (ecmd->tx_coalesce_usecs == 0)
else if (ecmd->tx_coalesce_usecs < 25 ||
ecmd->tx_coalesce_usecs > 33333)
return -EINVAL;
delay = min(delay, ecmd->tx_coalesce_usecs);
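/*
 * Note on the bounds above: 0 disables interrupt moderation for that
 * direction, and non-zero values must lie in 25..33333 usec,
 * presumably the range the chip's single moderation timer can
 * represent (33333 usec is roughly 30 interrupts per second). Since
 * rx and tx share that timer, the effective delay is the minimum of
 * the two requested values.
 */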
/* in skge_led() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
spin_lock_bh(&hw->phy_lock);
if (hw->phy_type == SK_PHY_BCOM)
if (hw->phy_type == SK_PHY_BCOM)
(skge->speed == SPEED_100 ?
spin_unlock_bh(&hw->phy_lock);
/* in skge_get_eeprom_len() */
pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
/* in skge_get_eeprom() */
struct pci_dev *pdev = skge->hw->pdev;
int length = eeprom->len;
u16 offset = eeprom->offset;
return -EINVAL;
eeprom->magic = SKGE_EEPROM_MAGIC;
length -= n;

/* in skge_set_eeprom() */
struct pci_dev *pdev = skge->hw->pdev;
int length = eeprom->len;
u16 offset = eeprom->offset;
return -EINVAL;
if (eeprom->magic != SKGE_EEPROM_MAGIC)
return -EINVAL;
length -= n;
 * One-to-one association of board descriptors with ring elements

/* in skge_ring_alloc() */
ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL);
if (!ring->start)
return -ENOMEM;
for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
e->desc = d;
if (i == ring->count - 1) {
e->next = ring->start;
d->next_offset = base;
e->next = e + 1;
d->next_offset = base + (i+1) * sizeof(*d);
ring->to_use = ring->to_clean = ring->start;
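/*
 * The ring is circular in two parallel views: each software element
 * links to its successor through a CPU pointer (e->next), and the
 * hardware descriptor it wraps links to the same successor through a
 * bus-relative offset (d->next_offset = base + (i+1) * sizeof(*d)).
 * The final element points back to the start in both views, so
 * to_use and to_clean can chase each other around the ring forever.
 */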
/* in skge_rx_setup() */
struct skge_rx_desc *rd = e->desc;
map = dma_map_single(&skge->hw->pdev->dev, skb->data, bufsize,
if (dma_mapping_error(&skge->hw->pdev->dev, map))
return -1;
rd->dma_lo = lower_32_bits(map);
rd->dma_hi = upper_32_bits(map);
e->skb = skb;
rd->csum1_start = ETH_HLEN;
rd->csum2_start = ETH_HLEN;
rd->csum1 = 0;
rd->csum2 = 0;
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
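/*
 * The control word is written last on purpose: once BMU_OWN is set,
 * the BMU may fetch the descriptor at any moment, so the DMA address
 * halves and checksum start offsets above must already be visible.
 * (The full source presumably separates the two with a write memory
 * barrier; only the matched lines appear here.)
 */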
/* in skge_rx_reuse() */
struct skge_rx_desc *rd = e->desc;
rd->csum2 = 0;
rd->csum2_start = ETH_HLEN;
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;

/* in skge_rx_clean() */
struct skge_hw *hw = skge->hw;
struct skge_ring *ring = &skge->rx_ring;
e = ring->start;
struct skge_rx_desc *rd = e->desc;
rd->control = 0;
if (e->skb) {
dma_unmap_single(&hw->pdev->dev,
dev_kfree_skb(e->skb);
e->skb = NULL;
} while ((e = e->next) != ring->start);
/* in skge_rx_fill() */
struct skge_ring *ring = &skge->rx_ring;
e = ring->start;
skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
return -ENOMEM;
if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
return -EIO;
} while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
/* in skge_link_up() */
skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
netif_carrier_on(skge->netdev);
netif_wake_queue(skge->netdev);
netif_info(skge, link, skge->netdev,
skge->speed,
skge->duplex == DUPLEX_FULL ? "full" : "half",
skge_pause(skge->flow_status));

/* in skge_link_down() */
skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
netif_carrier_off(skge->netdev);
netif_stop_queue(skge->netdev);
netif_info(skge, link, skge->netdev, "Link is down\n");

/* in xm_link_down() */
struct net_device *dev = hw->dev[port];
/* in __xm_phy_read() */
xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
if (hw->phy_type == SK_PHY_XMAC)
return -ETIMEDOUT;

/* in xm_phy_read() */
pr_warn("%s: phy read timed out\n", hw->dev[port]->name);

/* in xm_phy_write() */
xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
return -EIO;
return -ETIMEDOUT;

/* in genesis_reset() */
xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */
if (hw->phy_type == SK_PHY_BCOM)
/* in bcom_check_link() */
struct net_device *dev = hw->dev[port];
if (skge->autoneg == AUTONEG_ENABLE) {
skge->duplex = DUPLEX_FULL;
skge->duplex = DUPLEX_HALF;
/* We are using IEEE 802.3z/D5.0 Table 37-4 */
skge->flow_status = FLOW_STAT_SYMMETRIC;
skge->flow_status = FLOW_STAT_REM_SEND;
skge->flow_status = FLOW_STAT_LOC_SEND;
skge->flow_status = FLOW_STAT_NONE;
skge->speed = SPEED_1000;
/* in bcom_phy_init() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
if (skge->autoneg == AUTONEG_ENABLE) {
 * 1000Base-T Link Acquisition Failure in Slave Mode
 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
if (skge->advertising & ADVERTISED_1000baseT_Half)
if (skge->advertising & ADVERTISED_1000baseT_Full)
if (skge->duplex == DUPLEX_FULL)
phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
if (hw->dev[port]->mtu > ETH_DATA_LEN) {
/* in xm_phy_init() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
if (skge->autoneg == AUTONEG_ENABLE) {
if (skge->advertising & ADVERTISED_1000baseT_Half)
if (skge->advertising & ADVERTISED_1000baseT_Full)
ctrl |= fiber_pause_map[skge->flow_control];
/* Restart Auto-negotiation */
if (skge->duplex == DUPLEX_FULL)
 * Do NOT enable Auto-negotiation here. This would hold
mod_timer(&skge->link_timer, jiffies + LINK_HZ);
/* in xm_check_link() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
if (skge->autoneg == AUTONEG_ENABLE) {
skge->duplex = DUPLEX_FULL;
skge->duplex = DUPLEX_HALF;
/* We are using IEEE 802.3z/D5.0 Table 37-4 */
if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
skge->flow_status = FLOW_STAT_SYMMETRIC;
else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
skge->flow_status = FLOW_STAT_REM_SEND;
else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
skge->flow_status = FLOW_STAT_LOC_SEND;
skge->flow_status = FLOW_STAT_NONE;
skge->speed = SPEED_1000;
/* in xm_link_timer() */
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
spin_lock_irqsave(&hw->phy_lock, flags);
/* Re-enable interrupt to detect link down */
mod_timer(&skge->link_timer,
spin_unlock_irqrestore(&hw->phy_lock, flags);
/* in genesis_mac_init() */
struct net_device *dev = hw->dev[port];
int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
if (hw->phy_type != SK_PHY_XMAC) {
switch (hw->phy_type) {
xm_outaddr(hw, port, XM_SA, dev->dev_addr);
if (skge->duplex == DUPLEX_HALF) {
if (hw->ports > 1 && jumbo)
 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
/* in genesis_stop() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
u16 cmd;
cmd = xm_read16(hw, port, XM_MMU_CMD);
cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
xm_write16(hw, port, XM_MMU_CMD, cmd);
} while (--retries > 0);
if (hw->phy_type != SK_PHY_XMAC) {
/* in genesis_get_stats() */
struct skge_hw *hw = skge->hw;
int port = skge->port;

/* in genesis_mac_intr() */
struct net_device *dev = hw->dev[port];
netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
mod_timer(&skge->link_timer, jiffies + 1);
++dev->stats.tx_fifo_errors;
/* in genesis_link_up() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
u16 cmd, msk;
cmd = xm_read16(hw, port, XM_MMU_CMD);
if (skge->flow_status == FLOW_STAT_NONE ||
skge->flow_status == FLOW_STAT_LOC_SEND)
cmd |= XM_MMU_IGN_PF;
cmd &= ~XM_MMU_IGN_PF;
xm_write16(hw, port, XM_MMU_CMD, cmd);
if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
skge->flow_status == FLOW_STAT_LOC_SEND) {
 * Send a zero pause time frame to re-start transmission.
cmd = xm_read16(hw, port, XM_MMU_CMD);
if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL)
cmd |= XM_MMU_GMII_FD;
if (hw->phy_type == SK_PHY_BCOM) {
cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
/* in bcom_phy_intr() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
hw->dev[port]->name);

/* in gm_phy_write() */
GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
pr_warn("%s: phy write timeout\n", hw->dev[port]->name);
return -EIO;

/* in __gm_phy_read() */
GM_SMI_CT_PHY_AD(hw->phy_addr)
return -ETIMEDOUT;

/* in gm_phy_read() */
pr_warn("%s: phy read timeout\n", hw->dev[port]->name);
/* in yukon_init() */
struct skge_port *skge = netdev_priv(hw->dev[port]);
if (skge->autoneg == AUTONEG_ENABLE) {
if (skge->autoneg == AUTONEG_DISABLE)
if (skge->autoneg == AUTONEG_ENABLE) {
if (hw->copper) {
if (skge->advertising & ADVERTISED_1000baseT_Full)
if (skge->advertising & ADVERTISED_1000baseT_Half)
if (skge->advertising & ADVERTISED_100baseT_Full)
if (skge->advertising & ADVERTISED_100baseT_Half)
if (skge->advertising & ADVERTISED_10baseT_Full)
if (skge->advertising & ADVERTISED_10baseT_Half)
/* Set Flow-control capabilities */
adv |= phy_pause_map[skge->flow_control];
if (skge->advertising & ADVERTISED_1000baseT_Full)
if (skge->advertising & ADVERTISED_1000baseT_Half)
adv |= fiber_pause_map[skge->flow_control];
/* Restart Auto-negotiation */
if (skge->duplex == DUPLEX_FULL)
switch (skge->speed) {
if (skge->autoneg == AUTONEG_ENABLE)
/* Apparently, early versions of Yukon-Lite had wrong chip_id? */

/* in is_yukon_lite_a0() */
if (hw->chip_id != CHIP_ID_YUKON)
/* in yukon_mac_init() */
struct skge_port *skge = netdev_priv(hw->dev[port]);
const u8 *addr = hw->dev[port]->dev_addr;
/* WA code for COMA mode -- set PHY reset */
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
/* WA code for COMA mode -- clear PHY reset */
if (hw->chip_id == CHIP_ID_YUKON_LITE &&
hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
if (skge->autoneg == AUTONEG_DISABLE) {
switch (skge->speed) {
if (skge->duplex == DUPLEX_FULL)
switch (skge->flow_control) {
/* disable Rx flow-control */
/* enable Tx & Rx flow-control */
if (hw->dev[port]->mtu > ETH_DATA_LEN)
/* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
 * in order to flush pause packets in Rx FIFO on Yukon-1
/* in yukon_stop() */
struct skge_hw *hw = skge->hw;
int port = skge->port;

/* in yukon_get_stats() */
struct skge_hw *hw = skge->hw;
int port = skge->port;

/* in yukon_mac_intr() */
struct net_device *dev = hw->dev[port];
netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
++dev->stats.rx_fifo_errors;
++dev->stats.tx_fifo_errors;
/* in yukon_link_up() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)

/* in yukon_link_down() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
if (skge->flow_status == FLOW_STAT_REM_SEND) {
/* in yukon_phy_intr() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
skge->duplex = (phystat & PHY_M_PS_FULL_DUP)
skge->speed = yukon_speed(hw, phystat);
/* We are using IEEE 802.3z/D5.0 Table 37-4 */
skge->flow_status = FLOW_STAT_SYMMETRIC;
skge->flow_status = FLOW_STAT_REM_SEND;
skge->flow_status = FLOW_STAT_LOC_SEND;
skge->flow_status = FLOW_STAT_NONE;
if (skge->flow_status == FLOW_STAT_NONE ||
(skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
skge->speed = yukon_speed(hw, phystat);
skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
/* in skge_phy_reset() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
struct net_device *dev = hw->dev[port];
netif_stop_queue(skge->netdev);
netif_carrier_off(skge->netdev);
spin_lock_bh(&hw->phy_lock);
spin_unlock_bh(&hw->phy_lock);
static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct skge_hw *hw = skge->hw;
int err = -EOPNOTSUPP;
return -ENODEV;	/* Phy still in reset */
switch (cmd) {
data->phy_id = hw->phy_addr;
spin_lock_bh(&hw->phy_lock);
err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
spin_unlock_bh(&hw->phy_lock);
data->val_out = val;
spin_lock_bh(&hw->phy_lock);
err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
data->val_in);
err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
data->val_in);
spin_unlock_bh(&hw->phy_lock);
/* in skge_ramset() */
end = start + len - 1;

/* in skge_qset() */
struct skge_hw *hw = skge->hw;
u64 base = skge->dma + (e->desc - skge->mem);
/* in skge_up() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
if (!is_valid_ether_addr(dev->dev_addr))
return -EINVAL;
netif_info(skge, ifup, skge->netdev, "enabling interface\n");
if (dev->mtu > RX_BUF_SIZE)
skge->rx_buf_size = dev->mtu + ETH_HLEN;
skge->rx_buf_size = RX_BUF_SIZE;
rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
skge->mem_size = tx_size + rx_size;
skge->mem = dma_alloc_coherent(&hw->pdev->dev, skge->mem_size,
&skge->dma, GFP_KERNEL);
if (!skge->mem)
return -ENOMEM;
BUG_ON(skge->dma & 7);
if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
dev_err(&hw->pdev->dev, "dma_alloc_coherent region crosses 4G boundary\n");
err = -EINVAL;
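/*
 * Rationale for the check above, assuming the hardware works the way
 * skge_ring_alloc() suggests: buffer addresses are split into
 * dma_lo/dma_hi, but the descriptors themselves are chained through
 * 32-bit next_offset values, so a coherent region straddling a 4GB
 * boundary could not be walked by the chip. Failing the open is
 * simpler than splitting the rings.
 */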
err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
skge->dma + rx_size);
if (hw->ports == 1) {
err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED,
dev->name, hw);
hw->pdev->irq, err);
spin_lock_bh(&hw->phy_lock);
spin_unlock_bh(&hw->phy_lock);
/* Configure RAMbuffers - equally between ports and tx/rx */
chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2);
ram_addr = hw->ram_offset + 2 * chunk * port;
skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= portmask[port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock_irq(&hw->hw_lock);
napi_enable(&skge->napi);
kfree(skge->tx_ring.start);
kfree(skge->rx_ring.start);
dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
skge->dma);
skge->mem = NULL;
/* in skge_down() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
if (!skge->mem)
netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)
del_timer_sync(&skge->link_timer);
napi_disable(&skge->napi);
spin_lock_irq(&hw->hw_lock);
hw->intr_mask &= ~portmask[port];
skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 0 : hw->intr_mask);
spin_unlock_irq(&hw->hw_lock);
if (hw->ports == 1)
free_irq(hw->pdev->irq, hw);
skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_REG_OFF);
kfree(skge->rx_ring.start);
kfree(skge->tx_ring.start);
dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
skge->dma);
skge->mem = NULL;
/* in skge_avail() */
return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
+ (ring->to_clean - ring->to_use) - 1;
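/*
 * Worked example: with count == 256 and to_use == to_clean (empty
 * ring) this evaluates to 256 + 0 - 1 = 255. One slot is always held
 * back so that a full ring (to_use trailing to_clean by one) remains
 * distinguishable from an empty one. The pointer arithmetic is valid
 * because the elements live in one kcalloc'd array even though their
 * next pointers form a circle.
 */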
/* in skge_xmit_frame() */
struct skge_hw *hw = skge->hw;
if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
e = skge->tx_ring.to_use;
td = e->desc;
BUG_ON(td->control & BMU_OWN);
e->skb = skb;
map = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(&hw->pdev->dev, map))
td->dma_lo = lower_32_bits(map);
td->dma_hi = upper_32_bits(map);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
td->csum_offs = 0;
td->csum_start = offset;
td->csum_write = offset + skb->csum_offset;
if (!skb_shinfo(skb)->nr_frags)	/* single buffer i.e. no fragments */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
if (dma_mapping_error(&hw->pdev->dev, map))
e = e->next;
e->skb = skb;
tf = e->desc;
BUG_ON(tf->control & BMU_OWN);
tf->dma_lo = lower_32_bits(map);
tf->dma_hi = upper_32_bits(map);
tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag);
tf->control |= BMU_EOF | BMU_IRQ_EOF;
td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
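/*
 * Ownership handoff order: each fragment descriptor is released with
 * BMU_OWN as soon as it is filled, but the first descriptor (td) is
 * released only here, once the whole chain is built, so the chip can
 * never start on a half-constructed frame. BMU_STF marks the start
 * of frame; BMU_EOF | BMU_IRQ_EOF on the last fragment requests a
 * completion interrupt.
 */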
netdev_sent_queue(dev, skb->len);
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
e - skge->tx_ring.start, skb->len);
skge->tx_ring.to_use = e->next;
if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
e = skge->tx_ring.to_use;
dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
while (i-- > 0) {
e = e->next;
dma_unmap_page(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
/* in skge_tx_unmap() */
dma_unmap_single(&pdev->dev, dma_unmap_addr(e, mapaddr),
dma_unmap_page(&pdev->dev, dma_unmap_addr(e, mapaddr),

/* in skge_tx_clean() */
for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
skge_tx_unmap(skge->hw->pdev, e, td->control);
if (td->control & BMU_EOF)
dev_kfree_skb(e->skb);
td->control = 0;
skge->tx_ring.to_clean = e;

/* in skge_tx_timeout() */
netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
/* in skge_change_mtu() */
dev->mtu = new_mtu;
dev->mtu = new_mtu;

/* in genesis_set_multicast() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
if (dev->flags & IFF_PROMISC)
if (dev->flags & IFF_ALLMULTI)
if (skge->flow_status == FLOW_STAT_REM_SEND ||
skge->flow_status == FLOW_STAT_SYMMETRIC)
genesis_add_filter(filter, ha->addr);
/* in yukon_set_multicast() */
struct skge_hw *hw = skge->hw;
int port = skge->port;
int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
skge->flow_status == FLOW_STAT_SYMMETRIC);
if (dev->flags & IFF_PROMISC)	/* promiscuous */
else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
yukon_add_filter(filter, ha->addr);

/* in skge_set_multicast() */
if (is_genesis(skge->hw))
/* in skge_rx_get() */
netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
e - skge->rx_ring.start, status, len);
if (len > skge->rx_buf_size)
if (bad_phy_status(skge->hw, status))
if (phy_length(skge->hw, status) != len)
dma_sync_single_for_cpu(&skge->hw->pdev->dev,
skb_copy_from_linear_data(e->skb, skb->data, len);
dma_sync_single_for_device(&skge->hw->pdev->dev,
skge_rx_reuse(e, skge->rx_buf_size);
nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
prefetch(skb->data);
if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
dma_unmap_single(&skge->hw->pdev->dev,
if (dev->features & NETIF_F_RXCSUM) {
skb->csum = le16_to_cpu(csum);
skb->ip_summed = CHECKSUM_COMPLETE;
skb->protocol = eth_type_trans(skb, dev);
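/*
 * Two receive paths meet above: frames small enough to copy are
 * duplicated out of the still-mapped buffer (sync for the CPU, copy,
 * sync back for the device, reuse the element), while larger frames
 * are handed up whole and replaced with a freshly mapped skb. If the
 * replacement cannot be allocated or mapped, the frame appears to be
 * dropped and the old buffer recycled, so the ring never runs dry.
 */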
netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
e - skge->rx_ring.start, control, status);
if (is_genesis(skge->hw)) {
dev->stats.rx_length_errors++;
dev->stats.rx_frame_errors++;
dev->stats.rx_crc_errors++;
dev->stats.rx_length_errors++;
dev->stats.rx_frame_errors++;
dev->stats.rx_crc_errors++;
skge_rx_reuse(e, skge->rx_buf_size);
/* in skge_tx_done() */
struct skge_ring *ring = &skge->tx_ring;
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
for (e = ring->to_clean; e != ring->to_use; e = e->next) {
u32 control = ((const struct skge_tx_desc *) e->desc)->control;
skge_tx_unmap(skge->hw->pdev, e, control);
netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
e - skge->tx_ring.start);
bytes_compl += e->skb->len;
dev_consume_skb_any(e->skb);
skge->tx_ring.to_clean = e;
skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
skge_avail(&skge->tx_ring) > TX_LOW_WATER)) {
/* in skge_poll() */
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
struct skge_ring *ring = &skge->rx_ring;
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
struct skge_rx_desc *rd = e->desc;
control = rd->control;
skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
ring->to_clean = e;
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
spin_lock_irqsave(&hw->hw_lock, flags);
hw->intr_mask |= napimask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock_irqrestore(&hw->hw_lock, flags);
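/*
 * Classic NAPI completion: when fewer than budget packets were
 * processed, the poll routine re-arms this port's bits in B0_IMSK
 * under hw_lock, mirroring skge_intr(), which clears those same bits
 * before calling napi_schedule().
 */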
/* in skge_mac_parity() */
struct net_device *dev = hw->dev[port];
++dev->stats.tx_heartbeat_errors;
/* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
(hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)

/* in skge_error_irq() */
struct pci_dev *pdev = hw->pdev;
dev_err(&pdev->dev, "Ram read data parity error\n");
dev_err(&pdev->dev, "Ram write data parity error\n");
dev_err(&pdev->dev, "%s: receive queue parity error\n",
hw->dev[0]->name);
dev_err(&pdev->dev, "%s: receive queue parity error\n",
hw->dev[1]->name);
dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
hw->intr_mask &= ~IS_HW_ERR;
/* in skge_extirq() */
for (port = 0; port < hw->ports; port++) {
struct net_device *dev = hw->dev[port];
spin_lock(&hw->phy_lock);
else if (hw->phy_type == SK_PHY_BCOM)
spin_unlock(&hw->phy_lock);
spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock_irq(&hw->hw_lock);
/* in skge_intr() */
spin_lock(&hw->hw_lock);
status &= hw->intr_mask;
hw->intr_mask &= ~IS_EXT_REG;
tasklet_schedule(&hw->phy_task);
struct skge_port *skge = netdev_priv(hw->dev[0]);
hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
napi_schedule(&skge->napi);
++hw->dev[0]->stats.rx_over_errors;
if (hw->dev[1]) {
struct skge_port *skge = netdev_priv(hw->dev[1]);
hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
napi_schedule(&skge->napi);
++hw->dev[1]->stats.rx_over_errors;
skge_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock(&hw->hw_lock);
/* in skge_netpoll() */
disable_irq(dev->irq);
skge_intr(dev->irq, skge->hw);
enable_irq(dev->irq);
/* in skge_set_mac_address() */
struct skge_hw *hw = skge->hw;
unsigned port = skge->port;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
spin_lock_bh(&hw->phy_lock);
memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
xm_outaddr(hw, port, XM_SA, dev->dev_addr);
gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
spin_unlock_bh(&hw->phy_lock);
{ CHIP_ID_YUKON_LITE, "Yukon-Lite"},
{ CHIP_ID_YUKON_LP, "Yukon-LP"},

/* in skge_board_name() */
if (skge_chips[i].id == hw->chip_id)
snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id);
/* in skge_reset() */
pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(hw->pdev, PCI_STATUS,
/* restore CLK_RUN bits (for Yukon-Lite) */
hw->chip_id = skge_read8(hw, B2_CHIP_ID);
hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
hw->copper = (pmd_type == 'T' || pmd_type == '1');
switch (hw->chip_id) {
switch (hw->phy_type) {
hw->phy_addr = PHY_ADDR_XMAC;
hw->phy_addr = PHY_ADDR_BCOM;
dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
hw->phy_type);
return -EOPNOTSUPP;
dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n");
return -EOPNOTSUPP;
if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
hw->copper = 1;
hw->phy_addr = PHY_ADDR_MARV;
dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
hw->chip_id);
return -EOPNOTSUPP;
hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
hw->ram_size = 0x100000;
hw->ram_offset = 0x80000;
hw->ram_size = t8 * 512;
hw->ram_size = 0x20000;
hw->ram_size = t8 * 4096;
hw->intr_mask = IS_HW_ERR;
if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC))
hw->intr_mask |= IS_EXT_REG;
dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
hw->intr_mask &= ~IS_HW_ERR;
pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
for (i = 0; i < hw->ports; i++) {
for (i = 0; i < hw->ports; i++)
for (i = 0; i < hw->ports; i++) {
/* in skge_debug_show() */
struct net_device *dev = seq->private;
const struct skge_hw *hw = skge->hw;
return -ENETDOWN;
seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring));
for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
const struct skge_tx_desc *t = e->desc;
t->control, t->dma_hi, t->dma_lo, t->status,
t->csum_offs, t->csum_write, t->csum_start);
for (e = skge->rx_ring.to_clean; ; e = e->next) {
const struct skge_rx_desc *r = e->desc;
if (r->control & BMU_OWN)
r->control, r->dma_hi, r->dma_lo, r->status,
r->timestamp, r->csum1, r->csum1_start);
/* in skge_device_event() */
if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
if (skge->debugfs)
skge->debugfs = debugfs_rename(skge_debug,
skge->debugfs,
skge_debug, dev->name);
debugfs_remove(skge->debugfs);
skge->debugfs = NULL;
skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug,
/* in skge_devinit() */
SET_NETDEV_DEV(dev, &hw->pdev->dev);
dev->netdev_ops = &skge_netdev_ops;
dev->ethtool_ops = &skge_ethtool_ops;
dev->watchdog_timeo = TX_WATCHDOG;
dev->irq = hw->pdev->irq;
/* MTU range: 60 - 9000 */
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = ETH_JUMBO_MTU;
dev->features |= NETIF_F_HIGHDMA;
netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
skge->netdev = dev;
skge->hw = hw;
skge->msg_enable = netif_msg_init(debug, default_msg);
skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
skge->autoneg = AUTONEG_ENABLE;
skge->flow_control = FLOW_MODE_SYM_OR_REM;
skge->duplex = -1;
skge->speed = -1;
skge->advertising = skge_supported_modes(hw);
if (device_can_wakeup(&hw->pdev->dev)) {
skge->wol = wol_supported(hw) & WAKE_MAGIC;
device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
hw->dev[port] = dev;
skge->port = port;
timer_setup(&skge->link_timer, xm_link_timer, 0);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
dev->features |= dev->hw_features;
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
/* in skge_show_addr() */
netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
/* in skge_probe() */
dev_err(&pdev->dev, "cannot enable PCI device\n");
dev_err(&pdev->dev, "cannot obtain PCI resources\n");
if (!only_32bit_dma && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
} else if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
dev_err(&pdev->dev, "no usable DMA configuration\n");
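/*
 * DMA mask negotiation: a 64-bit mask is tried first (unless the
 * device is known to be 32-bit only) with a 32-bit fallback; the
 * probe is aborted only if both fail. Each branch sets the coherent
 * mask to the same width as the streaming mask.
 */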
err = -ENOMEM;
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
hw->pdev = pdev;
spin_lock_init(&hw->hw_lock);
spin_lock_init(&hw->phy_lock);
tasklet_setup(&hw->phy_task, skge_extirq);
hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
(unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
skge_board_name(hw), hw->chip_rev);
err = -ENOMEM;
/* Some motherboards are broken and have zero in ROM. */
if (!is_valid_ether_addr(dev->dev_addr))
dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
dev_err(&pdev->dev, "cannot register net device\n");
if (hw->ports > 1) {
err = -ENOMEM;
dev_err(&pdev->dev, "cannot register second net device\n");
err = request_irq(pdev->irq, skge_intr, IRQF_SHARED,
hw->irq_name, hw);
dev_err(&pdev->dev, "cannot assign irq %d\n",
pdev->irq);
iounmap(hw->regs);
/* in skge_remove() */
dev1 = hw->dev[1];
dev0 = hw->dev[0];
tasklet_kill(&hw->phy_task);
spin_lock_irq(&hw->hw_lock);
hw->intr_mask = 0;
if (hw->ports > 1) {
spin_unlock_irq(&hw->hw_lock);
if (hw->ports > 1)
free_irq(pdev->irq, hw);
iounmap(hw->regs);
/* in skge_suspend() */
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
if (skge->wol)

/* in skge_resume() */
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];

/* in skge_shutdown() */
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
if (skge->wol)
pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
.ident = "FUJITSU SIEMENS A8NE-FM",
DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")