Lines matching "cs", "dev", "assoc" in drivers/net/ethernet/sun/niu.c

1 // SPDX-License-Identifier: GPL-2.0
13 #include <linux/dma-mapping.h>
72 #define nr64(reg) readq(np->regs + (reg))
73 #define nw64(reg, val) writeq((val), np->regs + (reg))
75 #define nr64_mac(reg) readq(np->mac_regs + (reg))
76 #define nw64_mac(reg, val) writeq((val), np->mac_regs + (reg))
78 #define nr64_ipp(reg) readq(np->regs + np->ipp_off + (reg))
79 #define nw64_ipp(reg, val) writeq((val), np->regs + np->ipp_off + (reg))
81 #define nr64_pcs(reg) readq(np->regs + np->pcs_off + (reg))
82 #define nw64_pcs(reg, val) writeq((val), np->regs + np->pcs_off + (reg))
84 #define nr64_xpcs(reg) readq(np->regs + np->xpcs_off + (reg))
85 #define nw64_xpcs(reg, val) writeq((val), np->regs + np->xpcs_off + (reg))
90 static int debug = -1;
95 spin_lock_irqsave(&np->parent->lock, flags)
97 spin_unlock_irqrestore(&np->parent->lock, flags)
104 while (--limit >= 0) { in __niu_wait_bits_clear_mac()
112 return -ENODEV; in __niu_wait_bits_clear_mac()
125 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", in __niu_set_and_wait_clear_mac()
139 while (--limit >= 0) { in __niu_wait_bits_clear_ipp()
147 return -ENODEV; in __niu_wait_bits_clear_ipp()
164 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", in __niu_set_and_wait_clear_ipp()
178 while (--limit >= 0) { in __niu_wait_bits_clear()
186 return -ENODEV; in __niu_wait_bits_clear()
204 netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n", in __niu_set_and_wait_clear()
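
All three __niu_wait_bits_clear* variants share one polling shape: a bounded --limit loop that re-reads the register until the bits drop, with a set-and-wait wrapper that logs via netdev_err() on timeout. A reconstructed sketch of the generic variant, parameter list assumed from context:

/* Reconstructed from the fragments above. */
static int __niu_wait_bits_clear_sketch(struct niu *np, unsigned long reg,
					u64 bits, int limit, int delay)
{
	while (--limit >= 0) {
		u64 val = nr64(reg);

		if (!(val & bits))
			break;
		udelay(delay);
	}
	if (limit < 0)
		return -ENODEV;	/* bits never cleared within the budget */
	return 0;
}
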
217 u64 val = (u64) lp->timer; in niu_ldg_rearm()
222 nw64(LDG_IMGMT(lp->ldg_num), val); in niu_ldg_rearm()
231 return -EINVAL; in niu_ldn_irq_enable()
237 mask_reg = LD_IM1(ldn - 64); in niu_ldn_irq_enable()
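
The LD_IM1(ldn - 64) fragment implies the logical devices are split across two interrupt-mask banks: LD_IM0 for ldn 0-63, LD_IM1 for the rest. A hedged reconstruction, with LDN_MAX and the *_MASK constants assumed from the driver header:

/* Sketch: pick the mask register by LDN range, then clear or set the
 * mask bit to enable or disable delivery. */
static int niu_ldn_irq_enable_sketch(struct niu *np, int ldn, int on)
{
	unsigned long mask_reg;
	u64 bits, val;

	if (ldn < 0 || ldn > LDN_MAX)
		return -EINVAL;

	if (ldn < 64) {
		mask_reg = LD_IM0(ldn);
		bits = LD_IM0_MASK;
	} else {
		mask_reg = LD_IM1(ldn - 64);
		bits = LD_IM1_MASK;
	}

	val = nr64(mask_reg);
	if (on)
		val &= ~bits;	/* unmasked: interrupt enabled */
	else
		val |= bits;
	nw64(mask_reg, val);
	return 0;
}
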
253 struct niu_parent *parent = np->parent; in niu_enable_ldn_in_ldg()
259 if (parent->ldg_map[i] != lp->ldg_num) in niu_enable_ldn_in_ldg()
273 for (i = 0; i < np->num_ldg; i++) { in niu_enable_interrupts()
274 struct niu_ldg *lp = &np->ldg[i]; in niu_enable_interrupts()
281 for (i = 0; i < np->num_ldg; i++) in niu_enable_interrupts()
282 niu_ldg_rearm(np, &np->ldg[i], on); in niu_enable_interrupts()
302 while (--limit > 0) { in mdio_wait()
310 return -ENODEV; in mdio_wait()
313 static int mdio_read(struct niu *np, int port, int dev, int reg) in mdio_read()
317 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); in mdio_read()
322 nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev)); in mdio_read()
326 static int mdio_write(struct niu *np, int port, int dev, int reg, int data) in mdio_write()
330 nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg)); in mdio_write()
335 nw64(MIF_FRAME_OUTPUT, MDIO_WRITE_OP(port, dev, data)); in mdio_write()
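
mdio_read()/mdio_write() follow the two-phase clause-45 pattern: latch the target register with an ADDR op, then issue the READ or WRITE op, with mdio_wait() polling MIF_FRAME_OUTPUT in between and returning the frame data or -ENODEV. Reconstructed read side:

/* Reconstructed from the fragments above. */
static int mdio_read_sketch(struct niu *np, int port, int dev, int reg)
{
	int err;

	nw64(MIF_FRAME_OUTPUT, MDIO_ADDR_OP(port, dev, reg));
	err = mdio_wait(np);	/* wait for the address phase */
	if (err < 0)
		return err;

	nw64(MIF_FRAME_OUTPUT, MDIO_READ_OP(port, dev));
	return mdio_wait(np);	/* frame data, or negative errno */
}
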
365 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_tx_cfg()
369 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_tx_cfg()
379 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_rx_cfg()
383 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in esr2_set_rx_cfg()
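
esr2_set_tx_cfg()/esr2_set_rx_cfg() each issue two MDIO writes because the 32-bit SERDES config value spans a 16-bit _L/_H register pair. Reconstructed sketch of the TX side:

/* Reconstructed: low half first, then the high half. */
static int esr2_set_tx_cfg_sketch(struct niu *np, unsigned long channel,
				  u32 val)
{
	int err;

	err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
			 ESR2_TI_PLL_TX_CFG_L(channel), val & 0xffff);
	if (!err)
		err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
				 ESR2_TI_PLL_TX_CFG_H(channel), val >> 16);
	return err;
}
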
392 struct niu_link_config *lp = &np->link_config; in serdes_init_niu_10g_fiber()
401 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_niu_10g_fiber()
404 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_fiber()
429 struct niu_link_config *lp = &np->link_config; in serdes_init_niu_1g_serdes()
443 if (np->port == 0) in serdes_init_niu_1g_serdes()
446 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_niu_1g_serdes()
449 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_1g_serdes()
459 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_1g_serdes()
462 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", in serdes_init_niu_1g_serdes()
463 np->port, __func__); in serdes_init_niu_1g_serdes()
469 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_1g_serdes()
472 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", in serdes_init_niu_1g_serdes()
473 np->port, __func__); in serdes_init_niu_1g_serdes()
492 switch (np->port) { in serdes_init_niu_1g_serdes()
504 return -EINVAL; in serdes_init_niu_1g_serdes()
507 while (max_retry--) { in serdes_init_niu_1g_serdes()
516 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", in serdes_init_niu_1g_serdes()
517 np->port, (int)(sig & mask), (int)val); in serdes_init_niu_1g_serdes()
518 return -ENODEV; in serdes_init_niu_1g_serdes()
526 struct niu_link_config *lp = &np->link_config; in serdes_init_niu_10g_serdes()
538 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_niu_10g_serdes()
541 mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_serdes()
551 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_serdes()
554 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n", in serdes_init_niu_10g_serdes()
555 np->port, __func__); in serdes_init_niu_10g_serdes()
561 err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR, in serdes_init_niu_10g_serdes()
564 netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n", in serdes_init_niu_10g_serdes()
565 np->port, __func__); in serdes_init_niu_10g_serdes()
586 switch (np->port) { in serdes_init_niu_10g_serdes()
610 return -EINVAL; in serdes_init_niu_10g_serdes()
613 while (max_retry--) { in serdes_init_niu_10g_serdes()
623 np->port, (int)(sig & mask), (int)val); in serdes_init_niu_10g_serdes()
628 np->flags &= ~NIU_FLAGS_10G; in serdes_init_niu_10g_serdes()
629 np->mac_xcvr = MAC_XCVR_PCS; in serdes_init_niu_10g_serdes()
631 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", in serdes_init_niu_10g_serdes()
632 np->port); in serdes_init_niu_10g_serdes()
633 return -ENODEV; in serdes_init_niu_10g_serdes()
643 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan)); in esr_read_rxtx_ctrl()
646 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_rxtx_ctrl()
659 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_glue0()
663 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_glue0()
677 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_reset()
681 err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, in esr_read_reset()
695 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_rxtx_ctrl()
698 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_rxtx_ctrl()
707 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_glue0()
710 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_write_glue0()
720 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
724 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
730 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
736 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, in esr_reset()
746 netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n", in esr_reset()
747 np->port, reset); in esr_reset()
748 return -ENODEV; in esr_reset()
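
The esr_read_*() helpers above are the mirror image of the split writes: two 16-bit MDIO reads of an _L/_H pair are stitched into one 32-bit value, exploiting the fact that mdio_read() returns the frame data or a negative errno. Reconstructed:

/* Reconstructed from the esr_read_rxtx_ctrl() fragments. */
static int esr_read_rxtx_ctrl_sketch(struct niu *np, unsigned long chan,
				     u32 *val)
{
	int err;

	err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR, ESR_RXTX_CTRL_L(chan));
	if (err >= 0) {
		*val = (err & 0xffff);
		err = mdio_read(np, np->port, NIU_ESR_DEV_ADDR,
				ESR_RXTX_CTRL_H(chan));
		if (err >= 0) {
			*val |= (err & 0xffff) << 16;
			err = 0;
		}
	}
	return err;
}
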
756 struct niu_link_config *lp = &np->link_config; in serdes_init_10g()
761 switch (np->port) { in serdes_init_10g()
772 return -EINVAL; in serdes_init_10g()
788 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_10g()
839 switch (np->port) { in serdes_init_10g()
863 return -EINVAL; in serdes_init_10g()
867 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { in serdes_init_10g()
868 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; in serdes_init_10g()
871 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", in serdes_init_10g()
872 np->port, (int)(sig & mask), (int)val); in serdes_init_10g()
873 return -ENODEV; in serdes_init_10g()
875 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) in serdes_init_10g()
876 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; in serdes_init_10g()
886 switch (np->port) { in serdes_init_1g()
900 return -EINVAL; in serdes_init_1g()
909 struct niu_link_config *lp = &np->link_config; in serdes_init_1g_serdes()
918 switch (np->port) { in serdes_init_1g_serdes()
933 return -EINVAL; in serdes_init_1g_serdes()
949 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_1g_serdes()
1004 switch (np->port) { in serdes_init_1g_serdes()
1016 return -EINVAL; in serdes_init_1g_serdes()
1020 netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n", in serdes_init_1g_serdes()
1021 np->port, (int)(sig & mask), (int)val); in serdes_init_1g_serdes()
1022 return -ENODEV; in serdes_init_1g_serdes()
1030 struct niu_link_config *lp = &np->link_config; in link_status_1g_serdes()
1041 spin_lock_irqsave(&np->lock, flags); in link_status_1g_serdes()
1051 lp->active_speed = current_speed; in link_status_1g_serdes()
1052 lp->active_duplex = current_duplex; in link_status_1g_serdes()
1053 spin_unlock_irqrestore(&np->lock, flags); in link_status_1g_serdes()
1062 struct niu_link_config *lp = &np->link_config; in link_status_10g_serdes()
1069 if (!(np->flags & NIU_FLAGS_10G)) in link_status_10g_serdes()
1074 spin_lock_irqsave(&np->lock, flags); in link_status_10g_serdes()
1086 lp->active_speed = current_speed; in link_status_10g_serdes()
1087 lp->active_duplex = current_duplex; in link_status_10g_serdes()
1088 spin_unlock_irqrestore(&np->lock, flags); in link_status_10g_serdes()
1095 struct niu_link_config *lp = &np->link_config; in link_status_mii()
1100 err = mii_read(np, np->phy_addr, MII_BMCR); in link_status_mii()
1105 err = mii_read(np, np->phy_addr, MII_BMSR); in link_status_mii()
1110 err = mii_read(np, np->phy_addr, MII_ADVERTISE); in link_status_mii()
1115 err = mii_read(np, np->phy_addr, MII_LPA); in link_status_mii()
1121 err = mii_read(np, np->phy_addr, MII_ESTATUS); in link_status_mii()
1126 err = mii_read(np, np->phy_addr, MII_CTRL1000); in link_status_mii()
1131 err = mii_read(np, np->phy_addr, MII_STAT1000); in link_status_mii()
1153 lp->supported = supported; in link_status_mii()
1161 lp->active_autoneg = 1; in link_status_mii()
1183 lp->active_autoneg = 0; in link_status_mii()
1198 lp->active_advertising = advertising; in link_status_mii()
1199 lp->active_speed = active_speed; in link_status_mii()
1200 lp->active_duplex = active_duplex; in link_status_mii()
1208 struct niu_link_config *lp = &np->link_config; in link_status_1g_rgmii()
1218 spin_lock_irqsave(&np->lock, flags); in link_status_1g_rgmii()
1220 err = mii_read(np, np->phy_addr, MII_BMSR); in link_status_1g_rgmii()
1230 lp->active_speed = current_speed; in link_status_1g_rgmii()
1231 lp->active_duplex = current_duplex; in link_status_1g_rgmii()
1235 spin_unlock_irqrestore(&np->lock, flags); in link_status_1g_rgmii()
1243 struct niu_link_config *lp = &np->link_config; in link_status_1g()
1247 spin_lock_irqsave(&np->lock, flags); in link_status_1g()
1250 lp->supported |= SUPPORTED_TP; in link_status_1g()
1251 lp->active_advertising |= ADVERTISED_TP; in link_status_1g()
1253 spin_unlock_irqrestore(&np->lock, flags); in link_status_1g()
1261 err = mdio_read(np, np->phy_addr, in bcm8704_reset()
1266 err = mdio_write(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in bcm8704_reset()
1272 while (--limit >= 0) { in bcm8704_reset()
1273 err = mdio_read(np, np->phy_addr, in bcm8704_reset()
1281 netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n", in bcm8704_reset()
1282 np->port, (err & 0xffff)); in bcm8704_reset()
1283 return -ENODEV; in bcm8704_reset()
1293 int err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); in bcm8704_user_dev3_readback()
1296 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, reg); in bcm8704_user_dev3_readback()
1307 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8706_init_user_dev3()
1314 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8706_init_user_dev3()
1328 err = mdio_write(np, np->phy_addr, in bcm8704_init_user_dev3()
1342 err = mdio_write(np, np->phy_addr, in bcm8704_init_user_dev3()
1358 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8704_init_user_dev3()
1364 err = mdio_write(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in bcm8704_init_user_dev3()
1378 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_act_led()
1386 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_act_led()
1394 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_led_blink_rate()
1400 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV2_ADDR, in mrvl88x2011_led_blink_rate()
1421 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in xcvr_init_10g_mrvl88x2011()
1428 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in xcvr_init_10g_mrvl88x2011()
1433 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in xcvr_init_10g_mrvl88x2011()
1438 if (np->link_config.loopback_mode == LOOPBACK_MAC) in xcvr_init_10g_mrvl88x2011()
1443 err = mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in xcvr_init_10g_mrvl88x2011()
1449 return mdio_write(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in xcvr_init_10g_mrvl88x2011()
1460 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, in xcvr_diag_bcm870x()
1464 pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err); in xcvr_diag_bcm870x()
1466 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20); in xcvr_diag_bcm870x()
1469 pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err); in xcvr_diag_bcm870x()
1471 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in xcvr_diag_bcm870x()
1475 pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err); in xcvr_diag_bcm870x()
1479 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1483 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1489 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1493 err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, in xcvr_diag_bcm870x()
1502 np->port); in xcvr_diag_bcm870x()
1505 np->port); in xcvr_diag_bcm870x()
1514 struct niu_link_config *lp = &np->link_config; in xcvr_10g_set_lb_bcm870x()
1517 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in xcvr_10g_set_lb_bcm870x()
1524 if (lp->loopback_mode == LOOPBACK_MAC) in xcvr_10g_set_lb_bcm870x()
1527 err = mdio_write(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in xcvr_10g_set_lb_bcm870x()
1540 if ((np->flags & NIU_FLAGS_HOTPLUG_PHY) && in xcvr_init_10g_bcm8706()
1541 (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) == 0) in xcvr_init_10g_bcm8706()
1610 phy_id = phy_decode(np->parent->port_phy, np->port); in xcvr_init_10g()
1611 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; in xcvr_init_10g()
1631 err = mii_write(np, np->phy_addr, MII_BMCR, BMCR_RESET); in mii_reset()
1636 while (--limit >= 0) { in mii_reset()
1638 err = mii_read(np, np->phy_addr, MII_BMCR); in mii_reset()
1645 netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n", in mii_reset()
1646 np->port, err); in mii_reset()
1647 return -ENODEV; in mii_reset()
1667 err = mii_read(np, np->phy_addr, MII_BMSR); in xcvr_init_1g_rgmii()
1674 err = mii_read(np, np->phy_addr, MII_ESTATUS); in xcvr_init_1g_rgmii()
1681 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in xcvr_init_1g_rgmii()
1690 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000); in xcvr_init_1g_rgmii()
1697 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in xcvr_init_1g_rgmii()
1701 err = mii_read(np, np->phy_addr, MII_BMCR); in xcvr_init_1g_rgmii()
1704 bmcr = mii_read(np, np->phy_addr, MII_BMCR); in xcvr_init_1g_rgmii()
1706 err = mii_read(np, np->phy_addr, MII_BMSR); in xcvr_init_1g_rgmii()
1715 struct niu_link_config *lp = &np->link_config; in mii_init_common()
1723 err = mii_read(np, np->phy_addr, MII_BMSR); in mii_init_common()
1730 err = mii_read(np, np->phy_addr, MII_ESTATUS); in mii_init_common()
1737 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in mii_init_common()
1741 if (lp->loopback_mode == LOOPBACK_MAC) { in mii_init_common()
1743 if (lp->active_speed == SPEED_1000) in mii_init_common()
1745 if (lp->active_duplex == DUPLEX_FULL) in mii_init_common()
1749 if (lp->loopback_mode == LOOPBACK_PHY) { in mii_init_common()
1754 err = mii_write(np, np->phy_addr, BCM5464R_AUX_CTL, aux); in mii_init_common()
1759 if (lp->autoneg) { in mii_init_common()
1764 (lp->advertising & ADVERTISED_10baseT_Half)) in mii_init_common()
1767 (lp->advertising & ADVERTISED_10baseT_Full)) in mii_init_common()
1770 (lp->advertising & ADVERTISED_100baseT_Half)) in mii_init_common()
1773 (lp->advertising & ADVERTISED_100baseT_Full)) in mii_init_common()
1775 err = mii_write(np, np->phy_addr, MII_ADVERTISE, adv); in mii_init_common()
1782 (lp->advertising & ADVERTISED_1000baseT_Half)) in mii_init_common()
1785 (lp->advertising & ADVERTISED_1000baseT_Full)) in mii_init_common()
1787 err = mii_write(np, np->phy_addr, in mii_init_common()
1795 /* !lp->autoneg */ in mii_init_common()
1798 if (lp->duplex == DUPLEX_FULL) { in mii_init_common()
1801 } else if (lp->duplex == DUPLEX_HALF) in mii_init_common()
1804 return -EINVAL; in mii_init_common()
1806 if (lp->speed == SPEED_1000) { in mii_init_common()
1807 /* if X-full requested while not supported, or in mii_init_common()
1808 X-half requested while not supported... */ in mii_init_common()
1811 return -EINVAL; in mii_init_common()
1813 } else if (lp->speed == SPEED_100) { in mii_init_common()
1816 return -EINVAL; in mii_init_common()
1818 } else if (lp->speed == SPEED_10) { in mii_init_common()
1821 return -EINVAL; in mii_init_common()
1823 return -EINVAL; in mii_init_common()
1826 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr); in mii_init_common()
1831 err = mii_read(np, np->phy_addr, MII_BMCR); in mii_init_common()
1836 err = mii_read(np, np->phy_addr, MII_BMSR); in mii_init_common()
1842 np->port, bmcr, bmsr); in mii_init_common()
1862 const struct niu_phy_ops *ops = np->phy_ops; in niu_xcvr_init()
1866 if (ops->xcvr_init) in niu_xcvr_init()
1867 err = ops->xcvr_init(np); in niu_xcvr_init()
1874 const struct niu_phy_ops *ops = np->phy_ops; in niu_serdes_init()
1878 if (ops->serdes_init) in niu_serdes_init()
1879 err = ops->serdes_init(np); in niu_serdes_init()
1889 struct niu_link_config *lp = &np->link_config; in niu_link_status_common()
1890 struct net_device *dev = np->dev; in niu_link_status_common()
1893 if (!netif_carrier_ok(dev) && link_up) { in niu_link_status_common()
1894 netif_info(np, link, dev, "Link is up at %s, %s duplex\n", in niu_link_status_common()
1895 lp->active_speed == SPEED_10000 ? "10Gb/sec" : in niu_link_status_common()
1896 lp->active_speed == SPEED_1000 ? "1Gb/sec" : in niu_link_status_common()
1897 lp->active_speed == SPEED_100 ? "100Mbit/sec" : in niu_link_status_common()
1899 lp->active_duplex == DUPLEX_FULL ? "full" : "half"); in niu_link_status_common()
1901 spin_lock_irqsave(&np->lock, flags); in niu_link_status_common()
1904 spin_unlock_irqrestore(&np->lock, flags); in niu_link_status_common()
1906 netif_carrier_on(dev); in niu_link_status_common()
1907 } else if (netif_carrier_ok(dev) && !link_up) { in niu_link_status_common()
1908 netif_warn(np, link, dev, "Link is down\n"); in niu_link_status_common()
1909 spin_lock_irqsave(&np->lock, flags); in niu_link_status_common()
1911 spin_unlock_irqrestore(&np->lock, flags); in niu_link_status_common()
1912 netif_carrier_off(dev); in niu_link_status_common()
1924 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in link_status_10g_mrvl()
1930 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV1_ADDR, in link_status_10g_mrvl()
1938 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in link_status_10g_mrvl()
1943 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV3_ADDR, in link_status_10g_mrvl()
1950 /* Check XGXS Register : 4.0018.[0-3,12] */ in link_status_10g_mrvl()
1951 err = mdio_read(np, np->phy_addr, MRVL88X2011_USER_DEV4_ADDR, in link_status_10g_mrvl()
1962 np->link_config.active_speed = SPEED_10000; in link_status_10g_mrvl()
1963 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_mrvl()
1979 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, in link_status_10g_bcm8706()
1988 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in link_status_10g_bcm8706()
1998 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in link_status_10g_bcm8706()
2010 np->link_config.active_speed = SPEED_INVALID; in link_status_10g_bcm8706()
2011 np->link_config.active_duplex = DUPLEX_INVALID; in link_status_10g_bcm8706()
2016 np->link_config.active_speed = SPEED_10000; in link_status_10g_bcm8706()
2017 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_bcm8706()
2031 err = mdio_read(np, np->phy_addr, BCM8704_PMA_PMD_DEV_ADDR, in link_status_10g_bcom()
2040 err = mdio_read(np, np->phy_addr, BCM8704_PCS_DEV_ADDR, in link_status_10g_bcom()
2049 err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR, in link_status_10g_bcom()
2065 np->link_config.active_speed = SPEED_10000; in link_status_10g_bcom()
2066 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_bcom()
2077 int err = -EINVAL; in link_status_10g()
2079 spin_lock_irqsave(&np->lock, flags); in link_status_10g()
2081 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { in link_status_10g()
2084 phy_id = phy_decode(np->parent->port_phy, np->port); in link_status_10g()
2085 phy_id = np->parent->phy_probe_info.phy_id[phy_id][np->port]; in link_status_10g()
2099 spin_unlock_irqrestore(&np->lock, flags); in link_status_10g()
2109 switch (np->port) { in niu_10g_phy_present()
2148 spin_lock_irqsave(&np->lock, flags); in link_status_10g_hotplug()
2150 if (np->link_config.loopback_mode == LOOPBACK_DISABLED) { in link_status_10g_hotplug()
2151 phy_present_prev = (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) ? in link_status_10g_hotplug()
2158 np->flags |= NIU_FLAGS_HOTPLUG_PHY_PRESENT; in link_status_10g_hotplug()
2159 if (np->phy_ops->xcvr_init) in link_status_10g_hotplug()
2160 err = np->phy_ops->xcvr_init(np); in link_status_10g_hotplug()
2162 err = mdio_read(np, np->phy_addr, in link_status_10g_hotplug()
2165 /* No mdio, back-to-back XAUI */ in link_status_10g_hotplug()
2169 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; in link_status_10g_hotplug()
2172 np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT; in link_status_10g_hotplug()
2174 netif_warn(np, link, np->dev, in link_status_10g_hotplug()
2179 if (np->flags & NIU_FLAGS_HOTPLUG_PHY_PRESENT) { in link_status_10g_hotplug()
2182 /* No mdio, back-to-back XAUI: it is C10NEM */ in link_status_10g_hotplug()
2184 np->link_config.active_speed = SPEED_10000; in link_status_10g_hotplug()
2185 np->link_config.active_duplex = DUPLEX_FULL; in link_status_10g_hotplug()
2190 spin_unlock_irqrestore(&np->lock, flags); in link_status_10g_hotplug()
2197 const struct niu_phy_ops *ops = np->phy_ops; in niu_link_status()
2201 if (ops->link_status) in niu_link_status()
2202 err = ops->link_status(np, link_up_p); in niu_link_status()
2217 if (netif_carrier_ok(np->dev)) in niu_timer()
2221 np->timer.expires = jiffies + off; in niu_timer()
2223 add_timer(&np->timer); in niu_timer()
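
niu_timer() samples link state and re-arms itself, polling faster while carrier is down. A sketch with the intervals assumed:

/* Sketch: poll link status, then re-arm; the HZ intervals here are
 * assumptions, not taken from the listing. */
static void niu_timer_sketch(struct timer_list *t)
{
	struct niu *np = from_timer(np, t, timer);
	unsigned long off;
	int err, link_up;

	err = niu_link_status(np, &link_up);
	if (!err)
		niu_link_status_common(np, link_up);

	off = netif_carrier_ok(np->dev) ? 5 * HZ : 1 * HZ;	/* assumed */
	np->timer.expires = jiffies + off;
	add_timer(&np->timer);
}
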
2352 struct niu_link_config *lp = &np->link_config; in serdes_init_10g_serdes()
2356 switch (np->port) { in serdes_init_10g_serdes()
2369 return -EINVAL; in serdes_init_10g_serdes()
2385 if (lp->loopback_mode == LOOPBACK_PHY) { in serdes_init_10g_serdes()
2436 switch (np->port) { in serdes_init_10g_serdes()
2460 return -EINVAL; in serdes_init_10g_serdes()
2467 np->flags &= ~NIU_FLAGS_10G; in serdes_init_10g_serdes()
2468 np->mac_xcvr = MAC_XCVR_PCS; in serdes_init_10g_serdes()
2470 netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n", in serdes_init_10g_serdes()
2471 np->port); in serdes_init_10g_serdes()
2472 return -ENODEV; in serdes_init_10g_serdes()
2481 struct niu_parent *parent = np->parent; in niu_determine_phy_disposition()
2482 u8 plat_type = parent->plat_type; in niu_determine_phy_disposition()
2487 switch (np->flags & in niu_determine_phy_disposition()
2502 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { in niu_determine_phy_disposition()
2504 if (np->port == 0) in niu_determine_phy_disposition()
2506 if (np->port == 1) in niu_determine_phy_disposition()
2510 phy_addr_off += np->port; in niu_determine_phy_disposition()
2515 switch (np->flags & in niu_determine_phy_disposition()
2527 phy_addr_off += (np->port ^ 0x3); in niu_determine_phy_disposition()
2546 phy_addr_off += np->port; in niu_determine_phy_disposition()
2547 if (np->flags & NIU_FLAGS_HOTPLUG_PHY) { in niu_determine_phy_disposition()
2549 if (np->port == 0) in niu_determine_phy_disposition()
2551 if (np->port == 1) in niu_determine_phy_disposition()
2559 switch(np->port) { in niu_determine_phy_disposition()
2569 return -EINVAL; in niu_determine_phy_disposition()
2571 phy_addr_off = niu_atca_port_num[np->port]; in niu_determine_phy_disposition()
2575 return -EINVAL; in niu_determine_phy_disposition()
2579 np->phy_ops = tp->ops; in niu_determine_phy_disposition()
2580 np->phy_addr = tp->phy_addr_base + phy_addr_off; in niu_determine_phy_disposition()
2587 struct niu_parent *parent = np->parent; in niu_init_link()
2590 if (parent->plat_type == PLAT_TYPE_NIU) { in niu_init_link()
2597 if (err && !(np->flags & NIU_FLAGS_HOTPLUG_PHY)) in niu_init_link()
2601 if (!err || (np->flags & NIU_FLAGS_HOTPLUG_PHY)) in niu_init_link()
2612 if (np->flags & NIU_FLAGS_XMAC) { in niu_set_primary_mac()
2625 if (np->flags & NIU_FLAGS_XMAC) in niu_num_alt_addr()
2638 return -EINVAL; in niu_set_alt_mac()
2640 if (np->flags & NIU_FLAGS_XMAC) { in niu_set_alt_mac()
2659 return -EINVAL; in niu_enable_alt_mac()
2661 if (np->flags & NIU_FLAGS_XMAC) { in niu_enable_alt_mac()
2697 return -EINVAL; in __set_rdc_table_num()
2698 if (np->flags & NIU_FLAGS_XMAC) in __set_rdc_table_num()
2722 return -EINVAL; in niu_set_alt_mac_rdc_table()
2777 while (--limit > 0) { in tcam_wait_bit()
2783 return -ENODEV; in tcam_wait_bit()
2892 return -EINVAL; in tcam_user_eth_class_enable()
2894 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1); in tcam_user_eth_class_enable()
2915 return -EINVAL;
2917 reg = L2_CLS(class - CLASS_CODE_ETHERTYPE1);
2935 return -EINVAL; in tcam_user_ip_class_enable()
2937 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); in tcam_user_ip_class_enable()
2960 return -EINVAL; in tcam_user_ip_class_set()
2962 reg = L3_CLS(class - CLASS_CODE_USER_PROG1); in tcam_user_ip_class_set()
3003 for (i = 0; i < np->parent->tcam_num_entries; i++) { in tcam_flush_all()
3026 return -EINVAL;
3045 return -EINVAL; in hash_write()
3094 return -EINVAL; in fflp_set_partition()
3168 parent = np->parent; in fflp_early_init()
3170 if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) { in fflp_early_init()
3171 if (np->parent->plat_type != PLAT_TYPE_NIU) { in fflp_early_init()
3176 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3185 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3196 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3200 if (np->parent->plat_type != PLAT_TYPE_NIU) { in fflp_early_init()
3203 netif_printk(np, probe, KERN_DEBUG, np->dev, in fflp_early_init()
3212 parent->flags |= PARENT_FLGS_CLS_HWINIT; in fflp_early_init()
3223 return -EINVAL; in niu_set_flow_key()
3225 nw64(FLOW_KEY(class_code - CLASS_CODE_USER_PROG1), key); in niu_set_flow_key()
3233 return -EINVAL; in niu_set_tcam_key()
3235 nw64(TCAM_KEY(class_code - CLASS_CODE_USER_PROG1), key); in niu_set_tcam_key()
3243 if (idx >= (np->clas.tcam_sz - 1)) in tcam_get_index()
3245 return np->clas.tcam_top + ((idx+1) * np->parent->num_ports); in tcam_get_index()
3251 return np->clas.tcam_sz - 1; in tcam_get_size()
3257 return np->clas.tcam_valid_entries - 1; in tcam_get_valid_entry_cnt()
3263 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, offset, size); in niu_rx_skb_append()
3265 skb->len += size; in niu_rx_skb_append()
3266 skb->data_len += size; in niu_rx_skb_append()
3267 skb->truesize += truesize; in niu_rx_skb_append()
3275 return a & (MAX_RBR_RING_SIZE - 1); in niu_hash_rxaddr()
3285 pp = &rp->rxhash[h]; in niu_find_rxpage()
3286 for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { in niu_find_rxpage()
3287 if (p->index == addr) { in niu_find_rxpage()
3302 page->index = base; in niu_hash_page()
3303 page->mapping = (struct address_space *) rp->rxhash[h]; in niu_hash_page()
3304 rp->rxhash[h] = page; in niu_hash_page()
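
niu_find_rxpage()/niu_hash_page() keep an allocation-free hash of receive pages keyed by DMA address: page->index stores the key and page->mapping is reused as the chain pointer. Reconstructed lookup, returning through *link the slot that points at the found page so the caller can unlink it:

/* Reconstructed from the fragments above. */
static struct page *niu_find_rxpage_sketch(struct rx_ring_info *rp, u64 addr,
					   struct page ***link)
{
	unsigned int h = niu_hash_rxaddr(rp, addr);
	struct page *p, **pp;

	addr &= PAGE_MASK;	/* pages are hashed by their DMA base */
	pp = &rp->rxhash[h];
	for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
		if (p->index == addr) {
			*link = pp;
			break;
		}
	}
	return p;	/* NULL if the chain held no match */
}
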
3316 return -ENOMEM; in niu_rbr_add_page()
3318 addr = np->ops->map_page(np->device, page, 0, in niu_rbr_add_page()
3322 return -ENOMEM; in niu_rbr_add_page()
3326 if (rp->rbr_blocks_per_page > 1) in niu_rbr_add_page()
3327 page_ref_add(page, rp->rbr_blocks_per_page - 1); in niu_rbr_add_page()
3329 for (i = 0; i < rp->rbr_blocks_per_page; i++) { in niu_rbr_add_page()
3330 __le32 *rbr = &rp->rbr[start_index + i]; in niu_rbr_add_page()
3333 addr += rp->rbr_block_size; in niu_rbr_add_page()
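
niu_rbr_add_page() ties the pieces together: allocate and DMA-map one page, hash it by DMA address, take extra page references when several RBR blocks share the page, and write each block's address into the ring. Reconstructed from the fragments above, with RBR_DESCR_ADDR_SHIFT assumed from the driver header:

static int niu_rbr_add_page_sketch(struct niu *np, struct rx_ring_info *rp,
				   gfp_t mask, int start_index)
{
	struct page *page;
	u64 addr;
	int i;

	page = alloc_page(mask);
	if (!page)
		return -ENOMEM;

	addr = np->ops->map_page(np->device, page, 0,
				 PAGE_SIZE, DMA_FROM_DEVICE);
	if (!addr) {
		__free_page(page);
		return -ENOMEM;
	}

	niu_hash_page(rp, page, addr);
	if (rp->rbr_blocks_per_page > 1)
		page_ref_add(page, rp->rbr_blocks_per_page - 1);

	for (i = 0; i < rp->rbr_blocks_per_page; i++) {
		__le32 *rbr = &rp->rbr[start_index + i];

		*rbr = cpu_to_le32(addr >> RBR_DESCR_ADDR_SHIFT);
		addr += rp->rbr_block_size;
	}
	return 0;
}
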
3341 int index = rp->rbr_index; in niu_rbr_refill()
3343 rp->rbr_pending++; in niu_rbr_refill()
3344 if ((rp->rbr_pending % rp->rbr_blocks_per_page) == 0) { in niu_rbr_refill()
3348 rp->rbr_pending--; in niu_rbr_refill()
3352 rp->rbr_index += rp->rbr_blocks_per_page; in niu_rbr_refill()
3353 BUG_ON(rp->rbr_index > rp->rbr_table_size); in niu_rbr_refill()
3354 if (rp->rbr_index == rp->rbr_table_size) in niu_rbr_refill()
3355 rp->rbr_index = 0; in niu_rbr_refill()
3357 if (rp->rbr_pending >= rp->rbr_kick_thresh) { in niu_rbr_refill()
3358 nw64(RBR_KICK(rp->rx_channel), rp->rbr_pending); in niu_rbr_refill()
3359 rp->rbr_pending = 0; in niu_rbr_refill()
3366 unsigned int index = rp->rcr_index; in niu_rx_pkt_ignore()
3369 rp->rx_dropped++; in niu_rx_pkt_ignore()
3377 val = le64_to_cpup(&rp->rcr[index]); in niu_rx_pkt_ignore()
3382 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> in niu_rx_pkt_ignore()
3384 if ((page->index + PAGE_SIZE) - rcr_size == addr) { in niu_rx_pkt_ignore()
3385 *link = (struct page *) page->mapping; in niu_rx_pkt_ignore()
3386 np->ops->unmap_page(np->device, page->index, in niu_rx_pkt_ignore()
3388 page->index = 0; in niu_rx_pkt_ignore()
3389 page->mapping = NULL; in niu_rx_pkt_ignore()
3391 rp->rbr_refill_pending++; in niu_rx_pkt_ignore()
3399 rp->rcr_index = index; in niu_rx_pkt_ignore()
3407 unsigned int index = rp->rcr_index; in niu_process_rx_pkt()
3412 skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE); in niu_process_rx_pkt()
3424 val = le64_to_cpup(&rp->rcr[index]); in niu_process_rx_pkt()
3434 rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >> in niu_process_rx_pkt()
3446 skb->ip_summed = CHECKSUM_UNNECESSARY; in niu_process_rx_pkt()
3450 append_size = append_size - skb->len; in niu_process_rx_pkt()
3453 if ((page->index + rp->rbr_block_size) - rcr_size == addr) { in niu_process_rx_pkt()
3454 *link = (struct page *) page->mapping; in niu_process_rx_pkt()
3455 np->ops->unmap_page(np->device, page->index, in niu_process_rx_pkt()
3457 page->index = 0; in niu_process_rx_pkt()
3458 page->mapping = NULL; in niu_process_rx_pkt()
3459 rp->rbr_refill_pending++; in niu_process_rx_pkt()
3468 rp->rcr_index = index; in niu_process_rx_pkt()
3474 rh = (struct rx_pkt_hdr1 *) skb->data; in niu_process_rx_pkt()
3475 if (np->dev->features & NETIF_F_RXHASH) in niu_process_rx_pkt()
3477 ((u32)rh->hashval2_0 << 24 | in niu_process_rx_pkt()
3478 (u32)rh->hashval2_1 << 16 | in niu_process_rx_pkt()
3479 (u32)rh->hashval1_1 << 8 | in niu_process_rx_pkt()
3480 (u32)rh->hashval1_2 << 0), in niu_process_rx_pkt()
3484 rp->rx_packets++; in niu_process_rx_pkt()
3485 rp->rx_bytes += skb->len; in niu_process_rx_pkt()
3487 skb->protocol = eth_type_trans(skb, np->dev); in niu_process_rx_pkt()
3488 skb_record_rx_queue(skb, rp->rx_channel); in niu_process_rx_pkt()
3496 int blocks_per_page = rp->rbr_blocks_per_page; in niu_rbr_fill()
3497 int err, index = rp->rbr_index; in niu_rbr_fill()
3500 while (index < (rp->rbr_table_size - blocks_per_page)) { in niu_rbr_fill()
3508 rp->rbr_index = index; in niu_rbr_fill()
3519 page = rp->rxhash[i]; in niu_rbr_free()
3521 struct page *next = (struct page *) page->mapping; in niu_rbr_free()
3522 u64 base = page->index; in niu_rbr_free()
3524 np->ops->unmap_page(np->device, base, PAGE_SIZE, in niu_rbr_free()
3526 page->index = 0; in niu_rbr_free()
3527 page->mapping = NULL; in niu_rbr_free()
3535 for (i = 0; i < rp->rbr_table_size; i++) in niu_rbr_free()
3536 rp->rbr[i] = cpu_to_le32(0); in niu_rbr_free()
3537 rp->rbr_index = 0; in niu_rbr_free()
3542 struct tx_buff_info *tb = &rp->tx_buffs[idx]; in release_tx_packet()
3543 struct sk_buff *skb = tb->skb; in release_tx_packet()
3548 tp = (struct tx_pkt_hdr *) skb->data; in release_tx_packet()
3549 tx_flags = le64_to_cpup(&tp->flags); in release_tx_packet()
3551 rp->tx_packets++; in release_tx_packet()
3552 rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) - in release_tx_packet()
3556 np->ops->unmap_single(np->device, tb->mapping, in release_tx_packet()
3559 if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK) in release_tx_packet()
3560 rp->mark_pending--; in release_tx_packet()
3562 tb->skb = NULL; in release_tx_packet()
3565 len -= MAX_TX_DESC_LEN; in release_tx_packet()
3568 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in release_tx_packet()
3569 tb = &rp->tx_buffs[idx]; in release_tx_packet()
3570 BUG_ON(tb->skb != NULL); in release_tx_packet()
3571 np->ops->unmap_page(np->device, tb->mapping, in release_tx_packet()
3572 skb_frag_size(&skb_shinfo(skb)->frags[i]), in release_tx_packet()
3582 #define NIU_TX_WAKEUP_THRESH(rp) ((rp)->pending / 4)
3589 u64 cs; in niu_tx_work()
3591 index = (rp - np->tx_rings); in niu_tx_work()
3592 txq = netdev_get_tx_queue(np->dev, index); in niu_tx_work()
3594 cs = rp->tx_cs; in niu_tx_work()
3595 if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK)))) in niu_tx_work()
3598 tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT; in niu_tx_work()
3599 pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) & in niu_tx_work()
3602 rp->last_pkt_cnt = tmp; in niu_tx_work()
3604 cons = rp->cons; in niu_tx_work()
3606 netif_printk(np, tx_done, KERN_DEBUG, np->dev, in niu_tx_work()
3609 while (pkt_cnt--) in niu_tx_work()
3612 rp->cons = cons; in niu_tx_work()
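
niu_tx_work() converts the hardware's free-running packet-done counter into a per-visit delta: subtract the cached last_pkt_cnt and mask with the field width, so wraparound between visits is harmless. Reconstructed core, locals assumed:

static void niu_tx_work_core_sketch(struct niu *np, struct tx_ring_info *rp)
{
	u64 cs, tmp, pkt_cnt;
	int cons;

	cs = rp->tx_cs;
	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
		return;	/* no marker event pending */

	tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
	/* Masked subtraction: correct even when the hardware counter
	 * wraps between visits. */
	pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
		(TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);
	rp->last_pkt_cnt = tmp;

	cons = rp->cons;
	while (pkt_cnt--)
		cons = release_tx_packet(np, rp, cons);
	rp->cons = cons;
}
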
3631 * counters, as they are only 16-bit and can overflow quickly, in niu_sync_rx_discard_stats()
3641 int rx_channel = rp->rx_channel; in niu_sync_rx_discard_stats()
3652 rp->rx_errors += misc & RXMISC_COUNT; in niu_sync_rx_discard_stats()
3655 dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n", in niu_sync_rx_discard_stats()
3658 netif_printk(np, rx_err, KERN_DEBUG, np->dev, in niu_sync_rx_discard_stats()
3659 "rx-%d: MISC drop=%u over=%u\n", in niu_sync_rx_discard_stats()
3660 rx_channel, misc, misc-limit); in niu_sync_rx_discard_stats()
3667 rp->rx_dropped += wred & RED_DIS_CNT_COUNT; in niu_sync_rx_discard_stats()
3670 dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel); in niu_sync_rx_discard_stats()
3672 netif_printk(np, rx_err, KERN_DEBUG, np->dev, in niu_sync_rx_discard_stats()
3673 "rx-%d: WRED drop=%u over=%u\n", in niu_sync_rx_discard_stats()
3674 rx_channel, wred, wred-limit); in niu_sync_rx_discard_stats()
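
Per the comment fragment above, the RX discard counters are only 16 bits and saturate at 0xFFFF rather than wrapping, so the driver folds them into software stats and clears them only once they exceed a limit, bounding the read-clear race window. Reconstructed RXMISC half; the WRED counter (RED_DIS_CNT) is handled the same way:

static void niu_sync_rxmisc_sketch(struct niu *np, struct rx_ring_info *rp,
				   int limit)
{
	int rx_channel = rp->rx_channel;
	u32 misc;

	misc = nr64(RXMISC(rx_channel));
	if (unlikely((misc & RXMISC_COUNT) > limit)) {
		nw64(RXMISC(rx_channel), 0);	/* clear, bounded by limit */
		rp->rx_errors += misc & RXMISC_COUNT;
	}
}
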
3682 struct rxdma_mailbox *mbox = rp->mbox; in niu_rx_work()
3686 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); in niu_rx_work()
3687 qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN; in niu_rx_work()
3689 stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); in niu_rx_work()
3690 qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN); in niu_rx_work()
3692 mbox->rx_dma_ctl_stat = 0; in niu_rx_work()
3693 mbox->rcrstat_a = 0; in niu_rx_work()
3695 netif_printk(np, rx_status, KERN_DEBUG, np->dev, in niu_rx_work()
3697 __func__, rp->rx_channel, (unsigned long long)stat, qlen); in niu_rx_work()
3706 if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) { in niu_rx_work()
3709 for (i = 0; i < rp->rbr_refill_pending; i++) in niu_rx_work()
3711 rp->rbr_refill_pending = 0; in niu_rx_work()
3718 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); in niu_rx_work()
3729 u64 v0 = lp->v0; in niu_poll_core()
3734 netif_printk(np, intr, KERN_DEBUG, np->dev, in niu_poll_core()
3737 for (i = 0; i < np->num_tx_rings; i++) { in niu_poll_core()
3738 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_poll_core()
3739 if (tx_vec & (1 << rp->tx_channel)) in niu_poll_core()
3741 nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0); in niu_poll_core()
3744 for (i = 0; i < np->num_rx_rings; i++) { in niu_poll_core()
3745 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_poll_core()
3747 if (rx_vec & (1 << rp->rx_channel)) { in niu_poll_core()
3750 this_work_done = niu_rx_work(&lp->napi, np, rp, in niu_poll_core()
3753 budget -= this_work_done; in niu_poll_core()
3756 nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0); in niu_poll_core()
3765 struct niu *np = lp->np; in niu_poll()
3780 netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel); in niu_log_rxchan_errors()
3816 u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel)); in niu_rx_error()
3822 err = -EINVAL; in niu_rx_error()
3825 netdev_err(np->dev, "RX channel %u error, stat[%llx]\n", in niu_rx_error()
3826 rp->rx_channel, in niu_rx_error()
3832 nw64(RX_DMA_CTL_STAT(rp->rx_channel), in niu_rx_error()
3839 u64 cs) in niu_log_txchan_errors()
3841 netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel); in niu_log_txchan_errors()
3843 if (cs & TX_CS_MBOX_ERR) in niu_log_txchan_errors()
3845 if (cs & TX_CS_PKT_SIZE_ERR) in niu_log_txchan_errors()
3847 if (cs & TX_CS_TX_RING_OFLOW) in niu_log_txchan_errors()
3849 if (cs & TX_CS_PREF_BUF_PAR_ERR) in niu_log_txchan_errors()
3851 if (cs & TX_CS_NACK_PREF) in niu_log_txchan_errors()
3853 if (cs & TX_CS_NACK_PKT_RD) in niu_log_txchan_errors()
3855 if (cs & TX_CS_CONF_PART_ERR) in niu_log_txchan_errors()
3857 if (cs & TX_CS_PKT_PRT_ERR) in niu_log_txchan_errors()
3865 u64 cs, logh, logl; in niu_tx_error()
3867 cs = nr64(TX_CS(rp->tx_channel)); in niu_tx_error()
3868 logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel)); in niu_tx_error()
3869 logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel)); in niu_tx_error()
3871 netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n", in niu_tx_error()
3872 rp->tx_channel, in niu_tx_error()
3873 (unsigned long long)cs, in niu_tx_error()
3877 niu_log_txchan_errors(np, rp, cs); in niu_tx_error()
3879 return -ENODEV; in niu_tx_error()
3887 if (np->flags & NIU_FLAGS_XMAC) { in niu_mif_interrupt()
3894 netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n", in niu_mif_interrupt()
3897 return -ENODEV; in niu_mif_interrupt()
3902 struct niu_xmac_stats *mp = &np->mac_stats.xmac; in niu_xmac_interrupt()
3907 mp->tx_frames += TXMAC_FRM_CNT_COUNT; in niu_xmac_interrupt()
3909 mp->tx_bytes += TXMAC_BYTE_CNT_COUNT; in niu_xmac_interrupt()
3911 mp->tx_fifo_errors++; in niu_xmac_interrupt()
3913 mp->tx_overflow_errors++; in niu_xmac_interrupt()
3915 mp->tx_max_pkt_size_errors++; in niu_xmac_interrupt()
3917 mp->tx_underflow_errors++; in niu_xmac_interrupt()
3921 mp->rx_local_faults++; in niu_xmac_interrupt()
3923 mp->rx_remote_faults++; in niu_xmac_interrupt()
3925 mp->rx_link_faults += LINK_FAULT_CNT_COUNT; in niu_xmac_interrupt()
3927 mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT; in niu_xmac_interrupt()
3929 mp->rx_frags += RXMAC_FRAG_CNT_COUNT; in niu_xmac_interrupt()
3931 mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT; in niu_xmac_interrupt()
3933 mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT; in niu_xmac_interrupt()
3935 mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT; in niu_xmac_interrupt()
3937 mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT; in niu_xmac_interrupt()
3939 mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT; in niu_xmac_interrupt()
3941 mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT; in niu_xmac_interrupt()
3943 mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT; in niu_xmac_interrupt()
3945 mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT; in niu_xmac_interrupt()
3947 mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT; in niu_xmac_interrupt()
3949 mp->rx_octets += RXMAC_BT_CNT_COUNT; in niu_xmac_interrupt()
3951 mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT; in niu_xmac_interrupt()
3953 mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT; in niu_xmac_interrupt()
3955 mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT; in niu_xmac_interrupt()
3957 mp->rx_underflows++; in niu_xmac_interrupt()
3959 mp->rx_overflows++; in niu_xmac_interrupt()
3963 mp->pause_off_state++; in niu_xmac_interrupt()
3965 mp->pause_on_state++; in niu_xmac_interrupt()
3967 mp->pause_received++; in niu_xmac_interrupt()
3972 struct niu_bmac_stats *mp = &np->mac_stats.bmac; in niu_bmac_interrupt()
3977 mp->tx_underflow_errors++; in niu_bmac_interrupt()
3979 mp->tx_max_pkt_size_errors++; in niu_bmac_interrupt()
3981 mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT; in niu_bmac_interrupt()
3983 mp->tx_frames += BTXMAC_FRM_CNT_COUNT; in niu_bmac_interrupt()
3987 mp->rx_overflows++; in niu_bmac_interrupt()
3989 mp->rx_frames += BRXMAC_FRAME_CNT_COUNT; in niu_bmac_interrupt()
3991 mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; in niu_bmac_interrupt()
3993 mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT; in niu_bmac_interrupt()
3995 mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT; in niu_bmac_interrupt()
3999 mp->pause_off_state++; in niu_bmac_interrupt()
4001 mp->pause_on_state++; in niu_bmac_interrupt()
4003 mp->pause_received++; in niu_bmac_interrupt()
4008 if (np->flags & NIU_FLAGS_XMAC) in niu_mac_interrupt()
4018 netdev_err(np->dev, "Core device errors ( "); in niu_log_device_error()
4050 netdev_err(np->dev, "Core device error, stat[%llx]\n", in niu_device_error()
4055 return -ENODEV; in niu_device_error()
4064 lp->v0 = v0; in niu_slowpath_interrupt()
4065 lp->v1 = v1; in niu_slowpath_interrupt()
4066 lp->v2 = v2; in niu_slowpath_interrupt()
4071 for (i = 0; i < np->num_rx_rings; i++) { in niu_slowpath_interrupt()
4072 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_slowpath_interrupt()
4074 if (rx_vec & (1 << rp->rx_channel)) { in niu_slowpath_interrupt()
4080 nw64(RX_DMA_CTL_STAT(rp->rx_channel), in niu_slowpath_interrupt()
4089 for (i = 0; i < np->num_tx_rings; i++) { in niu_slowpath_interrupt()
4090 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_slowpath_interrupt()
4092 if (tx_vec & (1 << rp->tx_channel)) { in niu_slowpath_interrupt()
4126 struct rxdma_mailbox *mbox = rp->mbox; in niu_rxchan_intr()
4127 u64 stat_write, stat = le64_to_cpup(&mbox->rx_dma_ctl_stat); in niu_rxchan_intr()
4131 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write); in niu_rxchan_intr()
4133 netif_printk(np, intr, KERN_DEBUG, np->dev, in niu_rxchan_intr()
4140 rp->tx_cs = nr64(TX_CS(rp->tx_channel)); in niu_txchan_intr()
4142 netif_printk(np, intr, KERN_DEBUG, np->dev, in niu_txchan_intr()
4143 "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs); in niu_txchan_intr()
4148 struct niu_parent *parent = np->parent; in __niu_fastpath_interrupt()
4155 for (i = 0; i < np->num_rx_rings; i++) { in __niu_fastpath_interrupt()
4156 struct rx_ring_info *rp = &np->rx_rings[i]; in __niu_fastpath_interrupt()
4157 int ldn = LDN_RXDMA(rp->rx_channel); in __niu_fastpath_interrupt()
4159 if (parent->ldg_map[ldn] != ldg) in __niu_fastpath_interrupt()
4163 if (rx_vec & (1 << rp->rx_channel)) in __niu_fastpath_interrupt()
4167 for (i = 0; i < np->num_tx_rings; i++) { in __niu_fastpath_interrupt()
4168 struct tx_ring_info *rp = &np->tx_rings[i]; in __niu_fastpath_interrupt()
4169 int ldn = LDN_TXDMA(rp->tx_channel); in __niu_fastpath_interrupt()
4171 if (parent->ldg_map[ldn] != ldg) in __niu_fastpath_interrupt()
4175 if (tx_vec & (1 << rp->tx_channel)) in __niu_fastpath_interrupt()
4183 if (likely(napi_schedule_prep(&lp->napi))) { in niu_schedule_napi()
4184 lp->v0 = v0; in niu_schedule_napi()
4185 lp->v1 = v1; in niu_schedule_napi()
4186 lp->v2 = v2; in niu_schedule_napi()
4187 __niu_fastpath_interrupt(np, lp->ldg_num, v0); in niu_schedule_napi()
4188 __napi_schedule(&lp->napi); in niu_schedule_napi()
4195 struct niu *np = lp->np; in niu_interrupt()
4196 int ldg = lp->ldg_num; in niu_interrupt()
4204 spin_lock_irqsave(&np->lock, flags); in niu_interrupt()
4217 spin_unlock_irqrestore(&np->lock, flags); in niu_interrupt()
4231 spin_unlock_irqrestore(&np->lock, flags); in niu_interrupt()
4238 if (rp->mbox) { in niu_free_rx_ring_info()
4239 np->ops->free_coherent(np->device, in niu_free_rx_ring_info()
4241 rp->mbox, rp->mbox_dma); in niu_free_rx_ring_info()
4242 rp->mbox = NULL; in niu_free_rx_ring_info()
4244 if (rp->rcr) { in niu_free_rx_ring_info()
4245 np->ops->free_coherent(np->device, in niu_free_rx_ring_info()
4247 rp->rcr, rp->rcr_dma); in niu_free_rx_ring_info()
4248 rp->rcr = NULL; in niu_free_rx_ring_info()
4249 rp->rcr_table_size = 0; in niu_free_rx_ring_info()
4250 rp->rcr_index = 0; in niu_free_rx_ring_info()
4252 if (rp->rbr) { in niu_free_rx_ring_info()
4255 np->ops->free_coherent(np->device, in niu_free_rx_ring_info()
4257 rp->rbr, rp->rbr_dma); in niu_free_rx_ring_info()
4258 rp->rbr = NULL; in niu_free_rx_ring_info()
4259 rp->rbr_table_size = 0; in niu_free_rx_ring_info()
4260 rp->rbr_index = 0; in niu_free_rx_ring_info()
4262 kfree(rp->rxhash); in niu_free_rx_ring_info()
4263 rp->rxhash = NULL; in niu_free_rx_ring_info()
4268 if (rp->mbox) { in niu_free_tx_ring_info()
4269 np->ops->free_coherent(np->device, in niu_free_tx_ring_info()
4271 rp->mbox, rp->mbox_dma); in niu_free_tx_ring_info()
4272 rp->mbox = NULL; in niu_free_tx_ring_info()
4274 if (rp->descr) { in niu_free_tx_ring_info()
4278 if (rp->tx_buffs[i].skb) in niu_free_tx_ring_info()
4282 np->ops->free_coherent(np->device, in niu_free_tx_ring_info()
4284 rp->descr, rp->descr_dma); in niu_free_tx_ring_info()
4285 rp->descr = NULL; in niu_free_tx_ring_info()
4286 rp->pending = 0; in niu_free_tx_ring_info()
4287 rp->prod = 0; in niu_free_tx_ring_info()
4288 rp->cons = 0; in niu_free_tx_ring_info()
4289 rp->wrap_bit = 0; in niu_free_tx_ring_info()
4297 if (np->rx_rings) { in niu_free_channels()
4298 for (i = 0; i < np->num_rx_rings; i++) { in niu_free_channels()
4299 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_free_channels()
4303 kfree(np->rx_rings); in niu_free_channels()
4304 np->rx_rings = NULL; in niu_free_channels()
4305 np->num_rx_rings = 0; in niu_free_channels()
4308 if (np->tx_rings) { in niu_free_channels()
4309 for (i = 0; i < np->num_tx_rings; i++) { in niu_free_channels()
4310 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_free_channels()
4314 kfree(np->tx_rings); in niu_free_channels()
4315 np->tx_rings = NULL; in niu_free_channels()
4316 np->num_tx_rings = 0; in niu_free_channels()
4325 rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *), in niu_alloc_rx_ring_info()
4327 if (!rp->rxhash) in niu_alloc_rx_ring_info()
4328 return -ENOMEM; in niu_alloc_rx_ring_info()
4330 rp->mbox = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4332 &rp->mbox_dma, GFP_KERNEL); in niu_alloc_rx_ring_info()
4333 if (!rp->mbox) in niu_alloc_rx_ring_info()
4334 return -ENOMEM; in niu_alloc_rx_ring_info()
4335 if ((unsigned long)rp->mbox & (64UL - 1)) { in niu_alloc_rx_ring_info()
4336 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n", in niu_alloc_rx_ring_info()
4337 rp->mbox); in niu_alloc_rx_ring_info()
4338 return -EINVAL; in niu_alloc_rx_ring_info()
4341 rp->rcr = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4343 &rp->rcr_dma, GFP_KERNEL); in niu_alloc_rx_ring_info()
4344 if (!rp->rcr) in niu_alloc_rx_ring_info()
4345 return -ENOMEM; in niu_alloc_rx_ring_info()
4346 if ((unsigned long)rp->rcr & (64UL - 1)) { in niu_alloc_rx_ring_info()
4347 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n", in niu_alloc_rx_ring_info()
4348 rp->rcr); in niu_alloc_rx_ring_info()
4349 return -EINVAL; in niu_alloc_rx_ring_info()
4351 rp->rcr_table_size = MAX_RCR_RING_SIZE; in niu_alloc_rx_ring_info()
4352 rp->rcr_index = 0; in niu_alloc_rx_ring_info()
4354 rp->rbr = np->ops->alloc_coherent(np->device, in niu_alloc_rx_ring_info()
4356 &rp->rbr_dma, GFP_KERNEL); in niu_alloc_rx_ring_info()
4357 if (!rp->rbr) in niu_alloc_rx_ring_info()
4358 return -ENOMEM; in niu_alloc_rx_ring_info()
4359 if ((unsigned long)rp->rbr & (64UL - 1)) { in niu_alloc_rx_ring_info()
4360 netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n", in niu_alloc_rx_ring_info()
4361 rp->rbr); in niu_alloc_rx_ring_info()
4362 return -EINVAL; in niu_alloc_rx_ring_info()
4364 rp->rbr_table_size = MAX_RBR_RING_SIZE; in niu_alloc_rx_ring_info()
4365 rp->rbr_index = 0; in niu_alloc_rx_ring_info()
4366 rp->rbr_pending = 0; in niu_alloc_rx_ring_info()
4373 int mtu = np->dev->mtu; in niu_set_max_burst()
4378 rp->max_burst = mtu + 32; in niu_set_max_burst()
4379 if (rp->max_burst > 4096) in niu_set_max_burst()
4380 rp->max_burst = 4096; in niu_set_max_burst()
4388 rp->mbox = np->ops->alloc_coherent(np->device, in niu_alloc_tx_ring_info()
4390 &rp->mbox_dma, GFP_KERNEL); in niu_alloc_tx_ring_info()
4391 if (!rp->mbox) in niu_alloc_tx_ring_info()
4392 return -ENOMEM; in niu_alloc_tx_ring_info()
4393 if ((unsigned long)rp->mbox & (64UL - 1)) { in niu_alloc_tx_ring_info()
4394 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n", in niu_alloc_tx_ring_info()
4395 rp->mbox); in niu_alloc_tx_ring_info()
4396 return -EINVAL; in niu_alloc_tx_ring_info()
4399 rp->descr = np->ops->alloc_coherent(np->device, in niu_alloc_tx_ring_info()
4401 &rp->descr_dma, GFP_KERNEL); in niu_alloc_tx_ring_info()
4402 if (!rp->descr) in niu_alloc_tx_ring_info()
4403 return -ENOMEM; in niu_alloc_tx_ring_info()
4404 if ((unsigned long)rp->descr & (64UL - 1)) { in niu_alloc_tx_ring_info()
4405 netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n", in niu_alloc_tx_ring_info()
4406 rp->descr); in niu_alloc_tx_ring_info()
4407 return -EINVAL; in niu_alloc_tx_ring_info()
4410 rp->pending = MAX_TX_RING_SIZE; in niu_alloc_tx_ring_info()
4411 rp->prod = 0; in niu_alloc_tx_ring_info()
4412 rp->cons = 0; in niu_alloc_tx_ring_info()
4413 rp->wrap_bit = 0; in niu_alloc_tx_ring_info()
4416 rp->mark_freq = rp->pending / 4; in niu_alloc_tx_ring_info()
4429 rp->rbr_block_size = 1 << bss; in niu_size_rbr()
4430 rp->rbr_blocks_per_page = 1 << (PAGE_SHIFT-bss); in niu_size_rbr()
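
/* Note (editorial): since rbr_block_size = 1 << bss and
 * rbr_blocks_per_page = 1 << (PAGE_SHIFT - bss), their product is
 * exactly PAGE_SIZE, so each page splits into a whole number of
 * equal-sized RBR blocks (e.g. 4 KiB page, bss = 11: two 2 KiB blocks). */
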
4432 rp->rbr_sizes[0] = 256; in niu_size_rbr()
4433 rp->rbr_sizes[1] = 1024; in niu_size_rbr()
4434 if (np->dev->mtu > ETH_DATA_LEN) { in niu_size_rbr()
4437 rp->rbr_sizes[2] = 4096; in niu_size_rbr()
4441 rp->rbr_sizes[2] = 8192; in niu_size_rbr()
4445 rp->rbr_sizes[2] = 2048; in niu_size_rbr()
4447 rp->rbr_sizes[3] = rp->rbr_block_size; in niu_size_rbr()
4452 struct niu_parent *parent = np->parent; in niu_alloc_channels()
4459 port = np->port; in niu_alloc_channels()
4462 first_rx_channel += parent->rxchan_per_port[i]; in niu_alloc_channels()
4463 first_tx_channel += parent->txchan_per_port[i]; in niu_alloc_channels()
4466 num_rx_rings = parent->rxchan_per_port[port]; in niu_alloc_channels()
4467 num_tx_rings = parent->txchan_per_port[port]; in niu_alloc_channels()
4471 err = -ENOMEM; in niu_alloc_channels()
4475 np->num_rx_rings = num_rx_rings; in niu_alloc_channels()
4477 np->rx_rings = rx_rings; in niu_alloc_channels()
4479 netif_set_real_num_rx_queues(np->dev, num_rx_rings); in niu_alloc_channels()
4481 for (i = 0; i < np->num_rx_rings; i++) { in niu_alloc_channels()
4482 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_alloc_channels()
4484 rp->np = np; in niu_alloc_channels()
4485 rp->rx_channel = first_rx_channel + i; in niu_alloc_channels()
4494 rp->nonsyn_window = 64; in niu_alloc_channels()
4495 rp->nonsyn_threshold = rp->rcr_table_size - 64; in niu_alloc_channels()
4496 rp->syn_window = 64; in niu_alloc_channels()
4497 rp->syn_threshold = rp->rcr_table_size - 64; in niu_alloc_channels()
4498 rp->rcr_pkt_threshold = 16; in niu_alloc_channels()
4499 rp->rcr_timeout = 8; in niu_alloc_channels()
4500 rp->rbr_kick_thresh = RBR_REFILL_MIN; in niu_alloc_channels()
4501 if (rp->rbr_kick_thresh < rp->rbr_blocks_per_page) in niu_alloc_channels()
4502 rp->rbr_kick_thresh = rp->rbr_blocks_per_page; in niu_alloc_channels()
4511 err = -ENOMEM; in niu_alloc_channels()
4515 np->num_tx_rings = num_tx_rings; in niu_alloc_channels()
4517 np->tx_rings = tx_rings; in niu_alloc_channels()
4519 netif_set_real_num_tx_queues(np->dev, num_tx_rings); in niu_alloc_channels()
4521 for (i = 0; i < np->num_tx_rings; i++) { in niu_alloc_channels()
4522 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_alloc_channels()
4524 rp->np = np; in niu_alloc_channels()
4525 rp->tx_channel = first_tx_channel + i; in niu_alloc_channels()
4543 while (--limit > 0) { in niu_tx_cs_sng_poll()
4548 return -ENODEV; in niu_tx_cs_sng_poll()
4565 while (--limit > 0) { in niu_tx_cs_reset_poll()
4570 return -ENODEV; in niu_tx_cs_reset_poll()
4600 val = (u64)np->port << TX_LOG_PAGE_VLD_FUNC_SHIFT; in niu_tx_channel_lpage_init()
4616 mask = (u64)1 << np->port; in niu_txc_enable_port()
4635 val &= ~TXC_INT_MASK_VAL(np->port); in niu_txc_set_imask()
4636 val |= (imask << TXC_INT_MASK_VAL_SHIFT(np->port)); in niu_txc_set_imask()
4647 for (i = 0; i < np->num_tx_rings; i++) in niu_txc_port_dma_enable()
4648 val |= (1 << np->tx_rings[i].tx_channel); in niu_txc_port_dma_enable()
4650 nw64(TXC_PORT_DMA(np->port), val); in niu_txc_port_dma_enable()
4655 int err, channel = rp->tx_channel; in niu_init_one_tx_channel()
4670 nw64(TXC_DMA_MAX(channel), rp->max_burst); in niu_init_one_tx_channel()
4673 if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE | in niu_init_one_tx_channel()
4675 netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n", in niu_init_one_tx_channel()
4676 channel, (unsigned long long)rp->descr_dma); in niu_init_one_tx_channel()
4677 return -EINVAL; in niu_init_one_tx_channel()
4680 /* The length field in TX_RNG_CFIG is measured in 64-byte in niu_init_one_tx_channel()
4681 * blocks. rp->pending is the number of TX descriptors in in niu_init_one_tx_channel()
4685 ring_len = (rp->pending / 8); in niu_init_one_tx_channel()
4688 rp->descr_dma); in niu_init_one_tx_channel()
4691 if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) || in niu_init_one_tx_channel()
4692 ((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) { in niu_init_one_tx_channel()
4693 netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n", in niu_init_one_tx_channel()
4694 channel, (unsigned long long)rp->mbox_dma); in niu_init_one_tx_channel()
4695 return -EINVAL; in niu_init_one_tx_channel()
4697 nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32); in niu_init_one_tx_channel()
4698 nw64(TXDMA_MBL(channel), rp->mbox_dma & TXDMA_MBL_MBADDR); in niu_init_one_tx_channel()
4702 rp->last_pkt_cnt = 0; in niu_init_one_tx_channel()
4709 struct niu_rdc_tables *tp = &np->parent->rdc_group_cfg[np->port]; in niu_init_rdc_groups()
4710 int i, first_table_num = tp->first_table_num; in niu_init_rdc_groups()
4712 for (i = 0; i < tp->num_tables; i++) { in niu_init_rdc_groups()
4713 struct rdc_table *tbl = &tp->tables[i]; in niu_init_rdc_groups()
4719 tbl->rxdma_channel[slot]); in niu_init_rdc_groups()
4722 nw64(DEF_RDC(np->port), np->parent->rdc_default[np->port]); in niu_init_rdc_groups()
4727 int type = phy_decode(np->parent->port_phy, np->port); in niu_init_drr_weight()
4740 nw64(PT_DRR_WT(np->port), val); in niu_init_drr_weight()
4745 struct niu_parent *parent = np->parent; in niu_init_hostinfo()
4746 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; in niu_init_hostinfo()
4748 int first_rdc_table = tp->first_table_num; in niu_init_hostinfo()
4786 val = (u64)np->port << RX_LOG_PAGE_VLD_FUNC_SHIFT; in niu_rx_channel_lpage_init()
4797 val = (((u64)rp->nonsyn_window << RDC_RED_PARA_WIN_SHIFT) | in niu_rx_channel_wred_init()
4798 ((u64)rp->nonsyn_threshold << RDC_RED_PARA_THRE_SHIFT) | in niu_rx_channel_wred_init()
4799 ((u64)rp->syn_window << RDC_RED_PARA_WIN_SYN_SHIFT) | in niu_rx_channel_wred_init()
4800 ((u64)rp->syn_threshold << RDC_RED_PARA_THRE_SYN_SHIFT)); in niu_rx_channel_wred_init()
4801 nw64(RDC_RED_PARA(rp->rx_channel), val); in niu_rx_channel_wred_init()
4809 switch (rp->rbr_block_size) { in niu_compute_rbr_cfig_b()
4823 return -EINVAL; in niu_compute_rbr_cfig_b()
4826 switch (rp->rbr_sizes[2]) { in niu_compute_rbr_cfig_b()
4841 return -EINVAL; in niu_compute_rbr_cfig_b()
4844 switch (rp->rbr_sizes[1]) { in niu_compute_rbr_cfig_b()
4859 return -EINVAL; in niu_compute_rbr_cfig_b()
4862 switch (rp->rbr_sizes[0]) { in niu_compute_rbr_cfig_b()
4877 return -EINVAL; in niu_compute_rbr_cfig_b()
4896 while (--limit > 0) { in niu_enable_rx_channel()
4902 return -ENODEV; in niu_enable_rx_channel()
4908 int err, channel = rp->rx_channel; in niu_init_one_rx_channel()
4927 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); in niu_init_one_rx_channel()
4929 ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) | in niu_init_one_rx_channel()
4932 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | in niu_init_one_rx_channel()
4933 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); in niu_init_one_rx_channel()
4939 ((u64)rp->rcr_table_size << RCRCFIG_A_LEN_SHIFT) | in niu_init_one_rx_channel()
4940 (rp->rcr_dma & (RCRCFIG_A_STADDR_BASE | RCRCFIG_A_STADDR))); in niu_init_one_rx_channel()
4942 ((u64)rp->rcr_pkt_threshold << RCRCFIG_B_PTHRES_SHIFT) | in niu_init_one_rx_channel()
4944 ((u64)rp->rcr_timeout << RCRCFIG_B_TIMEOUT_SHIFT)); in niu_init_one_rx_channel()
4950 nw64(RBR_KICK(channel), rp->rbr_index); in niu_init_one_rx_channel()
4966 nw64(RX_DMA_CK_DIV, np->parent->rxdma_clock_divider); in niu_init_rx_channels()
4979 for (i = 0; i < np->num_rx_rings; i++) { in niu_init_rx_channels()
4980 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_init_rx_channels()
4992 struct niu_parent *parent = np->parent; in niu_set_ip_frag_rule()
4993 struct niu_classifier *cp = &np->clas; in niu_set_ip_frag_rule()
4997 index = cp->tcam_top; in niu_set_ip_frag_rule()
4998 tp = &parent->tcam[index]; in niu_set_ip_frag_rule()
5004 tp->key[1] = TCAM_V4KEY1_NOPORT; in niu_set_ip_frag_rule()
5005 tp->key_mask[1] = TCAM_V4KEY1_NOPORT; in niu_set_ip_frag_rule()
5006 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | in niu_set_ip_frag_rule()
5008 err = tcam_write(np, index, tp->key, tp->key_mask); in niu_set_ip_frag_rule()
5011 err = tcam_assoc_write(np, index, tp->assoc_data); in niu_set_ip_frag_rule()
5014 tp->valid = 1; in niu_set_ip_frag_rule()
5015 cp->tcam_valid_entries++; in niu_set_ip_frag_rule()
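	/* This TCAM rule catches IPv4 packets with no L4 port info --
	 * i.e. fragments -- via the NOPORT key bit, and the assoc data
	 * steers them to a fixed RDC-table offset so fragments land on a
	 * deterministic ring instead of a meaningless port hash.
	 */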
5022 struct niu_parent *parent = np->parent; in niu_init_classifier_hw()
5023 struct niu_classifier *cp = &np->clas; in niu_init_classifier_hw()
5026 nw64(H1POLY, cp->h1_init); in niu_init_classifier_hw()
5027 nw64(H2POLY, cp->h2_init); in niu_init_classifier_hw()
5034 struct niu_vlan_rdc *vp = &cp->vlan_mappings[i]; in niu_init_classifier_hw()
5036 vlan_tbl_write(np, i, np->port, in niu_init_classifier_hw()
5037 vp->vlan_pref, vp->rdc_num); in niu_init_classifier_hw()
5040 for (i = 0; i < cp->num_alt_mac_mappings; i++) { in niu_init_classifier_hw()
5041 struct niu_altmac_rdc *ap = &cp->alt_mac_mappings[i]; in niu_init_classifier_hw()
5043 err = niu_set_alt_mac_rdc_table(np, ap->alt_mac_num, in niu_init_classifier_hw()
5044 ap->rdc_num, ap->mac_pref); in niu_init_classifier_hw()
5050 int index = i - CLASS_CODE_USER_PROG1; in niu_init_classifier_hw()
5052 err = niu_set_tcam_key(np, i, parent->tcam_key[index]); in niu_init_classifier_hw()
5055 err = niu_set_flow_key(np, i, parent->flow_key[index]); in niu_init_classifier_hw()
5080 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); in niu_zcp_write()
5093 netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n", in niu_zcp_read()
5101 (ZCP_RAM_SEL_CFIFO(np->port) << ZCP_RAM_ACC_RAM_SEL_SHIFT))); in niu_zcp_read()
5106 netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n", in niu_zcp_read()
5124 val |= RESET_CFIFO_RST(np->port); in niu_zcp_cfifo_reset()
5128 val &= ~RESET_CFIFO_RST(np->port); in niu_zcp_cfifo_reset()
5137 if (np->parent->plat_type != PLAT_TYPE_NIU) { in niu_init_zcp()
5138 if (np->port == 0 || np->port == 1) in niu_init_zcp()
5161 nw64(CFIFO_ECC(np->port), 0); in niu_init_zcp()
5204 if (np->parent->plat_type != PLAT_TYPE_NIU) { in niu_init_ipp()
5205 if (np->port == 0 || np->port == 1) in niu_init_ipp()
5255 if ((np->flags & NIU_FLAGS_10G) != 0 && in niu_handle_led()
5256 (np->flags & NIU_FLAGS_FIBER) != 0) { in niu_handle_led()
5271 struct niu_link_config *lp = &np->link_config; in niu_init_xif_xmac()
5274 if (np->flags & NIU_FLAGS_XCVR_SERDES) { in niu_init_xif_xmac()
5285 if (lp->loopback_mode == LOOPBACK_MAC) { in niu_init_xif_xmac()
5292 if (np->flags & NIU_FLAGS_10G) { in niu_init_xif_xmac()
5296 if (!(np->flags & NIU_FLAGS_FIBER) && in niu_init_xif_xmac()
5297 !(np->flags & NIU_FLAGS_XCVR_SERDES)) in niu_init_xif_xmac()
5305 if (lp->active_speed == SPEED_100) in niu_init_xif_xmac()
5314 if (np->flags & NIU_FLAGS_10G) { in niu_init_xif_xmac()
5317 if (lp->active_speed == SPEED_1000) in niu_init_xif_xmac()
5328 struct niu_link_config *lp = &np->link_config; in niu_init_xif_bmac()
5333 if (lp->loopback_mode == LOOPBACK_MAC) in niu_init_xif_bmac()
5338 if (lp->active_speed == SPEED_1000) in niu_init_xif_bmac()
5346 if (!(np->flags & NIU_FLAGS_10G) && in niu_init_xif_bmac()
5347 !(np->flags & NIU_FLAGS_FIBER) && in niu_init_xif_bmac()
5348 lp->active_speed == SPEED_100) in niu_init_xif_bmac()
5358 if (np->flags & NIU_FLAGS_XMAC) in niu_init_xif()
5370 while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) { in niu_pcs_mii_reset()
5382 while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) { in niu_xpcs_reset()
5390 struct niu_link_config *lp = &np->link_config; in niu_init_pcs()
5393 switch (np->flags & (NIU_FLAGS_10G | in niu_init_pcs()
5407 if (!(np->flags & NIU_FLAGS_XMAC)) in niu_init_pcs()
5408 return -EINVAL; in niu_init_pcs()
5418 if (lp->loopback_mode == LOOPBACK_PHY) in niu_init_pcs()
5446 return -EINVAL; in niu_init_pcs()
5466 while (--limit >= 0) { in niu_reset_tx_bmac()
5472 dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n", in niu_reset_tx_bmac()
5473 np->port, in niu_reset_tx_bmac()
5475 return -ENODEV; in niu_reset_tx_bmac()
5483 if (np->flags & NIU_FLAGS_XMAC) in niu_reset_tx_mac()
5505 if (np->flags & NIU_FLAGS_10G) { in niu_init_tx_xmac()
5547 if (np->dev->mtu > ETH_DATA_LEN) in niu_init_tx_mac()
5557 if (np->flags & NIU_FLAGS_XMAC) in niu_init_tx_mac()
5570 while (--limit >= 0) { in niu_reset_rx_xmac()
5577 dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n", in niu_reset_rx_xmac()
5578 np->port, in niu_reset_rx_xmac()
5580 return -ENODEV; in niu_reset_rx_xmac()
5592 while (--limit >= 0) { in niu_reset_rx_bmac()
5598 dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n", in niu_reset_rx_bmac()
5599 np->port, in niu_reset_rx_bmac()
5601 return -ENODEV; in niu_reset_rx_bmac()
5609 if (np->flags & NIU_FLAGS_XMAC) in niu_reset_rx_mac()
5617 struct niu_parent *parent = np->parent; in niu_init_rx_xmac()
5618 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; in niu_init_rx_xmac()
5619 int first_rdc_table = tp->first_table_num; in niu_init_rx_xmac()
5669 struct niu_parent *parent = np->parent; in niu_init_rx_bmac()
5670 struct niu_rdc_tables *tp = &parent->rdc_group_cfg[np->port]; in niu_init_rx_bmac()
5671 int first_rdc_table = tp->first_table_num; in niu_init_rx_bmac()
5704 niu_set_primary_mac(np, np->dev->dev_addr); in niu_init_rx_mac()
5706 if (np->flags & NIU_FLAGS_XMAC) in niu_init_rx_mac()
5736 if (np->flags & NIU_FLAGS_XMAC) in niu_enable_tx_mac()
5749 if (np->flags & NIU_FLAGS_MCAST) in niu_enable_rx_xmac()
5751 if (np->flags & NIU_FLAGS_PROMISC) in niu_enable_rx_xmac()
5768 if (np->flags & NIU_FLAGS_MCAST) in niu_enable_rx_bmac()
5770 if (np->flags & NIU_FLAGS_PROMISC) in niu_enable_rx_bmac()
5782 if (np->flags & NIU_FLAGS_XMAC) in niu_enable_rx_mac()
5821 (void) niu_tx_channel_stop(np, rp->tx_channel); in niu_stop_one_tx_channel()
5828 for (i = 0; i < np->num_tx_rings; i++) { in niu_stop_tx_channels()
5829 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_stop_tx_channels()
5837 (void) niu_tx_channel_reset(np, rp->tx_channel); in niu_reset_one_tx_channel()
5844 for (i = 0; i < np->num_tx_rings; i++) { in niu_reset_tx_channels()
5845 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_reset_tx_channels()
5853 (void) niu_enable_rx_channel(np, rp->rx_channel, 0); in niu_stop_one_rx_channel()
5860 for (i = 0; i < np->num_rx_rings; i++) { in niu_stop_rx_channels()
5861 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_stop_rx_channels()
5869 int channel = rp->rx_channel; in niu_reset_one_rx_channel()
5881 for (i = 0; i < np->num_rx_rings; i++) { in niu_reset_rx_channels()
5882 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_reset_rx_channels()
5896 while (--limit >= 0 && (rd != wr)) { in niu_disable_ipp()
5902 netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n", in niu_disable_ipp()
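	/* Quiescing the IPP means waiting for its FIFO read pointer to
	 * catch up with the write pointer (rd == wr above), i.e. for every
	 * queued packet to drain, before the block is disabled.
	 */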
5921 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n"); in niu_init_hw()
5926 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n"); in niu_init_hw()
5927 for (i = 0; i < np->num_tx_rings; i++) { in niu_init_hw()
5928 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_init_hw()
5935 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n"); in niu_init_hw()
5940 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n"); in niu_init_hw()
5945 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n"); in niu_init_hw()
5950 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n"); in niu_init_hw()
5955 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n"); in niu_init_hw()
5963 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n"); in niu_init_hw()
5967 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n"); in niu_init_hw()
5972 netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n"); in niu_init_hw()
5981 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n"); in niu_stop_hw()
5984 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n"); in niu_stop_hw()
5987 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n"); in niu_stop_hw()
5990 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n"); in niu_stop_hw()
5993 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n"); in niu_stop_hw()
5996 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n"); in niu_stop_hw()
5999 netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n"); in niu_stop_hw()
6005 int port = np->port; in niu_set_irq_name()
6008 sprintf(np->irq_name[0], "%s:MAC", np->dev->name); in niu_set_irq_name()
6011 sprintf(np->irq_name[1], "%s:MIF", np->dev->name); in niu_set_irq_name()
6012 sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name); in niu_set_irq_name()
6016 for (i = 0; i < np->num_ldg - j; i++) { in niu_set_irq_name()
6017 if (i < np->num_rx_rings) in niu_set_irq_name()
6018 sprintf(np->irq_name[i+j], "%s-rx-%d", in niu_set_irq_name()
6019 np->dev->name, i); in niu_set_irq_name()
6020 else if (i < np->num_tx_rings + np->num_rx_rings) in niu_set_irq_name()
6021 sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name, in niu_set_irq_name()
6022 i - np->num_rx_rings); in niu_set_irq_name()
6033 for (i = 0; i < np->num_ldg; i++) { in niu_request_irq()
6034 struct niu_ldg *lp = &np->ldg[i]; in niu_request_irq()
6036 err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED, in niu_request_irq()
6037 np->irq_name[i], lp); in niu_request_irq()
6047 struct niu_ldg *lp = &np->ldg[j]; in niu_request_irq()
6049 free_irq(lp->irq, lp); in niu_request_irq()
6058 for (i = 0; i < np->num_ldg; i++) { in niu_free_irq()
6059 struct niu_ldg *lp = &np->ldg[i]; in niu_free_irq()
6061 free_irq(lp->irq, lp); in niu_free_irq()
6069 for (i = 0; i < np->num_ldg; i++) in niu_enable_napi()
6070 napi_enable(&np->ldg[i].napi); in niu_enable_napi()
6077 for (i = 0; i < np->num_ldg; i++) in niu_disable_napi()
6078 napi_disable(&np->ldg[i].napi); in niu_disable_napi()
6081 static int niu_open(struct net_device *dev) in niu_open() argument
6083 struct niu *np = netdev_priv(dev); in niu_open()
6086 netif_carrier_off(dev); in niu_open()
6102 spin_lock_irq(&np->lock); in niu_open()
6106 timer_setup(&np->timer, niu_timer, 0); in niu_open()
6107 np->timer.expires = jiffies + HZ; in niu_open()
6114 spin_unlock_irq(&np->lock); in niu_open()
6121 netif_tx_start_all_queues(dev); in niu_open()
6123 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) in niu_open()
6124 netif_carrier_on(dev); in niu_open()
6126 add_timer(&np->timer); in niu_open()
6140 static void niu_full_shutdown(struct niu *np, struct net_device *dev) in niu_full_shutdown() argument
6142 cancel_work_sync(&np->reset_task); in niu_full_shutdown()
6145 netif_tx_stop_all_queues(dev); in niu_full_shutdown()
6147 del_timer_sync(&np->timer); in niu_full_shutdown()
6149 spin_lock_irq(&np->lock); in niu_full_shutdown()
6153 spin_unlock_irq(&np->lock); in niu_full_shutdown()
6156 static int niu_close(struct net_device *dev) in niu_close() argument
6158 struct niu *np = netdev_priv(dev); in niu_close()
6160 niu_full_shutdown(np, dev); in niu_close()
6173 struct niu_xmac_stats *mp = &np->mac_stats.xmac; in niu_sync_xmac_stats()
6175 mp->tx_frames += nr64_mac(TXMAC_FRM_CNT); in niu_sync_xmac_stats()
6176 mp->tx_bytes += nr64_mac(TXMAC_BYTE_CNT); in niu_sync_xmac_stats()
6178 mp->rx_link_faults += nr64_mac(LINK_FAULT_CNT); in niu_sync_xmac_stats()
6179 mp->rx_align_errors += nr64_mac(RXMAC_ALIGN_ERR_CNT); in niu_sync_xmac_stats()
6180 mp->rx_frags += nr64_mac(RXMAC_FRAG_CNT); in niu_sync_xmac_stats()
6181 mp->rx_mcasts += nr64_mac(RXMAC_MC_FRM_CNT); in niu_sync_xmac_stats()
6182 mp->rx_bcasts += nr64_mac(RXMAC_BC_FRM_CNT); in niu_sync_xmac_stats()
6183 mp->rx_hist_cnt1 += nr64_mac(RXMAC_HIST_CNT1); in niu_sync_xmac_stats()
6184 mp->rx_hist_cnt2 += nr64_mac(RXMAC_HIST_CNT2); in niu_sync_xmac_stats()
6185 mp->rx_hist_cnt3 += nr64_mac(RXMAC_HIST_CNT3); in niu_sync_xmac_stats()
6186 mp->rx_hist_cnt4 += nr64_mac(RXMAC_HIST_CNT4); in niu_sync_xmac_stats()
6187 mp->rx_hist_cnt5 += nr64_mac(RXMAC_HIST_CNT5); in niu_sync_xmac_stats()
6188 mp->rx_hist_cnt6 += nr64_mac(RXMAC_HIST_CNT6); in niu_sync_xmac_stats()
6189 mp->rx_hist_cnt7 += nr64_mac(RXMAC_HIST_CNT7); in niu_sync_xmac_stats()
6190 mp->rx_octets += nr64_mac(RXMAC_BT_CNT); in niu_sync_xmac_stats()
6191 mp->rx_code_violations += nr64_mac(RXMAC_CD_VIO_CNT); in niu_sync_xmac_stats()
6192 mp->rx_len_errors += nr64_mac(RXMAC_MPSZER_CNT); in niu_sync_xmac_stats()
6193 mp->rx_crc_errors += nr64_mac(RXMAC_CRC_ER_CNT); in niu_sync_xmac_stats()
6198 struct niu_bmac_stats *mp = &np->mac_stats.bmac; in niu_sync_bmac_stats()
6200 mp->tx_bytes += nr64_mac(BTXMAC_BYTE_CNT); in niu_sync_bmac_stats()
6201 mp->tx_frames += nr64_mac(BTXMAC_FRM_CNT); in niu_sync_bmac_stats()
6203 mp->rx_frames += nr64_mac(BRXMAC_FRAME_CNT); in niu_sync_bmac_stats()
6204 mp->rx_align_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); in niu_sync_bmac_stats()
6205 mp->rx_crc_errors += nr64_mac(BRXMAC_ALIGN_ERR_CNT); in niu_sync_bmac_stats()
6206 mp->rx_len_errors += nr64_mac(BRXMAC_CODE_VIOL_ERR_CNT); in niu_sync_bmac_stats()
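	/* Note: rx_crc_errors above re-reads BRXMAC_ALIGN_ERR_CNT, so on
	 * BMAC ports it mirrors rx_align_errors -- a long-standing quirk
	 * of this driver.
	 */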
6211 if (np->flags & NIU_FLAGS_XMAC) in niu_sync_mac_stats()
6226 rx_rings = READ_ONCE(np->rx_rings); in niu_get_rx_stats()
6230 for (i = 0; i < np->num_rx_rings; i++) { in niu_get_rx_stats()
6235 pkts += rp->rx_packets; in niu_get_rx_stats()
6236 bytes += rp->rx_bytes; in niu_get_rx_stats()
6237 dropped += rp->rx_dropped; in niu_get_rx_stats()
6238 errors += rp->rx_errors; in niu_get_rx_stats()
6242 stats->rx_packets = pkts; in niu_get_rx_stats()
6243 stats->rx_bytes = bytes; in niu_get_rx_stats()
6244 stats->rx_dropped = dropped; in niu_get_rx_stats()
6245 stats->rx_errors = errors; in niu_get_rx_stats()
6257 tx_rings = READ_ONCE(np->tx_rings); in niu_get_tx_stats()
6261 for (i = 0; i < np->num_tx_rings; i++) { in niu_get_tx_stats()
6264 pkts += rp->tx_packets; in niu_get_tx_stats()
6265 bytes += rp->tx_bytes; in niu_get_tx_stats()
6266 errors += rp->tx_errors; in niu_get_tx_stats()
6270 stats->tx_packets = pkts; in niu_get_tx_stats()
6271 stats->tx_bytes = bytes; in niu_get_tx_stats()
6272 stats->tx_errors = errors; in niu_get_tx_stats()
6275 static void niu_get_stats(struct net_device *dev, in niu_get_stats() argument
6278 struct niu *np = netdev_priv(dev); in niu_get_stats()
6280 if (netif_running(dev)) { in niu_get_stats()
6304 if (np->flags & NIU_FLAGS_XMAC) in niu_load_hash()
6310 static void niu_set_rx_mode(struct net_device *dev) in niu_set_rx_mode() argument
6312 struct niu *np = netdev_priv(dev); in niu_set_rx_mode()
6318 spin_lock_irqsave(&np->lock, flags); in niu_set_rx_mode()
6321 np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC); in niu_set_rx_mode()
6322 if (dev->flags & IFF_PROMISC) in niu_set_rx_mode()
6323 np->flags |= NIU_FLAGS_PROMISC; in niu_set_rx_mode()
6324 if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev))) in niu_set_rx_mode()
6325 np->flags |= NIU_FLAGS_MCAST; in niu_set_rx_mode()
6327 alt_cnt = netdev_uc_count(dev); in niu_set_rx_mode()
6330 np->flags |= NIU_FLAGS_PROMISC; in niu_set_rx_mode()
6336 netdev_for_each_uc_addr(ha, dev) { in niu_set_rx_mode()
6337 err = niu_set_alt_mac(np, index, ha->addr); in niu_set_rx_mode()
6339 netdev_warn(dev, "Error %d adding alt mac %d\n", in niu_set_rx_mode()
6343 netdev_warn(dev, "Error %d enabling alt mac %d\n", in niu_set_rx_mode()
6350 if (np->flags & NIU_FLAGS_XMAC) in niu_set_rx_mode()
6357 netdev_warn(dev, "Error %d disabling alt mac %d\n", in niu_set_rx_mode()
6361 if (dev->flags & IFF_ALLMULTI) { in niu_set_rx_mode()
6364 } else if (!netdev_mc_empty(dev)) { in niu_set_rx_mode()
6365 netdev_for_each_mc_addr(ha, dev) { in niu_set_rx_mode()
6366 u32 crc = ether_crc_le(ETH_ALEN, ha->addr); in niu_set_rx_mode()
6369 hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); in niu_set_rx_mode()
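			/* Bucket math (the full source first shifts crc down
			 * to its top byte): 8 bits of CRC address 256 buckets
			 * laid out as sixteen 16-bit hash registers --
			 * crc >> 4 selects the register and crc & 0xf the
			 * bit, numbered MSB-first.
			 */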
6373 if (np->flags & NIU_FLAGS_MCAST) in niu_set_rx_mode()
6377 spin_unlock_irqrestore(&np->lock, flags); in niu_set_rx_mode()
6380 static int niu_set_mac_addr(struct net_device *dev, void *p) in niu_set_mac_addr() argument
6382 struct niu *np = netdev_priv(dev); in niu_set_mac_addr()
6386 if (!is_valid_ether_addr(addr->sa_data)) in niu_set_mac_addr()
6387 return -EADDRNOTAVAIL; in niu_set_mac_addr()
6389 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); in niu_set_mac_addr()
6391 if (!netif_running(dev)) in niu_set_mac_addr()
6394 spin_lock_irqsave(&np->lock, flags); in niu_set_mac_addr()
6396 niu_set_primary_mac(np, dev->dev_addr); in niu_set_mac_addr()
6398 spin_unlock_irqrestore(&np->lock, flags); in niu_set_mac_addr()
6403 static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) in niu_ioctl() argument
6405 return -EOPNOTSUPP; in niu_ioctl()
6410 netif_trans_update(np->dev); /* prevent tx timeout */ in niu_netif_stop()
6414 netif_tx_disable(np->dev); in niu_netif_stop()
6423 netif_tx_wake_all_queues(np->dev); in niu_netif_start()
6434 if (np->rx_rings) { in niu_reset_buffers()
6435 for (i = 0; i < np->num_rx_rings; i++) { in niu_reset_buffers()
6436 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_reset_buffers()
6441 page = rp->rxhash[j]; in niu_reset_buffers()
6444 (struct page *) page->mapping; in niu_reset_buffers()
6445 u64 base = page->index; in niu_reset_buffers()
6447 rp->rbr[k++] = cpu_to_le32(base); in niu_reset_buffers()
6457 rp->rbr_index = rp->rbr_table_size - 1; in niu_reset_buffers()
6458 rp->rcr_index = 0; in niu_reset_buffers()
6459 rp->rbr_pending = 0; in niu_reset_buffers()
6460 rp->rbr_refill_pending = 0; in niu_reset_buffers()
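		/* The loop above rebuilds the RBR in place: rxhash[] buckets
		 * chain their pages through page->mapping, each page's DMA
		 * base was stashed in page->index when it was hashed in, and
		 * those addresses are re-emitted as ring entries without any
		 * reallocation.  Only the ring indices need resetting.
		 */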
6463 if (np->tx_rings) { in niu_reset_buffers()
6464 for (i = 0; i < np->num_tx_rings; i++) { in niu_reset_buffers()
6465 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_reset_buffers()
6468 if (rp->tx_buffs[j].skb) in niu_reset_buffers()
6472 rp->pending = MAX_TX_RING_SIZE; in niu_reset_buffers()
6473 rp->prod = 0; in niu_reset_buffers()
6474 rp->cons = 0; in niu_reset_buffers()
6475 rp->wrap_bit = 0; in niu_reset_buffers()
6486 spin_lock_irqsave(&np->lock, flags); in niu_reset_task()
6487 if (!netif_running(np->dev)) { in niu_reset_task()
6488 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6492 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6494 del_timer_sync(&np->timer); in niu_reset_task()
6498 spin_lock_irqsave(&np->lock, flags); in niu_reset_task()
6502 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6506 spin_lock_irqsave(&np->lock, flags); in niu_reset_task()
6510 np->timer.expires = jiffies + HZ; in niu_reset_task()
6511 add_timer(&np->timer); in niu_reset_task()
6515 spin_unlock_irqrestore(&np->lock, flags); in niu_reset_task()
6518 static void niu_tx_timeout(struct net_device *dev, unsigned int txqueue) in niu_tx_timeout() argument
6520 struct niu *np = netdev_priv(dev); in niu_tx_timeout()
6522 dev_err(np->device, "%s: Transmit timed out, resetting\n", in niu_tx_timeout()
6523 dev->name); in niu_tx_timeout()
6525 schedule_work(&np->reset_task); in niu_tx_timeout()
6532 __le64 *desc = &rp->descr[index]; in niu_set_txd()
6548 eth_proto = be16_to_cpu(ehdr->h_proto); in niu_compute_tx_flags()
6552 __be16 val = vp->h_vlan_encapsulated_proto; in niu_compute_tx_flags()
6558 switch (skb->protocol) { in niu_compute_tx_flags()
6560 ip_proto = ip_hdr(skb)->protocol; in niu_compute_tx_flags()
6561 ihl = ip_hdr(skb)->ihl; in niu_compute_tx_flags()
6564 ip_proto = ipv6_hdr(skb)->nexthdr; in niu_compute_tx_flags()
6574 if (skb->ip_summed == CHECKSUM_PARTIAL) { in niu_compute_tx_flags()
6582 start = skb_checksum_start_offset(skb) - in niu_compute_tx_flags()
6584 stuff = start + skb->csum_offset; in niu_compute_tx_flags()
6590 l3off = skb_network_offset(skb) - in niu_compute_tx_flags()
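/* niu_compute_tx_flags() condenses everything the hardware needs for
 * checksum insertion into the 64-bit tx_pkt_hdr flags word: ethertype
 * and L4 protocol, IP header length, the L3 header offset, and (for
 * CHECKSUM_PARTIAL) the checksum start/stuff offsets -- all measured
 * past the prepended tx_pkt_hdr itself.
 */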
6606 struct net_device *dev) in niu_start_xmit() argument
6608 struct niu *np = netdev_priv(dev); in niu_start_xmit()
6619 rp = &np->tx_rings[i]; in niu_start_xmit()
6620 txq = netdev_get_tx_queue(dev, i); in niu_start_xmit()
6622 if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) { in niu_start_xmit()
6624 dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name); in niu_start_xmit()
6625 rp->tx_errors++; in niu_start_xmit()
6644 align = ((unsigned long) skb->data & (16 - 1)); in niu_start_xmit()
6647 ehdr = (struct ethhdr *) skb->data; in niu_start_xmit()
6650 len = skb->len - sizeof(struct tx_pkt_hdr); in niu_start_xmit()
6651 tp->flags = cpu_to_le64(niu_compute_tx_flags(skb, ehdr, align, len)); in niu_start_xmit()
6652 tp->resv = 0; in niu_start_xmit()
6655 mapping = np->ops->map_single(np->device, skb->data, in niu_start_xmit()
6658 prod = rp->prod; in niu_start_xmit()
6660 rp->tx_buffs[prod].skb = skb; in niu_start_xmit()
6661 rp->tx_buffs[prod].mapping = mapping; in niu_start_xmit()
6664 if (++rp->mark_counter == rp->mark_freq) { in niu_start_xmit()
6665 rp->mark_counter = 0; in niu_start_xmit()
6667 rp->mark_pending++; in niu_start_xmit()
6671 nfg = skb_shinfo(skb)->nr_frags; in niu_start_xmit()
6673 tlen -= MAX_TX_DESC_LEN; in niu_start_xmit()
6688 len -= this_len; in niu_start_xmit()
6691 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in niu_start_xmit()
6692 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in niu_start_xmit()
6695 mapping = np->ops->map_page(np->device, skb_frag_page(frag), in niu_start_xmit()
6699 rp->tx_buffs[prod].skb = NULL; in niu_start_xmit()
6700 rp->tx_buffs[prod].mapping = mapping; in niu_start_xmit()
6707 if (prod < rp->prod) in niu_start_xmit()
6708 rp->wrap_bit ^= TX_RING_KICK_WRAP; in niu_start_xmit()
6709 rp->prod = prod; in niu_start_xmit()
6711 nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3)); in niu_start_xmit()
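	/* Doorbell semantics: descriptors are 8 bytes, so prod << 3 is the
	 * new tail in the kick register's units, and the wrap bit toggles
	 * on every producer wrap-around (see above) so the chip can tell a
	 * completely full ring from an empty one.
	 */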
6723 rp->tx_errors++; in niu_start_xmit()
6728 static int niu_change_mtu(struct net_device *dev, int new_mtu) in niu_change_mtu() argument
6730 struct niu *np = netdev_priv(dev); in niu_change_mtu()
6733 orig_jumbo = (dev->mtu > ETH_DATA_LEN); in niu_change_mtu()
6736 dev->mtu = new_mtu; in niu_change_mtu()
6738 if (!netif_running(dev) || in niu_change_mtu()
6742 niu_full_shutdown(np, dev); in niu_change_mtu()
6752 spin_lock_irq(&np->lock); in niu_change_mtu()
6756 timer_setup(&np->timer, niu_timer, 0); in niu_change_mtu()
6757 np->timer.expires = jiffies + HZ; in niu_change_mtu()
6764 spin_unlock_irq(&np->lock); in niu_change_mtu()
6767 netif_tx_start_all_queues(dev); in niu_change_mtu()
6768 if (np->link_config.loopback_mode != LOOPBACK_DISABLED) in niu_change_mtu()
6769 netif_carrier_on(dev); in niu_change_mtu()
6771 add_timer(&np->timer); in niu_change_mtu()
6777 static void niu_get_drvinfo(struct net_device *dev, in niu_get_drvinfo() argument
6780 struct niu *np = netdev_priv(dev); in niu_get_drvinfo()
6781 struct niu_vpd *vpd = &np->vpd; in niu_get_drvinfo()
6783 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); in niu_get_drvinfo()
6784 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); in niu_get_drvinfo()
6785 snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d", in niu_get_drvinfo()
6786 vpd->fcode_major, vpd->fcode_minor); in niu_get_drvinfo()
6787 if (np->parent->plat_type != PLAT_TYPE_NIU) in niu_get_drvinfo()
6788 strlcpy(info->bus_info, pci_name(np->pdev), in niu_get_drvinfo()
6789 sizeof(info->bus_info)); in niu_get_drvinfo()
6792 static int niu_get_link_ksettings(struct net_device *dev, in niu_get_link_ksettings() argument
6795 struct niu *np = netdev_priv(dev); in niu_get_link_ksettings()
6798 lp = &np->link_config; in niu_get_link_ksettings()
6801 cmd->base.phy_address = np->phy_addr; in niu_get_link_ksettings()
6802 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, in niu_get_link_ksettings()
6803 lp->supported); in niu_get_link_ksettings()
6804 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, in niu_get_link_ksettings()
6805 lp->active_advertising); in niu_get_link_ksettings()
6806 cmd->base.autoneg = lp->active_autoneg; in niu_get_link_ksettings()
6807 cmd->base.speed = lp->active_speed; in niu_get_link_ksettings()
6808 cmd->base.duplex = lp->active_duplex; in niu_get_link_ksettings()
6809 cmd->base.port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP; in niu_get_link_ksettings()
6814 static int niu_set_link_ksettings(struct net_device *dev, in niu_set_link_ksettings() argument
6817 struct niu *np = netdev_priv(dev); in niu_set_link_ksettings()
6818 struct niu_link_config *lp = &np->link_config; in niu_set_link_ksettings()
6820 ethtool_convert_link_mode_to_legacy_u32(&lp->advertising, in niu_set_link_ksettings()
6821 cmd->link_modes.advertising); in niu_set_link_ksettings()
6822 lp->speed = cmd->base.speed; in niu_set_link_ksettings()
6823 lp->duplex = cmd->base.duplex; in niu_set_link_ksettings()
6824 lp->autoneg = cmd->base.autoneg; in niu_set_link_ksettings()
6828 static u32 niu_get_msglevel(struct net_device *dev) in niu_get_msglevel() argument
6830 struct niu *np = netdev_priv(dev); in niu_get_msglevel()
6831 return np->msg_enable; in niu_get_msglevel()
6834 static void niu_set_msglevel(struct net_device *dev, u32 value) in niu_set_msglevel() argument
6836 struct niu *np = netdev_priv(dev); in niu_set_msglevel()
6837 np->msg_enable = value; in niu_set_msglevel()
6840 static int niu_nway_reset(struct net_device *dev) in niu_nway_reset() argument
6842 struct niu *np = netdev_priv(dev); in niu_nway_reset()
6844 if (np->link_config.autoneg) in niu_nway_reset()
6850 static int niu_get_eeprom_len(struct net_device *dev) in niu_get_eeprom_len() argument
6852 struct niu *np = netdev_priv(dev); in niu_get_eeprom_len()
6854 return np->eeprom_len; in niu_get_eeprom_len()
6857 static int niu_get_eeprom(struct net_device *dev, in niu_get_eeprom() argument
6860 struct niu *np = netdev_priv(dev); in niu_get_eeprom()
6863 offset = eeprom->offset; in niu_get_eeprom()
6864 len = eeprom->len; in niu_get_eeprom()
6867 return -EINVAL; in niu_get_eeprom()
6868 if (offset >= np->eeprom_len) in niu_get_eeprom()
6869 return -EINVAL; in niu_get_eeprom()
6870 if (offset + len > np->eeprom_len) in niu_get_eeprom()
6871 len = eeprom->len = np->eeprom_len - offset; in niu_get_eeprom()
6877 b_count = 4 - b_offset; in niu_get_eeprom()
6881 val = nr64(ESPC_NCR((offset - b_offset) / 4)); in niu_get_eeprom()
6884 len -= b_count; in niu_get_eeprom()
6891 len -= 4; in niu_get_eeprom()
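	/* The SPROM is exposed as 32-bit ESPC_NCR() words.  The b_offset /
	 * b_count dance handles a byte offset that is not 4-aligned by
	 * first reading the word containing it, after which the main loop
	 * streams whole words while len >= 4.
	 */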
6964 return -EINVAL; in niu_class_to_ethflow()
7060 nfc->data = 0; in niu_get_hash_opts()
7062 if (!niu_ethflow_to_class(nfc->flow_type, &class)) in niu_get_hash_opts()
7063 return -EINVAL; in niu_get_hash_opts()
7065 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & in niu_get_hash_opts()
7067 nfc->data = RXH_DISCARD; in niu_get_hash_opts()
7069 nfc->data = niu_flowkey_to_ethflow(np->parent->flow_key[class - in niu_get_hash_opts()
7080 tmp = (tp->key[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; in niu_get_ip4fs_from_tcam_key()
7081 fsp->h_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7083 tmp = (tp->key[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; in niu_get_ip4fs_from_tcam_key()
7084 fsp->h_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7086 tmp = (tp->key_mask[3] & TCAM_V4KEY3_SADDR) >> TCAM_V4KEY3_SADDR_SHIFT; in niu_get_ip4fs_from_tcam_key()
7087 fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7089 tmp = (tp->key_mask[3] & TCAM_V4KEY3_DADDR) >> TCAM_V4KEY3_DADDR_SHIFT; in niu_get_ip4fs_from_tcam_key()
7090 fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7092 fsp->h_u.tcp_ip4_spec.tos = (tp->key[2] & TCAM_V4KEY2_TOS) >> in niu_get_ip4fs_from_tcam_key()
7094 fsp->m_u.tcp_ip4_spec.tos = (tp->key_mask[2] & TCAM_V4KEY2_TOS) >> in niu_get_ip4fs_from_tcam_key()
7097 switch (fsp->flow_type) { in niu_get_ip4fs_from_tcam_key()
7101 prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7103 fsp->h_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); in niu_get_ip4fs_from_tcam_key()
7105 prt = ((tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7107 fsp->h_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); in niu_get_ip4fs_from_tcam_key()
7109 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7111 fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(prt); in niu_get_ip4fs_from_tcam_key()
7113 prt = ((tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7115 fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(prt); in niu_get_ip4fs_from_tcam_key()
7119 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7121 fsp->h_u.ah_ip4_spec.spi = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7123 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7125 fsp->m_u.ah_ip4_spec.spi = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7128 tmp = (tp->key[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7130 fsp->h_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7132 tmp = (tp->key_mask[2] & TCAM_V4KEY2_PORT_SPI) >> in niu_get_ip4fs_from_tcam_key()
7134 fsp->m_u.usr_ip4_spec.l4_4_bytes = cpu_to_be32(tmp); in niu_get_ip4fs_from_tcam_key()
7136 fsp->h_u.usr_ip4_spec.proto = in niu_get_ip4fs_from_tcam_key()
7137 (tp->key[2] & TCAM_V4KEY2_PROTO) >> in niu_get_ip4fs_from_tcam_key()
7139 fsp->m_u.usr_ip4_spec.proto = in niu_get_ip4fs_from_tcam_key()
7140 (tp->key_mask[2] & TCAM_V4KEY2_PROTO) >> in niu_get_ip4fs_from_tcam_key()
7143 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; in niu_get_ip4fs_from_tcam_key()
7153 struct niu_parent *parent = np->parent; in niu_get_ethtool_tcam_entry()
7155 struct ethtool_rx_flow_spec *fsp = &nfc->fs; in niu_get_ethtool_tcam_entry()
7160 idx = tcam_get_index(np, (u16)nfc->fs.location); in niu_get_ethtool_tcam_entry()
7162 tp = &parent->tcam[idx]; in niu_get_ethtool_tcam_entry()
7163 if (!tp->valid) { in niu_get_ethtool_tcam_entry()
7164 netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n", in niu_get_ethtool_tcam_entry()
7165 parent->index, (u16)nfc->fs.location, idx); in niu_get_ethtool_tcam_entry()
7166 return -EINVAL; in niu_get_ethtool_tcam_entry()
7170 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> in niu_get_ethtool_tcam_entry()
7172 ret = niu_class_to_ethflow(class, &fsp->flow_type); in niu_get_ethtool_tcam_entry()
7174 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", in niu_get_ethtool_tcam_entry()
7175 parent->index); in niu_get_ethtool_tcam_entry()
7179 if (fsp->flow_type == AH_V4_FLOW || fsp->flow_type == AH_V6_FLOW) { in niu_get_ethtool_tcam_entry()
7180 u32 proto = (tp->key[2] & TCAM_V4KEY2_PROTO) >> in niu_get_ethtool_tcam_entry()
7183 if (fsp->flow_type == AH_V4_FLOW) in niu_get_ethtool_tcam_entry()
7184 fsp->flow_type = ESP_V4_FLOW; in niu_get_ethtool_tcam_entry()
7186 fsp->flow_type = ESP_V6_FLOW; in niu_get_ethtool_tcam_entry()
7190 switch (fsp->flow_type) { in niu_get_ethtool_tcam_entry()
7204 ret = -EINVAL; in niu_get_ethtool_tcam_entry()
7210 ret = -EINVAL; in niu_get_ethtool_tcam_entry()
7217 if (tp->assoc_data & TCAM_ASSOCDATA_DISC) in niu_get_ethtool_tcam_entry()
7218 fsp->ring_cookie = RX_CLS_FLOW_DISC; in niu_get_ethtool_tcam_entry()
7220 fsp->ring_cookie = (tp->assoc_data & TCAM_ASSOCDATA_OFFSET) >> in niu_get_ethtool_tcam_entry()
7224 nfc->data = tcam_get_size(np); in niu_get_ethtool_tcam_entry()
7233 struct niu_parent *parent = np->parent; in niu_get_ethtool_tcam_all()
7240 nfc->data = tcam_get_size(np); in niu_get_ethtool_tcam_all()
7243 for (cnt = 0, i = 0; i < nfc->data; i++) { in niu_get_ethtool_tcam_all()
7245 tp = &parent->tcam[idx]; in niu_get_ethtool_tcam_all()
7246 if (!tp->valid) in niu_get_ethtool_tcam_all()
7248 if (cnt == nfc->rule_cnt) { in niu_get_ethtool_tcam_all()
7249 ret = -EMSGSIZE; in niu_get_ethtool_tcam_all()
7257 nfc->rule_cnt = cnt; in niu_get_ethtool_tcam_all()
7262 static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, in niu_get_nfc() argument
7265 struct niu *np = netdev_priv(dev); in niu_get_nfc()
7268 switch (cmd->cmd) { in niu_get_nfc()
7273 cmd->data = np->num_rx_rings; in niu_get_nfc()
7276 cmd->rule_cnt = tcam_get_valid_entry_cnt(np); in niu_get_nfc()
7285 ret = -EINVAL; in niu_get_nfc()
7298 if (!niu_ethflow_to_class(nfc->flow_type, &class)) in niu_set_hash_opts()
7299 return -EINVAL; in niu_set_hash_opts()
7303 return -EINVAL; in niu_set_hash_opts()
7305 if (nfc->data & RXH_DISCARD) { in niu_set_hash_opts()
7307 flow_key = np->parent->tcam_key[class - in niu_set_hash_opts()
7310 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), flow_key); in niu_set_hash_opts()
7311 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = flow_key; in niu_set_hash_opts()
7316 if (np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] & in niu_set_hash_opts()
7319 flow_key = np->parent->tcam_key[class - in niu_set_hash_opts()
7322 nw64(TCAM_KEY(class - CLASS_CODE_USER_PROG1), in niu_set_hash_opts()
7324 np->parent->tcam_key[class - CLASS_CODE_USER_PROG1] = in niu_set_hash_opts()
7330 if (!niu_ethflow_to_flowkey(nfc->data, &flow_key)) in niu_set_hash_opts()
7331 return -EINVAL; in niu_set_hash_opts()
7334 nw64(FLOW_KEY(class - CLASS_CODE_USER_PROG1), flow_key); in niu_set_hash_opts()
7335 np->parent->flow_key[class - CLASS_CODE_USER_PROG1] = flow_key; in niu_set_hash_opts()
7349 sip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4src); in niu_get_tcamkey_from_ip4fs()
7350 sipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4src); in niu_get_tcamkey_from_ip4fs()
7351 dip = be32_to_cpu(fsp->h_u.tcp_ip4_spec.ip4dst); in niu_get_tcamkey_from_ip4fs()
7352 dipm = be32_to_cpu(fsp->m_u.tcp_ip4_spec.ip4dst); in niu_get_tcamkey_from_ip4fs()
7354 tp->key[0] = class << TCAM_V4KEY0_CLASS_CODE_SHIFT; in niu_get_tcamkey_from_ip4fs()
7355 tp->key_mask[0] = TCAM_V4KEY0_CLASS_CODE; in niu_get_tcamkey_from_ip4fs()
7356 tp->key[1] = (u64)l2_rdc_tab << TCAM_V4KEY1_L2RDCNUM_SHIFT; in niu_get_tcamkey_from_ip4fs()
7357 tp->key_mask[1] = TCAM_V4KEY1_L2RDCNUM; in niu_get_tcamkey_from_ip4fs()
7359 tp->key[3] = (u64)sip << TCAM_V4KEY3_SADDR_SHIFT; in niu_get_tcamkey_from_ip4fs()
7360 tp->key[3] |= dip; in niu_get_tcamkey_from_ip4fs()
7362 tp->key_mask[3] = (u64)sipm << TCAM_V4KEY3_SADDR_SHIFT; in niu_get_tcamkey_from_ip4fs()
7363 tp->key_mask[3] |= dipm; in niu_get_tcamkey_from_ip4fs()
7365 tp->key[2] |= ((u64)fsp->h_u.tcp_ip4_spec.tos << in niu_get_tcamkey_from_ip4fs()
7367 tp->key_mask[2] |= ((u64)fsp->m_u.tcp_ip4_spec.tos << in niu_get_tcamkey_from_ip4fs()
7369 switch (fsp->flow_type) { in niu_get_tcamkey_from_ip4fs()
7373 sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); in niu_get_tcamkey_from_ip4fs()
7374 spm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); in niu_get_tcamkey_from_ip4fs()
7375 dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); in niu_get_tcamkey_from_ip4fs()
7376 dpm = be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); in niu_get_tcamkey_from_ip4fs()
7378 tp->key[2] |= (((u64)sport << 16) | dport); in niu_get_tcamkey_from_ip4fs()
7379 tp->key_mask[2] |= (((u64)spm << 16) | dpm); in niu_get_tcamkey_from_ip4fs()
7380 niu_ethflow_to_l3proto(fsp->flow_type, &pid); in niu_get_tcamkey_from_ip4fs()
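		/* For port-based flows, key word 2 carries the 32-bit
		 * (source << 16 | dest) port pair alongside TOS and the
		 * resolved L3 protocol; key_mask mirrors every field so only
		 * the user-specified bits take part in the match.
		 */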
7384 spi = be32_to_cpu(fsp->h_u.ah_ip4_spec.spi); in niu_get_tcamkey_from_ip4fs()
7385 spim = be32_to_cpu(fsp->m_u.ah_ip4_spec.spi); in niu_get_tcamkey_from_ip4fs()
7387 tp->key[2] |= spi; in niu_get_tcamkey_from_ip4fs()
7388 tp->key_mask[2] |= spim; in niu_get_tcamkey_from_ip4fs()
7389 niu_ethflow_to_l3proto(fsp->flow_type, &pid); in niu_get_tcamkey_from_ip4fs()
7392 spi = be32_to_cpu(fsp->h_u.usr_ip4_spec.l4_4_bytes); in niu_get_tcamkey_from_ip4fs()
7393 spim = be32_to_cpu(fsp->m_u.usr_ip4_spec.l4_4_bytes); in niu_get_tcamkey_from_ip4fs()
7395 tp->key[2] |= spi; in niu_get_tcamkey_from_ip4fs()
7396 tp->key_mask[2] |= spim; in niu_get_tcamkey_from_ip4fs()
7397 pid = fsp->h_u.usr_ip4_spec.proto; in niu_get_tcamkey_from_ip4fs()
7403 tp->key[2] |= ((u64)pid << TCAM_V4KEY2_PROTO_SHIFT); in niu_get_tcamkey_from_ip4fs()
7405 tp->key_mask[2] |= TCAM_V4KEY2_PROTO; in niu_get_tcamkey_from_ip4fs()
7412 struct niu_parent *parent = np->parent; in niu_add_ethtool_tcam_entry()
7414 struct ethtool_rx_flow_spec *fsp = &nfc->fs; in niu_add_ethtool_tcam_entry()
7415 struct niu_rdc_tables *rdc_table = &parent->rdc_group_cfg[np->port]; in niu_add_ethtool_tcam_entry()
7416 int l2_rdc_table = rdc_table->first_table_num; in niu_add_ethtool_tcam_entry()
7424 idx = nfc->fs.location; in niu_add_ethtool_tcam_entry()
7426 return -EINVAL; in niu_add_ethtool_tcam_entry()
7428 if (fsp->flow_type == IP_USER_FLOW) { in niu_add_ethtool_tcam_entry()
7431 struct ethtool_usrip4_spec *uspec = &fsp->h_u.usr_ip4_spec; in niu_add_ethtool_tcam_entry()
7432 struct ethtool_usrip4_spec *umask = &fsp->m_u.usr_ip4_spec; in niu_add_ethtool_tcam_entry()
7434 if (uspec->ip_ver != ETH_RX_NFC_IP4) in niu_add_ethtool_tcam_entry()
7435 return -EINVAL; in niu_add_ethtool_tcam_entry()
7440 if (parent->l3_cls[i]) { in niu_add_ethtool_tcam_entry()
7441 if (uspec->proto == parent->l3_cls_pid[i]) { in niu_add_ethtool_tcam_entry()
7442 class = parent->l3_cls[i]; in niu_add_ethtool_tcam_entry()
7443 parent->l3_cls_refcnt[i]++; in niu_add_ethtool_tcam_entry()
7467 uspec->proto, in niu_add_ethtool_tcam_entry()
7468 uspec->tos, in niu_add_ethtool_tcam_entry()
7469 umask->tos); in niu_add_ethtool_tcam_entry()
7476 parent->l3_cls[i] = class; in niu_add_ethtool_tcam_entry()
7477 parent->l3_cls_pid[i] = uspec->proto; in niu_add_ethtool_tcam_entry()
7478 parent->l3_cls_refcnt[i]++; in niu_add_ethtool_tcam_entry()
7484 netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n", in niu_add_ethtool_tcam_entry()
7485 parent->index, __func__, uspec->proto); in niu_add_ethtool_tcam_entry()
7486 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7491 if (!niu_ethflow_to_class(fsp->flow_type, &class)) { in niu_add_ethtool_tcam_entry()
7492 return -EINVAL; in niu_add_ethtool_tcam_entry()
7499 tp = &parent->tcam[idx]; in niu_add_ethtool_tcam_entry()
7504 switch (fsp->flow_type) { in niu_add_ethtool_tcam_entry()
7518 netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n", in niu_add_ethtool_tcam_entry()
7519 parent->index, __func__, fsp->flow_type); in niu_add_ethtool_tcam_entry()
7520 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7526 netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n", in niu_add_ethtool_tcam_entry()
7527 parent->index, __func__, fsp->flow_type); in niu_add_ethtool_tcam_entry()
7528 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7532 /* fill in the assoc data */ in niu_add_ethtool_tcam_entry()
7533 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { in niu_add_ethtool_tcam_entry()
7534 tp->assoc_data = TCAM_ASSOCDATA_DISC; in niu_add_ethtool_tcam_entry()
7536 if (fsp->ring_cookie >= np->num_rx_rings) { in niu_add_ethtool_tcam_entry()
7537 netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n", in niu_add_ethtool_tcam_entry()
7538 parent->index, __func__, in niu_add_ethtool_tcam_entry()
7539 (long long)fsp->ring_cookie); in niu_add_ethtool_tcam_entry()
7540 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7543 tp->assoc_data = (TCAM_ASSOCDATA_TRES_USE_OFFSET | in niu_add_ethtool_tcam_entry()
7544 (fsp->ring_cookie << in niu_add_ethtool_tcam_entry()
7548 err = tcam_write(np, idx, tp->key, tp->key_mask); in niu_add_ethtool_tcam_entry()
7550 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7553 err = tcam_assoc_write(np, idx, tp->assoc_data); in niu_add_ethtool_tcam_entry()
7555 ret = -EINVAL; in niu_add_ethtool_tcam_entry()
7560 tp->valid = 1; in niu_add_ethtool_tcam_entry()
7561 np->clas.tcam_valid_entries++; in niu_add_ethtool_tcam_entry()
7570 struct niu_parent *parent = np->parent; in niu_del_ethtool_tcam_entry()
7578 return -EINVAL; in niu_del_ethtool_tcam_entry()
7583 tp = &parent->tcam[idx]; in niu_del_ethtool_tcam_entry()
7586 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> in niu_del_ethtool_tcam_entry()
7592 if (parent->l3_cls[i] == class) { in niu_del_ethtool_tcam_entry()
7593 parent->l3_cls_refcnt[i]--; in niu_del_ethtool_tcam_entry()
7594 if (!parent->l3_cls_refcnt[i]) { in niu_del_ethtool_tcam_entry()
7601 parent->l3_cls[i] = 0; in niu_del_ethtool_tcam_entry()
7602 parent->l3_cls_pid[i] = 0; in niu_del_ethtool_tcam_entry()
7608 netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n", in niu_del_ethtool_tcam_entry()
7609 parent->index, __func__, in niu_del_ethtool_tcam_entry()
7611 ret = -EINVAL; in niu_del_ethtool_tcam_entry()
7621 tp->valid = 0; in niu_del_ethtool_tcam_entry()
7622 np->clas.tcam_valid_entries--; in niu_del_ethtool_tcam_entry()
7629 static int niu_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd) in niu_set_nfc() argument
7631 struct niu *np = netdev_priv(dev); in niu_set_nfc()
7634 switch (cmd->cmd) { in niu_set_nfc()
7642 ret = niu_del_ethtool_tcam_entry(np, cmd->fs.location); in niu_set_nfc()
7645 ret = -EINVAL; in niu_set_nfc()
7730 static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data) in niu_get_strings() argument
7732 struct niu *np = netdev_priv(dev); in niu_get_strings()
7738 if (np->flags & NIU_FLAGS_XMAC) { in niu_get_strings()
7747 for (i = 0; i < np->num_rx_rings; i++) { in niu_get_strings()
7752 for (i = 0; i < np->num_tx_rings; i++) { in niu_get_strings()
7759 static int niu_get_sset_count(struct net_device *dev, int stringset) in niu_get_sset_count() argument
7761 struct niu *np = netdev_priv(dev); in niu_get_sset_count()
7764 return -EINVAL; in niu_get_sset_count()
7766 return (np->flags & NIU_FLAGS_XMAC ? in niu_get_sset_count()
7769 (np->num_rx_rings * NUM_RXCHAN_STAT_KEYS) + in niu_get_sset_count()
7770 (np->num_tx_rings * NUM_TXCHAN_STAT_KEYS); in niu_get_sset_count()
7773 static void niu_get_ethtool_stats(struct net_device *dev, in niu_get_ethtool_stats() argument
7776 struct niu *np = netdev_priv(dev); in niu_get_ethtool_stats()
7780 if (np->flags & NIU_FLAGS_XMAC) { in niu_get_ethtool_stats()
7781 memcpy(data, &np->mac_stats.xmac, in niu_get_ethtool_stats()
7785 memcpy(data, &np->mac_stats.bmac, in niu_get_ethtool_stats()
7789 for (i = 0; i < np->num_rx_rings; i++) { in niu_get_ethtool_stats()
7790 struct rx_ring_info *rp = &np->rx_rings[i]; in niu_get_ethtool_stats()
7794 data[0] = rp->rx_channel; in niu_get_ethtool_stats()
7795 data[1] = rp->rx_packets; in niu_get_ethtool_stats()
7796 data[2] = rp->rx_bytes; in niu_get_ethtool_stats()
7797 data[3] = rp->rx_dropped; in niu_get_ethtool_stats()
7798 data[4] = rp->rx_errors; in niu_get_ethtool_stats()
7801 for (i = 0; i < np->num_tx_rings; i++) { in niu_get_ethtool_stats()
7802 struct tx_ring_info *rp = &np->tx_rings[i]; in niu_get_ethtool_stats()
7804 data[0] = rp->tx_channel; in niu_get_ethtool_stats()
7805 data[1] = rp->tx_packets; in niu_get_ethtool_stats()
7806 data[2] = rp->tx_bytes; in niu_get_ethtool_stats()
7807 data[3] = rp->tx_errors; in niu_get_ethtool_stats()
7814 if (np->flags & NIU_FLAGS_XMAC) in niu_led_state_save()
7822 if (np->flags & NIU_FLAGS_XMAC) in niu_led_state_restore()
7832 if (np->flags & NIU_FLAGS_XMAC) { in niu_force_led()
7848 static int niu_set_phys_id(struct net_device *dev, in niu_set_phys_id() argument
7852 struct niu *np = netdev_priv(dev); in niu_set_phys_id()
7854 if (!netif_running(dev)) in niu_set_phys_id()
7855 return -EAGAIN; in niu_set_phys_id()
7859 np->orig_led_state = niu_led_state_save(np); in niu_set_phys_id()
7871 niu_led_state_restore(np, np->orig_led_state); in niu_set_phys_id()
7899 return -EINVAL; in niu_ldg_assign_ldn()
7901 return -EINVAL; in niu_ldg_assign_ldn()
7903 parent->ldg_map[ldn] = ldg; in niu_ldg_assign_ldn()
7905 if (np->parent->plat_type == PLAT_TYPE_NIU) { in niu_ldg_assign_ldn()
7906 /* On N2 NIU, the ldn-->ldg assignments are set up and fixed by in niu_ldg_assign_ldn()
7912 dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n", in niu_ldg_assign_ldn()
7913 np->port, ldn, ldg, in niu_ldg_assign_ldn()
7915 return -EINVAL; in niu_ldg_assign_ldn()
7926 return -EINVAL; in niu_set_ldg_timer_res()
7939 return -EINVAL; in niu_set_ldg_sid()
7953 return -EINVAL; in niu_pci_eeprom_read()
7963 } while (limit--); in niu_pci_eeprom_read()
7965 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", in niu_pci_eeprom_read()
7967 return -ENODEV; in niu_pci_eeprom_read()
7978 } while (limit--); in niu_pci_eeprom_read()
7980 dev_err(np->device, "EEPROM read timeout frame[%llx]\n", in niu_pci_eeprom_read()
7982 return -ENODEV; in niu_pci_eeprom_read()
8037 return -EINVAL; in niu_pci_vpd_get_propname()
8044 struct niu_vpd *vpd = &np->vpd; in niu_vpd_parse_version()
8045 int len = strlen(vpd->version) + 1; in niu_vpd_parse_version()
8046 const char *s = vpd->version; in niu_vpd_parse_version()
8049 for (i = 0; i < len - 5; i++) { in niu_vpd_parse_version()
8053 if (i >= len - 5) in niu_vpd_parse_version()
8057 sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor); in niu_vpd_parse_version()
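	/* The scan above hunts through the VPD version string for the
	 * firmware-version marker (the full source matches "FCode ") and
	 * sscanf() then parses the "major.minor" pair that follows it.
	 */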
8059 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_vpd_parse_version()
8061 vpd->fcode_major, vpd->fcode_minor); in niu_vpd_parse_version()
8062 if (vpd->fcode_major > NIU_VPD_MIN_MAJOR || in niu_vpd_parse_version()
8063 (vpd->fcode_major == NIU_VPD_MIN_MAJOR && in niu_vpd_parse_version()
8064 vpd->fcode_minor >= NIU_VPD_MIN_MINOR)) in niu_vpd_parse_version()
8065 np->flags |= NIU_FLAGS_VPD_VALID; in niu_vpd_parse_version()
8080 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_vpd_scan_props()
8109 prop_buf = np->vpd.model; in niu_pci_vpd_scan_props()
8112 } else if (!strcmp(namebuf, "board-model")) { in niu_pci_vpd_scan_props()
8113 prop_buf = np->vpd.board_model; in niu_pci_vpd_scan_props()
8117 prop_buf = np->vpd.version; in niu_pci_vpd_scan_props()
8120 } else if (!strcmp(namebuf, "local-mac-address")) { in niu_pci_vpd_scan_props()
8121 prop_buf = np->vpd.local_mac; in niu_pci_vpd_scan_props()
8124 } else if (!strcmp(namebuf, "num-mac-addresses")) { in niu_pci_vpd_scan_props()
8125 prop_buf = &np->vpd.mac_num; in niu_pci_vpd_scan_props()
8128 } else if (!strcmp(namebuf, "phy-type")) { in niu_pci_vpd_scan_props()
8129 prop_buf = np->vpd.phy_type; in niu_pci_vpd_scan_props()
8135 dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len); in niu_pci_vpd_scan_props()
8136 return -EINVAL; in niu_pci_vpd_scan_props()
8143 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_vpd_scan_props()
8180 return -EINVAL; in niu_pci_vpd_fetch()
8261 np->flags &= ~(NIU_FLAGS_FIBER | in niu_phy_type_prop_decode()
8263 np->mac_xcvr = MAC_XCVR_MII; in niu_phy_type_prop_decode()
8266 np->flags |= (NIU_FLAGS_10G | in niu_phy_type_prop_decode()
8268 np->mac_xcvr = MAC_XCVR_XPCS; in niu_phy_type_prop_decode()
8271 np->flags &= ~NIU_FLAGS_10G; in niu_phy_type_prop_decode()
8272 np->flags |= NIU_FLAGS_FIBER; in niu_phy_type_prop_decode()
8273 np->mac_xcvr = MAC_XCVR_PCS; in niu_phy_type_prop_decode()
8276 np->flags |= NIU_FLAGS_10G; in niu_phy_type_prop_decode()
8277 np->flags &= ~NIU_FLAGS_FIBER; in niu_phy_type_prop_decode()
8278 np->mac_xcvr = MAC_XCVR_XPCS; in niu_phy_type_prop_decode()
8281 np->flags |= NIU_FLAGS_10G; in niu_phy_type_prop_decode()
8282 np->flags &= ~NIU_FLAGS_FIBER; in niu_phy_type_prop_decode()
8283 np->flags |= NIU_FLAGS_XCVR_SERDES; in niu_phy_type_prop_decode()
8284 np->mac_xcvr = MAC_XCVR_XPCS; in niu_phy_type_prop_decode()
8286 return -EINVAL; in niu_phy_type_prop_decode()
8295 if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || in niu_pci_vpd_get_nports()
8296 (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || in niu_pci_vpd_get_nports()
8297 (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || in niu_pci_vpd_get_nports()
8298 (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || in niu_pci_vpd_get_nports()
8299 (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { in niu_pci_vpd_get_nports()
8301 } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || in niu_pci_vpd_get_nports()
8302 (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || in niu_pci_vpd_get_nports()
8303 (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || in niu_pci_vpd_get_nports()
8304 (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { in niu_pci_vpd_get_nports()
8313 struct net_device *dev = np->dev; in niu_pci_vpd_validate() local
8314 struct niu_vpd *vpd = &np->vpd; in niu_pci_vpd_validate()
8317 if (!is_valid_ether_addr(&vpd->local_mac[0])) { in niu_pci_vpd_validate()
8318 dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n"); in niu_pci_vpd_validate()
8320 np->flags &= ~NIU_FLAGS_VPD_VALID; in niu_pci_vpd_validate()
8324 if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || in niu_pci_vpd_validate()
8325 !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { in niu_pci_vpd_validate()
8326 np->flags |= NIU_FLAGS_10G; in niu_pci_vpd_validate()
8327 np->flags &= ~NIU_FLAGS_FIBER; in niu_pci_vpd_validate()
8328 np->flags |= NIU_FLAGS_XCVR_SERDES; in niu_pci_vpd_validate()
8329 np->mac_xcvr = MAC_XCVR_PCS; in niu_pci_vpd_validate()
8330 if (np->port > 1) { in niu_pci_vpd_validate()
8331 np->flags |= NIU_FLAGS_FIBER; in niu_pci_vpd_validate()
8332 np->flags &= ~NIU_FLAGS_10G; in niu_pci_vpd_validate()
8334 if (np->flags & NIU_FLAGS_10G) in niu_pci_vpd_validate()
8335 np->mac_xcvr = MAC_XCVR_XPCS; in niu_pci_vpd_validate()
8336 } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { in niu_pci_vpd_validate()
8337 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | in niu_pci_vpd_validate()
8339 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { in niu_pci_vpd_validate()
8340 dev_err(np->device, "Illegal phy string [%s]\n", in niu_pci_vpd_validate()
8341 np->vpd.phy_type); in niu_pci_vpd_validate()
8342 dev_err(np->device, "Falling back to SPROM\n"); in niu_pci_vpd_validate()
8343 np->flags &= ~NIU_FLAGS_VPD_VALID; in niu_pci_vpd_validate()
8347 memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN); in niu_pci_vpd_validate()
8349 val8 = dev->dev_addr[5]; in niu_pci_vpd_validate()
8350 dev->dev_addr[5] += np->port; in niu_pci_vpd_validate()
8351 if (dev->dev_addr[5] < val8) in niu_pci_vpd_validate()
8352 dev->dev_addr[4]++; in niu_pci_vpd_validate()
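/* A minimal sketch of the per-port MAC derivation above (the same
 * open-coded logic reappears in niu_pci_probe_sprom() below);
 * niu_mac_add_port() is a hypothetical name, not a driver function:
 */
static inline void niu_mac_add_port(u8 *addr, int port)
{
	u8 old = addr[5];

	addr[5] += port;	/* port N uses base MAC + N */
	if (addr[5] < old)	/* low byte wrapped: carry into byte 4 */
		addr[4]++;
}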
8357 struct net_device *dev = np->dev; in niu_pci_probe_sprom() local
8366 np->eeprom_len = len; in niu_pci_probe_sprom()
8368 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8379 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8382 dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff)); in niu_pci_probe_sprom()
8383 return -EINVAL; in niu_pci_probe_sprom()
8387 switch (np->port) { in niu_pci_probe_sprom()
8405 dev_err(np->device, "Bogus port number %u\n", in niu_pci_probe_sprom()
8406 np->port); in niu_pci_probe_sprom()
8407 return -EINVAL; in niu_pci_probe_sprom()
8409 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8415 np->flags &= ~(NIU_FLAGS_FIBER | in niu_pci_probe_sprom()
8417 np->mac_xcvr = MAC_XCVR_MII; in niu_pci_probe_sprom()
8422 np->flags &= ~NIU_FLAGS_10G; in niu_pci_probe_sprom()
8423 np->flags |= NIU_FLAGS_FIBER; in niu_pci_probe_sprom()
8424 np->mac_xcvr = MAC_XCVR_PCS; in niu_pci_probe_sprom()
8429 np->flags |= NIU_FLAGS_10G; in niu_pci_probe_sprom()
8430 np->flags &= ~NIU_FLAGS_FIBER; in niu_pci_probe_sprom()
8431 np->mac_xcvr = MAC_XCVR_XPCS; in niu_pci_probe_sprom()
8436 np->flags |= (NIU_FLAGS_10G | in niu_pci_probe_sprom()
8438 np->mac_xcvr = MAC_XCVR_XPCS; in niu_pci_probe_sprom()
8442 dev_err(np->device, "Bogus SPROM phy type %u\n", val8); in niu_pci_probe_sprom()
8443 return -EINVAL; in niu_pci_probe_sprom()
8447 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8449 dev->dev_addr[0] = (val >> 0) & 0xff; in niu_pci_probe_sprom()
8450 dev->dev_addr[1] = (val >> 8) & 0xff; in niu_pci_probe_sprom()
8451 dev->dev_addr[2] = (val >> 16) & 0xff; in niu_pci_probe_sprom()
8452 dev->dev_addr[3] = (val >> 24) & 0xff; in niu_pci_probe_sprom()
8455 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8457 dev->dev_addr[4] = (val >> 0) & 0xff; in niu_pci_probe_sprom()
8458 dev->dev_addr[5] = (val >> 8) & 0xff; in niu_pci_probe_sprom()
8460 if (!is_valid_ether_addr(&dev->dev_addr[0])) { in niu_pci_probe_sprom()
8461 dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n", in niu_pci_probe_sprom()
8462 dev->dev_addr); in niu_pci_probe_sprom()
8463 return -EINVAL; in niu_pci_probe_sprom()
8466 val8 = dev->dev_addr[5]; in niu_pci_probe_sprom()
8467 dev->dev_addr[5] += np->port; in niu_pci_probe_sprom()
8468 if (dev->dev_addr[5] < val8) in niu_pci_probe_sprom()
8469 dev->dev_addr[4]++; in niu_pci_probe_sprom()
8472 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8475 return -EINVAL; in niu_pci_probe_sprom()
8480 np->vpd.model[i + 3] = (tmp >> 0) & 0xff; in niu_pci_probe_sprom()
8481 np->vpd.model[i + 2] = (tmp >> 8) & 0xff; in niu_pci_probe_sprom()
8482 np->vpd.model[i + 1] = (tmp >> 16) & 0xff; in niu_pci_probe_sprom()
8483 np->vpd.model[i + 0] = (tmp >> 24) & 0xff; in niu_pci_probe_sprom()
8485 np->vpd.model[val] = '\0'; in niu_pci_probe_sprom()
8488 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8491 return -EINVAL; in niu_pci_probe_sprom()
8496 np->vpd.board_model[i + 3] = (tmp >> 0) & 0xff; in niu_pci_probe_sprom()
8497 np->vpd.board_model[i + 2] = (tmp >> 8) & 0xff; in niu_pci_probe_sprom()
8498 np->vpd.board_model[i + 1] = (tmp >> 16) & 0xff; in niu_pci_probe_sprom()
8499 np->vpd.board_model[i + 0] = (tmp >> 24) & 0xff; in niu_pci_probe_sprom()
8501 np->vpd.board_model[val] = '\0'; in niu_pci_probe_sprom()
8503 np->vpd.mac_num = in niu_pci_probe_sprom()
8505 netif_printk(np, probe, KERN_DEBUG, np->dev, in niu_pci_probe_sprom()
8506 "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num); in niu_pci_probe_sprom()
8513 struct niu_parent *parent = np->parent; in niu_get_and_validate_port()
8515 if (np->port <= 1) in niu_get_and_validate_port()
8516 np->flags |= NIU_FLAGS_XMAC; in niu_get_and_validate_port()
8518 if (!parent->num_ports) { in niu_get_and_validate_port()
8519 if (parent->plat_type == PLAT_TYPE_NIU) { in niu_get_and_validate_port()
8520 parent->num_ports = 2; in niu_get_and_validate_port()
8522 parent->num_ports = niu_pci_vpd_get_nports(np); in niu_get_and_validate_port()
8523 if (!parent->num_ports) { in niu_get_and_validate_port()
8527 parent->num_ports = nr64(ESPC_NUM_PORTS_MACS) & in niu_get_and_validate_port()
8531 * Maramba on-board parts. in niu_get_and_validate_port()
8533 if (!parent->num_ports) in niu_get_and_validate_port()
8534 parent->num_ports = 4; in niu_get_and_validate_port()
8539 if (np->port >= parent->num_ports) in niu_get_and_validate_port()
8540 return -ENODEV; in niu_get_and_validate_port()
8566 parent->index, id, in phy_record()
8571 if (p->cur[type] >= NIU_MAX_PORTS) { in phy_record()
8573 return -EINVAL; in phy_record()
8575 idx = p->cur[type]; in phy_record()
8576 p->phy_id[type][idx] = id; in phy_record()
8577 p->phy_port[type][idx] = phy_port; in phy_record()
8578 p->cur[type] = idx + 1; in phy_record()
8586 for (i = 0; i < p->cur[PHY_TYPE_PMA_PMD]; i++) { in port_has_10g()
8587 if (p->phy_port[PHY_TYPE_PMA_PMD][i] == port) in port_has_10g()
8590 for (i = 0; i < p->cur[PHY_TYPE_PCS]; i++) { in port_has_10g()
8591 if (p->phy_port[PHY_TYPE_PCS][i] == port) in port_has_10g()
8618 if (p->cur[PHY_TYPE_MII]) in count_1g_ports()
8619 *lowest = p->phy_port[PHY_TYPE_MII][0]; in count_1g_ports()
8621 return p->cur[PHY_TYPE_MII]; in count_1g_ports()
8626 int num_ports = parent->num_ports; in niu_n2_divide_channels()
8630 parent->rxchan_per_port[i] = (16 / num_ports); in niu_n2_divide_channels()
8631 parent->txchan_per_port[i] = (16 / num_ports); in niu_n2_divide_channels()
8634 parent->index, i, in niu_n2_divide_channels()
8635 parent->rxchan_per_port[i], in niu_n2_divide_channels()
8636 parent->txchan_per_port[i]); in niu_n2_divide_channels()
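	/* Concretely: 16 RX and 16 TX channels split evenly, so a 2-port
	 * N2 gets 8 + 8 per port and a 4-port configuration gets 4 + 4.
	 */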
	int num_ports = parent->num_ports;
	/* ... */
	rx_chans_per_10g = (NIU_NUM_RXCHAN -
	/* ... */
	tx_chans_per_10g = (NIU_NUM_TXCHAN -
	/* ... */
	for (i = 0; i < num_ports; i++) {
		int type = phy_decode(parent->port_phy, i);

		if (type == PORT_TYPE_10G) {
			parent->rxchan_per_port[i] = rx_chans_per_10g;
			parent->txchan_per_port[i] = tx_chans_per_10g;
		} else {
			parent->rxchan_per_port[i] = rx_chans_per_1g;
			parent->txchan_per_port[i] = tx_chans_per_1g;
		}
		/* ... */
			parent->index, i,
			parent->rxchan_per_port[i],
			parent->txchan_per_port[i]);
		tot_rx += parent->rxchan_per_port[i];
		tot_tx += parent->txchan_per_port[i];
	}
	/* ... */
		parent->index, tot_rx);
		/* ... */
			parent->rxchan_per_port[i] = 1;
	/* ... */
		parent->index, tot_tx);
		/* ... */
			parent->txchan_per_port[i] = 1;
	/* ... */
		parent->index, tot_rx, tot_tx);
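
/* niu_divide_rdc_groups(): carve the RDC tables up between the ports;
 * each port gets a contiguous run of tables whose slots are filled from
 * that port's RX DMA channel range, and rdc_default records the port's
 * first (default) channel.
 */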
	int i, num_ports = parent->num_ports;
	/* ... */
	for (i = 0; i < num_ports; i++) {
		struct niu_rdc_tables *tp = &parent->rdc_group_cfg[i];
		int grp, num_channels = parent->rxchan_per_port[i];
		/* ... */
		tp->first_table_num = rdc_group;
		tp->num_tables = rdc_groups_per_port;
		/* ... */
		for (grp = 0; grp < tp->num_tables; grp++) {
			struct rdc_table *rt = &tp->tables[grp];
			/* ... */
				parent->index, i, tp->first_table_num + grp);
			/* ... */
				rt->rxdma_channel[slot] =
					/* ... */;
				pr_cont("%d ", rt->rxdma_channel[slot]);
			/* ... */
		parent->rdc_default[i] = rdc_channel_base;
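
/* walk_phys(): translate the PHY probe results (or a known board model
 * string) into a port_phy bitmap of per-port PORT_TYPE_10G/PORT_TYPE_1G
 * encodings, fixing up plat_type and num_ports for boards that cannot
 * be probed reliably.
 */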
	struct phy_probe_info *info = &parent->phy_probe_info;
	/* ... */
	if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
	    !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
		/* ... */
		parent->plat_type = PLAT_TYPE_ATCA_CP3220;
		parent->num_ports = 4;
		/* ... */
	} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
		/* ... */
		parent->num_ports = 2;
		/* ... */
	} else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
		   (parent->plat_type == PLAT_TYPE_NIU)) {
		/* ... */
		if (np->flags & NIU_FLAGS_10G) {
	/* ... */
	parent->plat_type = PLAT_TYPE_VF_P0;
	/* ... */
	parent->plat_type = PLAT_TYPE_VF_P1;
	/* ... */
	val = phy_encode(PORT_TYPE_10G, np->port);
	/* ... */
	parent->plat_type = PLAT_TYPE_VF_P0;
	/* ... */
	parent->plat_type = PLAT_TYPE_VF_P1;
	/* ... */
	parent->plat_type = PLAT_TYPE_VF_P0;
	/* ... */
	parent->plat_type = PLAT_TYPE_VF_P1;
	/* ... */
	return -EINVAL;
	/* ... */
	parent->port_phy = val;

	if (parent->plat_type == PLAT_TYPE_NIU)
		/* ... */
	/* ... */
	return -EINVAL;
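
/* niu_probe_ports(): settle port_phy (via walk_phys()) the first time a
 * port of this parent probes, then fail if the result is still invalid.
 */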
	struct niu_parent *parent = np->parent;
	/* ... */
	if (parent->port_phy == PORT_PHY_UNKNOWN) {
		/* ... */
	}

	if (parent->port_phy == PORT_PHY_INVALID)
		return -EINVAL;
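
/* niu_classifier_swstate_init(): software view of the classifier.  Each
 * port owns an equal slice of the TCAM entries; h1_init/h2_init seed
 * the two RX flow hash functions.
 */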
	struct niu_classifier *cp = &np->clas;

	cp->tcam_top = (u16) np->port;
	cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
	cp->h1_init = 0xffffffff;
	cp->h2_init = 0xffff;
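
/* niu_link_config_init(): advertise everything from 10baseT-half up and
 * leave speed/duplex unresolved until autoneg completes.  The MAC
 * loopback settings appear to sit under a disabled preprocessor branch,
 * restored below as #if 0.
 */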
	struct niu_link_config *lp = &np->link_config;

	lp->advertising = (ADVERTISED_10baseT_Half |
	/* ... */
	lp->speed = lp->active_speed = SPEED_INVALID;
	lp->duplex = DUPLEX_FULL;
	lp->active_duplex = DUPLEX_INVALID;
	lp->autoneg = 1;
#if 0	/* assumed: debug-only forced-loopback configuration */
	lp->loopback_mode = LOOPBACK_MAC;
	lp->active_speed = SPEED_10000;
	lp->active_duplex = DUPLEX_FULL;
#else
	lp->loopback_mode = LOOPBACK_DISABLED;
#endif
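
/* niu_init_mac_ipp_pcs_base(): compute the per-port MAC/IPP/PCS/XPCS
 * register block offsets.  Ports 0 and 1 are XMACs, ports 2 and 3 are
 * BMACs with no XPCS block (~0UL marks it absent).
 */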
	switch (np->port) {
	case 0:
		np->mac_regs = np->regs + XMAC_PORT0_OFF;
		np->ipp_off = 0x00000;
		np->pcs_off = 0x04000;
		np->xpcs_off = 0x02000;
		break;

	case 1:
		np->mac_regs = np->regs + XMAC_PORT1_OFF;
		np->ipp_off = 0x08000;
		np->pcs_off = 0x0a000;
		np->xpcs_off = 0x08000;
		break;

	case 2:
		np->mac_regs = np->regs + BMAC_PORT2_OFF;
		np->ipp_off = 0x04000;
		np->pcs_off = 0x0e000;
		np->xpcs_off = ~0UL;
		break;

	case 3:
		np->mac_regs = np->regs + BMAC_PORT3_OFF;
		np->ipp_off = 0x0c000;
		np->pcs_off = 0x12000;
		np->xpcs_off = ~0UL;
		break;

	default:
		dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n",
			np->port);
		return -EINVAL;
	}
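
/* niu_try_msix(): request one MSI-X vector per RX and TX channel plus
 * extras for the non-DMA interrupts (three on port 0, apparently MAC,
 * MIF and SYSERR; one elsewhere).  On failure, NIU_FLAGS_MSIX is cleared
 * and the driver falls back to the legacy device interrupt.
 */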
	struct niu_parent *parent = np->parent;
	struct pci_dev *pdev = np->pdev;
	/* ... */
	first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
	for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
		ldg_num_map[i] = first_ldg + i;	/* assumed loop body */

	num_irqs = (parent->rxchan_per_port[np->port] +
		    parent->txchan_per_port[np->port] +
		    (np->port == 0 ? 3 : 1));
	BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
	/* ... */
		np->flags &= ~NIU_FLAGS_MSIX;
	/* ... */
	np->flags |= NIU_FLAGS_MSIX;
	for (i = 0; i < num_irqs; i++)
		np->ldg[i].irq = msi_vec[i].vector;
	np->num_ldg = num_irqs;
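
/* niu_n2_irq_init(): on N2/NIU the LDG numbers come from the OF
 * "interrupts" property and the vectors from op->archdata.irqs[].  The
 * trailing return -EINVAL appears to belong to the stub variant compiled
 * when OF/sparc support is absent.
 */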
	struct platform_device *op = np->op;
	/* ... */
	int_prop = of_get_property(op->dev.of_node, "interrupts", NULL);
	if (!int_prop)
		return -ENODEV;

	for (i = 0; i < op->archdata.num_irqs; i++) {
		/* ... */
		np->ldg[i].irq = op->archdata.irqs[i];
	}

	np->num_ldg = op->archdata.num_irqs;
	/* ... */
	return -EINVAL;
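
/* niu_ldg_init(): set up the logical device groups.  The default is a
 * single LDG on the legacy interrupt; PLAT_TYPE_NIU takes its mapping
 * from the firmware, everything else tries MSI-X.  Logical devices
 * (MAC, then the port's RX and TX channels) are then dealt out to the
 * LDGs with a rotor that wraps back to the first group.
 */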
	struct niu_parent *parent = np->parent;
	/* ... */
	np->num_ldg = 1;
	np->ldg[0].irq = np->dev->irq;
	if (parent->plat_type == PLAT_TYPE_NIU) {
		/* ... */
	}
	port = np->port;
	for (i = 0; i < np->num_ldg; i++) {
		struct niu_ldg *lp = &np->ldg[i];

		netif_napi_add(np->dev, &lp->napi, niu_poll, 64);
		lp->np = np;
		lp->ldg_num = ldg_num_map[i];
		lp->timer = 2; /* XXX */
		/* ... */
		if (np->parent->plat_type != PLAT_TYPE_NIU) {
			err = niu_set_ldg_sid(np, lp->ldg_num, port, i);
			/* ... */
		}
	}
	/* ... */
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;
	/* ... */
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;
	/* ... */
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;
	/* ... */
	for (i = 0; i < port; i++)	/* assumed loop header */
		first_chan += parent->rxchan_per_port[i];
	num_chan = parent->rxchan_per_port[port];
	/* ... */
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;
	/* ... */
	for (i = 0; i < port; i++)
		first_chan += parent->txchan_per_port[i];
	num_chan = parent->txchan_per_port[port];
	/* ... */
	if (ldg_rotor == np->num_ldg)
		ldg_rotor = 0;
/* niu_ldg_free() */
	if (np->flags & NIU_FLAGS_MSIX)
		pci_disable_msix(np->pdev);
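
/* niu_get_of_props(): pull phy-type, local-mac-address and model from
 * the OF device node (the PCI device's node on non-NIU platforms) and
 * validate them before touching the hardware.
 */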
	struct net_device *dev = np->dev;
	/* ... */
	if (np->parent->plat_type == PLAT_TYPE_NIU)
		dp = np->op->dev.of_node;
	else
		dp = pci_device_to_OF_node(np->pdev);

	phy_type = of_get_property(dp, "phy-type", NULL);
	if (!phy_type) {
		netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp);
		return -EINVAL;
	}
	/* ... */
		return -ENODEV;

	strcpy(np->vpd.phy_type, phy_type);

	if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
		netdev_err(dev, "%pOF: Illegal phy string [%s]\n",
			   dp, np->vpd.phy_type);
		return -EINVAL;
	}

	mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
	if (!mac_addr) {
		netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n",
			   dp);
		return -EINVAL;
	}
	if (prop_len != dev->addr_len) {
		netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
			   dp, prop_len);
	}
	memcpy(dev->dev_addr, mac_addr, dev->addr_len);
	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
		netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
		netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
		return -EINVAL;
	}
	/* ... */
		strcpy(np->vpd.model, model);

	if (of_find_property(dp, "hot-swappable-phy", NULL)) {
		np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
			      NIU_FLAGS_HOTPLUG_PHY); /* assumed flag */
	}
	/* ... */
	return -EINVAL;
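
/* niu_get_invariants(): gather everything probe needs: OF properties
 * when available, the register block offsets, then VPD (with SPROM as a
 * fallback) to determine the port configuration.
 */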
	if (err == -ENODEV)
		/* ... */

	/* ... */
	if (np->parent->plat_type == PLAT_TYPE_NIU)
		return -EINVAL;

	/* ... */
	netif_printk(np, probe, KERN_DEBUG, np->dev,
	/* ... */

	if (np->flags & NIU_FLAGS_VPD_VALID) {
		/* ... */
	}

	if (!(np->flags & NIU_FLAGS_VPD_VALID)) {
		/* ... */
	}
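
/* sysfs attributes exported on the "niu-board" platform device: per-port
 * PHY types, the platform type, RX/TX channel counts and the port count.
 */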
static ssize_t show_port_phy(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
	u32 port_phy = p->port_phy;
	/* ... */
	for (i = 0; i < p->num_ports; i++) {
	/* ... */
	return buf - orig_buf;
}

static ssize_t show_plat_type(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
	/* ... */
	switch (p->plat_type) {
	/* ... */
}

static ssize_t __show_chan_per_port(struct device *dev,
				    struct device_attribute *attr,
				    char *buf, int rx)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
	/* ... */
	arr = (rx ? p->rxchan_per_port : p->txchan_per_port);
	for (i = 0; i < p->num_ports; i++) {
	/* ... */
	return buf - orig_buf;
}
static ssize_t show_rxchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return __show_chan_per_port(dev, attr, buf, 1);
}

static ssize_t show_txchan_per_port(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	/* rx == 0 selects txchan_per_port; passing 1 here duplicated RX */
	return __show_chan_per_port(dev, attr, buf, 0);
}
static ssize_t show_num_ports(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct platform_device *plat_dev = to_platform_device(dev);
	struct niu_parent *p = dev_get_platdata(&plat_dev->dev);

	return sprintf(buf, "%d\n", p->num_ports);
}
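
/* niu_new_parent(): allocate the shared per-chip state, register the
 * "niu-board" platform device that carries the sysfs attributes above,
 * and initialize the TCAM/flow-key defaults and the LDG map.
 */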
	plat_dev = platform_device_register_simple("niu-board", niu_parent_index,
						   NULL, 0);
	/* ... */
		int err = device_create_file(&plat_dev->dev,
	/* ... */
	p->index = niu_parent_index++;

	plat_dev->dev.platform_data = p;
	p->plat_dev = plat_dev;

	memcpy(&p->id, id, sizeof(*id));
	p->plat_type = ptype;
	INIT_LIST_HEAD(&p->list);
	atomic_set(&p->refcnt, 0);
	list_add(&p->list, &niu_parent_list);
	spin_lock_init(&p->lock);

	p->rxdma_clock_divider = 7500;

	p->tcam_num_entries = NIU_PCI_TCAM_ENTRIES;
	if (p->plat_type == PLAT_TYPE_NIU)
		p->tcam_num_entries = NIU_NONPCI_TCAM_ENTRIES;
	/* ... */
		int index = i - CLASS_CODE_USER_PROG1;
		/* ... */
		p->tcam_key[index] = TCAM_KEY_TSEL;
		p->flow_key[index] = (FLOW_KEY_IPSA |
	/* ... */
		p->ldg_map[i] = LDG_INVALID;
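
/* niu_get_parent()/niu_put_parent(): look the shared parent up by id
 * (creating it on first use), link each port into its sysfs directory,
 * and tear everything down when the last port drops its reference.
 */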
	int port = np->port;
	/* ... */
		if (!memcmp(id, &tmp->id, sizeof(*id))) {
	/* ... */
		err = sysfs_create_link(&p->plat_dev->dev.kobj,
					&np->device->kobj,
					port_name);
		/* ... */
			p->ports[port] = np;
			atomic_inc(&p->refcnt);
/* niu_put_parent() */
	struct niu_parent *p = np->parent;
	u8 port = np->port;
	/* ... */
	BUG_ON(!p || p->ports[port] != np);

	netif_printk(np, probe, KERN_DEBUG, np->dev,
	/* ... */
	sysfs_remove_link(&p->plat_dev->dev.kobj, port_name);

	p->ports[port] = NULL;
	np->parent = NULL;

	if (atomic_dec_and_test(&p->refcnt)) {
		list_del(&p->list);
		platform_device_unregister(p->plat_dev);
	}
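
/* The niu_ops vtable abstracts DMA so the same core code can drive both
 * the PCI chip (the dma_* wrappers below) and the on-chip N2 variant
 * (the niu_phys_* versions further down).
 */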
static void *niu_pci_alloc_coherent(struct device *dev, size_t size,
	/* ... */
	ret = dma_alloc_coherent(dev, size, &dh, flag);
	/* ... */

static void niu_pci_free_coherent(struct device *dev, size_t size,
	/* ... */
	dma_free_coherent(dev, size, cpu_addr, handle);

static u64 niu_pci_map_page(struct device *dev, struct page *page,
	/* ... */
	return dma_map_page(dev, page, offset, size, direction);

static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
	/* ... */
	dma_unmap_page(dev, dma_address, size, direction);

static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
	/* ... */
	return dma_map_single(dev, cpu_addr, size, direction);

static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
	/* ... */
	dma_unmap_single(dev, dma_address, size, direction);
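
/* niu_alloc_and_init(): allocate the multiqueue netdev (one TX queue per
 * possible channel) and wire up the niu private struct with its bus
 * handles, ops vtable and port number.
 */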
	struct net_device *dev;
	/* ... */
	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
	if (!dev)
		return NULL;

	SET_NETDEV_DEV(dev, gen_dev);

	np = netdev_priv(dev);
	np->dev = dev;
	np->pdev = pdev;
	np->op = op;
	np->device = gen_dev;
	np->ops = ops;

	np->msg_enable = niu_debug;

	spin_lock_init(&np->lock);
	INIT_WORK(&np->reset_task, niu_reset_task);

	np->port = port;

	return dev;
static void niu_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &niu_netdev_ops;
	dev->ethtool_ops = &niu_ethtool_ops;
	dev->watchdog_timeo = NIU_TX_TIMEOUT;
}
/* niu_device_announce() */
	struct net_device *dev = np->dev;

	pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);

	if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
		/* ... */
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	} else {
		/* ... */
			dev->name,
			(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
			(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
			(np->flags & NIU_FLAGS_FIBER ? "FIBER" :
			 (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
			  /* ... */
			(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
			 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
			np->vpd.phy_type);
	}
static void niu_set_basic_features(struct net_device *dev)
{
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;
}
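
/* niu_pci_init_one(): PCI probe.  Enable the function, claim its BARs,
 * require PCI Express, allocate the netdev and parent, prefer a 44-bit
 * DMA mask before falling back to 32-bit, map BAR 0 and register the
 * netdev.  Error paths unwind in reverse order.
 */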
	struct net_device *dev;
	/* ... */
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
	/* ... */
		dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
		err = -ENODEV;
	/* ... */
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
	/* ... */
		dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
		err = -ENODEV;
	/* ... */
	dev = niu_alloc_and_init(&pdev->dev, pdev, NULL,
				 &niu_pci_ops, PCI_FUNC(pdev->devfn));
	if (!dev) {
		err = -ENOMEM;
		/* ... */
	}
	np = netdev_priv(dev);
	/* ... */
	parent_id.pci.domain = pci_domain_nr(pdev->bus);
	parent_id.pci.bus = pdev->bus->number;
	parent_id.pci.device = PCI_SLOT(pdev->devfn);

	np->parent = niu_get_parent(np, &parent_id,
				    /* ... */);
	if (!np->parent) {
		err = -ENOMEM;
		/* ... */
	}
	/* ... */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
	if (!err)
		dev->features |= NETIF_F_HIGHDMA;
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			/* ... */
		}
	}

	niu_set_basic_features(dev);

	dev->priv_flags |= IFF_UNICAST_FLT;

	np->regs = pci_ioremap_bar(pdev, 0);
	if (!np->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		/* ... */
	}

	dev->irq = pdev->irq;

	/* MTU range: 68 - 9216 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = NIU_MAX_MTU;

	niu_assign_netdev_ops(dev);
	/* ... */
	if (err != -ENODEV)
		dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
	/* ... */
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		/* ... */
	}

	pci_set_drvdata(pdev, dev);
	/* ... */
	if (np->regs) {
		iounmap(np->regs);
		np->regs = NULL;
	}
	/* ... */
	free_netdev(dev);
/* niu_pci_remove_one() */
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);
		if (np->regs) {
			iounmap(np->regs);
			np->regs = NULL;
		}
		/* ... */
		free_netdev(dev);
	/* ... */
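
/* Power management: suspend quiesces the reset worker, timer, interrupts
 * and hardware under np->lock; resume re-attaches the device, re-inits
 * the hardware and restarts the timer.
 */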
/* niu_suspend() */
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	flush_work(&np->reset_task);
	/* ... */
	del_timer_sync(&np->timer);

	spin_lock_irqsave(&np->lock, flags);
	/* ... */
	spin_unlock_irqrestore(&np->lock, flags);

	netif_device_detach(dev);

	spin_lock_irqsave(&np->lock, flags);
	/* ... */
	spin_unlock_irqrestore(&np->lock, flags);
/* niu_resume() */
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct niu *np = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	spin_lock_irqsave(&np->lock, flags);
	/* ... */
	np->timer.expires = jiffies + HZ;
	add_timer(&np->timer);
	/* ... */
	spin_unlock_irqrestore(&np->lock, flags);
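
/* niu_phys_* are the PLAT_TYPE_NIU counterparts of the niu_pci_* ops
 * above; on that platform the device appears to use physical addressing
 * directly, so no IOMMU mapping is involved (bodies elided here).
 */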
static void *niu_phys_alloc_coherent(struct device *dev, size_t size,
	/* ... */
static void niu_phys_free_coherent(struct device *dev, size_t size,
	/* ... */
static u64 niu_phys_map_page(struct device *dev, struct page *page,
	/* ... */
static void niu_phys_unmap_page(struct device *dev, u64 dma_address,
	/* ... */
static u64 niu_phys_map_single(struct device *dev, void *cpu_addr,
	/* ... */
static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
	/* ... */
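
/* niu_of_probe(): Open Firmware probe for the on-chip NIU.  It requires
 * the OF "reg" property, allocates the netdev with the niu_phys ops,
 * and maps three register ranges: the main registers plus the two
 * "vregs" virtualization windows.
 */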
	struct net_device *dev;
	/* ... */
	reg = of_get_property(op->dev.of_node, "reg", NULL);
	if (!reg) {
		dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n",
			op->dev.of_node);
		return -ENODEV;
	}

	dev = niu_alloc_and_init(&op->dev, NULL, op,
				 /* ... */);
	if (!dev) {
		err = -ENOMEM;
		/* ... */
	}
	np = netdev_priv(dev);
	/* ... */
	parent_id.of = of_get_parent(op->dev.of_node);

	np->parent = niu_get_parent(np, &parent_id,
				    /* ... */);
	if (!np->parent) {
		err = -ENOMEM;
		/* ... */
	}

	niu_set_basic_features(dev);

	np->regs = of_ioremap(&op->resource[1], 0,
			      resource_size(&op->resource[1]),
			      /* ... */);
	if (!np->regs) {
		dev_err(&op->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		/* ... */
	}

	np->vir_regs_1 = of_ioremap(&op->resource[2], 0,
				    resource_size(&op->resource[2]),
				    "niu vregs-1");
	if (!np->vir_regs_1) {
		dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
		err = -ENOMEM;
		/* ... */
	}

	np->vir_regs_2 = of_ioremap(&op->resource[3], 0,
				    resource_size(&op->resource[3]),
				    "niu vregs-2");
	if (!np->vir_regs_2) {
		dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
		err = -ENOMEM;
		/* ... */
	}

	niu_assign_netdev_ops(dev);
	/* ... */
	if (err != -ENODEV)
		dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
	/* ... */
	err = register_netdev(dev);
	if (err) {
		dev_err(&op->dev, "Cannot register net device, aborting\n");
		/* ... */
	}

	platform_set_drvdata(op, dev);
	/* ... */
	if (np->vir_regs_1) {
		of_iounmap(&op->resource[2], np->vir_regs_1,
			   resource_size(&op->resource[2]));
		np->vir_regs_1 = NULL;
	}

	if (np->vir_regs_2) {
		of_iounmap(&op->resource[3], np->vir_regs_2,
			   resource_size(&op->resource[3]));
		np->vir_regs_2 = NULL;
	}

	if (np->regs) {
		of_iounmap(&op->resource[1], np->regs,
			   resource_size(&op->resource[1]));
		np->regs = NULL;
	}
	/* ... */
	free_netdev(dev);
/* niu_of_remove() */
	struct net_device *dev = platform_get_drvdata(op);

	if (dev) {
		struct niu *np = netdev_priv(dev);

		unregister_netdev(dev);

		if (np->vir_regs_1) {
			of_iounmap(&op->resource[2], np->vir_regs_1,
				   resource_size(&op->resource[2]));
			np->vir_regs_1 = NULL;
		}

		if (np->vir_regs_2) {
			of_iounmap(&op->resource[3], np->vir_regs_2,
				   resource_size(&op->resource[3]));
			np->vir_regs_2 = NULL;
		}

		if (np->regs) {
			of_iounmap(&op->resource[1], np->regs,
				   resource_size(&op->resource[1]));
			np->regs = NULL;
		}
		/* ... */
		free_netdev(dev);
	}